Columns:
query: string (length 9 to 3.4k)
document: string (length 9 to 87.4k)
metadata: dict
negatives: sequence (length 4 to 101)
negative_scores: sequence (length 4 to 101)
document_score: string (length 3 to 10)
document_rank: string (102 distinct values)
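The columns follow a standard retrieval-triplet layout: a natural-language query, one positive code document, a list of negative documents, and string-encoded scores and rank for the positive. As a quick orientation, here is a minimal loading sketch in Python; the JSON Lines path is hypothetical, and the string-to-number casts are assumptions taken from the schema above rather than from any official loader.

from datasets import load_dataset

# Hypothetical local export of the records shown below; substitute the real source.
ds = load_dataset("json", data_files="retrieval_triplets.jsonl", split="train")

row = ds[0]
query = row["query"]                  # natural-language description of the code
positive = row["document"]            # code snippet that answers the query
negatives = row["negatives"]          # 4 to 101 non-matching snippets
neg_scores = [float(s) for s in row["negative_scores"]]  # scores stored as strings
doc_score = float(row["document_score"])                 # stored as a string
doc_rank = int(row["document_rank"])                     # stored as a string, e.g. "23"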
query: takes csv files, parses with panda and returns result

document:
def import_data(csv_file):
    # skips bad lines
    data = pd.read_csv(csv_file, error_bad_lines=False)
    return data

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives:
[ "def read_csv():", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def loadCSV(input_file):", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)", "def parseFile()-> None:\n logging.info(f\"Parsing file with Pandas {getTime()}\")\n with open(DATA_FILE) as f:\n data = pd.read_csv(f)\n db = connect(\"result.db\")\n\n data.to_sql(\"data\",db,if_exists=\"replace\")\n\n result = pd.DataFrame({\"Uniqe Countries\":[len(set(data[\"location\"]))]})\n\n with open(RESULT_FILE,\"w\") as f:\n f.write(result.to_csv(index=False))\n logging.info(f\"Finsied parsing {getTime()}\")", "def parse(file_name):\n \n return pd.read_csv(file_name, na_values = '---')", "def withPandas()-> None:\n \n def parseFile()-> None:\n \"\"\"\n Parses the data.csv file, creates the local DB file and the result file\n \"\"\"\n logging.info(f\"Parsing file with Pandas {getTime()}\")\n with open(DATA_FILE) as f:\n data = pd.read_csv(f)\n db = connect(\"result.db\")\n\n data.to_sql(\"data\",db,if_exists=\"replace\")\n\n result = pd.DataFrame({\"Uniqe Countries\":[len(set(data[\"location\"]))]})\n\n with open(RESULT_FILE,\"w\") as f:\n f.write(result.to_csv(index=False))\n logging.info(f\"Finsied parsing {getTime()}\")\n parseFile()", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and 
not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df", "def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r", "def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def merge_data(csv_files, delimiter = ',', parse_dates = ['Date']):\n \n for csv in csv_files:\n \n # date formats in source data is slightly different (/2019 vs. 
/19), \n # TODO: check for better method to catch this error\n \n \n try:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%Y'), delimiter=delimiter)\n \n except:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%y'), delimiter=delimiter)\n \n \n\n \n df_new['season'] = df_new.Date.max().year # add season column, defined as the year of the last matchday\n df_new['first_match_day'] = False \n df_new.loc[0:9, 'first_match_day'] = True # declare first 10 games as first match day\n df_new['matchDay'] = 0\n \n try:\n df = df.append(df_new,sort=False)\n except:\n df = df_new\n \n return df", "def loader(filename,sep=',',rowskip=[], colskip=[], axis=1,names=1,fromstring=0):\n\n #manages excpetions to the csv file incase of missing data\n if (type(filename)==str) and (fromstring==1):\n iterable=filename.strip('\\n').split('\\n')\n content=np.array([i for i in csv.reader(iterable,delimiter=sep)])\n elif type(filename)==np.ndarray:\n content=filename\n else:\n content=np.array([i for i in\\\n csv.reader(open(filename,'r'),delimiter=sep)])\n #content=np.genfromtxt(filename,delimiter=sep,dtype=str)\n\n if rowskip:\n #rowskip.sort(reverse=True)\n content=np.delete(content,rowskip,0)\n #for i in rowskip: content.pop(i)\n\n if colskip:\n #colskip.sort(reverse=True)\n content=np.delete(content,colskip,1)\n #for i in colskip: content.pop(i)\n\n if axis==0: # if the file oriented column-wise\n #content=list(map(list,zip(*content)))\n content=content.T\n\n\n\n if names is 0:\n variables=np.arange(content.shape[1]).tolist()\n offset=0\n else:\n variables=content[0].tolist()\n offset=1\n\n try:\n content=np.array([conv_col(col) for col in\n content[offset:].T],dtype='object')\n arity=np.array([np.unique(i).size for i in content])\n return dataset(variables,content.T,arity)\n except ValueError: \n print( 'Data could not be loaded, failed converting to float.')\n return content", "def csv_parser(lines): \n\n data_points = []\n for line in lines:\n items = line.strip().split(\",\")\n try: #will fail on header line in file\n data_points.append(map(float, items[1:])) #first item is the label\n except ValueError: #must be the header\n continue\n return data_points", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' 
% self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)", "def nlp_tc_df_parser(path : str, *args) -> DataFrame:\n data_frame = reduce(lambda x, y: _parser(y, x), args, read_csv(path)) if len(args) > 0 else read_csv(path)\n return data_frame", "def parse_csv(csv_file):\n if os.path.isfile(csv_file) == True:\n num_lines = sum(1 for line in open(csv_file))\n if num_lines > 1:\n try:\n data = pd.read_csv(csv_file, index_col=False)\n data.insert(0, 'id', range(1, 1 + len(data)))\n return(data)\n except pd.parser.CParserError, err:\n message = \"Can't parse REDCap data. Check CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(3)\n else:\n message = \"CSV file does not contain data: \" + csv_file\n print(message)\n logging.warning(message)\n return(None)\n else:\n message = \"Can't read CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(4)", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def parsing(self, data_path, header=0):\n df_content = []\n csvfiles = glob.glob(data_path)\n selected_cols = list(self.data_features)\n selected_cols.append('workload.type')\n selected_cols.append('workload.appname')\n\n for csv in csvfiles:\n data = pd.read_csv(csv, index_col=None, header=header, usecols=selected_cols)\n data[self.data_features] = self.abnormal_detection(data[self.data_features])\n df_content.append(data.dropna(axis=0))\n self.dataset = pd.concat(df_content, sort=False)", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def parse_csv_file(self, csv_file: str):\n try:\n df = pd.read_csv(csv_file)\n\n if not set(['Question', 'Answer']).issubset(df.columns):\n raise BadCSVFile(\n \"CSV file does not contain ['Question', 'Answer'] columns.\")\n\n df.dropna(inplace=True)\n\n except Exception as e:\n raise BadCSVFile(\n \"Error while reading the csv file. 
Please check the path of the file or the file might be curropted.\")\n\n return df", "def get_data(filename):\r\n return pd.read_csv(filename)", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def parse_csv_files(csv_files, **kwargs):\n\n per_token_savings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n slip_price_diff_splits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n\n for file in csv_files:\n per_file_base_prices = {}\n for _, _, trade_size, token, exchange, exchange_price, _, totle_price, pct_savings, splits, _ in csv_row_gen(file, **kwargs):\n if not per_file_base_prices.get(token): # this assumes prices recorded from lowest to highest for a token\n per_file_base_prices[token] = totle_price # should be same for all aggs, but is slightly different sometimes\n\n slip = (totle_price / per_file_base_prices[token]) - 1.0 # should be 0 for the lowest trade_size\n # i.e. slip = (totle_price - per_file_base_prices[token]) / per_file_base_prices[token]\n\n slip = 0.0 if slip < 0.0 and slip > -0.00001 else slip # get rid of -0.0000\n price_diff = (totle_price - exchange_price) / exchange_price\n\n slip_price_diff_splits[token][trade_size][exchange].append((slip, price_diff, splits))\n per_token_savings[token][trade_size][exchange].append(pct_savings)\n\n\n return per_token_savings, slip_price_diff_splits", "def process_file_pd(file_name):\n try:\n df = pd.read_csv(file_name)\n return df\n except OSError as e:\n print('Error' + str(e))\n raise", "def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()", "def get_data(self, csv_file):\n pass", "def read_csv_file(self):\n pass", "def parse_csv(filename):\n cases = []\n\n with open(filename) as file:\n # Read the rest of the lines\n for line in file:\n cases.append(Case(line))\n\n return cases", "def read_partslist_csv(csv: str)->pd.DataFrame:\n try:\n p_df = pd.read_csv(csv, sep='\\t', header=0, engine='python', na_values='', skipfooter=3,\n dtype={'BLItemNo': str, 'BLColorId': int, 'LDrawColorId': int, 'Qty': int})\n p_df = p_df.fillna({'BLColorId': '', 'Qty': 0})\n p_df = p_df.rename(mapper={'BLItemNo': 'ItemId', 'BLColorId': 'Color'}, axis=1)\n p_df = p_df.drop(columns=['ElementId', 'LdrawId', 'LDrawColorId'])\n return p_df\n except FileNotFoundError as e:\n print(e)\n return pd.DataFrame()", "def parse_file(args):\n\n data_types = []\n headers = []\n\n with open(args.input, \"r\") as csvfile:\n reader = csv.reader(csvfile)\n have_columns = False\n\n for line in reader:\n if have_columns:\n index = 0\n for col in 
line:\n if col != \"\": \n if data_types[index] != TYPE_STRING and data_types[index] != TYPE_FLOAT:\n data_types[index] = get_data_type(col)\n # else:\n # data_types[index] = TYPE_STRING\n index += 1\n\n else:\n headers = line \n for col in line:\n data_types.append(\"\")\n have_columns = True \n\n return headers, data_types", "def get_data_from_csv_full_path(filepath, datatypes, date_column_list):\n\n dataframe = pandas.read_csv(filepath, dtype=datatypes, date_parser=pandas.to_datetime, parse_dates=date_column_list)\n\n return dataframe", "def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table", "def parse_csv(db: sqlite3.Connection, symbols_meta):\n\n logger.info('Parsing csv files for days.')\n\n csv_directory = settings.DATA_DIRECTORY / 'csv'\n\n # Get list of days by enumerating csv files in directory.\n csv_list = sorted(os.listdir(csv_directory), reverse=False)\n for jdx, f in enumerate(csv_list):\n csv_path = csv_directory / f\n if csv_path.is_file and csv_path.suffix == '.csv':\n\n day = csv_path.name[:-4]\n day = '-'.join((day[:4], day[4:6], day[6:8]))\n\n db.execute('INSERT INTO iex_days(date) VALUES(?);', (day,))\n db.commit()\n day_id = db.execute('SELECT last_insert_rowid();').fetchone()[0]\n\n logger.info(f'Found day {jdx+1} of {len(csv_list)} : {day} @ {f}.')\n\n with open(csv_path, 'r') as csv_file:\n reader = csv.reader(csv_file, delimiter=',')\n date_str = '-'.join((f[:4], f[4:6], f[6:8]))\n\n rows = list()\n for idx, row in enumerate(reader):\n timestamp, symbol, price, size = row\n qdl_symbol = symbol.replace('.', '_').replace('-', '_')\n if qdl_symbol in symbols_meta:\n rows.append((date_str, timestamp, qdl_symbol, price, size))\n\n logger.info(f'Storing {len(rows)} of {idx+1} messages to database.')\n\n db.executemany('''\nINSERT INTO iex_trade_reports(day, timestamp, symbol, price, size)\nVALUES(?, ?, ?, ?, ?);\n''', rows)\n db.commit()", "def readATPMatchesParseTime(dirname):\n\tallFiles = glob.glob(dirname + \"/atp_matches_\" + \"20??.csv\")\n\tallFiles = allFiles[:-1] ## avoid 2017 since its incomplete\n\tmatches = pd.DataFrame()\n\tcontainer = list()\n\tfor filen in allFiles:\n\t\tdf = pd.read_csv(filen,\n\t\t\t\t\t\t index_col=None,\n\t\t\t\t\t\t header=0,\n\t\t\t\t\t\t parse_dates=[5],\n\t\t\t\t\t\t encoding = \"ISO-8859-1\",\n\t\t\t\t\t\t date_parser=lambda t:parse(t)) ##errored out here\n\t\tcontainer.append(df)\n\tmatches = pd.concat(container)\n\treturn matches", "def test_parser():\n data = parse_csv(TEST_DATA)\n assert data['2020-01-03'] == ['recycle']\n assert data['2020-01-08'] == ['bio', 'trash']\n assert data['2021-01-09'] == ['christmas']", "def runs_loader(path):\n files = sorted(glob.glob(f\"{path}/*_runs.csv\"))\n df_lis = list(range(len(files)))\n for i, f in enumerate(files):\n try:\n df_lis[i] = pd.read_csv(f, sep=\",\", header=0)\n print('Read runs.csv\\n', f, df_lis[i].shape,\n df_lis[i]['dataset__id'][0], df_lis[i]['pipeline__id'][0])\n except Exception as e:\n print(e)\n continue\n df = pd.concat(df_lis, axis=0, sort=False).reset_index()\n # with pd.option_context('display.max_rows', None,\n # 'display.max_columns', None):\n # msg = tabulate.tabulate(df, headers='keys', tablefmt='psql')\n # print(msg)\n return df", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def parse(self, filename: str, input_format='csv', **kwargs):\n 
if 'delimiter' not in kwargs:\n kwargs['delimiter'] = self._extention_types[input_format]\n if filename.endswith('.tar'):\n with tarfile.open(filename) as tar:\n for member in tar.getmembers():\n f = tar.extractfile(member)\n df = pd.read_csv(f, comment='#', **kwargs) # type: pd.DataFrame\n if member.name == 'nodes.csv':\n self.load_nodes(df)\n elif member.name == 'edges.csv':\n self.load_edges(df)\n else:\n raise Exception('Tar file contains unrecognized member {}'.format(member.name))\n else:\n df = pd.read_csv(filename, comment='#', **kwargs) # type: pd.DataFrame\n self.load(df)", "def parseCsvFile(filename, sep=\",\", delimiter=None):\n col_types = dict(\n fn_name=str,\n nthreads=int,\n with_cg=bool,\n mean=float,\n stddev=float,\n max=float,\n min=float,\n mean_per_nodes=float,\n stddev_per_nodes=float,\n )\n\n seq = pd.read_csv(\n filename,\n header=0,\n sep=sep,\n delim_whitespace=False,\n quoting=2,\n index_col=[0, 1, 2],\n dtype=col_types,\n )\n\n return seq", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def importing(files_list):\n\n dataframes = []\n\n for file in files_list:\n imported_df = pd.read_csv(f'full_data/{file}.csv')(file)\n imported_df.columns = imported_df.columns.str.strip().str.lower()\n dataframes.append(imported_df)\n\n return dataframes", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def parse_csv(csv_path):\n song_list = []\n\n try:\n with open(csv_path, encoding='utf-8') as playlist:\n print(\"Parsing \" + csv_path)\n reader = csv.reader(playlist, delimiter=',')\n next(reader) # skip csv header\n for row in reader:\n song_list.append(row[2] + \" - \" + row[1])\n # todo: parse CSV, then check to see which songs already exist in current dir\n # move non-existent results to new list and return that\n except IndexError as error:\n # consider validating playlists when parsing\n # from API on web server instead\n print(str(error))\n \n return song_list", "def process(df, *args): #input filename\n# try:\n# pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %Z\")\n# pubdate.replace(tzinfo=pytz.timezone(\"GMT\"))\n# # pubdate = pubdate.astimezone(pytz.timezone('EST'))\n# # pubdate.replace(tzinfo=None)\n# except ValueError:\n# pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %z\")\n\n# df = pd.read_csv('20131126.csv')\n ret = []\n for index,row in df.iterrows():\n country_Code = row['Actor1CountryCode']\n sqldate = row['SQLDATE']\n month_year = row['MonthYear']\n tone = row['AvgTone']\n url = row['SOURCEURL']\n news = News(country_Code,sqldate,month_year,tone,url)\n ret.append(news)\n print('\\nThere are %d items in News.'% len(ret))\n return ret", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = 
df.DataFrame()", "def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data", "def parse(raw_file, delimiter):\n\t#open csv file\n\topened_file = open(raw_file)\n\t\n\t#read csv file\n\tcsv_data = csv.reader(opened_file,delimiter=delimiter)\n\t\n\t#build parsed data\n\tparsed_data = []\n\t\n\t#define headers\n\tfields = csv_data.next()\n\t\n\t#Iterate over each row of the csv file, zip together field->value pairs\n\tfor row in csv_data:\n\t\tparsed_data.append(dict(zip(fields, row)))\n\t\n\t#close csv file\n\topened_file.close()\n\t\n\treturn parsed_data", "def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):\n print \"Loading %s and creating DataFrame..\" % catalog\n df_imported = pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)\n print \"..Done\\n----------\"\n return df_imported", "def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP", "def from_csv(self):\n timestamp_logname = \"from_csv_\" + datetime.today().strftime('%Y_%m_%d_%H_%M_%S')\n csv_files = [f for f in self.args.files if f.endswith('.csv')]\n if not csv_files:\n self.logger.error(\"No CSV files found.\")\n return False\n\n # Create an instance of the Ingestor class with common options set.\n ingestor = Ingestor(**self.options)\n\n # Ingest from each CSV file.\n for csv_file in csv_files:\n data_groups = Ingestor.process_csv(csv_file)\n for mask, routes, deployment_number in data_groups:\n ingestor.load_queue(mask, routes, deployment_number)\n ingestor.ingest_from_queue()\n\n # Write out any failed ingestions from the entire batch to a new CSV file.\n if ingestor.failed_ingestions:\n ingestor.write_failures_to_csv(timestamp_logname)\n\n self.logger.info('')\n self.logger.info(\"Ingestion completed.\")\n return True", "def load_results_runs(filename):\n # Load the necessary columns from the csv into panda\n data = pd.read_csv(filename)\n\n # Cleans the data\n data = data[['Run', 'Total Distance']]\n data['Run'] = pd.to_numeric(data['Run'])\n data['Total Distance'] = pd.to_numeric(data['Total Distance'])\n\n return data", "def parse(self, sources: List[str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:\n # Some read options are passed as parse_opts\n read_opts = {k: v for k, v in parse_opts.items() if k in (\"sep\",)}\n return self.parse_dataframes(self._read(sources, **read_opts), aux, **parse_opts)", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def load_csv(*, path, filename, sep=\"\\t\", verbose=True):\n \n os.chdir(path)\n if len(glob.glob(filename))==1: \n df = pd.read_csv(filename, sep=sep, low_memory=False)\n \n # 
display example,\n if verbose==True:\n display(df.head(3))\n print(df.shape)\n else:\n pass\n \n # return,\n return df\n \n else:\n if verbose==True:\n print(f\"\"\"ERROR :csv file {filename}, was not found in: \\n {path}\"\"\")\n else:\n pass", "def read_records_file(records_file):\n if records_file.endswith('.csv'):\n df = pd.read_csv(records_file)\n else: \n sys.exit('File extension is not recognised')\n\n return df", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def extract_data(first, second, third):\n\n first_df = pd.read_csv(first)\n second_df = pd.read_csv(second)\n third_df = pd.read_csv(third)\n\n return first_df, second_df, third_df", "def loadFiles(analyzer,totalFiles):\n for filename in totalFiles:\n if filename.endswith('.csv'):\n print('Cargando archivo: ' + filename)\n loadTrips(analyzer, filename)\n print(\"Cargando información extra...\")\n model.findPopulars(analyzer)\n model.findPopularsAdd(analyzer)\n return analyzer", "def doImport(self, params):\n #logging.info(params.filename)\n raw_data = self.open_filename(params.filename)\n #logging.info('file open ok')\n data = self.split_to_blocks(raw_data)\n #logging.info('splitting ok')\n chromatograms = self.process_data_to_chromatograms(data)\n #logging.info('extract ok')\n \n processed_data = []\n \n zero = params.field_results[\"zero\"]\n MIC = params.field_results[\"MIC\"]\n TIC = params.field_results[\"TIC\"]\n round_MZ = params.field_results[\"round_MZ\"]\n \n for chrom in chromatograms:\n \n prefix = chrom['short_title']\n if (\".\" in prefix) and round_MZ:\n first,last = prefix.split('.')\n prefix = first\n \n if (\"MIC\" in prefix) and (not MIC):\n pass\n elif (\"TIC\" in prefix) and (not TIC):\n pass\n else:\n x = ImportDataset1D(name=prefix+'_time',data = chrom['time'])\n if zero:\n y = ImportDataset1D(name=prefix+'_signal',data = chrom['signal_zero'])\n else:\n y = ImportDataset1D(name=prefix+'_signal',data = chrom['signal'])\n processed_data.append(x)\n processed_data.append(y)\n \n return processed_data", "def load(tweets_file, rtt_file):\n try:\n tw_df = pd.read_csv(tweets_file)\n rtt_df = pd.read_csv(rtt_file)\n except:\n print('one or several files were not found')\n sys.exit()\n\n return tw_df, rtt_df", "def from_csv(cls, filename, pulse_number=None):\n df = pd.read_csv(filename)\n return cls._sort_and_filter_dataframe(df, pulse_number)", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def parse_csv(filename, newline='\\n'):\n\n # Define the relevant stations\n relevant_stops = [\n 'NRCH',\n 'DISS',\n 'STWMRKT',\n 'IPSWICH',\n 'MANNGTR',\n 'CLCHSTR',\n 'CHLMSFD',\n 'STFD',\n 'LIVST',\n ]\n\n # Open the filename (csv extension can be included)\n filename = filename.replace('.csv', '')\n\n # Convert the times into 4 digit ints\n fmt_time = lambda time: int(time.replace(':', '')) if time != '' else None\n\n try:\n with open(filename + '.csv', newline=newline) as input_file:\n reader = csv.reader(input_file)\n\n for i, row in enumerate(reader):\n # Ignores the final row and header\n if len(row) > 1 and i != 0 and row[1] in relevant_stops:\n yield ({\n 'id': row[0],\n 'date': 
datetime.strptime(row[0][:8], '%Y%m%d'),\n 'location': row[1],\n 'pla_a': fmt_time(row[2]),\n 'pla_d': fmt_time(row[3]),\n 'act_a': fmt_time(row[16]),\n 'act_d': fmt_time(row[18]),\n })\n except:\n pass\n # print('File does not exist.')", "def process_results_file(f_path):\n results = pd.read_csv(f_path, sep='\\t', header=0)\n keep_cols = {'GS', 'SIZE', 'ES', 'NES', 'p-val'}\n results = results[:20].filter(keep_cols)\n return results", "def load_csv_model(filename) -> tuple:\n dat_sci = pd.read_csv(resources_folder(filename), index_col=0)\n commenter('data from ' + filename, lambda: print(dat_sci))\n\n ind = dat_sci.index\n # commenter('index', lambda: print(ind))\n col = dat_sci.columns\n # commenter('columns', lambda: print(col))\n # self.data = np.asmatrix(dat_sci.values)\n # commenter('data', lambda: print(self.data))\n # print(type(dat_sci))\n\n return dat_sci, ind, col", "def read_files(files):\n if len(files) == 1:\n return pd.read_csv(files[0], comment='#', names=[\"time\", \"volts\"])\n\n elif len(files)>1:\n df = []\n for f in files:\n data = pd.read_csv(f, comment='#', names=[\"time\", \"volts\"])\n df.append(data)\n new_df = pd.concat(df)\n new_df = new_df.drop_duplicates(subset='time')\n new_df.reset_index(drop=True, inplace=True)\n return new_df", "def load_data(self, filepath, sep=\",\"):\n if filepath.split('.')[-1] == 'csv':\n self.data = pd.read_csv(filepath, sep=sep)\n elif filepath.split('.')[-1] == 'json':\n self.data = pd.read_json(filepath)\n else:\n print 'Please select a csv or json file'", "def initial_csv_wrangling(csv_file):\n df = pd.read_csv(csv_file)\n df = df.fillna('')\n columns = list(df.columns)\n\n # check that \"url\" column exists (required)\n if 'url' not in columns:\n raise Exception('Input csv file requires a \"url\" column, which does not seem to exist. Exiting.')\n\n # check if \"pos_concepts\" column exists and parse accordingly (not required)\n if 'pos_concepts' in columns:\n print('Found \"pos_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['pos_concepts'] = df['pos_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"neg_concepts\" column exists and parse accordingly (not required)\n if \"neg_concepts\" in columns:\n print('Found \"neg_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['neg_concepts'] = df['neg_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"metadata\" column exists and load accordingly (not required)\n if \"metadata\" in columns:\n print('Found \"metadata\" column. Attempting to ingest.')\n try:\n df['metadata'] = df['metadata'].replace('','{}').map(json.loads)\n except:\n raise Exception('Value in \"metadata\" column does not seem to be a properly JSON formatted str.')\n\n return df", "def load_and_clean(self,in_path):\n in_path = Path(in_path)\n try:\n df = pd.read_csv(in_path, index_col = 0, parse_dates = True, infer_datetime_format = True)\n except:\n print(\"Could not read csv file. 
Please check the path\")\n finally:\n #attempt to clean df\n df.dropna(inplace = True)\n df.drop_duplicates(inplace = True)\n df.sort_index()\n return df", "def load_data(self, filename):\r\n #sqlcontext = SQLContext(self.sc)\r\n #df = sqlcontext.read.format('com.databricks.spark.csv').options(header='false', inferschema='true').load(filename)\r\n #df = sc.textFile(r\"C:\\Users\\mohan\\Downloads\\patches.csv\").map(lambda line: line.split(\",\"))\r\n #print (df.count())\r\n df = self.sc.textFile(filename).map(lambda line: line.split(\",\"))\r\n l = df.map(lambda w: [int(float(c)) for c in w]).zipWithIndex()\r\n return l\r\n raise NotImplementedError", "def parse_data(filename):\n x, y = [], []\n with open(filename) as f:\n reader = csv.reader(f)\n for row in reader:\n x.append(datetime.strptime(row[1], DATE_FORMAT))\n y.append(row[0])\n\n return x, y", "def get_data_from_csv(filepath, filename, datatypes, date_column_list):\n\n concatenated_file = os.path.join(filepath, filename)\n\n dataframe = get_data_from_csv_full_path(concatenated_file, datatypes, date_column_list)\n\n return dataframe", "def _load_file(cls,\n path: str,\n sep: Optional[str] = '\\t',\n header: Optional[str] = 'infer',\n columns: Optional[Union[List[str], List[int]]] = None,\n encoding: Optional[str] = 'utf-8') -> Tuple[List[Tuple], Optional[List[str]]]:\n # Get all paths\n if isinstance(path, str) and os.path.isdir(path):\n file_paths = [os.path.join(path, name) for name in os.listdir(path)]\n file_paths = sorted(file_paths)\n else:\n file_paths = [path]\n\n data: List = []\n for file_path in file_paths:\n # Don't fail on buggy files\n try:\n examples = pd.read_csv(file_path,\n sep=sep,\n header=header,\n index_col=False,\n dtype=str,\n encoding=encoding,\n keep_default_na=False)\n # Select columns\n if columns is not None:\n examples = examples[columns]\n data.extend(examples.values.tolist())\n except Exception as e:\n print(\"Warning: failed to load file {file_path}\")\n print(e)\n\n if len(data) == 0:\n raise ValueError(f\"No data found at {path}\")\n\n # Take the named columns from the columns parameter\n # if they are strings or try to use the pd.DataFrame\n # column names if they are strings.\n named_cols: List[str] = []\n if columns:\n for i, c in enumerate(columns): # type: ignore\n if isinstance(c, str):\n named_cols.append(c)\n elif all(isinstance(c, str) for c in examples.columns):\n named_cols = examples.columns.tolist()\n\n return data, named_cols if len(named_cols) > 0 else None", "def read_data_from_csv(filename: str) -> pd.DataFrame:\n try:\n data = pd.read_csv(filename)\n return data\n except(FileNotFoundError):\n print('Error: Could not read the data from csv.')\n return None", "def import_func(path_):\n\n datasets_dic = {}\n\n for dataset_path in path_:\n # Parse labels from filenames\n dataset_label = os.path.split(dataset_path)[1].split('.')[0]\n\n # Read from csv to Pandas\n dataset = pd.read_csv(dataset_path)\n\n # insert dataset label to the dataframes\n dataset.insert(0, 'trial', dataset_label)\n dataset.insert(0, 'maneuver', dataset_label.split('_')[0])\n\n # Datasets are stored in a dictionary\n datasets_dic.update({dataset_label: dataset})\n\n # list of imported maneuvers\n dataset_names = list(datasets_dic.keys())\n\n return datasets_dic, dataset_names", "def get_data(path):\n df = pd.read_csv(path)\n\n return df", "def read_csv(self, filepath):\n try:\n self.df = pd.read_csv(filepath)\n return self\n except FileNotFoundError as e:\n raise OperationError(f\"File not found - {filepath}\") 
from e\n except ParserError as e:\n raise OperationError(f\"Fails to parse file - {e}\") from e", "def CSV_Load_File( self, infilename ):\n print( 'Loading \"{}\"'.format(infilename) )\n IN = open( infilename, 'r' )\n standname = None\n laststand = None\n for L in IN:\n if( L[0:9] == 'Site/Plot' ): continue\n col = L.split( ',' )\n standname = col[0]\n year = int(col[1])\n #if( re.search( '-', standname ) != None ):\n # loc = re.search( '-', standname )\n # year = int(standname[loc.start()+1:])\n # standname = standname[0:loc.start()]\n #print standname, year\n if( (standname != None ) & (standname != laststand) ): self.Data.Stand[standname] = StandData( standname )\n (treeno, species, dbh, ht, live, status, cclass, tpa) = \\\n (int(col[2]), col[3], float(col[4]), float(col[5]), col[6], col[7], int(float(col[8])), float(col[9]))\n if( OPT['d'] ):\n if( dbh > 10.0 ): dbh *= 1.25\n if( dbh > 15.0 ): dbh *= 1.50\n for t in range( 1, int( math.ceil( tpa ))+1, 1 ):\n ntree = len( self.Data.Stand[standname].Tree ) + 1\n self.Data.Stand[standname].Tree[ntree] = TreeData( species, TreeNumber=treeno )\n self.Data.Stand[standname].Tree[ntree].Year[year] = MeasurementData( dbh, ht, '', 1, live, status, cclass )\n laststand = standname\n IN.close()", "def parse_csv_file(file_path):\n\n complete_data_list = []\n\n try:\n import_file = open(file_path, \"rb\")\n\n except IOError:\n print 'An error occured trying to read the file.'\n\n else:\n reader_file = csv.DictReader(import_file)\n complete_data_list = get_file_data(reader_file)\n import_file.close()\n\n return complete_data_list", "def load_data(filepath):\n\tlogging.info(f\"Load data from {filepath}\")\n\tdf = pd.read_csv(filepath)\n\tdf = set_dtypes(df)\n\tdf = df.sort_values(by='query_date')\n\n\treturn df", "def parse_separated(filename, delimiter):\n with csvhelper.UnicodeReader(filename,\n delimiter=delimiter) as report_reader:\n return parse_generic(report_reader)", "def open_convert_and_clean_csv(csv_data_file):\n imported_data = tablib.Dataset().load(open(csv_data_file).read())\n dataset = []\n for row in imported_data:\n if float(row[1]) > 0 and float(row[2]) > 0:\n dataset.append((row[0], float(row[1]), float(row[2])))\n return dataset", "def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)", "def parse(raw_file):\n parsed_data = []\n with open(raw_file, 'r') as r:\n rows = csv.reader(r)\n fields = rows.next()\n counter = 0\n for r in rows:\n parsed_data.append(dict(zip(fields, r)))\n\n return parsed_data", "def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data", "def data_import(path):\n train_path = os.path.join(path, \"train.csv\")\n 
test_path = os.path.join(path, \"test.csv\")\n df_train = pd.read_csv(train_path)\n df_test = pd.read_csv(test_path)\n return df_train, df_test", "def load_from_csv(path, delimiter=','):\n return pd.read_csv(path,encoding = \"ISO-8859-1\",dtype=object)", "def _parse(self, filename: str, comment: str = None) -> pd.DataFrame:\n try:\n return pd.read_csv(filename, comment=comment)\n except pd.errors.EmptyDataError:\n mesg = f'Provided data file \\'{filename}\\' was empty'\n raise ValueError(mesg)\n except Exception as e:\n print(f'Could not parse file {filename}')\n raise e", "def main():\n master_csv = open(\"master.csv\", \"w\")\n master_csv.write(\"lat, lon, year, month, day, T_max, T_min, PrecipMM, T_ave, PrecipCM, RelHum\\n\")\n MoLS_comp_csv = open(\"MoLS_comp.csv\", \"w\")\n lat_lon_csv = open(\"lat_lon.csv\", \"w\")\n\n for csvFilename in os.listdir('.'):\n\n if not csvFilename.endswith('.csv') or csvFilename == \"master.csv\" or csvFilename == \"MoLS_comp.csv\" or csvFilename == \"lat_lon.csv\":\n continue # skip non-csv files\n\n csvFileObj = open(csvFilename)\n readerObj = csv.reader(csvFileObj)\n print(\"Currently parsing \" + str(csvFilename))\n for row in readerObj:\n if readerObj.line_num <= 8 :\n continue\n\n year = int(row[0])\n vp = float(row[8]) / 1000.0\n month, day = get_month(int(row[1]))\n T_ave = (float(row[6]) + float(row[7])) / 2\n PrecipCM = float(row[3]) / 10\n svp = .611 * math.e ** (5321 * ((1 / 273.0) - (1 / (T_ave + 273.15))))\n rh_ave = round((vp / svp) * 100, 2)\n\n # print([[csvFilename], , row[0], row[1], row[3], row[6], row[7], row[8]])\n # print(readerObj.line_num)\n # print([csvFilename.split(\"_\")[0], csvFilename.split(\"_\")[1].split(\".csv\")[0], row[0], row[1], row[3],\n # row[6], row[7], row[8]])\n\n master_csv.write(str(csvFilename.split(\"_\")[0]) + \",\" + str(csvFilename.split(\"_\")[1].split(\".csv\")[0]) +\n \",\" + str(year) + \",\" + str(month) + \",\" + str(day) + \",\" + str(row[6]) + \",\" + str(row[7])\n + \",\" + str(row[3]) + \",\" + str(T_ave) + \",\" + str(PrecipCM) + \",\" + str(rh_ave) + \"\\n\")\n MoLS_comp_csv.write(str(year) + \",\" + str(month) + \",\" + str(day) + \",\" + str(row[6]) + \",\" + str(row[7])\n + \",\" + str(row[3]) + \",\" + str(T_ave) + \",\" + str(PrecipCM) + \",\" + str(rh_ave) + \"\\n\")\n lat_lon_csv.write(str(csvFilename.split(\"_\")[0]) + \",\" + str(csvFilename.split(\"_\")[1].split(\".csv\")[0]) + \"\\n\")\n\n master_csv.close()\n MoLS_comp_csv.close()\n lat_lon_csv.close()", "def load_data(filename):\n #Admittedly copy-pasted from Heredity project cuz I'm resourceful like that\n #Makes 2 lists, one for evidence and one for labels\n evidence = []\n labels = []\n #Open csv file\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n #Iterate through user rows of file\n for row in reader:\n i = 0\n tmp_list = []\n for column in row:\n if i in [0,2,4,11,12,13,14]:\n column = int(column)\n if i in [1,3,5,6,7,8,9]:\n column = float(column)\n if i == 10:\n if column == \"Jan\":\n column = 0\n if column == \"Feb\":\n column = 1\n if column == \"Mar\":\n column = 2\n if column == \"Apr\":\n column = 3\n if column == \"May\":\n column = 4\n if column == \"June\":\n column = 5\n if column == \"Jul\":\n column = 6\n if column == \"Aug\":\n column = 7\n if column == \"Sep\":\n column = 8\n if column == \"Oct\":\n column = 9\n if column == \"Nov\":\n column = 10\n if column == \"Dec\":\n column = 11\n if i in [15,16]:\n if column == \"Returning_Visitor\" or column == \"TRUE\":\n column = 1\n 
else:\n column = 0\n if i == 17:\n if column == \"TRUE\":\n column = 1\n else:\n column = 0\n labels.append(column)\n else:\n tmp_list.append(column)\n i+=1\n evidence.append(tmp_list)\n \n return (evidence,labels)", "def separate_file(self):\n df = pd.read_csv(\"nfl_drafts.csv\", names = ['Pick', 'Team', 'Player_name', 'POS', \n 'Age', 'Last_played', 'AP1', 'PB', 'ST', 'CarAV', 'DrAV', 'G_perS', 'PaCmp', 'PaAtt', \n 'PaYds', 'PaTD', 'Int', 'Att', 'Yds', 'RuTD', 'Rec', 'ReYds', 'ReTD', 'Solo', 'DeInt', \n 'Sk', 'Coll/Univ', 'Stat'], error_bad_lines = False)\n return df", "def readATPMatches(dirname):\n\tallFiles = glob.glob(dirname + \"/atp_matches_\" + \"20??.csv\") ##restrict training set to matches from 2000s\n\tmatches = pd.DataFrame()\n\tcontainer = list()\n\tfor filen in allFiles:\n\t\tdf = pd.read_csv(filen,\n\t\t\t\t\t\t index_col=None,\n\t\t\t\t\t\t header=0)\n\t\tcontainer.append(df)\n\tmatches = pd.concat(container)\n\treturn matches" ]
[ "0.6957248", "0.6680294", "0.66800976", "0.6676007", "0.6644503", "0.65280634", "0.64709973", "0.6345747", "0.63367206", "0.63359916", "0.6311614", "0.63049614", "0.630401", "0.62790114", "0.62762284", "0.6265179", "0.6254957", "0.6253625", "0.62445545", "0.6242823", "0.62291914", "0.6204025", "0.61897814", "0.6122783", "0.6113504", "0.611005", "0.6104946", "0.607535", "0.606989", "0.60566235", "0.6047939", "0.6046518", "0.6033876", "0.602804", "0.6019154", "0.60021526", "0.59987885", "0.5989318", "0.59829533", "0.59826696", "0.59714466", "0.59668607", "0.5963708", "0.59629273", "0.5935636", "0.59336907", "0.59295046", "0.59281725", "0.59228873", "0.5919007", "0.5915179", "0.591305", "0.591112", "0.59008443", "0.5891261", "0.5880576", "0.5860969", "0.58565676", "0.58565676", "0.58565676", "0.58516115", "0.58495086", "0.58446383", "0.58432084", "0.58419687", "0.5837279", "0.58367807", "0.58237445", "0.5812306", "0.5811423", "0.5808738", "0.580471", "0.5803882", "0.5803807", "0.5798459", "0.5796614", "0.57910264", "0.57871073", "0.5780487", "0.57793635", "0.5776114", "0.5772011", "0.57473475", "0.5746948", "0.5741227", "0.5740731", "0.5738545", "0.5737039", "0.57353365", "0.57323307", "0.5729917", "0.57272476", "0.57249373", "0.5722119", "0.57157445", "0.5712445", "0.5711582", "0.57091945", "0.57066494", "0.57011676" ]
document_score: 0.61578256
document_rank: 23
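The "triplet": [["query", "document", "negatives"]] entry in each record's metadata indicates how the fields pair up for contrastive training. The sketch below expands one record into (anchor, positive, negative) examples; using sentence-transformers here is an assumption about the training stack, and whether larger negative_scores mean harder negatives is not stated in the record, so treat the filtering helper as illustrative.

from sentence_transformers import InputExample

def expand_to_triplets(record):
    # One training example per negative: (anchor=query, positive=document, negative).
    return [
        InputExample(texts=[record["query"], record["document"], neg])
        for neg in record["negatives"]
    ]

def top_scoring_negatives(record, k=4):
    # Illustrative: keep the k negatives with the highest (string-encoded) scores.
    scored = sorted(
        zip(record["negatives"], map(float, record["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [neg for neg, _ in scored[:k]]

The resulting examples are compatible with triplet-style losses such as sentence-transformers' TripletLoss or MultipleNegativesRankingLoss.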
query: cleans data by lowers cases and removing accentuated chars then extracts word tokens of at least 2 chars

document:
def cleanData(s):
    # extract only word tokens of at least 2 chars
    re.compile(r"\b\w\w + \b", re.U).findall(s)
    # xml_dict = {';': '', '&lt': '<', '&amp': '&', '&gt': '>', '&quot': '"',
    #             '&apos': '\''}
    # for key, value in xml_dict.iteritems():
    #     s = s.replace(key, value)
    s.translate(maketrans('?!,.', ' '))
    with open('stopwords.txt') as stop_words:
        stop_words = {line.strip().lower() for line in stop_words if line!='\n'}
    return s

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives:
[ "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(nltk.corpus.stopwords.words('english'))\n\t#stop_words = set(stopwords.words('english'))\n\tfiltered_tokens = [w for w in tokens if not w in stop_words]\n\treturn filtered_tokens", "def tokenize_wordchars(lines):\n return", "def get_tokens(data_clean):\n #sentence tokenization\n data_sent = sent_tokenize(data_clean)\n #tokenizer\n data_tokenized_punc = [word for sent in data_sent for word in nltk.word_tokenize(sent)]\n data_word = [word.lower() for word in data_tokenized_punc if word.isalpha()]\n\n return data_word, data_sent", "def tokenize2(text):\n # words = re.findall(\"[a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’\\-]+\", text)\n # words = re.findall(\"\\w+\", text)\n words = regex.findall(\"\\p{L}+\", text)\n return words", "def tokens(doc):\n return (tok.lower() for tok in re.findall(r\"\\w+\", doc))", "def tokenize(document):\n raw_words=[word.lower() for word in nltk.word_tokenize(document) if word.isalpha()]\n raw_words=[word.lower() for word in raw_words]\n final_words=[]\n for word in raw_words:\n if word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\"):\n final_words.append(word.lower())\n return final_words", "def simple_tokenize(document):\n #document = document.lower()\n #document = re.sub('[^\\u4e00-\\u9fa5]', '', document)\n #return document.strip().split()\n return document", "def clean_data(self, data):\r\n data=data.lower()\r\n doc=nlp(data, disable=['parser', 'ner'])\r\n \r\n #Removing stopwords, digits and punctuation from data\r\n tokens = [token.lemma_ for token in doc if not (token.is_stop\r\n or token.is_digit\r\n or token.is_punct\r\n )]\r\n \r\n tokens = \" \".join(tokens)\r\n return tokens", "def data_cleaner(doc):\n \n sw = stopwords.words('english')\n regex_token = RegexpTokenizer(r\"([a-zA-Z]+(?:’[a-z]+)?)\")\n doc = regex_token.tokenize(doc)\n doc = [word.lower() for word in doc]\n doc = [word for word in doc if word not in sw]\n #print(doc)\n doc = pos_tag(doc)\n doc = [(word[0], get_wordnet_pos(word[1])) for word in doc]\n #print(doc)\n lemmatizer = WordNetLemmatizer() \n doc = [lemmatizer.lemmatize(word[0], word[1]) for word in doc]\n #print(' '.join(doc))\n return ' '.join(doc)", "def _split(self):\n \n self._words = []\n \n # (1) Expand contractions\n text = self._text.replace(\"'m \", \" am \")\n text = text.replace(\"'d \", \" would \")\n text = text.replace(\"'ll \", \" will \")\n text = text.replace(\"'ve \", \" have \")\n text = text.replace(\"'re \", \" are \")\n text = text.replace(\"can't \", \"can not \")\n text = text.replace(\"won't \", \"will not \")\n text = text.replace(\"n't \", \" not \")\n # Assume possesives are contractions of is\n text = text.replace(\"'s \", \" is \")\n text = text.replace(\"s' \", \"s \")\n \n # (2) Replace newlines, carriage returns, tabs, form feed with space.\n text = re.sub('[\\r\\n\\t\\f]', ' ', text)\n \n # (3) remove duplicate spaces\n text = re.sub(' +', ' ', text.strip())\n \n # Empty text\n if len(text) == 0:\n return \n \n # (4) Split text by whitespace (tokenize).\n words = text.split(' ')\n \n # (5) Separate out punctuation\n for word in words:\n length = len(word)\n \n begin = 0\n for i in range(0,length):\n if not word[i].isdigit() and not word[i].isalpha():\n # decimal, thousandths, fraction symbol\n if 
word[i] in ['.', ',', '/'] and i < length-1 and word[i+1].isdigit():\n continue\n # degree\n if word[i] in ['°'] and i < length-1 and word[i+1] in [ 'f', 'F', 'c', 'C']:\n continue\n # sign symbol\n if word[i] in ['-', '+'] and i < length-1 and (word[i+1].isdigit() or word[i+1] in ['.', ',']):\n # first char or exponent\n if begin == i or word[i-1] in ['e', 'E']:\n continue\n \n if begin != i:\n self._words.append( { 'word': word[begin:i], 'tag': Vocabulary.UNTAG } )\n if word[i] in [ '.', '?', '!', ',', ':', ';', '(', ')', '[', ']', '\"', '\\'', '¿', '¡']:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.PUNCT } )\n # non-printable ascii\n elif (ord(word[i]) >= 0 and ord(word[i]) <= 7) or (ord(word[i]) >= 14 and ord(word[i]) <= 31):\n pass\n else:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.SYMBOL } )\n begin = i + 1\n if begin < length:\n self._words.append( { 'word': word[begin:], 'tag': Vocabulary.UNTAG } )", "def tokenize(text):\n # YOUR CODE HERE\n t = text.lower()\n words = re.findall(r'[a-z]+',t)\n return words", "def tokenize(document):\n terms = document.lower().split()\n space = ' '\n return [term.strip(characters) for term in terms if term not in space]", "def tokenized_text(text: Text):\n words = nltk.word_tokenize(text)\n return (w.lower().strip() for w in words if ONLY_LETTER_WORDS.match(w))", "def clean_doc(doc):\n tokens = doc.split()\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens]\n tokens = [word.lower() for word in tokens if word.isalpha()]\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized", "def processText(text):\n\n no_punc = [word for word in text.split() if word.isalpha()] # and word not in stopwords.words('english')]\n #removes non-letter characters and only includes words not included in stopwords\n no_punc = \" \".join(no_punc) \n clean_words = nltk.word_tokenize(no_punc) #splits the punctuation marks from the real words\n return clean_words", "def tokenize_text(text):\n text = re.sub(\"_\", \"\", text)\n text = text.lower()\n tokens = re.split(\"\\W+\", text)\n tokens = [t for t in tokens if t]\n #print(tokens[0:10])\n return tokens", "def clean_non_word_chars(tokens):\n toks = []\n for token in tokens:\n t = re.sub(r'\\W', \"\", token)\n if len(t) > 1:\n toks.append(t)\n\n return toks", "def get_words(doc):\n splitter = re.compile('\\\\W*')\n # Split the words by non-alpha characters\n words = [s.lower() for s in splitter.split(doc) \n if len(s)>2 and len(s)<20]\n # Return the unique set of words only\n return dict([(w,1) for w in words])", "def tokenize_into_words(myblob):\n set_constraint = re.compile(r'[^a-zA-Z0-9]')\n tokenize_to_text = set_constraint.split(myblob) # The blob is spilt into words and the given constraints are applied\n words = [word for word in tokenize_to_text if word]\n return words", "def initial_clean(text):\n text = re.sub(\"[^a-zA-Z ]\", \"\", text)\n text = text.lower() # lower case text\n text = nltk.word_tokenize(text)\n return (text)", "def text_process(mess):\n nopunc= [char for char in mess if char not in string.punctuation]\n nopunc=''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english') and len(word)>2]", "def tokenize(text, sw):\n 
\n #get lines from text\n lines = text.readlines() \n\n #tokenize text\n clr = []\n for i in lines:\n if ' ' in i:\n clr.append(i)\n else:\n continue\n final_list = []\n if len(clr) == 1:\n final_list = clr[0].split(' ')\n else:\n for l in clr:\n final_list += l.replace('\\n',' ').split(' ')\n\n fl = np.array(final_list).squeeze()\n filtered = []\n for word in fl:\n if word.lower() in sw or len(word)>25 or not word.isalpha():\n continue\n else:\n filtered.append(word.lower())\n \n return filtered", "def tokenizer(s):\n\n tokens = tokenize(s.lower()) # apply the nltk tokenizer\n tokens = [t for t in tokens if doc_frequency[t]>5 and t not in stop_words]# and doc_frequency[t]<3000]\n \n return tokens", "def text_token_analyser(text):\r\n\tglobal word_buffer\r\n\tglobal temp\r\n\ttoken_match = token_re.match(text)\r\n\tif token_match:\r\n\t\ttoken = token_match.group(1)\r\n\t\tif uppercase_re.match(token):\r\n\t\t\tword_buffer.append(token)\r\n\r\n\t\telse:\r\n\t\t if len(word_buffer)>1: # Helps to identify the break in the Named Entity (eg. Los Angeles last)\r\n\t\t \r\n\t\t temp=1\r\n\t\t \r\n\t\t else:\r\n\t\t word_buffer = []\r\n\t\treturn token, token_match.group(2)\r\n\treturn None, text", "def filter_data(text):\n\tlist_of_words = text.split()\n\t#remove non-alphabetical characters and convert to lower case\n\tlist_of_words = [''.join([char for char in word if char in string.ascii_letters]).lower() for word in list_of_words]\n\t#remove empty spaces\n\tlist_of_words = [word for word in list_of_words if word.isalpha()]\n\t#print(list_of_words)\n\treturn list_of_words", "def tokens(text) -> Tuple[Word]:\n return tuple(re.findall('[a-z]+', text.lower()))", "def _preprocess(self):\n _words = []\n \n # Preprocess Token List.\n wasCaps = False\n nwords = len(self._words)\n for index in range(nwords):\n word = self._words[index]\n length = len(word['word'])\n\n # (1) Remove periods from abbreviations\n if word['word'] == '.':\n # Preceded by a single letter\n if len(_words) > 0 and len(_words[-1]['word']) == 1 and _words[-1]['word'].isalpha():\n # Set previous word as Abbreviation\n if _words[-1]['tag'] not in [Vocabulary.NAME, Vocabulary.TITLE]:\n _words[-1]['tag'] = Vocabulary.ABBR\n # Drop the punct!\n # Proceeded by an abbreviated name title\n elif self._punct == False and len(_words) > 0 and (_words[-1]['tag'] in [Vocabulary.NAME, Vocabulary.TITLE] or _words[-1]['tag'] == Vocabulary.DATE):\n # Drop the punct!\n pass\n else:\n _words.append(word)\n \n # Single character\n elif length == 1:\n # Lowercase the single letter\n if word['word'].isupper():\n word['word'] = word['word'].lower()\n \n if word['word'].isalpha():\n # Continuation of a Name\n if len(_words) > 0 and _words[-1]['tag'] == Vocabulary.NAME:\n word['tag'] = Vocabulary.NAME\n \n # Keep single letter word\n _words.append(word)\n \n wasCaps = False\n \n # Multiple Character \n else:\n # All Uppercased (can't start with digit)\n if word['word'].isupper() and not word['word'][0].isdigit() and not word['word'][0] == '°':\n # (2) Identify Acronyms\n # If the next word is uppercased, it is a title line, not an acronym\n # If last word is uppercased, it is a title line, not an acronym\n word['word'] = word['word'].lower()\n if not (index+1 < nwords and self._words[index+1]['word'].isupper()) and (index+1 != nwords or wasCaps == False):\n try:\n v = vocab[word['word']]\n if Vocabulary.NAME in v['tag']:\n word['tag'] = Vocabulary.NAME\n # Word is a title (e.g., CEO)\n elif Vocabulary.TITLE in v['tag']:\n word['tag'] = Vocabulary.TITLE\n 
itag = v['tag'].index(Vocabulary.TITLE)\n word['word'] = v['lemma'][itag]\n else:\n word['tag'] = Vocabulary.ACRONYM\n except:\n word['tag'] = Vocabulary.ACRONYM\n \n wasCaps = True\n \n # First Letter is Capitalized\n elif word['word'][0].isupper():\n # First Word \n if len(_words) == 0:\n pass\n # Follows abbreviated title\n elif len(_words) > 1 and _words[-1]['word'] == '.' and _words[-2]['tag'] == Vocabulary.TITLE:\n word['tag'] = Vocabulary.NAME\n # Start of Sentence\n elif _words[-1]['tag'] == Vocabulary.PUNCT and _words[-1]['word'] not in [',', ':']:\n pass\n elif word['word'] in ['Jan', 'January', 'Feb', 'February', 'Mar', 'March', 'Apr', 'April', 'May', 'Jun', 'June', 'Jul', 'July', 'Aug', 'August', 'Sep', 'Sept', 'September', 'Oct', 'October', 'Nov', 'November', 'Dec', 'December']:\n word['tag'] = Vocabulary.DATE\n # (3) Identify Proper Names\n # Word is capitalized and not proceeded by period (.), question (?) or exclamation (!)\n # or single/double quote\n else:\n word['tag'] = Vocabulary.NAME\n # Proceeding Acronym is a really part of a name\n if len(_words) > 0 and _words[-1]['tag'] == Vocabulary.ACRONYM:\n _words[-1]['tag'] = Vocabulary.NAME\n # Proceeding Word is a Title of a name (e.g., Mr)\n else:\n try:\n v = vocab[_words[-1]['word']]\n if Vocabulary.TITLE in v['tag']:\n _words[-1]['tag'] = Vocabulary.TITLE\n itag = v['tag'].index(Vocabulary.TITLE)\n _words[-1]['word'] = v['lemma'][itag]\n \n except:\n # Word is an ending title in a name\n try:\n v = vocab[word['word'].lower()]\n if Vocabulary.TITLE in v['tag'] and Vocabulary.STREET_TYPE not in v['tag'] and Vocabulary.STATE not in v['tag']:\n word['tag'] = Vocabulary.TITLE\n itag = v['tag'].index(Vocabulary.TITLE)\n word['word'] = v['lemma'][itag]\n except: pass\n wasCaps = False\n \n # First Letter is a Digit\n elif word['word'][0].isdigit():\n cont = False\n # Check if this is a number combined with a unit\n for i in range(1, len(word['word'])):\n # Separate the number from the proceeding text\n if word['word'][i].isalpha():\n token = word['word'][i:].lower()\n # Check if the proceeding text is a Unit of Measurement\n try:\n v = vocab[token]\n if Vocabulary.UNIT in v['tag']:\n itag = v['tag'].index(Vocabulary.UNIT)\n _words.append( { 'word': word['word'][0:i], 'tag': Vocabulary.NUMBER } )\n _words.append( { 'word': v['lemma'][itag], 'tag': Vocabulary.UNIT } )\n cont = True\n except: pass\n break\n elif not word['word'][i].isdigit() and word['word'][i] != Words.DECIMAL:\n break\n if cont == True:\n continue\n \n # lowercase\n word['word'] = word['word'].lower()\n # romanize\n if self._roman:\n word['word'] = unidecode(word['word'])\n _words.append(word)\n \n self._words = _words", "def clean_article(self):\n # split into tokens by white space\n tokens = self.text.split(\" \")\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens] # type: List[Any]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # lemmatization and lowercase\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(w.lower()) for w in tokens]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def clean_raw_data(self, text):\r\n return [token.lower() for token in nltk.word_tokenize(text)\r\n if token not in self.stop_words and token not in 
punctuation]", "def tokenize(txt):\n Depunc = depunctuate(txt).lower()\n Tokens = word_tokenize(Depunc)\n \n return Tokens", "def text2tokens(raw_text):\n clean_text = raw_text.lower().translate(translate_tab)\n tokens = [token.strip() for token in tokenizer.tokenize(clean_text)]\n tokens = [token for token in tokens if token not in eng_stopwords]\n stemmed_tokens = [stemmer.stem(token) for token in tokens]\n return [token for token in stemmed_tokens if len(token) > 2] # skip short tokens", "def simple_tokeniser(sent):\n sent = re_tok_apos.sub(r\"\\1 's\", sent)\n sent = re_tok_mw_punc.sub(r\"\\1 \\2\", sent)\n sent = re_tok_punc.sub(r\" \\1 \", sent).replace('-', ' - ')\n sent = re_tok_punc.sub(r\" \\1 \", sent)\n sent = re_tok_mult_space.sub(' ', sent)\n return sent.lower().split()", "def char_analyzer(text):\n tokens = text.split()\n return [token[i: i + 3] for token in tokens for i in range(len(token) - 2)]", "def tokenize(text):\n return [token.lower() for token in simple_preprocess(text) if token not in STOPWORDS]", "def tokenize_text(text):\n tokens = []\n for sent in nltk.sent_tokenize(text):\n for word in nltk.word_tokenize(sent):\n if len(word) < 2:\n continue\n tokens.append(word.lower())\n return tokens", "def _tokenize(self, text):\n if not text:\n return []\n\n text = PUNCTUATION_CHARS.sub(' ', text)\n\n words = [\n t[:128].lower() for t in text.split()\n if len(t) >= MIN_WORD_LENGTH and t.lower() not in STOP_WORDS\n ]\n\n return words", "def tokenize_text(text):\r\n return [word for word in word_tokenize(text) if (word.isalpha() == 1)]", "def clean_text(txt):\n\n alphabet= 'abcdefghijklmnopqrstuvwxyz '\n\n ALPHABET= 'ABCDEFGHIJKLMNOPQRSTUVWXYZ '\n\n new_words=''\n \n for i in txt:\n if i in alphabet or i in ALPHABET:\n new_words+= i\n\n clean=new_words.lower().split()\n\n return clean", "def get_words(s):\n return tokenize(lower_without_diacritics(s))", "def words(self):\n punctuation = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n lst = []\n for lines in self.lines:\n words = lines.split(' ')\n for word in words:\n no_punc = ''\n for c in word:\n if c not in punctuation:\n no_punc += c.lower()\n if no_punc != '' and no_punc != '\\n':\n lst.append(no_punc.strip('\\n'))\n return lst\n #no_punc += word.lower()\n #for word in no_punc.split(' ')[:-1]:\n #for word in no_punc:\n # lst.append(word)\n #line = lines.strip(os.linesep) # strips away spaces, \\t (tabs), and \\n (new-lines/enter)\n #print(no_punc)\n #print(lst)", "def get_words(self, cleaner):\n return cleaner.clean(self.get_text())", "def text_to_corpus(text, accented_chars=True,\n convert_num=True, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True): \n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n # add a period to the end of the text:\n if len(text) > 0 and text[-1] != '.':\n text += '.'\n \n doc = nlp(text) #tokenise text \n clean_text = []\n \n for token in doc:\n \n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n \n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n 
flag = False\n \n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT' and not token.tag_ == '.') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n \n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n \n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n \n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n \n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n \n # convert all closing punctuation ('.', '!', '?', '...' to periods)\n if token.tag_ == '.' and flag == True:\n clean_text.append('.')\n \n # add text lemmas to the clean text:\n elif edit != \"\" and flag == True:\n clean_text.append(edit)\n \n return ' '.join(clean_text)", "def preprocessing(data):\n #tokenizer = RegexpTokenizer(r'\\w+') # allow charachter only\n #words = tokenizer.tokenize(data) # tokenize : convert to words\n words = word_tokenize(data)\n # remove stop words & stemming\n new_words = []\n for word in words:\n if word not in stop_words:\n new_words.append(stemmer.stem(word)) # append to new words with stemming\n \n if '' in new_words: new_words.remove('') # remove space from list\n #print(\"Preprocessing : {}\".format(new_words))\n return new_words", "def tokenize(text):\n # words = re.split(\"[\\s\\-,;:!?.’\\'«»()–...&‘’“”*—]+\", text)\n # words = re.split(\"[^a-zåàâäæçéèêëîïôöœßùûüÿA-ZÅÀÂÄÆÇÉÈÊËÎÏÔÖŒÙÛÜŸ’\\-]+\", text)\n # words = re.split(\"\\W+\", text)\n words = regex.split(\"\\P{L}+\", text)\n words.remove('')\n return words", "def tokenize(text):\n text = text.lower()\n remove = re.compile('[' + string.punctuation + '0-9\\\\r\\\\t\\\\n]')\n cleanText = re.sub(remove, \" \", text)\n tokens = nltk.word_tokenize(cleanText)\n tokens = [w.lower() for w in tokens if (len(w) >= 3 and w not in ENGLISH_STOP_WORDS)]\n return tokens", "def _extract(txt):\n words = []\n blank = re.compile(r\"\\s+\")\n num = re.compile(r\"[0-9]\")\n punc = re.compile(r\"[^a-z]\")\n for t in blank.split(txt.lower()):\n # omit words that are stop words or contain numbers\n if num.match(t):\n continue\n # remove punctuations\n t = punc.sub(\"\", t)\n if t == \"\" or t in _stopwords():\n continue\n words.append(t)\n return words", "def _clean_words(self, title, filter_stopwords=False):\n chars = '\"[]():;?!,\\'-'\n translation = dict((ord(c), u' ') for c in chars)\n def translate(text):\n if isinstance(text, unicode):\n translated = text.translate(translation)\n else:\n translated = text.translate(None, chars)\n return translated\n strips = '.'\n words = [\n x.strip(strips)\n for x in translate(title).split()\n ]\n for word in words:\n if len(word) >= self.min_word_length:\n if filter_stopwords and word.lower() not in STOPWORDS:\n continue\n # if the word contains non-ascii characters, try to convert\n # it to a ascii equivalent so that it's possible to type\n # \"naive\" when you don't even know how to type \"naïve\"\n try:\n word.encode('ascii')\n except UnicodeEncodeError:\n # it contains non-ascii characters\n ascii_word = unidecode(word)\n yield unicode(ascii_word).lower()\n yield word.lower()\n # yield ''.join(c for c in word if c.isalnum())", "def tokenize(document):\n token = nltk.word_tokenize(document)\n\n output = [word.lower() for 
word in token if (word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\"))]\n\n return output", "def preProcess(text):\n\ttext = text.lower() # lower case the text\n\t# Q4 replace the word with expanded contractions\n\tfor k,v in general_contraction.items():\n\t\tif k in text.split():\n\t\t\ttext = text.replace(k,v)\n\t# Q4 remove speacial char including all puncuattions and replace it with a space\n\ttext = re.sub('[^A-Za-z0-9]+',' ',text) \n\t# tokenise\n\ttokens = text.split()\n\t# stop word removal\n\ttokens = [w for w in tokens if w not in stopwords ]\n\t# Q4 Stemming\n\ttokens = [str(porter.stem(w)) for w in tokens]\n\t# if word is non-english return its english form # too much time-complexity\n\t# tokens = [porter.stem(w) if porter.stem(w) in set(words.words()) else w for w in tokens ]\n\t# for words having digits such as 12gb, 1st, etc expanding the token list\n\tfor k in tokens:\n\t\tif len(k) >2 and re.match(r'[0-9]+',k):\t\t\t\n\t\t\tif len(k) >2 and not k.isdigit():\n\t\t\t\tl = re.split(r'(\\d+)',k)\n\t\t\t\tl = [w for w in l if w is not '' ]\n\t\t\t\tif l and len(l) <= 3:\n\t\t\t\t\tfor i in l:\n\t\t\t\t\t\tif i in digit_contractions.keys():\n\t\t\t\t\t\t\tl = list(map(lambda b: b.replace(i,digit_contractions[i]), l))\n\t\t\t\t\ttokens.remove(k)\n\t\t\t\t\ttokens = tokens+l\n\t\t\t\telse:\n\t\t\t\t\ttokens.remove(k)\n\tfor k,v in digit_contractions.items():\n\t\tif k in tokens:\n\t\t\tif tokens[tokens.index(k)-1].isdigit():\t\n\t\t\t\ttokens = list(map(lambda b: b.replace(k,v), tokens))\n\t# remove tokens of size less than 2\n\ttokens = [t for t in tokens if len(t) > 2]\n\treturn tokens", "def _process_trans(self):\n\t\tt_word = list()\n\t\t# with open(self.trans_file_path, 'r', encoding='utf-8') as in_f:\n\t\twith open(self.trans_file_path, 'r') as in_f:\n\t\t\tf = iter(in_f)\n\t\t\tfor line in f:\n\t\t\t\tword = line.lower().split();\n\t\t\t\tif len(word) == 0 or len(word) == 1:\n\t\t\t\t\tcontinue\n\t\t\t\t'''\n\t\t\t\tif w_0[len(w_0)-1] == '+': # ignore the label starting with #pat+ || #doc+\n\t\t\t\t\tcontinue;\n\t\t\t\t'''\n\t\t\t\tfor i in range(len(word)):\n\t\t\t\t\tif i != 0: # exclude the channel\n\t\t\t\t\t\tw = word[i]\n\t\t\t\t\t\tif w == \"<name>\":\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tt_word.append([w, 0, 0])\n\t\treturn t_word", "def tokens(text):\n t = re.sub(\"[A-Z]\", lambda x: \"_\" + x.group(0).lower(), text)\n return set(re.findall('[a-z]+', t))", "def _get_tokens(self, remove_stopwords=True, remove_words_with_ascii=True,\n min_wordlength=2, stopword_lang=\"russian\"):\n if not self.tokens:\n text = self._get_text()\n tokens = self.tokenizer.tokenize(text)\n # FIXME: learn how to print unicode lists in logging mudule so\n # as they are in readable form\n\n # filter by minimal length of a word\n if min_wordlength:\n with_shortw = filter(lambda s: len(s) <= min_wordlength, tokens)\n self.log.debug(\"Removed too short words: {}\"\n \"\".format(with_shortw))\n tokens = filter(lambda s: len(s) > min_wordlength, tokens)\n # filter by stopwords\n if remove_stopwords:\n sw = stopwords.words(stopword_lang)\n tokens = filter(lambda s: s not in sw, tokens)\n # remove words that contains ascii symbols\n if remove_words_with_ascii:\n # TODO: decide where is the right place to compile regex\n ASCII_RE = \"[\\x00-\\x7F]\"\n filterre = re.compile(ASCII_RE, re.UNICODE)\n with_ascii = filter(lambda s: bool(filterre.search(s)), tokens)\n tokens = filter(lambda s: not bool(filterre.search(s)), tokens)\n 
self.log.debug(\"Removed ASCII words: {}\"\n \"\".format(with_ascii))\n if self.lemmatize:\n lemm_tokens = []\n for t in tokens:\n # [0] stands for the most probable lemma\n p = self.morph.parse(t)[0]\n # only some of the POS are allowed\n if p.tag.POS not in self.allowed_POS:\n self.log.debug(\"Removing because of part of speech: {}\"\n \"\".format(t))\n continue\n lemm_tokens.append(p.normal_form)\n tokens = lemm_tokens\n self.tokens = tokens\n return self.tokens\n # FIXME: try with stemming all words before lda results\n return [Token(t) for t in self.tokenizer.tokenize(text)]", "def checkWords(line):\n\n words = []\n parts = re.sub('[^a-zA-Z0-9@ ]', '', line)\n parts = parts.lower()\n parts = parts.split(' ')\n for w in parts:\n if w is not '' and len(w) > 4 and len(w) < 15 and w not in commonWords:\n # if w is not '':\n words.append(w)\n\n return words", "def tokenize(document):\n words = [word.lower() for word in nltk.word_tokenize(document) if word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\") ]\n\n return sorted(words)", "def _process(self, word: str) -> List[str]:\n # if a blank arrives from splitting, just return an empty list\n if len(word.strip()) == 0:\n return []\n word = self.convert_consonantal_i(word)\n my_word = \" \" + word + \" \"\n letters = list(my_word)\n positions = []\n for dipth in self.diphthongs:\n if dipth in my_word:\n dipth_matcher = re.compile(\"{}\".format(dipth))\n matches = dipth_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n matches = self.kw_matcher.finditer(my_word)\n for match in matches:\n (start, end) = match.span()\n positions.append(start)\n letters = string_utils.merge_next(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions.clear()\n if not self._contains_vowels(\"\".join(letters)):\n return [\n \"\".join(letters).strip()\n ] # occurs when only 'qu' appears by ellision\n positions = self._starting_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_right(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._starting_consonants_only(letters)\n positions = self._ending_consonants_only(letters)\n while len(positions) > 0:\n letters = string_utils.move_consonant_left(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._ending_consonants_only(letters)\n positions = self._find_solo_consonant(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_solo_consonant(letters)\n positions = self._find_consonant_cluster(letters)\n while len(positions) > 0:\n letters = self._move_consonant(letters, positions)\n letters = string_utils.remove_blanks(letters)\n positions = self._find_consonant_cluster(letters)\n return letters", "def preprocess(tokens):\n result = []\n for token in tokens:\n result.append(token.lower())\n return result", "def extract_words(s):\n\n # Convert the data the data into normal for (Eg: 'ç' to 'c') and lowercase it.\n s = unicodedata.normalize('NFKD', s).lower()\n\n # Replace the punctuation with a space using the _regex and filter stopwords.\n wordlist = [w for w in _regex.sub(' ', s).split() if w not in _stopwords]\n\n return wordlist", "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n 
words_list = lemmatization(words_list)\n return words_list", "def full_cleanse(data):\n tokenizer = RegexpTokenizer(r'\\w+')\n stops = set(stopwords.words('english'))\n\n sent_toks = []\n for text in data:\n try:\n text = tokenizer.tokenize(text)\n pos_tagged = nltk.pos_tag(text)\n words = [w[0] for w in pos_tagged if w[1].capitalize() != 'NNP']\n words = [WordNetLemmatizer().lemmatize(w) for w in words]\n words = [w.lower() for w in words if not w.lower() in stops]\n words = [w for w in words if not w.isdigit()]\n sent_toks.append(words)\n except TypeError:\n pass\n return sent_toks", "def cleaning(self, document):\n remove_punct = ''.join(i for i in document.lower() if i not in self.punctuation)\n tokenized = [i for i in remove_punct.split() if i not in self.stopwords]\n if self.lang is not 'chinese':\n # Lemmatizes if not chinese\n tokenized = [self.lemmatize.lemmatize(i) for i in tokenized]\n return tokenized", "def normalize_tokens(tokens, language):\n try:\n stopwords = set(nltk.corpus.stopwords.words(language))\n except IOError:\n stopwords = {}\n return [t for t in tokens if t.isalnum() and t not in stopwords]", "def tokenize(self, raw_text):\n # TODO implement\n raw_tokens = word_tokenize(raw_text.decode('utf8'))\n return self.filter_tokens(raw_tokens)\n # return self.split_by(raw_tokens, '-')", "def _words_and_emoticons(self):\n wes = self.text.split()\n words_punc_dict = self._words_plus_punc()\n wes = [we for we in wes if len(we) > 1]\n \n for i, we in enumerate(wes):\n if we in words_punc_dict:\n wes[i] = words_punc_dict[we]\n return wes", "def tokenize_words(line):\n return", "def clean_doc(doc):\n tokens = doc.split()\n # Remove punctuation marks [., ;, :, \", e.t.c]\n re_punc = re.compile('[%s]' % re.escape(string.punctuation))\n tokens = [re_punc.sub('', w) for w in tokens]\n # Keep only alphabetic words (Remove words with numbers)\n tokens = [word for word in tokens if word.isalpha()]\n # Remove stop words like \"and, but, a, the, an\" e.t.c\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # Remove all other 1-character words if they're still in there\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def tokenize(text):\n regex = re.compile(r'\\W+')\n tokens = regex.split(text.lower())\n tokens = [token for token in tokens if token]\n return tokens", "def filter_tokens(self, tokens):\n # TODO get hyphenated words\n filtered = [w.lower() for w in tokens\n if len(w) > self.MIN_WORD_LENGTH\n and w.isalpha()\n and w.lower() not in self.stopwords]\n return filtered", "def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens", "def preprocessSentence(sentence):\n tokenizedSentence = tokenize.word_tokenize(sentence.lower())\n lemmatized = [lemma.lemmatize(token) for token in tokenizedSentence]\n\n noStopwords = [lemma for lemma in lemmatized\n if lemma not in englishStopwords\n and len(lemma) > 2\n and lemma.count(\"'\") != 1]\n noOddChars = [re.sub('[^\\w\\s]','',word) for word in noStopwords]\n return noOddChars", "def tokenize(self):\n return word_tokenize(self.lower_and_remove_punc())", "def tokenize(text):\n text = re.sub(r'[^0-9A-Za-z]',\" \", text).lower()\n text = word_tokenize(text)\n return [w for w in text if not w in stop_words]", "def 
clean_and_twokenize(text):\n cleaned_text = clean_twitter_tokens(text)\n twokenized_text = twokenize.tokenize(cleaned_text)\n\n return twokenized_text", "def tokenize(doc, keep_internal_punct=False):\n words=doc.lower()\n if keep_internal_punct:\n words= re.findall('[\\w_][^\\s]*[\\w_]|[\\w_]',words)\n \n else:\n \n words=re.sub('\\W+', ' ', words).split()\n \n \n \n a1=np.array(words)\n return a1", "def preprocess(text):\n text = text.translate(None, string.punctuation)\n words = filter(None, re.split('\\s+', text))\n words = nltk.pos_tag(words)\n words = [(word.lower(), nltk.simplify_wsj_tag(tag)) for word, tag in words]\n words = [(word, 'V') if tag.startswith('V') else (word, tag)\n for word, tag in words]\n return words", "def tokenize(document):\n import string\n\n # tokenize the given document\n words = nltk.tokenize.word_tokenize(document)\n words = [word.lower() for word in words]\n\n # filter words from punctuations and stopwords\n loop_words = words.copy()\n for word in loop_words:\n if word in [char for char in string.punctuation] + nltk.corpus.stopwords.words(\"english\"):\n words.remove(word)\n\n return words", "def separate_words(text, min_word_return_size=2):\n splitter = re.compile('[^a-zA-Z0-9_\\\\+\\\\-/]')\n words = []\n for single_word in splitter.split(text):\n current_word = single_word.strip().lower()\n # leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases\n if len(current_word) > min_word_return_size and \\\n current_word != '' and \\\n not is_number(current_word):\n words.append(current_word)\n return words", "def tokenize(document):\n\n # Unable to acquire 'stopwords' without these snippets due to my python config\n # import ssl\n # ssl._create_default_https_context = ssl._create_unverified_context\n # nltk.download('stopwords')\n\n stops = nltk.corpus.stopwords.words(\"english\")\n\n all_words = list()\n cleaned_words = list()\n\n all_words = nltk.word_tokenize(document)\n\n for word in all_words:\n word = word.strip()\n word = word.lower()\n\n if word in stops \\\n or not word \\\n or word in string.punctuation \\\n or word.strip(\"=\") != word:\n continue\n else:\n cleaned_words.append(word)\n\n return cleaned_words", "def process_text(text):\n words = word_tokenize(text)\n return [word.lower() for word in words if word not in string.punctuation]", "def preprocess_corpus(corpus): \n \n # print 'preprocessing words'\n # remove space\n # text = re.findall(r'\\w+', corpus) # for [a-zA-Z0-9_]\n text = re.findall(r'[a-zA-Z]+', corpus) # for [a-zA-Z] keep words only no numbers and '_' \n words = [w.lower() for w in text]\n # print words \n \n # stemmer based on existing ones in the current list\n lemma = nltk.WordNetLemmatizer()\t\t\t#extract the original word pattern\n lemmed_words = [lemma.lemmatize(w) for w in words]\n \n # tag lemmed_words\n tagged_words = nltk.pos_tag(lemmed_words)\n # print tagged_words \n \n processed_words = []\n tag_list = ['CC', 'DT', 'EX', 'IN', 'MD', \n 'PDT', 'POS', 'PRP', 'PRP$', 'TO', \n 'WDT', 'WP', 'WRB']\n for word, tag in tagged_words:\n if tag in tag_list:\n pass \n else: \n processed_words.append(word)\n \n return processed_words", "def tokenize(text):\n tokens = TreebankWordTokenizer().tokenize(text)\n tokens = lemmatize(tokens)\n tokens = filter(lambda s: len(s) > 2, tokens) # remove tokens with < 3 chars\n return tokens", "def remove_acc(sentence):\n newsent = []\n for word in sentence:\n if re.search(r'[áéíóúàèìòùäëïöü]', word):\n newsent.append(remove_accents(word))\n 
else:\n newsent.append(word)\n return newsent", "def contentextract(text):\n stopword=stopwords.words('english')\n punctuation=['.','?','!',',',';',\"''\",'\"\"',\"'\",\"--\",\"``\",\"|\",\"<\",\">\",\"...\",\"......\",\"'s\",':','[',']',\n '(',')','#','*','$','%','@','^','-','+','=','/','{','}','\\\\','\"','&']\n content=[w for w in text if w.lower() not in stopword]\n content=[w for w in content if w not in punctuation]\n return content", "def naive(self, text):\n\t\t#print(text)\n\t\ttokenizedText = []\n\t\tfor k in text: #look at each entity in one sentence\n\t\t\t\n\t\t\ta = \"\"#stores the current word \n\t\t\trun = []; #appends all words in a particular sentence\n\t\t\tfor i in range(len(k)):\n\t\t\t\t\n\t\t\t\tif(k[i] == ' ' or k[i] == '\t'): #tokenization at space or tab\n\t\t\t\t\t\n\t\t\t\t\tif(a!=\"\"):\n\t\t\t\t\t\tif(a[-1] == ',' or a[-1] == '-' or a[-1] == \"\\'\" or a[-1] == \";\" or a[-1] == \":\" or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\\"\") : #but remove mentioned punctuations from the end of the word, if present\n\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):#remove starting quotes\n\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telif(i == len(k)-1): #remove the last punctuation mark, if present\n\t\t\t\t\t\n\t\t\t\t\ta = a+k[i];\n\t\t\t\t\t\n\t\t\t\t\tif(a[-1] == '.' or a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\'\" ):\n\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\n\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tif((k[i] == ',' or k[i] == ':' or k[i] == ';') and k[i+1]!= ' ' ): # for other punctuation marks followed by a space\n\t\t\t\t\t\t#print(k[i-1])\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\tif(a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" ):\n\t\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\ta = a+k[i];\n\n\t\t\ttokenizedText.append(run)\t\t\n\n\t\t\n\t\t\t\n\n\n\n\n\t\t#Fill in code here\n\n\t\treturn tokenizedText", "def Tokenize(comment):\n\n # words = comment.split()\n # words = comment.lower()\n # words = words.translate(str.maketrans('', '', string.digits))\n # words = words.translate(str.maketrans('', '', string.punctuation))\n # words = re.findall(r\"[\\w']+\", words)\n # words = [word for word in words if (len(word) > 1)] #8.75\n\n # words = comment.lower()\n # # words = words.translate(str.maketrans('', '', string.digits))\n # # words = words.translate(str.maketrans('', '', string.punctuation))\n # # words = re.split('; |, |\\' |\\*|\\n | |\\.', words)\n # # words = re.findall(r\"[\\w']+\", words)\n # #\n # regex = re.compile('[^a-zA-Z]')\n # words = regex.sub(' ', words).split()\n # # words = re.sub(\"[^\\w]\", \" \", words).split()\n # #\n # # words = [word.split(\"\\\\'\") for word in words if (len(word) > 1)\n # # & (word not in string.digits) & (word not in string.punctuation)]\n # stop_words =[]\n # if True:\n # stop_words = open(\"StopWords.txt\", \"r\").read()\n # stop_words = re.split(\"\\W+\", stop_words)\n # words = [word for word in words if (word not in stop_words) & (len(word) > 1)]\n\n words = comment.split()\n words_ = []\n regex = re.compile('[^a-zA-Z]')\n for word in 
words:\n tmp = regex.sub(' ', word.lower()).split()\n for word_ in tmp:\n if len(word_) > 1:\n words_.append(word_)\n\n return words_", "def find_abecedarian_words():\n pass", "def preprocess(text, freq=5):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n words_raw = text.strip().lower().split()\n word_counts = Counter(words_raw)\n words = [w for w in words_raw if word_counts[w] > freq]\n vocab = set(words)\n vocab2index = {w: idx for idx, w in enumerate(vocab)}\n index2vocab = {idx: w for idx, w in enumerate(vocab)}\n words_int = [vocab2index[w] for w in words]\n return words_int, vocab2index, index2vocab", "def tokenize(text):\n return text.split(' ')", "def extract_words(s):\n\n if has_dashdash(s):\n s = myreplace(\"--\",\" \", s)\n\n s = s.lower().split()\n word_list = []\n\n for word in s:\n\n word_list.append(cleanword(word))\n\n return word_list", "def _tokenize(self, sentence: str) -> List[str]: \n tokens = word_tokenize(sentence)\n tokens = [t.lower() for t in tokens if len(t) < 25 and self.pattern.match(t)]\n \n if self.include_stopword == True:\n return tokens\n stop_words = set(stopwords.words('english'))\n res = []\n for t in tokens:\n if t in stop_words:\n continue\n res.append(t)\n return res", "def clean_text(text):\n\n lemmizer = WordNetLemmatizer()\n stemmer = porter.PorterStemmer()\n\n stop = stopwords.words('english')\n stop += ['.', ',', ':', '...', '!\"', '?\"', \"'\", '\"', ' - ', ' — ', ',\"', '.\"', '!', ';', '♫♫', '♫', \\\n '.\\'\"', '[', ']', '—', \".\\'\", 'ok', 'okay', 'yeah', 'ya', 'stuff', ' 000 ', ' em ', \\\n ' oh ', 'thank', 'thanks', 'la', 'was', 'wa', '?', 'like', 'go', ' le ', ' ca ', ' I ', \" ? 
\", \"s\", \" t \",\n \"ve\", \"re\"]\n # stop = set(stop)\n\n cleaned_text = []\n\n for post in text:\n cleaned_words = []\n\n # remove parentheticals\n clean_parens = re.sub(r'\\([^)]*\\)', ' ', post)\n\n #clean_parens = [line.decode('utf-8').strip() for line in clean_parens]\n\n # tokenize into words\n for word in wordpunct_tokenize(clean_parens):\n\n\n # lowercase and throw out any words in stop words\n if word.lower() not in stop:\n\n # lemmatize to roots\n low_word = lemmizer.lemmatize(word)\n\n # stem and lowercase ( an alternative to lemmatize)\n # low_word = stemmer.stem(root.lower())\n\n # keep if not in stopwords (yes, again)\n if low_word.lower() not in stop:\n # put into a list of words for each document\n cleaned_words.append(low_word.lower())\n\n # keep corpus of cleaned words for each document\n cleaned_text.append(' '.join(cleaned_words))\n\n\n return cleaned_text", "def preprocess(text):\n text_words = word_tokenize(text)\n words = []\n ix = 0\n for rawWord in text_words:\n lowerWord = rawWord.lower()\n if lowerWord not in stop_words_slovene:\n words.append((lowerWord, ix))\n ix += 1\n return words", "def tokenize(text) -> set:\n no_brackets = re.sub(r\"\\([^)]*\\)\", \"\", str(text).lower())\n return set(re.findall(r\"\\w+(?:'\\w+)?|[^\\w\\s,]\", no_brackets))", "def clean(words):\r\n tokens = []\r\n try:\r\n for token in words:\r\n token = re.sub(r'[\\W\\d_]', \" \", token)\r\n tokens.append(token)\r\n except:\r\n token = \"\"\r\n tokens.append(token)\r\n \r\n return tokens", "def get_words(text):\n\n only_words_text = re.compile(r'[^0-9^a-z^A-Z\\s]').sub('', text)\n return only_words_text.split(' ')", "def subword_tokenize(self, word: str) -> List[str]:\r\n end_idx = min([len(word), self.ngram_max])\r\n sw_tokens = [self.SOW]\r\n start_idx = 0\r\n\r\n while start_idx < len(word):\r\n subword = word[start_idx:end_idx]\r\n if subword in self.bpe_vocab:\r\n sw_tokens.append(subword)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n elif len(subword) == 1:\r\n sw_tokens.append(self.UNK)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n else:\r\n end_idx -= 1\r\n\r\n sw_tokens.append(self.EOW)\r\n return sw_tokens", "def tokenize(doc):\n text = doc\n doc = doc.lower()\n doc = re.sub('[,;]', ' ', doc)\n doc = re.split('\\s+', doc)\n doc = sorted(list(filter(None, doc)))\n ent = le.stanfordTagger(text)\n print(ent)\n l = []\n for item in ent:\n if ent[item] in ['LOCATION', 'GPE','PERSON']:\n l.append(item)\n ent = l#ent = sorted(list(le.stanfordTagger(text).keys()))\n #print(ent)\n #ent = [e.lower() for e in ent]\n crime_type = fileCrimeClassify.extractCrimeWord(text, returnOnlyLabels=True)\n crime_type = [c.lower() for c in crime_type]\n #print(crime_type + ent)\n #print(doc)\n return doc, ent + crime_type", "def sanitize_text(tokens, stopwords=None):\n\n tokens = [x.lower() for x in tokens]\n regex = re.compile('[^a-z]')\n\n for index in range(len(tokens)):\n tokens[index] = regex.sub('', tokens[index])\n if stopwords and tokens[index] in stopwords:\n tokens[index] = ''\n\n # remove empty elements\n tokens = [token for token in tokens if token != '']\n return tokens", "def get_tokens(sent):\n return word_tokenize(sent)", "def _tokenize(self, text):\n if not text:\n return []\n\n text = self.PUNCTUATION_CHARS.sub(' ', text)\n\n words = [t[:128] for t in text.split() if len(t) >= self.MIN_WORD_LENGTH and t.lower() not in self.STOP_WORDS]\n\n return words" ]
[ "0.6861259", "0.6852118", "0.67767626", "0.6744147", "0.67203104", "0.6711012", "0.6704925", "0.66891634", "0.66849273", "0.6602998", "0.6546478", "0.65269834", "0.65201867", "0.65182596", "0.64966595", "0.6496125", "0.64865", "0.6471367", "0.6440876", "0.64231676", "0.6407263", "0.6400728", "0.6387554", "0.63751113", "0.63652396", "0.63639313", "0.63453394", "0.632225", "0.6304455", "0.63030046", "0.6290585", "0.628694", "0.62516093", "0.62444854", "0.6241711", "0.6235515", "0.6233964", "0.62275785", "0.6221657", "0.62193894", "0.6207686", "0.6199839", "0.6191293", "0.6179897", "0.6178715", "0.61727446", "0.61708426", "0.6168543", "0.6159402", "0.6158576", "0.61570317", "0.6155919", "0.61512816", "0.61442995", "0.6144076", "0.61389786", "0.6123714", "0.6116849", "0.6113365", "0.6109747", "0.61004704", "0.6079744", "0.60772204", "0.6074959", "0.60738677", "0.60691833", "0.606704", "0.60634524", "0.606286", "0.6062347", "0.60594815", "0.6053102", "0.60528606", "0.6049425", "0.60478234", "0.60382426", "0.60363966", "0.60352916", "0.6034892", "0.60304266", "0.6029738", "0.6017798", "0.6012711", "0.600723", "0.5998674", "0.599626", "0.5995879", "0.599034", "0.5988818", "0.5965507", "0.596149", "0.5945956", "0.5942874", "0.593377", "0.59251904", "0.5924867", "0.5922892", "0.59228474", "0.59225726", "0.59222436" ]
0.60811347
61
Support the following DHCP DeviceManager calls.
def create_dhcp_port(self, port): LOG.debug("create_dhcp_port: %s", port) port['port']['id'] = port['port']['network_id'] # The following MAC address will be assigned to the Linux dummy # interface that # networking_calico.agent.linux.interface.RoutedInterfaceDriver # creates. Therefore it will never actually be used or involved in the # sending or receiving of any real data. Hence it should not matter # that we use a hardcoded value here, and the same value on every # networking-calico compute host. The '2' bit of the first byte means # 'locally administered', which makes sense for a hardcoded value like # this and distinguishes it from the space of managed MAC addresses. port['port']['mac_address'] = '02:00:00:00:00:00' port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP return dhcp.DictModel(port['port'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_dhcp_env(device):\n raise NotImplementedError", "def _RunDHCPCD(self, **kwargs):\n del kwargs\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n # -K: Don't receive link messages for carrier status. You should\n # only have to use this with buggy device drivers or running\n # dhcpcd through a network manager.\n # -c: Location to the hooks file. If the default location happens to be\n # empty, dhcpcd will fail. So we set the hooks file to /dev/null.\n dhcp_command = ('dhcpcd -K -t {timeout} -c /dev/null {interface}').format(\n timeout=self._dhcp_timeout,\n interface=self.interface)\n dhcp_timeout_command = 'timeout {timeout} {cmd}'.format(\n timeout=self._dhcp_timeout,\n cmd=dhcp_command)\n force_kill_command = 'pgrep dhcpcd | xargs -r kill -9'\n\n logging.info('Killing any existing dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhcpcd...')\n self._device.CheckCall(dhcp_timeout_command)\n\n logging.info('Verifying IP address...')\n ip = self._LeasedIP()\n if not ip:\n self._device.Call(force_kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Killing any remaining dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n yield # We have released the IP.", "def setup_dhcp_config(self, board_config):\n raise NotImplementedError", "def elAddNetworkConfigurationWithDhcp(self, device):\n commandSection = self.sectionByName(\"command\")\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n deviceMatch = re.match(r\"([^0-9]+)([0-9])\", device)\n if deviceMatch:\n # e.g. \"eth0\"\n devicePrefix = deviceMatch.group(1)\n deviceNumber = deviceMatch.group(2)\n deviceNumber = int(deviceNumber)\n for i in range(8, deviceNumber - 1, -1):\n deviceI = devicePrefix + str(i)\n deviceIPlus1 = devicePrefix + str(i + 1)\n # move up by one device each network configuration\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--device[ \\t]*(?:=|[ \\t])[ \\t]*)\" + re.escape(deviceI) + r\"(.*)$\",\n r\"\\g<1>\" + deviceIPlus1 + r\"\\g<2>\",\n commandSection.string)\n # not --noipv6\n networkConfiguration = \"network --device=\" + device + \" --bootproto=dhcp --onboot=yes --activate\"\n if deviceMatch and deviceNumber == 0:\n # having configuration of eth0 first appears to be more conducive to overall success,\n # and also, per http://fedoraproject.org/wiki/Anaconda/Kickstart#network, supposedly\n # \"... in installer environment. Device of the first network command is activated if network is required,\n # e.g. 
in case of network installation ...\",\n commandSection.string = networkConfiguration + \"\\n\" \\\n + \"#\\n\" \\\n + commandSection.string\n else:\n commandSection.string = commandSection.string \\\n + \"#\\n\" \\\n + networkConfiguration + \"\\n\"", "async def test_dhcp(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n data=dhcp.DhcpServiceInfo(\n ip=\"1.2.3.4\", macaddress=\"01:23:45:67:89:ab\", hostname=\"mock_hostname\"\n ),\n context={\"source\": config_entries.SOURCE_DHCP},\n )\n\n assert result.get(\"type\") == FlowResultType.FORM\n assert result.get(\"step_id\") == \"user\"", "def dhcp(self, dhcp):\n\n self._dhcp = dhcp", "def answerDHCP(self, shouldAnswer):\n assert False, \"Deriving class must implement\"", "def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()", "def dhcp_callback(self, state, target_mac=None, target_ip=None, exception=None):\n self.record_result('dhcp', info=target_mac, ip=target_ip, state=state, exception=exception)\n self.target_mac = target_mac\n self.target_ip = target_ip\n if exception:\n self._state_transition(_STATE.ERROR, _STATE.DHCP)\n self.runner.target_set_error(self.port_set, exception)\n else:\n self._state_transition(_STATE.BASE, _STATE.DHCP)", "def dhcp_used(self, dhcp_used):\n\n self._dhcp_used = dhcp_used", "def enable_dhcp_helper(self, network_id):\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n if not network.admin_state_up:\n return\n\n for subnet in network.subnets:\n if subnet.enable_dhcp:\n if self.call_driver('enable', network):\n self.cache.put(network)\n break", "def fill (self, wanted_opts, msg):\n if msg.SUBNET_MASK_OPT in wanted_opts:\n msg.add_option(pkt.DHCP.DHCPSubnetMaskOption(self.subnet))\n if msg.ROUTERS_OPT in wanted_opts and self.router_addr is not None:\n msg.add_option(pkt.DHCP.DHCPRoutersOption(self.router_addr))\n if msg.DNS_SERVER_OPT in wanted_opts and self.dns_addr is not None:\n msg.add_option(pkt.DHCP.DHCPDNSServersOption(self.dns_addr))\n msg.add_option(pkt.DHCP.DHCPIPAddressLeaseTimeOption(self.lease_time))", "def setup_dhcpmeta_access(self):\n if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT:\n self._setup_rpc_dhcp_metadata()\n mod = nvp_rpc\n elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:\n self._setup_nvp_dhcp_metadata()\n mod = nvp_svc\n self.handle_network_dhcp_access_delegate = (\n mod.handle_network_dhcp_access\n )\n self.handle_port_dhcp_access_delegate = (\n mod.handle_port_dhcp_access\n )\n self.handle_port_metadata_access_delegate = (\n mod.handle_port_metadata_access\n )\n self.handle_metadata_access_delegate = (\n mod.handle_router_metadata_access\n )", "def is_configure_with_dhcp(self):\n\t\treturn bool(call_sdk_function('PrlVmDevNet_IsConfigureWithDhcp', self.handle))", "def _get_net_dhcp_relay(self, context, net_id):\n pass", "async def test_aiodiscover_does_not_call_again_on_shorter_hostname(\n hass: HomeAssistant,\n) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init, patch(\n 
\"homeassistant.components.dhcp.DiscoverHosts.async_discover\",\n return_value=[\n {\n dhcp.DISCOVERY_IP_ADDRESS: \"192.168.210.56\",\n dhcp.DISCOVERY_HOSTNAME: \"irobot-abc\",\n dhcp.DISCOVERY_MAC_ADDRESS: \"b8b7f16db533\",\n },\n {\n dhcp.DISCOVERY_IP_ADDRESS: \"192.168.210.56\",\n dhcp.DISCOVERY_HOSTNAME: \"irobot-abcdef\",\n dhcp.DISCOVERY_MAC_ADDRESS: \"b8b7f16db533\",\n },\n {\n dhcp.DISCOVERY_IP_ADDRESS: \"192.168.210.56\",\n dhcp.DISCOVERY_HOSTNAME: \"irobot-abc\",\n dhcp.DISCOVERY_MAC_ADDRESS: \"b8b7f16db533\",\n },\n ],\n ):\n device_tracker_watcher = dhcp.NetworkWatcher(\n hass,\n {},\n [\n {\n \"domain\": \"mock-domain\",\n \"hostname\": \"irobot-*\",\n \"macaddress\": \"B8B7F1*\",\n }\n ],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 2\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"irobot-abc\",\n macaddress=\"b8b7f16db533\",\n )\n assert mock_init.mock_calls[1][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[1][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[1][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"irobot-abcdef\",\n macaddress=\"b8b7f16db533\",\n )", "def __init__(self, ifname):\n\n self._dbus_loop = gobject.MainLoop()\n self._bus = dbus.SystemBus()\n wait_bus_owner_timeout = 5 # Wait for 5s to have an owner for the bus name we are expecting\n logger.debug('Going to wait for an owner on bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n while not self._bus.name_has_owner(RemoteDhcpClientControl.DBUS_NAME):\n time.sleep(0.2)\n wait_bus_owner_timeout -= 0.2\n if wait_bus_owner_timeout <= 0: # We timeout without having an owner for the expected bus name\n raise Exception('No owner found for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n \n logger.debug('Got an owner for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n gobject.threads_init() # Allow the mainloop to run as an independent thread\n dbus.mainloop.glib.threads_init()\n \n dbus_object_name = RemoteDhcpClientControl.DBUS_OBJECT_ROOT + '/' + str(ifname)\n logger.debug('Going to communicate with object ' + dbus_object_name)\n self._dhcp_client_proxy = self._bus.get_object(RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE, dbus_object_name)\n self._dbus_iface = dbus.Interface(self._dhcp_client_proxy, RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE)\n \n logger.debug(\"Connected to D-Bus\")\n self._dhcp_client_proxy.connect_to_signal(\"IpConfigApplied\",\n self._handleIpConfigApplied,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n self._dhcp_client_proxy.connect_to_signal(\"LeaseLost\",\n self._handleLeaseLost,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n #Lionel: the following line is used for D-Bus debugging only\n #self._bus.add_signal_receiver(catchall_signal_handler, interface_keyword='dbus_interface', member_keyword='member')\n self._dbus_loop_thread = threading.Thread(target = self._loopHandleDbus) # Start handling D-Bus messages in a background thread\n 
self._dbus_loop_thread.setDaemon(True) # D-Bus loop should be forced to terminate when main program exits\n self._dbus_loop_thread.start()\n \n self._bus.watch_name_owner(RemoteDhcpClientControl.DBUS_NAME, self._handleBusOwnerChanged) # Install a callback to run when the bus owner changes\n \n self._callback_new_lease_mutex = threading.Lock() # This mutex protects writes to the _callback_new_lease attribute\n self._callback_new_lease = None\n \n self._exit_unlock_event = threading.Event() # Create a new threading event that will allow the exit() method to wait for the child to terminate properly\n self._getversion_unlock_event = threading.Event() # Create a new threading event that will allow the GetVersion() D-Bus call below to execute within a timed limit \n\n self.status = DhcpLeaseStatus.DhcpLeaseStatus()\n\n self._getversion_unlock_event.clear()\n self._remote_version = ''\n self._dbus_iface.GetVersion(reply_handler = self._getVersionUnlock, error_handler = self._getVersionError)\n if not self._getversion_unlock_event.wait(10): # We give 10s for slave to answer the GetVersion() request\n logfile = tempfile.NamedTemporaryFile(prefix='TimeoutOnGetVersion-', suffix='.log', delete=False)\n if logfile:\n print('Saving TimeoutOnGetVersion environment dump to file \"' + logfile.name + '\"', file=sys.stderr)\n print('TimeoutOnGetVersion', file=logfile)\n subprocess.call('ps -ef', stdout=logfile, shell=True)\n subprocess.call('perl ./dbus-introspect.pl --system com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1', stdout=logfile, shell=True)\n subprocess.call('dbus-send --system --type=method_call --print-reply --dest=com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1 com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary.GetVersion', stdout=logfile, shell=True)\n logfile.close()\n raise Exception('TimeoutOnGetVersion')\n else:\n logger.debug('Slave version: ' + self._remote_version)", "def __init__(self):\n self.dhcp_client_state = store.MacToIP() # mac => DHCP_State", "def launch (no_flow = False,\n network = \"192.168.0.0/24\", # Address range\n first = 1, last = None, count = None, # Address range\n ip = \"192.168.0.254\",\n router = (), # Auto\n dns = (), # Auto\n dpid = None, # All\n ports = None, # All\n __INSTANCE__ = None):\n def fixint (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n return int(i)\n def fix (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n if i == '()': return ()\n return i\n first,last,count = map(fixint,(first,last,count))\n router,dns = map(fix,(router,dns))\n\n if ports is not None:\n ports = ports.split(\",\")\n ports = set(int(p) if p.isdigit() else p for p in ports)\n\n pool = SimpleAddressPool(network = network, first = first, last = last,\n count = count)\n\n inst = DHCPD(install_flow = not no_flow, pool = pool,\n ip_address = ip, router_address = router,\n dns_address = dns, dpid = dpid, ports = ports)\n\n if __INSTANCE__[0] == 0:\n # First or only instance\n core.register(inst)\n\n log.debug(\"DHCP serving a%s\", str(pool)[2:-1])", "def __parse_ldap_to_dhcp_attribute_map(self):\n import shlex\n self.ldap_to_dhcp_attribute_map = dict()\n options = shlex.split(self.options.get(\"ldap_to_dhcp_attribute_map\", \\\n self.DEFAULT_LDAP_TO_DHCP_ATTRIBUTE_MAP))\n for option in options:\n ldap_attr_name, dhcp_attr_name = 
option.split('=',1)\n self.ldap_to_dhcp_attribute_map[ldap_attr_name] = dhcp_attr_name", "async def test_dhcp_invalid_option(hass: HomeAssistant) -> None:\n integration_matchers = [{\"domain\": \"mock-domain\", \"hostname\": \"nomatch*\"}]\n\n packet = Ether(RAW_DHCP_REQUEST)\n\n packet[DHCP].options = [\n (\"message-type\", 3),\n (\"max_dhcp_size\", 1500),\n (\"requested_addr\", \"192.168.208.55\"),\n (\"server_id\", \"192.168.208.1\"),\n (\"param_req_list\", [1, 3, 28, 6]),\n \"hostname\",\n ]\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 0", "def setup_dhcp6_config(self, board_config):\n raise NotImplementedError", "def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)", "def update_cmts_isc_dhcp_config(self, board_config):\n self.setup_dhcp_config(board_config)\n self.setup_dhcp6_config(board_config)\n raise NotImplementedError", "async def test_registered_devices(hass: HomeAssistant) -> None:\n integration_matchers = [\n {\"domain\": \"not-matching\", \"registered_devices\": True},\n {\"domain\": \"mock-domain\", \"registered_devices\": True},\n ]\n\n packet = Ether(RAW_DHCP_RENEWAL)\n\n registry = dr.async_get(hass)\n config_entry = MockConfigEntry(domain=\"mock-domain\", data={})\n config_entry.add_to_hass(hass)\n registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"50147903852c\")},\n name=\"name\",\n )\n # Not enabled should not get flows\n config_entry2 = MockConfigEntry(domain=\"mock-domain-2\", data={})\n config_entry2.add_to_hass(hass)\n registry.async_get_or_create(\n config_entry_id=config_entry2.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"50147903852c\")},\n name=\"name\",\n )\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n # Ensure no change is ignored\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.1.120\",\n hostname=\"irobot-ae9ec12dd3b04885bcbfa36afb01e1cc\",\n macaddress=\"50147903852c\",\n )", "def device_config(deviceIP):\r\n\r\n \r\n api_device_edge = '/dataservice/system/device/vedges?deviceIP='+deviceIP+\"&\"\r\n url_device_edge = url(vmanage_host,vmanage_port,api_device_edge)\r\n device_edge = 
Operation.get_method(url_device_edge,header)\r\n\r\n \r\n \r\n if device_edge['data'] == []:\r\n return( \" Unable to find the System IP \" + deviceIP )\r\n \r\n elif device_edge['data'][0][\"configOperationMode\"] == \"cli\":\r\n return( \"SystemIP \" + deviceIP + \" is in CLI mode unable to compare \" )\r\n \r\n elif device_edge['data'][0][\"configOperationMode\"] == \"vmanage\":\r\n \r\n \r\n api_Template_Device_config = '/dataservice/template/config/attached/'+device_edge['data'][0][\"uuid\"]+\"?type=CFS\"\r\n url_Template_Device_config = url(vmanage_host,vmanage_port,api_Template_Device_config)\r\n Template_Device_config = Operation.get_method(url_Template_Device_config , header)\r\n\r\n return Template_Device_config['config']", "def main():\n cli = DhcpClientCLI()\n\n parser = argparse.ArgumentParser(\n description='Management CLI for Mobility DHCP Client',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add sub commands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # List\n subparser = subparsers.add_parser(\n 'list_dhcp_records',\n help='Lists all records from Redis',\n )\n subparser.set_defaults(func=cli.list_all_record)\n\n # Add\n subparser = subparsers.add_parser(\n 'add_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument(\n 'mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"',\n type=str,\n )\n subparser.add_argument(\n 'ip', help='IP address, e.g. \"1.1.1.1\"',\n type=ip_address,\n )\n\n subparser.add_argument(\n 'state',\n help='DHCP protocol state 1 to 7, e.g. \"1\"',\n type=int,\n )\n subparser.add_argument(\n 'subnet',\n help='IP address subnet, e.g. \"1.1.1.0/24\"',\n type=ipaddress.ip_network,\n )\n\n subparser.add_argument('dhcp', help='DHCP IP address, e.g. \"1.1.1.100\"')\n subparser.add_argument('lease', help='Lease time in seconds, e.g. \"100\"')\n subparser.set_defaults(func=cli.add_record)\n\n # del\n subparser = subparsers.add_parser(\n 'del_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument('mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"')\n subparser.set_defaults(func=cli.del_record)\n\n # set default gw\n subparser = subparsers.add_parser(\n 'set_default_gw',\n help='Set default GW',\n )\n subparser.add_argument('ip', help='IP address, e.g. \"1.1.1.1\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # set gw mac\n subparser = subparsers.add_parser(\n 'set_gw_mac',\n help='Set GW Mac address',\n )\n subparser.add_argument('mac', help='Mac address, e.g. 
\"8a:00:00:00:0b:11\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # Parse the args\n args = parser.parse_args()\n if not args.cmd:\n parser.print_usage()\n sys.exit(1)\n\n # Execute the sub-command function\n args.func(args)", "def dhcp_cmd(args):\n if VERSION_LIVEBOX == 'lb28':\n dhcpv4_object = 'NMC'\n else:\n dhcpv4_object = 'DHCPv4.Server.Pool.default'\n requete_print(dhcpv4_object + \":getStaticLeases\")", "def config_and_verify_dhcp_option(ssh_conn_obj, dut, ztp_params, data, expect_reboot=False, reboot_on_success=list(), cli_type=\"\"):\n cli_type = st.get_ui_type(dut,cli_type=cli_type)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n retry_count = data.retry_count if \"retry_count\" in data and data.retry_count else 0\n iteration = data.iteration if \"iteration\" in data and data.iteration else 300\n delay = data.delay if \"delay\" in data and data.delay else 3\n if \"func_name\" in data:\n syslog_file_names = [\"syslog_1_{}\".format(data.func_name), \"syslog_{}\".format(data.func_name)]\n # basic_obj.copy_config_db_to_temp(dut, data.config_db_path, data.config_db_temp)\n if \"config_file_type\" in data and data.config_file_type == \"text\":\n file_path = \"/tmp/file_temp.json\"\n basic_obj.write_to_file(ssh_conn_obj, data.json_content, file_path, device=\"server\")\n elif \"config_file_type\" in data and data.config_file_type == \"EoL\":\n file_path = \"\"\n else:\n file_path = basic_obj.write_to_json_file(data.json_content)\n if file_path:\n destination_path = \"{}{}/{}\".format(ztp_params.home_path, ztp_params.config_path, data.config_file)\n basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)\n if \"config_db_location\" in data and data.config_db_location == \"json\":\n st.download_file_from_dut(dut, data.config_db_temp, file_path)\n destination_path = \"{}{}/{}\".format(ztp_params.home_path, ztp_params.config_path, data.config_db_file_name)\n basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)\n if \"scenario\" in data and data.scenario == \"invalid-json\":\n st.log(\"Writing invalid content to make invalid json ...\")\n basic_obj.write_to_file_to_line(ssh_conn_obj, \",\", 5, destination_path, \"server\")\n if data.option_type == \"67\":\n st.log(\"Creating {} file on DHCP server ...\".format(data.config_file))\n data.search_pattern = r'\\s*option\\s+bootfile-name\\s*\\S*\\s*\"\\S+\";'\n data.option_string = \"option bootfile-name\"\n if data.type == \"http\":\n data.option_url = \"http://{}{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"tftp\":\n data.option_url = \"tftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"ftp\":\n data.option_url = \"ftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n write_option_to_dhcp_server(ssh_conn_obj, data)\n basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)\n if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):\n st.log(\"{} service not running\".format(data.dhcp_service_name))\n st.report_fail(\"service_not_running\", data.dhcp_service_name)\n # write_option_67_to_dhcp_server(ssh_conn_obj, data)\n data.device_action = \"reboot\" if cli_type == \"klish\" else data.device_action\n if data.device_action == \"reboot\":\n reboot_type = data.reboot_type if \"reboot_type\" in data and data.reboot_type else \"normal\"\n 
basic_obj.remove_file(dut, data.config_db_path)\n st.reboot(dut, reboot_type, skip_port_wait=True)\n st.wait_system_status(dut, 500)\n elif data.device_action == \"run\":\n ztp_operations(dut, data.device_action)\n if \"band_type\" in data and data.band_type==\"inband\":\n if not basic_obj.poll_for_system_status(dut):\n st.log(\"Sytem is not ready ..\")\n st.report_env_fail(\"system_not_ready\")\n if not basic_obj.check_interface_status(dut, ztp_params.oob_port,\"up\"):\n basic_obj.ifconfig_operation(dut, ztp_params.oob_port, \"down\")\n interface_status = basic_obj.check_interface_status(dut, ztp_params.inband_port, \"up\")\n if interface_status is not None:\n if not interface_status:\n intf_obj.interface_noshutdown(dut, ztp_params.inband_port, cli_type=cli_type)\n if \"service\" in data and data.service == \"disable\":\n basic_obj.service_operations_by_systemctl(dut, \"ztp\", \"stop\")\n if basic_obj.verify_service_status(dut, \"ztp\"):\n st.log(\"ZTP status is not stopped\")\n st.report_fail(\"service_not_stopped\", \"ztp\")\n basic_obj.service_operations_by_systemctl(dut, \"ztp\", \"start\")\n if not poll_ztp_status(dut, [\"IN-PROGRESS\", \"Not Started\", \"SUCCESS\"], cli_type=cli_type):\n st.report_fail(\"ztp_max_polling_interval\")\n if \"check\" in data and data.check == \"not\":\n if verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):\n if \"logs_path\" in data and \"func_name\" in data:\n capture_syslogs(dut, data.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n else:\n st.log(\"Iteration count {}\".format(iteration))\n st.log(\"REBOOT ON SUCCESS - {}\".format(reboot_on_success))\n if reboot_on_success:\n if \"configdb-json\" in reboot_on_success:\n st.wait_system_reboot(dut)\n st.wait_system_status(dut, 300)\n result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=reboot_on_success, cli_type=cli_type)\n else:\n result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)\n if not result:\n if \"logs_path\" in data and \"func_name\" in data:\n capture_syslogs(dut, data.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n if reboot_on_success:\n output = show_ztp_status(dut, cli_type=cli_type)\n if output[\"status\"] != \"SUCCESS\":\n st.wait(300, \"Waiting for device to reboot after success...\")\n st.wait_system_status(dut, 300)\n # st.wait_system_reboot(dut)\n if not verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):\n if \"logs_path\" in data and \"func_name\" in data:\n capture_syslogs(dut, data.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n st.banner(boot_up_obj.sonic_installer_list(dut))\n verify_ztp_filename_logs(dut, data)\n if \"ztp_log_string\" in data and data.ztp_log_string:\n if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, data.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(data.ztp_log_path, data.ztp_log_string))\n if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, data.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(data.ztp_log_path_1, data.ztp_log_string))\n st.report_fail(\"ztp_log_verification_failed\", data.ztp_log_path_1, data.ztp_log_string)\n if \"result\" in data and data.result 
== \"pass\":\n st.report_pass(\"test_case_passed\")", "def call_driver(self, action, network):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n try:\n # the Driver expects something that is duck typed similar to\n # the base models.\n driver = self.dhcp_driver_cls(self.conf,\n network,\n self.conf.root_helper,\n self.device_manager,\n namespace)\n getattr(driver, action)()\n return True\n\n except Exception, e:\n self.needs_resync = True\n LOG.exception('Unable to %s dhcp.' % action)", "def is_configure_with_dhcp(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgNet_IsConfigureWithDhcp', self.handle))", "def _RunDHCPClient(self, dhclient_script_path=None, **kwargs):\n del kwargs\n PID_FILE = os.path.join(self._tmp_dir, 'dhclient.pid')\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n dhcp_command = ('echo \"\" | ' # dhclient expects STDIN for some reason\n 'dhclient -4 ' # only run on IPv4\n '-nw ' # immediately daemonize\n '-pf {pid_file} '\n '-sf {dhclient_script} '\n '-lf /dev/null ' # don't keep a leases file\n '-v {interface}'.format(\n pid_file=PID_FILE,\n dhclient_script=dhclient_script_path,\n interface=self.interface))\n kill_command = 'cat {pid_file} | xargs -r kill; rm {pid_file}'.format(\n pid_file=PID_FILE)\n force_kill_command = 'pgrep dhclient | xargs -r kill -9'\n\n logging.info('Killing any existing dhclient processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhclient...')\n self._device.CheckCall(dhcp_command)\n\n logging.info('Waiting to lease an IP...')\n ip = sync_utils.WaitFor(self._LeasedIP, self._dhcp_timeout)\n if not ip:\n self._device.Call(kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Stopping dhclient...')\n self._device.Call(kill_command)\n self._device.Call(force_kill_command)\n self._device.Call(clear_ifconfig_command)\n\n yield # We have released the IP.", "def renew_dhcp_lease(self):\n\t\tresponse = os.system(\"/sbin/dhclient -r;/sbin/dhclient\")\n\t\tif response != 0:\n\t\t\tprint \"Network restart failed. 
DHCP Lease failed.\"", "def test_enable_dpdk(self):\n logging.info('Pre-flight check')\n self._dpdk_pre_post_flight_check()\n self._ovs_br_ex_port_is_system_interface()\n\n self.enable_hugepages_vfio_on_hvs_in_vms(4)\n with self.config_change(\n {\n 'enable-dpdk': False,\n 'dpdk-driver': '',\n },\n {\n 'enable-dpdk': True,\n 'dpdk-driver': 'vfio-pci',\n },\n application_name='ovn-chassis'):\n logging.info('Checking openvswitch-switch-dpdk is installed')\n self._openvswitch_switch_dpdk_installed()\n logging.info('Checking DPDK is configured in OVS')\n self._ovs_dpdk_init_configured()\n logging.info('Checking DPDK is successfully initialized in OVS')\n self._ovs_dpdk_initialized()\n logging.info('Checking that br-ex configed with DPDK interface...')\n self._ovs_br_ex_port_is_dpdk_interface()\n logging.info('and is not in error.')\n self._ovs_br_ex_interface_not_in_error()\n\n logging.info('Post-flight check')\n self._dpdk_pre_post_flight_check()\n\n self.disable_hugepages_vfio_on_hvs_in_vms()\n self._ovs_br_ex_port_is_system_interface()", "def set_dhcp_conn(nic):\n nic.EnableDHCP()\n # After static DNS servers are specified to start using Dynamic Host\n # Configuration Protocol (DHCP) instead of static DNS servers,\n # you can call the method without supplying \"in\" parameters.\n nic.SetDNSServerSearchOrder()", "def dhcp_free(self, dhcp_free):\n\n self._dhcp_free = dhcp_free", "def device_discovery(endless):\r\n click.echo(\"start device discovery ...\")\r\n _device_discovery(endless)", "def dhcp_options_id(self, dhcp_options_id):\n self._dhcp_options_id = dhcp_options_id", "def config_dhcpv6_options(ssh_conn_obj, ztp_params, config_params, options=dict(), cli_type=\"\"):\n cli_type = st.get_ui_type(config_params.dut, cli_type=cli_type)\n retry_count = config_params.retry_count if \"retry_count\" in config_params and config_params.retry_count else 0\n iteration = config_params.iteration if \"iteration\" in config_params and config_params.iteration else 300\n delay = config_params.delay if \"delay\" in config_params and config_params.delay else 3\n expect_reboot = True if \"expect_reboot\" in options and options [\"expect_reboot\"] else False\n st.log(config_params)\n if \"func_name\" in config_params:\n syslog_file_names = [\"syslog_1_{}\".format(config_params.func_name), \"syslog_{}\".format(config_params.func_name)]\n if \"json_content\" in config_params:\n file_path = basic_obj.write_to_json_file(config_params.json_content)\n st.log(file_path)\n if file_path:\n destination_path = \"{}{}/{}\".format(config_params.home_path, ztp_params.config_path, config_params.ztp_file)\n st.log(destination_path)\n basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)\n config_params.option_59_url = \"http://[{}]{}/{}\".format(config_params.static_ip, ztp_params.config_path, config_params.ztp_file)\n config_params.search_pattern = r'\\s*option\\s+dhcp6.boot-file-url\\s+\"\\S+\";'\n write_option_59_to_dhcp_server(ssh_conn_obj, config_params)\n basic_obj.service_operations(ssh_conn_obj, config_params.dhcp6_service_name, \"restart\", \"server\")\n if not verify_dhcpd_service_status(ssh_conn_obj, config_params.dhcpd6_pid):\n st.log(\"{} service is running which is not expected\".format(config_params.dhcp6_service_name))\n st.report_fail(\"service_running_not_expected\", config_params.dhcp6_service_name)\n reboot_type = config_params.reboot_type if \"reboot_type\" in config_params and config_params.reboot_type else \"normal\"\n if \"ztp_operation\" in 
config_params:\n config_params.ztp_operation = \"reboot\" if cli_type == \"klish\" else config_params.ztp_operation\n if config_params.ztp_operation == \"reboot\":\n basic_obj.remove_file(config_params.dut, config_params.config_db_path)\n st.reboot(config_params.dut, reboot_type, skip_port_wait=True)\n elif config_params.ztp_operation == \"run\":\n ztp_operations(config_params.dut, config_params.ztp_operation)\n else:\n st.log(\"ZTP operation is not mentioned hence rebooting the device ...\")\n basic_obj.remove_file(config_params.dut, config_params.config_db_path)\n st.reboot(config_params.dut, reboot_type, skip_port_wait=True)\n if \"reboot_on_success\" in options and options[\"reboot_on_success\"]:\n result = verify_ztp_status(config_params.dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=options[\"reboot_on_success\"], cli_type=cli_type)\n else:\n result = verify_ztp_status(config_params.dut, retry_count, iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)\n if not result:\n if \"logs_path\" in config_params and \"func_name\" in config_params:\n capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n if \"reboot_on_success\" in options and options[\"reboot_on_success\"]:\n reboot_obj.config_reload(config_params.dut)\n st.wait(5)\n if not ip_obj.ping(config_params.dut, config_params.static_ip, family=\"ipv6\"):\n st.log(\"Pinging to DHCP server failed from DUT, issue either with DUT or server\")\n # intf_obj.enable_dhcp_on_interface(config_params.dut, config_params.network_port, \"v6\")\n if not verify_ztp_status(config_params.dut, retry_count, iteration, delay, cli_type=cli_type):\n if \"logs_path\" in config_params and \"func_name\" in config_params:\n capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n verify_ztp_filename_logs(config_params.dut, config_params)\n if \"ztp_log_string\" in config_params and config_params.ztp_log_string:\n if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path, config_params.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(config_params.ztp_log_path, config_params.ztp_log_string))\n if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path_1, config_params.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(config_params.ztp_log_path_1, config_params.ztp_log_string))\n st.report_fail(\"ztp_log_verification_failed\", config_params.ztp_log_path_1, config_params.ztp_log_string)\n if \"result\" in config_params and config_params.result == \"pass\":\n st.report_pass(\"test_case_passed\")", "async def test_dhcp_match_hostname_and_macaddress(hass: HomeAssistant) -> None:\n integration_matchers = [\n {\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}\n ]\n packet = Ether(RAW_DHCP_REQUEST)\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n # Ensure no change is ignored\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] 
== {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )", "def _has_native_dhcp_metadata(self):\n pass", "async def test_dhcp_nomatch(hass: HomeAssistant) -> None:\n integration_matchers = [{\"domain\": \"mock-domain\", \"macaddress\": \"ABC123*\"}]\n\n packet = Ether(RAW_DHCP_REQUEST)\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 0", "def dns():\r\n print('''\\n%s at %s acting as user %s\r\n\\nDNS Configuration Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get current DNS configuration\r\n 2 - Change DNS configuration\r\n 3 - Back\r\n 4 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n dns()\r\n execute = {1: PACKETMASTER.get_dns,\r\n 2: PACKETMASTER.set_dns_guided,\r\n 3: hardwareconfig,\r\n 4: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n dns()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n dns()", "def get_dhcp(cls, vcpe, mgmt = 'eth0'):\n\n def put_dhcp():\n VSGAccess.restore_interface_config(mgmt, vcpe = vcpe)\n\n vcpe_ip = VSGAccess.vcpe_get_dhcp(vcpe, mgmt = mgmt)\n if vcpe_ip is not None:\n cls.restore_methods.append(put_dhcp)\n return vcpe_ip", "def post_process(self, packet: 'dict[str, Any]') -> 'SMFIdentificationBasedDPDOption':\n ret = super().post_process(packet) # type: SMFIdentificationBasedDPDOption\n ret.mode = Enum_SMFDPDMode.H_DPD\n return ret", "async def test_discovered_by_dhcp_or_integration_discovery(\n hass: HomeAssistant, source, data, bulb_type, extended_white_range, name\n) -> None:\n with _patch_wizlight(\n device=None, extended_white_range=extended_white_range, bulb_type=bulb_type\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"step_id\"] == \"discovery_confirm\"\n\n with _patch_wizlight(\n device=None, extended_white_range=extended_white_range, bulb_type=bulb_type\n ), patch(\n \"homeassistant.components.wiz.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, patch(\n \"homeassistant.components.wiz.async_setup\", return_value=True\n ) as mock_setup:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {},\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"create_entry\"\n assert result2[\"title\"] == name\n assert result2[\"data\"] == {\n CONF_HOST: \"1.1.1.1\",\n }\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "async def test_device_tracker_hostname_and_macaddress_after_start(\n hass: HomeAssistant,\n) -> None:\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n 
hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"Connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )", "def dhcp_utilization(self, dhcp_utilization):\n\n self._dhcp_utilization = dhcp_utilization", "def dhcp_agent_network_add(self, dhcp_net_info):\n self.turn_on_dhcp_check()", "def get_devices_spt(self):\n\n #import pdb; pdb.set_trace()\n if self._drives or self.firmware_version or self.product_name or self.vendor_name or \\\n self.serial_number or self.target_port:\n user_options = True\n else:\n user_options = False\n try:\n # Note: Extra logic to optimize spt device directory scanning.\n if not user_options:\n if self._include_enclosures:\n message = \"Find SCSI Devices\"\n command = \"{tool} show devices dtype=direct,hostmanaged,enclosure\".format(tool=self.tool)\n else:\n message = \"Find SCSI Disk Drives\"\n command = \"{tool} show devices dtype=direct,hostmanaged\".format(tool=self.tool)\n # Use common execute below.\n else:\n # Request enclosures separately.\n if self._include_enclosures:\n message = \"Find SCSI Enclosures\"\n command = \"{tool} show devices dtype=enclosure ofmt=json\".format(tool=self.tool)\n pdata = self._run_command(command=command, message=message,\n logger=self._logger, shell=False, expected_failure=True)\n if pdata['exit_code'] == self.EXIT_STATUS_SUCCESS and pdata['stdout']:\n devices = json.loads(pdata['stdout'])\n self.parse_devices_spt(devices)\n\n message = \"Find SCSI Disk Drives\"\n # Selective drives or all direct access (disk drives).\n if self._drives:\n command = \"{tool} show edt dtype=direct,hostmanaged devices={drives}\"\\\n .format(tool=self.tool, drives=\",\".join(self._drives))\n else:\n command = \"{tool} show devices dtype=direct,hostmanaged\".format(tool=self.tool)\n # Apply optional parameters.\n if self.product_name:\n command += \" pid={product}\".format(product=self.product_name)\n if self.vendor_name:\n command += \" vid={vendor}\".format(vendor=self.vendor_name)\n if self.serial_number:\n command += \" serial={serial}\".format(serial=self.serial_number)\n if self.target_port:\n command += \" tport={target}\".format(target=self.target_port)\n if self.firmware_version:\n command += \" fw_version={firmware}\".format(firmware=self.firmware_version)\n\n # Add common spt options, we want JSON output!\n if self._exclude:\n command += \" exclude={drives}\".format(drives=\",\".join(self._exclude))\n command += \" ofmt=json\"\n # Finally, execute spt and parse its' JSON output (if any).\n pdata = self._run_command(command=command, message=message,\n logger=self._logger, shell=False, expected_failure=True)\n # spt emits warning status (1) and no JSON output if no devices found.\n if pdata['exit_code'] == self.EXIT_STATUS_SUCCESS and pdata['stdout']:\n devices = json.loads(pdata['stdout'])\n self.parse_devices_spt(devices)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc\n\n except ValueError as 
exc:\n self._logger.error(\"Failed to parse spts' JSON output: {0}\".format(exc))\n raise exc", "def dhcpservers(self, site_id, dhcpserver_id, data, tenant_id=None, api_version=\"v2.2\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}/dhcpservers/{}\".format(api_version,\n tenant_id,\n site_id,\n dhcpserver_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)", "async def test_aiodiscover_finds_new_hosts(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init, patch(\n \"homeassistant.components.dhcp.DiscoverHosts.async_discover\",\n return_value=[\n {\n dhcp.DISCOVERY_IP_ADDRESS: \"192.168.210.56\",\n dhcp.DISCOVERY_HOSTNAME: \"connect\",\n dhcp.DISCOVERY_MAC_ADDRESS: \"b8b7f16db533\",\n }\n ],\n ):\n device_tracker_watcher = dhcp.NetworkWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n 
macaddress=\"b8b7f16db533\",\n )", "def web_data(self):\n return {'dhcp_passthrough': self.dhcp_passthrough}", "async def test_device_tracker_hostname_and_macaddress_exists_before_start(\n hass: HomeAssistant,\n) -> None:\n hass.states.async_set(\n \"device_tracker.august_connect\",\n STATE_HOME,\n {\n ATTR_HOST_NAME: \"Connect\",\n ATTR_IP: \"192.168.210.56\",\n ATTR_SOURCE_TYPE: SourceType.ROUTER,\n ATTR_MAC: \"B8:B7:F1:6D:B5:33\",\n },\n )\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )", "def dhcp_options_id(self):\n return self._dhcp_options_id", "def _read_opt_smf_dpd(self, code, *, desc):\n _type = self._read_opt_type(code)\n _size = self._read_unpack(1)\n _tidd = self._read_binary(1)\n\n if _tidd[0] == '0':\n _mode = 'I-DPD'\n _tidt = _TID_TYPE.get(_tidd[1:4], 'Unassigned')\n _tidl = int(_tidd[4:], base=2)\n\n if _tidt == _TID_TYPE.NULL:\n if _tidl != 0:\n raise ProtocolError(f'{self.alias}: [OptNo {code}] invalid format')\n _iden = self._read_fileng(_size-1)\n\n opt = dict(\n desc=desc,\n type=_type,\n length=_size + 2,\n dpd_type=_mode,\n tid_type=_tidt,\n tid_len=_tidl,\n id=_iden,\n )\n elif _tidt == _TID_TYPE.IPv4:\n if _tidl != 3:\n raise ProtocolError(f'{self.alias}: [OptNo {code}] invalid format')\n _tidf = self._read_fileng(4)\n _iden = self._read_fileng(_size-4)\n\n opt = dict(\n desc=desc,\n type=_type,\n length=_size + 2,\n dpd_type=_mode,\n tid_type=_tidt,\n tid_len=_tidl,\n tid=ipaddress.ip_address(_tidf),\n id=_iden,\n )\n elif _tidt == _TID_TYPE.IPv6:\n if _tidl != 15:\n raise ProtocolError(f'{self.alias}: [OptNo {code}] invalid format')\n _tidf = self._read_fileng(15)\n _iden = self._read_fileng(_size-15)\n\n opt = dict(\n desc=desc,\n type=_type,\n length=_size + 2,\n dpd_type=_mode,\n tid_type=_tidt,\n tid_len=_tidl,\n tid=ipaddress.ip_address(_tidf),\n id=_iden,\n )\n else:\n _tidf = self._read_unpack(_tidl+1)\n _iden = self._read_fileng(_size-_tidl-2)\n\n opt = dict(\n desc=desc,\n type=_type,\n length=_size + 2,\n dpd_type=_mode,\n tid_type=_tidt,\n tid_len=_tidl,\n tid=_tidf,\n id=_iden,\n )\n elif _tidd[0] == '1':\n _mode = 'H-DPD'\n _tidt = _TID_TYPE.get(_tidd[1:4])\n _data = self._read_binary(_size-1)\n\n opt = dict(\n desc=desc,\n type=_type,\n length=_size + 2,\n dpd_type=_mode,\n tid_type=_tidt,\n hav=_tidd[1:] + _data,\n )\n else:\n raise ProtocolError(f'{self.alias}: [OptNo {code}] invalid format')\n\n return opt", "def get_device_list_guest(ip_address, mac_address, selected_device, i_display_start, i_display_length, s_search, sEcho, sSortDir_0, iSortCol_0, userid, html_var={}):\n # This is a empty list variable used for storing the device list\n device_list = []\n master_slave_select = []\n master_slave = \"\"\n device_dict = {}\n device_type = selected_device\n if device_type == '' or device_type == None:\n device_type = 'odu'\n device_list_state = 
\"enabled\"\n global sqlalche_obj\n # try block starts\n try:\n # here we create the session of sqlalchemy\n\n # this is the query which returns the multidimensional array of hosts table and store in device_tuple\n# device_tuple = sqlalche_obj.session.query(Hosts.host_id,Hosts.host_alias,Hosts.ip_address,Hosts.mac_address,Hosts.device_type_id,Hosts.reconcile_health,Hosts.config_profile_id).\\\n# filter(and_(Hosts.is_deleted == 0,Hosts.ip_address.like('%s%%'%(ip_address)),\\\n# Hosts.mac_address.like('%s%%'%(mac_address)),Hosts.device_type_id.like('%s%%'%(device_type)),UsersGroups.user_id=='%s'%(userid),\\\n# UsersGroups.group_id==HostgroupsGroups.group_id,HostsHostgroups.hostgroup_id==HostgroupsGroups.hostgroup_id,Hosts.host_id==HostsHostgroups.host_id))\\\n# .order_by(Hosts.host_alias).order_by(Hosts.ip_address).all()\n\n device_dict = data_table_data_sqlalchemy(\n ip_address, mac_address, selected_device, i_display_start, i_display_length, s_search, sEcho, sSortDir_0, iSortCol_0, userid, html_var)\n\n device_tuple = device_dict[\"aaData\"]\n index = int(device_dict[\"i_display_start\"])\n\n sqlalche_obj.sql_alchemy_db_connection_open()\n device_status_host_id = \"\"\n #[36, \"172.22.0.111\", \"Default\", \"172.22.0.111\", \"FF:FF:FF:FF:FF:FF\", \" \", \"odu16\", 0, 304]\n # this loop create a mutildimesional list of host\n for i in range(0, len(device_tuple)):\n if device_tuple[i][6] == \"odu16\":\n master_slave_select = sqlalche_obj.session.query(\n GetOdu16_ru_conf_table.default_node_type).filter(GetOdu16_ru_conf_table.host_id == device_tuple[i][0]).all()\n if len(master_slave_select) > 0:\n if int(master_slave_select[0][0]) == 0 or int(master_slave_select[0][0]) == 2:\n slave_data = \"-\"\n master_slave = \"RM18 (Master)\"\n else:\n slave_data = \"\"\n master_host_id = sqlalche_obj.session.query(MasterSlaveLinking.master).filter(\n MasterSlaveLinking.slave == device_tuple[i][0]).all()\n if len(master_host_id) > 0:\n host_data = sqlalche_obj.session.query(Hosts.host_alias, Hosts.host_asset_id).filter(\n and_(Hosts.host_id == master_host_id[0][0], Hosts.is_deleted == 0)).all()\n host_alias = host_data[0].host_alias\n else:\n host_alias = \"\"\n## host_asset_data = sqlalche_obj.session.query(HostAssets.ra_mac).filter(HostAssets.host_asset_id==host_data[0].host_asset_id).all()\n# master_mac = str(host_asset_data[0].ra_mac if len(host_asset_data)>0\n# else \"\")\n peer_status = sqlalche_obj.session.query(GetOdu16PeerNodeStatusTable.sig_strength, GetOdu16PeerNodeStatusTable.link_status).\\\n filter(GetOdu16PeerNodeStatusTable.host_id == device_tuple[i][0]).order_by(\n desc(GetOdu16PeerNodeStatusTable.timestamp)).limit(1).all()\n if len(peer_status) > 0:\n if peer_status[0].sig_strength == None:\n slave_data = str(host_alias) + \" ( )\"\n elif int(peer_status[0].sig_strength) == 1111111:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n\n else:\n if peer_status[0].link_status == 1:\n slave_data = str(\n host_alias) + \"( Link Disconnected )\"\n else:\n slave_data = str(host_alias) + \" (\" + str(\n peer_status[0].sig_strength) + \"dBm)\"\n else:\n slave_data = str(host_alias) + \"(-)\"\n master_slave = \"RM18 (S)\"\n\n else:\n master_slave = \"Rm18(-)\"\n ru_data = sqlalche_obj.session.query(SetOdu16RUConfTable.adminstate).filter(\n SetOdu16RUConfTable.config_profile_id == device_tuple[i][8]).all()\n ra_data = sqlalche_obj.session.query(SetOdu16RAConfTable.raAdminState).filter(\n SetOdu16RAConfTable.config_profile_id == device_tuple[i][8]).all()\n sync_data = 
sqlalche_obj.session.query(SetOdu16SyncConfigTable.adminStatus).filter(\n SetOdu16SyncConfigTable.config_profile_id == device_tuple[i][8]).all()\n else:\n master_slave_select = sqlalche_obj.session.query(Odu100RuConfTable.defaultNodeType).filter(\n Odu100RuConfTable.config_profile_id == device_tuple[i][8]).all()\n if len(master_slave_select) > 0:\n if master_slave_select[0][0] == 0 or master_slave_select[0][0] == 2:\n slave_data = \"-\"\n master_slave = \"RM (Master)\"\n else:\n # slave_data = \"0\"\n slave_data = \"\"\n master_host_id = sqlalche_obj.session.query(MasterSlaveLinking.master).filter(\n MasterSlaveLinking.slave == device_tuple[i][0]).all()\n if len(master_host_id) > 0:\n host_data = sqlalche_obj.session.query(\n Hosts.host_alias, Hosts.host_asset_id).filter(Hosts.host_id == master_host_id[0][0]).all()\n host_alias = host_data[0].host_alias\n else:\n host_alias = \"\"\n## host_asset_data = sqlalche_obj.session.query(HostAssets.ra_mac).filter(HostAssets.host_asset_id==host_data[0].host_asset_id).all()\n## master_mac = str(host_asset_data[0].ra_mac if len(host_asset_data)>0 else \"\")+\",\"\n## peer_status = sqlalche_obj.session.query(Odu100PeerNodeStatusTable.sigStrength1).\\\n# filter(and_(Odu100PeerNodeStatusTable.host_id==device_tuple[i][0],or_(Odu100PeerNodeStatusTable.peerMacAddr==master_mac,Odu100PeerNodeStatusTable.sigStrength1==1))).order_by(desc(Odu100PeerNodeStatusTable.timestamp)).limit(1).all()\n peer_status = sqlalche_obj.session.query(Odu100PeerNodeStatusTable.sigStrength1, Odu100PeerNodeStatusTable.linkStatus).\\\n filter(Odu100PeerNodeStatusTable.host_id == device_tuple[i][0]\n ).order_by(desc(Odu100PeerNodeStatusTable.timestamp)).limit(1).all()\n if len(peer_status) > 0:\n if peer_status[0].sigStrength1 == None:\n slave_data = str(host_alias) + \"()\"\n elif int(peer_status[0].sigStrength1) == 1111111:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n else:\n if int(peer_status[0].linkStatus) == 1:\n slave_data = str(\n host_alias) + \" ( Link Disconnected )\"\n else:\n slave_data = str(host_alias) + \" (\" + str(\n peer_status[0].sigStrength1) + \"dBm)\"\n else:\n slave_data = str(host_alias) + \"(-)\"\n## else:\n## slave_data = \"(-)\"\n master_slave = \"RM (S)\"\n else:\n master_slave = \"RM(-)\"\n ru_data = sqlalche_obj.session.query(Odu100RuConfTable.adminstate).filter(\n Odu100RuConfTable.config_profile_id == device_tuple[i][8]).all()\n ra_data = sqlalche_obj.session.query(Odu100RaConfTable.raAdminState).filter(\n Odu100RaConfTable.config_profile_id == device_tuple[i][8]).all()\n sync_data = sqlalche_obj.session.query(Odu100SyncConfigTable.adminStatus).filter(\n Odu100SyncConfigTable.config_profile_id == device_tuple[i][8]).all()\n\n if len(ru_data) > 0:\n if ru_data[0][0] == None:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n image_ru_path = \"images/temp/green_dot.png\"\n else:\n if int(ru_data[0][0]) == 0:\n ru_state = 0\n image_ru_title = \"RU State Locked\"\n image_ru_path = \"images/temp/red_dot.png\"\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n image_ru_path = \"images/temp/green_dot.png\"\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n image_ru_path = \"images/temp/green_dot.png\"\n if len(ra_data) > 0:\n if ra_data[0][0] == None:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n image_ra_path = \"images/temp/green_dot.png\"\n else:\n if int(ra_data[0][0]) == 0:\n ra_state = 0\n image_ra_title = \"RA State Locked\"\n image_ra_path = \"images/temp/red_dot.png\"\n else:\n 
ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n image_ra_path = \"images/temp/green_dot.png\"\n else:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n image_ra_path = \"images/temp/green_dot.png\"\n if len(sync_data) > 0:\n if sync_data[0][0] == None:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n image_sync_path = \"images/temp/green_dot.png\"\n else:\n if int(sync_data[0][0]) == 0:\n sync_state = 0\n image_sync_title = \"SYNC State Locked\"\n image_sync_path = \"images/temp/red_dot.png\"\n else:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n image_sync_path = \"images/temp/green_dot.png\"\n else:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n image_sync_path = \"images/temp/green_dot.png\"\n\n op_status = essential_obj.get_hoststatus(device_tuple[i][0])\n if op_status == None:\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[0]\n elif op_status == 0:\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[op_status]\n else:\n op_img = \"images/host_status1.png\"\n op_title = host_status_dic[op_status]\n\n if device_tuple[i][7] <= 35:\n images = 'images/new/r-red.png'\n elif device_tuple[i][7] <= 90:\n images = 'images/new/r-black.png'\n else:\n images = 'images/new/r-green.png'\n\n admin_dic = {'ru_admin': 1 if ru_data[0][0] == None else int(ru_data[0][0]) if len(ru_data) > 0 else 1,\n 'ra_admin': 1 if ra_data[0][0] == None else int(ra_data[0][0]) if len(ra_data) > 0 else 1,\n 'sync_admin': 1 if sync_data[0][0] == None else int(sync_data[0][0]) if len(sync_data) > 0 else 1}\n snmp_up_time_data = sqlalche_obj.db.execute(\n \"select trap_event_id from system_alarm_table where host_id='%s' order by timestamp desc limit 1\" % (device_tuple[i][0]))\n snmp_up_down_time = \"\"\n for row in snmp_up_time_data:\n snmp_up_down_time = row['trap_event_id']\n if snmp_up_down_time == \"\":\n device_status = \"Device Unreachable\"\n device_status_image_path = \"images/temp/red_dot.png\"\n elif int(snmp_up_down_time) == 50001:\n device_status = \"Device Unreachable\"\n device_status_image_path = \"images/temp/red_dot.png\"\n else:\n device_status = \"Device reachable\"\n device_status_image_path = \"images/temp/green_dot.png\"\n if i == len(device_tuple) - 1:\n device_status_host_id += str(device_tuple[i][0])\n else:\n device_status_host_id += str(device_tuple[i][0]) + \",\"\n\n monitoring_status = \"<a target=\\\"main\\\" href=\\\"%s?host_id=%s&device_type=%s&device_list_state=%s\\\"><img src=\\\"images/new/info.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Current Device Status\\\" class=\\\"imgbutton n-reconcile w-reconcile\\\"/></a>\" % ('sp_status_profiling.py',\n device_tuple[i][0], device_tuple[i][6], device_list_state) if device_tuple[i][6] == \"odu100\" else \"<img src=\\\"images/new/info1.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Current Device Status\\\" class=\\\"imgbutton n-reconcile\\\"/>\"\n live_monitoring = \"&nbsp;&nbsp;<a target=\\\"main\\\" href=\\\"live_monitoring.py?host_id=%s&device_type=%s\\\"><img src=\\\"images/new/star-empty.png\\\" title=\\\"Live Monitoring\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\" /></a>\" % (device_tuple[i][0], device_tuple[i][6])\\\n if device_tuple[i][6] == \"odu100\" else \"&nbsp;&nbsp;<img src=\\\"images/new/star-empty.png\\\" title=\\\"Live Monitoring Not Available\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\" />\"\n\n device_list.append(\n [\"<center><img 
id=\\\"device_status\\\" name=\\\"device_status\\\" src=\\\"%s\\\" title=\\\"%s\\\" style=\\\"width:8px;height:8px;\\\" class=\\\"imgbutton w-reconcile\\\"/></center>&nbsp;&nbsp;\" % (device_status_image_path, device_status), device_tuple[i][1], device_tuple[i][2], device_tuple[i][3], device_tuple[i][4], \"-\" if device_tuple[i][5] == \" \" else device_tuple[i][5], master_slave, slave_data,\n \"<ul class=\\\"button_group\\\" style=\\\"width:115px;\\\">\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.ruConfTable.adminstate\\\" name=\\\"ru.ruConfTable.adminstate\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.ruConfTable.adminstate')\\\">RU</a>\\\n </li>\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.ra.raConfTable.raAdminState\\\" name=\\\"ru.ra.raConfTable.raAdminState\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.ra.raConfTable.raAdminState')\\\">RA</a>\\\n </li>\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.syncClock.syncConfigTable.adminStatus\\\" name=\\\"ru.syncClock.syncConfigTable.adminStatus\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.syncClock.syncConfigTable.adminStatus')\\\">SYNC</a>\\\n </li>\\\n </ul>\"\n % (\n \"red\" if ru_state == 0 else \"green\", image_ru_title, ru_state, device_tuple[\n i][0], device_tuple[i][6],\n \"red\" if ra_state == 0 else \"green\", image_ra_title, ra_state, device_tuple[\n i][0], device_tuple[i][6],\n \"red\" if sync_state == 0 else \"green\", image_sync_title, sync_state, device_tuple[i][0], device_tuple[i][6]),\n \"<a target=\\\"main\\\">\\\n <img id=\\\"%s\\\" src=\\\"images/new/edit.png\\\" title=\\\"Edit Profile\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;\\\n <a target=\\\"main\\\" href=\\\"%s?host_id=%s&device_type=%s&device_list_state=%s\\\"><img src=\\\"images/new/graph.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Performance Monitoring\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;\\\n <a target=\\\"main\\\" href=\\\"status_snmptt.py?ip_address=%s-\\\"><img src=\\\"images/new/alert.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Device Alarm\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;\\\n <a target=\\\"main\\\"><img src=\\\"images/new/update.png\\\" title=\\\"Firmware Upgrade\\\" class=\\\"imgbutton n-reconcile\\\"/ ></a>&nbsp;\\\n <img src=\\\"%s\\\" title=\\\"Reconciliation %s%% Done\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile imgEditodu16\\\" state_rec=0\\\"/>\\\n %s&nbsp;%s\\\n %s\"\n % (\n device_tuple[i][0], 'sp_dashboard_profiling.py' if device_tuple[i][\n 6] == \"odu100\" else 'sp_dashboard_profiling.py',\n device_tuple[i][\n 0], device_tuple[\n i][\n 6], device_list_state, device_tuple[i][3],\n images, device_tuple[i][7],\n live_monitoring, monitoring_status,\n \"<input type=\\\"hidden\\\" value=\\\"%s\\\" name=\\\"host_id\\\" id=\\\"host_id\\\" />\" % (device_status_host_id) if i == len(device_tuple) - 1 else \"\"),\n \"<center><img id=\\\"operation_status\\\" name=\\\"operation_status\\\" src=\\\"%s\\\" title=\\\"%s\\\" style=\\\"width:12px;height:12px;\\\"class=\\\"imgbutton n-reconcile\\\"/></center>&nbsp;&nbsp;\" % (op_img, op_title)])\n\n device_dict[\"aaData\"] = device_list\n\n sqlalche_obj.sql_alchemy_db_connection_close()\n return device_dict\n # try block ends\n # href=\\\"javascript:apFormwareUpdate('%s','%s','%s');\n # exception starts\n except Exception 
as e:\n\n # return device_list\n sqlalche_obj.sql_alchemy_db_connection_close()\n output2 = {\n \"sEcho\": 1,\n \"iTotalRecords\": 10,\n \"iTotalDisplayRecords\": 10,\n \"aaData\": [],\n \"query\": str(e)\n }\n return output2\n finally:\n sqlalche_obj.sql_alchemy_db_connection_close()", "async def test_dhcp_match_macaddress_without_hostname(hass: HomeAssistant) -> None:\n integration_matchers = [{\"domain\": \"mock-domain\", \"macaddress\": \"606BBD*\"}]\n\n packet = Ether(RAW_DHCP_REQUEST_WITHOUT_HOSTNAME)\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.107.151\",\n hostname=\"\",\n macaddress=\"606bbd59e4b4\",\n )", "def configure_dmd(self) -> \"DmdPlatformInterface\":\n raise NotImplementedError", "def test_list_dhcp_agent_hosting_network(self):\n self.admin_networks_client.list_dhcp_agents_on_hosting_network(\n self.network['id'])", "def test_device_mgmt(self, gateway_with_devs):\n gateway_with_devs.restart('daq')\n assert gateway_with_devs.daq\n gateway_with_devs.remove('daq')\n with pytest.raises(AttributeError):\n gateway_with_devs.daq", "async def test_dhcp_match_macaddress(hass: HomeAssistant) -> None:\n integration_matchers = [{\"domain\": \"mock-domain\", \"macaddress\": \"B8B7F1*\"}]\n\n packet = Ether(RAW_DHCP_REQUEST)\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )", "def preparePrivateAfter(self):\n\n self._log(\"prepare-private-after\").debug4(\"%s: interface preparePrivateAfter was called\", self.name)\n\n numServices = 0\n\n # managemnet\n if self.isManagementEnabled():\n numServices+=1\n\n if self.prepareManagement() != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError\n\n # line\n if self.isLineEnabled():\n numServices+=1\n\n if self.prepareLine() != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError\n\n # delivery\n if self.isDeliveryEnabled():\n numServices+=1\n\n if self.prepareDelivery() != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError\n\n if numServices > 1:\n self._log(\"too-many-functions\").notice(\"Unsupported combination of functions for interface '%s'\", self.name)\n self.setConfigErrorStr(\"content and management combination is not supported\")\n return ReturnCodes.kGeneralError\n\n # in line after DPDK is up the eth device not exposed by Linux kernel\n if numServices==1 and not self.isLineEnabled():\n deviceName = self.deviceName()\n if ((deviceName is None) or (a.sys.net.lnx.device.DeviceUtils.isDeviceExists(deviceName) is False)):\n self._log(\"os-device-no-exist\").notice(\"Interface '%s' associated os device '%s' is invalid\", self.name, deviceName)\n 
self.setConfigErrorStr(\"Interface '%s': associated OS device '%s' is invalid\" % (self.name, deviceName))\n return ReturnCodes.kGeneralError\n\n # static configuration on mini platform\n if not self.allowDynamicConfig:\n if self.prepareForStaticCfg() != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError\n\n # on success only\n if self.isDeliveryEnabled():\n self.tableid = (Interface.DELIVERY_TABLE_ID_PATTERN % self.ifIndex)\n elif ((self.isManagementEnabled()) and (self.candidateTechMode is False)):\n self.tableid = Interface.MANAGEMENT_TABLE_ID\n else:\n self.tableid = None\n\n self.isTrxStart = True\n return ReturnCodes.kOk", "async def test_dhcp_match_hostname(hass: HomeAssistant) -> None:\n integration_matchers = [{\"domain\": \"mock-domain\", \"hostname\": \"connect\"}]\n\n packet = Ether(RAW_DHCP_REQUEST)\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )", "def get_configuration(self):\n\t\tdevice = DeviceBase(self.name)\n\n\t\tif len(self.master_url) > 0:\n\t\t\tdevice.master_url = self.master_url\n\t\t\tr = requests.get(self.master_url + '/configuration/' + self.name)\n\n\t\t\tif r.status_code == 200:\n\t\t\t\ttry:\n\t\t\t\t\t#Request success\n\t\t\t\t\tconfig = json.loads(r.text)\n\t\t\t\t\tif config['deviceType'] == 1:\n\t\t\t\t\t\t\"\"\" HID Reader \"\"\"\n\t\t\t\t\t\tdevice = HIDReader(self.name)\n\t\t\t\t\tif config['deviceType'] == 2:\n\t\t\t\t\t\t\"\"\" ZK45Reader \"\"\"\n\t\t\t\t\t\tdevice = ZK45Reader(self.name)\n\t\t\t\t\tif config['deviceType'] == 4:\n\t\t\t\t\t\t\"\"\" ZFM20Reader \"\"\"\n\t\t\t\t\t\tdevice = ZFM20Reader(self.name)\n\t\t\t\t\tif config['deviceType'] == 5:\n\t\t\t\t\t\t\"\"\" IOEcho \"\"\"\n\t\t\t\t\t\tdevice = IOEcho(name=self.name, pin_and_label_matrix='')\n\t\t\t\t\telif config['deviceType'] == 0:\n\t\t\t\t\t\t\"\"\" None \"\"\"\n\t\t\t\t\t\tdevice = DeviceBase(name=self.name)\n\t\t\t\t\telse:\n\t\t\t\t\t\t\"\"\" Disable \"\"\"\n\t\t\t\t\t\tdevice = DeviceBase(self.name)\n\n\t\t\t\t\tdevice.zone_id = config['zone']\n\n\t\t\t\t\tdevice.is_zone_enabled = config['enabled']\n\t\t\t\t\tdevice.is_zone_day_time_only = config['dayTimeOnly']\n\t\t\t\t\tdevice.is_configuration_loaded = True\n\n\t\t\t\t\tdevice.master_secret = config['secret']\n\t\t\t\t\tdevice.master_url = self.master_url\n\n\t\t\t\t\tdevice.is_in_error = False\n\t\t\t\t\tdevice.error_status = \"OK\"\n\t\t\t\t\tdevice.type = config['deviceType']\n\n\t\t\t\t\tprint(\"Configuration loaded.\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\terror_message = \"Device type not supported by current platform. Configuration aborted. (\" + str(e) + \")\"\n\t\t\t\t\tprint(error_message)\n\t\t\t\t\tdevice.zone_id = 1\n\n\t\t\t\t\tdevice.is_zone_enabled = False\n\t\t\t\t\tdevice.is_zone_day_time_only = False\n\t\t\t\t\tdevice.is_in_error = True\n\t\t\t\t\tdevice.error_status = error_message\n\t\t\telse:\n\t\t\t\tprint(\"Configuration loading failed. 
(Server response : \" + str(r.status_code) + \")\")\n\t\t\t\tdevice.zone_id = 1\n\t\t\t\tdevice.is_zone_enabled = False\n\t\t\t\tdevice.is_zone_day_time_only = False\n\t\t\t\tdevice.is_in_error = True\n\t\t\t\tdevice.error_status = \"Configuration loading failed. (Server response : \" + str(r.status_code) + \")\"\n\t\telse:\n\t\t\tself.zone_id = 1\n\t\t\tself.is_zone_enabled = True\n\t\t\tself.is_zone_day_time_only = True\n\t\t\tdevice.is_in_error = True\n\t\t\tdevice.error_status = \"No master URL defined\"\n\n\t\tdevice.report_state()\n\t\treturn device", "def verify_dhcpd_service_status(dut, process_id):\n st.log(\"Verifying DHCPD for {} \".format(process_id))\n dhcpd_pid = \"/run/dhcp-server/{}\".format(process_id)\n ps_aux = basic_obj.get_ps_aux(dut, dhcpd_pid, device=\"server\")\n st.log(ps_aux)\n config_string = \"\"\n if process_id == \"dhcpd6.pid\":\n config_string = \"-cf /etc/dhcp/dhcpd6.conf\"\n if process_id == \"dhcpd.pid\":\n config_string = \"-cf /etc/dhcp/dhcpd.conf\"\n st.log(\"Verifying the output with {}\".format(config_string))\n if config_string not in ps_aux:\n st.log(\"Required DHCPD service not found ...\")\n return False\n return True", "def Execute(self, client_ip, interface_name=None, interface_mac=None, csv=False, bash=False, client_user=sfdefaults.client_user, client_pass=sfdefaults.client_pass, debug=False):\n del self\n dhcp = Get(**locals())\n\n if dhcp:\n if bash or csv:\n sys.stdout.write(\"true\")\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n else:\n mylog.info(\"DHCP is enabled\")\n else:\n if bash or csv:\n sys.stdout.write(\"false\")\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n else:\n mylog.info(\"DHCP is not enabled\")\n return True", "def set_configure_with_dhcp(self, bConfigureWithDhcp):\n\t\tcall_sdk_function('PrlVmDevNet_SetConfigureWithDhcp', self.handle, bConfigureWithDhcp)", "def dhcp_renew(ifname):\n\n logging.debug('Renewing %s DHCP lease...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--rebind', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err", "def Get(self, client_ip, interface_name=None, interface_mac=None, csv=False, bash=False, client_user=sfdefaults.client_user, client_pass=sfdefaults.client_pass, debug=False):\n\n self.ValidateArgs(locals())\n if debug:\n mylog.console.setLevel(logging.DEBUG)\n if bash or csv:\n mylog.silence = True\n\n client = SfClient()\n try:\n mylog.info(\"Connecting to \" + client_ip)\n client.Connect(client_ip, client_user, client_pass)\n mylog.info(\"Checking for DHCP\")\n dhcp = client.GetDhcpEnabled(interface_name, interface_mac)\n except ClientError as e:\n mylog.error(e.message)\n self.RaiseFailureEvent(message=str(e), clientIP=client_ip, exception=e)\n return False\n\n self.SetSharedValue(SharedValues.clientDHCPEnabled, dhcp)\n self.SetSharedValue(client_ip + \"-clientDHCPEnabled\", hostname)\n return dhcp", "def get_table_dhcp_relay(self, dhcp_relay_ipv6=False):\n pass", "def get_ddwrt_data(self):\n if self.protocol == 'http':\n if not self.hostname_cache:\n _LOGGER.debug('Getting hostnames')\n # get hostnames from dhcp leases\n url = 'http://{}/Status_Lan.live.asp'.format(self.host)\n data = self.http_connection(url)\n\n # no data received\n if data is None:\n _LOGGER.debug('No hostname data received')\n return None\n\n dhcp_leases = data.get('dhcp_leases', 
None)\n\n # parse and cache leases\n if dhcp_leases:\n _LOGGER.debug('Parsing http leases')\n self.hostname_cache = _parse_http_leases(dhcp_leases)\n\n _LOGGER.debug('Getting active clients')\n # get active wireless clients\n url = 'http://{}/Status_Wireless.live.asp'.format(self.host)\n data = self.http_connection(url)\n\n if data is None:\n _LOGGER.debug('No active clients received')\n return None\n\n _LOGGER.debug('Parsing http clients')\n return _parse_http_wireless(data.get('active_wireless', None))\n\n elif self.protocol == 'ssh':\n active_clients = []\n # when no cache get leases\n if not self.hostname_cache:\n host_data = self.ssh_connection(self.host,\n [_DDWRT_LEASES_CMD,\n self.ddwrt_cmd])\n _LOGGER.debug(\n 'host_cache_data: {0}'.format(str(host_data)))\n if not host_data:\n return None\n\n self.hostname_cache = {l.split(\",\")[0]: l.split(\",\")[1]\n for l in host_data[0]}\n active_clients = [mac.lower() for mac in host_data[1]]\n else:\n host_data = self.ssh_connection(self.host, [self.ddwrt_cmd])\n _LOGGER.debug('host_data: {0}'.format(str(host_data)))\n if host_data:\n active_clients = [mac.lower() for mac in host_data[0]]\n\n for ap in self.aps:\n ap_data = self.ssh_connection(ap, [self.ddwrt_cmd])\n _LOGGER.debug('ap_data: {0}'.format(str(ap_data)))\n if ap_data:\n active_clients.extend([m.lower() for m in ap_data[0]])\n\n return active_clients", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def create_dhcp_pool(options, vsm_obj, range, default_gateway):\n edge = Edge(vsm_obj, '4.0')\n edge_id = get_edge(vsm_obj)\n edge.id = edge_id\n\n dhcp_py_dict = {\n 'enabled': True,\n 'logging': {'loglevel': 'info', 'enable': False},\n 'ippools': [\n {\n 'autoconfiguredns': True,\n 'defaultGateway': default_gateway,\n 'iprange': range,\n }\n ],\n }\n dhcp_client = DHCP(edge)\n print(\"Creating dhcp ippool with range %s\" % range)\n dhcp_schema_object = dhcp_client.get_schema_object(dhcp_py_dict)\n existing_dhcp_schema = dhcp_client.read()\n if existing_dhcp_schema and existing_dhcp_schema.ipPools:\n print \"append dhcp ippool to existing list\"\n dhcp_schema_object.ipPools = existing_dhcp_schema.ipPools + \\\n dhcp_schema_object.ipPools\n result = dhcp_client.create(dhcp_schema_object)\n\n if (result[0].response.status != 204):\n r_vars = vars(result[0])\n print(\"Create IP Pool error: %s\" % result[0].response.reason)\n print ', '.join(\"%s: %s\" % item for item in r_vars.items())\n return False\n return True", "def disable_dhcp_helper(self, network_id):\n network = self.cache.get_network_by_id(network_id)\n if network:\n if self.call_driver('disable', network):\n self.cache.remove(network)", "def do(self):\n super().do()\n\n device = self.target\n device._scan_id = 0\n\n device._sdp_addresses = {\n \"outputHost\": [],\n \"outputMac\": [],\n \"outputPort\": [],\n }\n device._sdp_links_active = [\n False,\n ]\n device._sdp_output_data_rate = 0.0\n\n device._config_id = \"\"\n\n # JSON string, deliberately left in Tango layer\n device._last_scan_configuration = \"\"\n\n # _list_of_devices_completed_task: for each task/command reports\n # the list of the devices that successfully completed the task.\n # Implemented as a defualt dictionary:\n # keys: the command name in lower case (configurescan, assignresources, etc.)\n # values: the list of devices' FQDN\n device._list_of_devices_completed_task = defaultdict(list)\n\n # _cmd_progress: command execution's progress percentage\n # implemented as a default dictionary:\n # keys: the command name in lower 
case(configurescan,..)\n # values: the progress percentage (default 0)\n device._cmd_progress = defaultdict(int)\n\n # _cmd_maximun_duration: command execution's expected maximum duration (sec.)\n # implemented as a default dictionary:\n # keys: the command name in lower case(configurescan, assignresources,..)\n # values: the expected maximum duration in sec.\n device._cmd_maximum_duration = defaultdict(float)\n\n # _cmd_measure_duration: command execution's measured duration (sec.)\n # implemented as a default dictionary:\n # keys: the command name in lower case(configurescan, assignresources,..)\n # values: the measured execution time (sec.)\n device._cmd_measured_duration = defaultdict(float)\n\n # _timeout_expired: boolean flag to signal timeout during command execution.\n # To check and reset before a command execution.\n # keys: the command name in lower case(configurescan, assignresources,..)\n # values: True/False\n device._timeout_expired = defaultdict(bool)\n # configure the flags to push event from the device server\n device.set_change_event(\"configureScanTimeoutExpiredFlag\", True, True)\n device.set_archive_event(\"configureScanTimeoutExpiredFlag\", True, True)\n device.set_change_event(\"assignResourcesTimeoutExpiredFlag\", True, True)\n device.set_archive_event(\"assignResourcesTimeoutExpiredFlag\", True, True)\n device.set_change_event(\"releaseResourcesTimeoutExpiredFlag\", True, True)\n device.set_archive_event(\"releaseResourcesTimeoutExpiredFlag\", True, True)\n\n message = \"CspSubElementSubarray Init command completed OK\"\n device.logger.info(message)\n return (ResultCode.OK, message)", "def _validate_expose_in_dhcp_and_mac(self):\n from ralph.networks.models import IPAddress\n try:\n if not self.mac and self.ipaddress.dhcp_expose:\n raise ValidationError(\n _('MAC cannot be empty if record is exposed in DHCP')\n )\n except IPAddress.DoesNotExist:\n pass", "def __init__(self, busRestriction=0, devAddressRestriction=0, serialNumber=\"\"):\n self.handle = libcaer.caerDeviceOpen(1, libcaer.CAER_DEVICE_DAVIS, busRestriction, devAddressRestriction, serialNumber)\n self.info = libcaer.caerDavisInfoGet(self.handle)\n\n print(\"device ID: \" + str(libcaer.caer_davis_info_deviceID_get(self.info)))\n\n if (libcaer.caer_davis_info_deviceIsMaster_get(self.info)):\n print(\"device is Master\")\n else:\n print(\"device is Slave\")\n\n print(\"device Serial Number: \" + str(libcaer.caer_davis_info_deviceSerialNumber_get(self.info)))\n print(libcaer.caer_davis_info_deviceString_get(self.info))\n\n self.dvsSizeX = libcaer.caer_davis_info_dvsSizeX_get(self.info)\n self.dvsSizeY = libcaer.caer_davis_info_dvsSizeY_get(self.info)\n\n self.apsSizeX = libcaer.caer_davis_info_apsSizeX_get(self.info)\n self.apsSizeY = libcaer.caer_davis_info_apsSizeY_get(self.info)\n\n # init default biases\n ret = libcaer.caerDeviceSendDefaultConfig(self.handle)\n if(ret == True):\n print(\"Default biases loaded\")\n else:\n print(\"Error while loading default biases\")\n raise Exception\n\n # set blocking data exchange\n ret = libcaer.caerDeviceConfigSet(self.handle, libcaer.CAER_HOST_CONFIG_DATAEXCHANGE, libcaer.CAER_HOST_CONFIG_DATAEXCHANGE_BLOCKING, True)\n if(ret == True):\n print(\"Data exchange set to blocking mode\")\n else:\n print(\"Error in communicating with the device, please check your setup\")\n raise Exception\n\n # start data transfer from device\n ret = libcaer.caerDeviceDataStart(self.handle, None, None, None, None, None)\n if(ret == True):\n print(\"Data transfer started\")\n else:\n 
print(\"Error in starting data transfer\")\n raise Exception", "def get_device_list(ip_address, mac_address, selected_device, i_display_start, i_display_length, s_search, sEcho, sSortDir_0, iSortCol_0, userid=None, html_var={}): # ,sSortDir_0,iSortCol_0\n # This is a empty list variable used for storing the device list\n device_list = []\n master_slave_select = []\n master_slave = \"\"\n device_dict = {}\n device_type = selected_device\n l = 0\n if device_type == '' or device_type == None:\n device_type = 'odu'\n device_list_state = \"enabled\"\n global sqlalche_obj\n # try block starts\n try:\n # here we create the session of sqlalchemy\n\n # this is the query which returns the multidimensional array of hosts table and store in device_tuple\n# device_tuple = sqlalche_obj.session.query(Hosts.host_id,Hosts.host_alias,Hosts.ip_address,Hosts.mac_address,Hosts.device_type_id,Hosts.reconcile_health,Hosts.config_profile_id).\\\n# filter(and_(Hosts.is_deleted == 0,Hosts.ip_address.like('%s%%'%(ip_address)),\\\n# Hosts.mac_address.like('%s%%'%(mac_address)),Hosts.device_type_id.like('%s%%'%(device_type)),UsersGroups.user_id=='%s'%(userid),\\\n# UsersGroups.group_id==HostgroupsGroups.group_id,HostsHostgroups.hostgroup_id==HostgroupsGroups.hostgroup_id,Hosts.host_id==HostsHostgroups.host_id))\\\n# .order_by(Hosts.host_alias).order_by(Hosts.ip_address).all()\n\n device_dict = data_table_data_sqlalchemy(\n ip_address, mac_address, selected_device, i_display_start, i_display_length, s_search, sEcho, sSortDir_0, iSortCol_0, userid, html_var)\n # return device_dict\n device_tuple = device_dict[\"aaData\"]\n index = int(device_dict[\"i_display_start\"])\n sqlalche_obj.sql_alchemy_db_connection_open()\n device_status_host_id = \"\"\n global host_status_dic\n global essential_obj\n slave_data = \"-\"\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[0]\n ru_op_state = 1\n ra_op_state = 1\n sync_op_state = 1\n #[36, \"172.22.0.111\", \"Default\", \"172.22.0.111\", \"FF:FF:FF:FF:FF:FF\", \" \", \"odu16\", 0, 304]\n # this loop create a mutildimesional list of host\n for i in range(0, len(device_tuple)):\n if device_tuple[i][6] == \"odu16\":\n master_slave_select = sqlalche_obj.session.query(\n GetOdu16_ru_conf_table.default_node_type).filter(GetOdu16_ru_conf_table.host_id == device_tuple[i][0]).all()\n if len(master_slave_select) > 0:\n if int(master_slave_select[0][0]) == 0 or int(master_slave_select[0][0]) == 2:\n slave_data = \"-\"\n master_slave = \"RM18 (M)\"\n else:\n slave_data = \"\"\n host_alias = \"\"\n\n master_host_id = sqlalche_obj.session.query(MasterSlaveLinking.master, Hosts.host_alias, Hosts.host_asset_id).\\\n outerjoin(Hosts, MasterSlaveLinking.slave == Hosts.host_id).filter(\n MasterSlaveLinking.slave == device_tuple[i][0]).all()\n\n # master_host_id =\n # sqlalche_obj.session.query(MasterSlaveLinking.master).filter(MasterSlaveLinking.slave\n # == device_tuple[i][0]).all()\n if len(master_host_id) > 0:\n # host_data =\n # sqlalche_obj.session.query(Hosts.host_alias,Hosts.host_asset_id).filter(and_(Hosts.host_id==master_host_id[0][0],Hosts.is_deleted==0)).all()\n host_alias = master_host_id[0].host_alias\n # else:\n # host_alias = \"\"\n## host_asset_data = sqlalche_obj.session.query(HostAssets.ra_mac).filter(HostAssets.host_asset_id==host_data[0].host_asset_id).all()\n# master_mac = str(host_asset_data[0].ra_mac if len(host_asset_data)>0\n# else \"\")\n\n peer_status = sqlalche_obj.session.query(GetOdu16PeerNodeStatusTable.sig_strength, 
GetOdu16PeerNodeStatusTable.link_status).\\\n filter(GetOdu16PeerNodeStatusTable.host_id == device_tuple[i][0]).\\\n order_by(desc(GetOdu16PeerNodeStatusTable.get_odu16_peer_node_status_table_id)\n ).limit(1).all()\n if len(peer_status) > 0:\n if peer_status[0].sig_strength == None:\n slave_data = str(host_alias) + \" ( )\"\n elif int(peer_status[0].sig_strength) == 1111111:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n\n else:\n if peer_status[0].link_status == 1:\n slave_data = str(\n host_alias) + \"( Link Disconnected )\"\n else:\n slave_data = str(host_alias) + \" (\" + str(\n peer_status[0].sig_strength) + \"dBm)\"\n else:\n if host_alias != \"\" and host_alias != None:\n slave_data = str(host_alias) + \"(-)\"\n master_slave = \"RM18 (S)\"\n else:\n master_slave = \"RM18 (-)\"\n\n ru_data = sqlalche_obj.session.query(Odu100RuConfTable.adminstate, Odu100RaConfTable.raAdminState, Odu100SyncConfigTable.adminStatus).\\\n outerjoin(Odu100RaConfTable, Odu100RuConfTable.config_profile_id == Odu100RaConfTable.config_profile_id).\\\n outerjoin(Odu100SyncConfigTable, Odu100RuConfTable.config_profile_id == Odu100SyncConfigTable.config_profile_id).\\\n filter(\n Odu100RuConfTable.config_profile_id == device_tuple[i][8]).all()\n# ru_data = sqlalche_obj.session.query(SetOdu16RUConfTable.adminstate).filter(SetOdu16RUConfTable.config_profile_id==device_tuple[i][8]).all()\n# ra_data = sqlalche_obj.session.query(SetOdu16RAConfTable.raAdminState).filter(SetOdu16RAConfTable.config_profile_id==device_tuple[i][8]).all()\n# sync_data =\n# sqlalche_obj.session.query(SetOdu16SyncConfigTable.adminStatus).filter(SetOdu16SyncConfigTable.config_profile_id==device_tuple[i][8]).all()\n\n if len(ru_data) > 0 and ru_data[0][0] != None and int(ru_data[0][0]) != 0:\n ru_state = 0\n image_ru_title = \"RU State Locked\"\n\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n\n if len(ru_data) > 0 and ru_data[0][0] != None and int(ru_data[0][0]) != 0:\n ra_state = 0\n image_ra_title = \"RA State Locked\"\n else:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n\n if len(ru_data) > 0 and ru_data[0][0] != None and int(ru_data[0][0]) != 0:\n sync_state = 0\n image_sync_title = \" SYNC State Locked\"\n else:\n sync_state = 0\n image_sync_title = \" SYNC State Unlocked\"\n\n else:\n master_slave_select = sqlalche_obj.session.query(Odu100RuConfTable.defaultNodeType).filter(\n Odu100RuConfTable.config_profile_id == device_tuple[i][8]).all()\n if len(master_slave_select) > 0:\n if master_slave_select[0][0] == 0 or master_slave_select[0][0] == 2:\n slave_data = \"-\"\n master_slave = \"RM (M)\"\n else:\n # slave_data = \"0\"\n slave_data = \"\"\n host_alias = \"\"\n\n # master_host_id =\n # sqlalche_obj.session.query(MasterSlaveLinking.master).filter(MasterSlaveLinking.slave\n # == device_tuple[i][0]).all()\n\n master_host_id = sqlalche_obj.session.query(MasterSlaveLinking.master, Hosts.host_alias, Hosts.host_asset_id).\\\n outerjoin(Hosts, MasterSlaveLinking.slave == Hosts.host_id).filter(\n MasterSlaveLinking.slave == device_tuple[i][0]).all()\n\n if len(master_host_id) > 0:\n # host_data =\n # sqlalche_obj.session.query(Hosts.host_alias,Hosts.host_asset_id).filter(Hosts.host_id==master_host_id[0][0]).all()\n host_alias = master_host_id[0].host_alias\n # else: # commented from raju\n # host_alias = \"\" # commented from raju\n\n## host_asset_data = sqlalche_obj.session.query(HostAssets.ra_mac).filter(HostAssets.host_asset_id==host_data[0].host_asset_id).all()\n## master_mac = 
str(host_asset_data[0].ra_mac if len(host_asset_data)>0 else \"\")+\",\"\n## peer_status = sqlalche_obj.session.query(Odu100PeerNodeStatusTable.sigStrength1).\\\n# filter(and_(Odu100PeerNodeStatusTable.host_id==device_tuple[i][0],or_(Odu100PeerNodeStatusTable.peerMacAddr==master_mac,Odu100PeerNodeStatusTable.sigStrength1==1))).order_by(desc(Odu100PeerNodeStatusTable.timestamp)).limit(1).all()\n\n peer_status = sqlalche_obj.session.query(Odu100PeerNodeStatusTable.sigStrength1, Odu100PeerNodeStatusTable.linkStatus).\\\n filter(Odu100PeerNodeStatusTable.host_id == device_tuple[i][0]).order_by(desc(Odu100PeerNodeStatusTable.odu100_peerNodeStatusTable_id)).\\\n limit(1).all()\n\n# peer_status = sqlalche_obj.session.query(Odu100PeerNodeStatusTable.sigStrength1,Odu100PeerNodeStatusTable.linkStatus).\\\n# filter(Odu100PeerNodeStatusTable.host_id==device_tuple[i][0]).order_by(desc(Odu100PeerNodeStatusTable.timestamp)).limit(1).all()\n\n if len(peer_status) > 0:\n if peer_status[0].sigStrength1 == None:\n slave_data = str(host_alias) + \"()\"\n elif int(peer_status[0].sigStrength1) == 1111111:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n elif int(peer_status[0].sigStrength1) == 1:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n else:\n if int(peer_status[0].linkStatus) == 1:\n slave_data = str(\n host_alias) + \" ( Link Disconnected )\"\n else:\n slave_data = str(host_alias) + \" (\" + str(\n peer_status[0].sigStrength1) + \"dBm)\"\n else:\n if host_alias != \"\" and host_alias != None:\n slave_data = str(host_alias) + \"(-)\"\n## else:\n## slave_data = \"(-)\"\n master_slave = \"RM (S)\"\n else:\n master_slave = \"RM (-)\"\n\n ru_data = sqlalche_obj.session.query(Odu100RuConfTable.adminstate, Odu100RaConfTable.raAdminState, Odu100SyncConfigTable.adminStatus).\\\n outerjoin(Odu100RaConfTable, Odu100RuConfTable.config_profile_id == Odu100RaConfTable.config_profile_id).\\\n outerjoin(Odu100SyncConfigTable, Odu100RuConfTable.config_profile_id == Odu100SyncConfigTable.config_profile_id).\\\n filter(\n Odu100RuConfTable.config_profile_id == device_tuple[i][8]).all()\n\n# ru_data = sqlalche_obj.session.query(Odu100RuConfTable.adminstate).filter(Odu100RuConfTable.config_profile_id==device_tuple[i][8]).all()\n# ra_data = sqlalche_obj.session.query(Odu100RaConfTable.raAdminState).filter(Odu100RaConfTable.config_profile_id==device_tuple[i][8]).all()\n# sync_data =\n# sqlalche_obj.session.query(Odu100SyncConfigTable.adminStatus).filter(Odu100SyncConfigTable.config_profile_id==device_tuple[i][8]).all()\n\n # host_count =\n # session.query(Hosts).filter(and_(or_(Hosts.host_alias ==\n # host_alias,Hosts.ip_address == ip_address, Hosts.mac_address ==\n # mac_address),Hosts.is_deleted == 0)).count()\n\n ru_status = sqlalche_obj.session.query(Odu100RuStatusTable.ruoperationalState).filter(\n Odu100RuStatusTable.host_id == device_tuple[i][0]).all()\n ra_status = sqlalche_obj.session.query(Odu100RaStatusTable.raoperationalState).filter(\n Odu100RaStatusTable.host_id == device_tuple[i][0]).order_by(desc(Odu100RaStatusTable.timestamp)).all()\n sync_status = sqlalche_obj.session.query(Odu100SynchStatusTable.syncoperationalState).filter(\n Odu100SynchStatusTable.host_id == device_tuple[i][0]).order_by(desc(Odu100SynchStatusTable.timestamp)).all()\n\n if len(ru_status) > 0 and ru_status[0].ruoperationalState != None:\n ru_op_state = ru_status[0].ruoperationalState\n else:\n ru_op_state = 1\n\n if len(ra_status) > 0 and ra_status[0].raoperationalState != None:\n ra_op_state = 
ra_status[0].raoperationalState\n else:\n ra_op_state = 1\n\n if len(sync_status) > 0 and sync_status[0].syncoperationalState == None:\n sync_op_state = sync_status[0].syncoperationalState\n else:\n sync_op_state = 1\n\n if len(ru_data) > 0:\n if ru_data[0][0] != None and (int(ru_data[0][0]) == 0 or int(ru_op_state) == 0):\n ru_state = 0\n image_ru_title = \"RU State Locked\"\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n\n if ru_data[0][1] != None and (int(ru_data[0][1]) == 0 or int(ra_op_state) == 0):\n ra_state = 0\n image_ra_title = \"RA State Locked\"\n else:\n ra_state = 1\n image_ra_title = \"RA State UnLocked\"\n\n if ru_data[0][2] != None and (int(ru_data[0][2]) == 0 or int(sync_op_state) == 0):\n sync_state = 0\n image_sync_title = \"SYNC State Locked\"\n else:\n sync_state = 1\n image_sync_title = \"SYNC State UnLocked\"\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n\n ra_state = 1\n image_ra_title = \"RA State UnLocked\"\n\n sync_state = 1\n image_sync_title = \"SYNC State UnLocked\"\n\n# if device_tuple[i][6] == \"odu100\":\n# if len(ru_data)>0:\n# if ru_data[0][0]==None:\n# ru_state = 1\n# image_ru_title = \"RU State UnLocked\"\n# else:\n# if int(ru_data[0][0])==0:\n# ru_state = 0\n# image_ru_title = \"RU State Locked\"\n# else:\n# if int(ru_op_state)==0:\n# ru_state = 0\n# image_ru_title = \"RU State UnLocked\"\n# else:\n# ru_state = 1\n# image_ru_title = \"RU State UnLocked\"\n# else:\n# ru_state = 1\n# image_ru_title = \"RU State UnLocked\"\n#\n# if len(ru_data)>0:\n# if ru_data[0][1]==None:\n# ra_state = 1\n# image_ra_title = \"RA State Unlocked\"\n#\n# else:\n# if int(ru_data[0][1]) == 0:\n# ra_state = 0\n# image_ra_title = \"RA State Locked\"\n#\n# else:\n# if int(ra_op_state)==0:\n# ra_state = 0\n# image_ra_title = \"RA State Unlocked\"\n# else:\n# ra_state = 1\n# image_ra_title = \"RA State Unlocked\"\n# else:\n# ra_state = 1\n# image_ra_title = \"RA State Unlocked\"\n#\n# if len(ru_data)>0:\n# if ru_data[0][2]==None:\n# sync_state = 1\n# image_sync_title = \" SYNC State Unlocked\"\n#\n# else:\n# if int(ru_data[0][2]) == 0:\n# sync_state = 0\n# image_sync_title = \"SYNC State Locked\"\n#\n# else:\n# if int(sync_op_state)==0:\n# sync_state = 0\n# image_sync_title = \" SYNC State Unlocked\"\n# else:\n# sync_state = 1\n# image_sync_title = \" SYNC State Unlocked\"\n# else:\n# sync_state = 1\n# image_sync_title = \" SYNC State Unlocked\"\n# else:\n# if len(ru_data)>0:\n# if ru_data[0][0]==None:\n# ru_state = 1\n# image_ru_title = \"RU State UnLocked\"\n#\n# else:\n# if int(ru_data[0][0])==0:\n# ru_state = 0\n# image_ru_title = \"RU State Locked\"\n#\n# else:\n# ru_state = 1\n# image_ru_title = \"RU State UnLocked\"\n#\n# else:\n# ru_state = 1\n# image_ru_title = \"RU State UnLocked\"\n#\n# if len(ru_data)>0:\n# if ru_data[0][1]==None:\n# ra_state = 1\n# image_ra_title = \"RA State Unlocked\"\n#\n# else:\n# if int(ru_data[0][1]) == 0:\n# ra_state = 0\n# image_ra_title = \"RA State Locked\"\n#\n# else:\n# ra_state = 1\n# image_ra_title = \"RA State Unlocked\"\n#\n# else:\n# ra_state = 1\n# image_ra_title = \"RA State Unlocked\"\n#\n# if len(ru_data)>0:\n# if ru_data[0][2]==None:\n# sync_state = 1\n# image_sync_title = \" SYNC State Unlocked\"\n#\n# else:\n# if int(ru_data[0][2]) == 0:\n# sync_state = 0\n# image_sync_title = \"SYNC State Locked\"\n#\n# else:\n# sync_state = 1\n# image_sync_title = \" SYNC State Unlocked\"\n#\n# else:\n# sync_state = 1\n# image_sync_title = \" SYNC State Unlocked\"\n if device_tuple[i][7] <= 35:\n images 
= 'images/new/r-red.png'\n elif device_tuple[i][7] <= 90:\n images = 'images/new/r-black.png'\n else:\n images = 'images/new/r-green.png'\n\n## admin_dic = {'ru_admin':1 if ru_data[0][0]==None else int(ru_data[0][0]) if len(ru_data)>0 else 1,\\\n## 'ra_admin':1 if ra_data[0][0]==None else int(ra_data[0][0]) if len(ra_data)>0 else 1,\\\n## 'sync_admin':1 if sync_data[0][0]==None else int(sync_data[0][0]) if len(sync_data)>0 else 1}\n\n snmp_up_time_data = sqlalche_obj.db.execute(\n \"select trap_event_id,timestamp from system_alarm_table where host_id='%s' order by timestamp desc limit 1\" % (device_tuple[i][0]))\n snmp_up_down_time = \"\"\n for row in snmp_up_time_data:\n snmp_up_down_time = row['trap_event_id']\n timer_val = datetime.strftime(\n row['timestamp'], \"%d-%b-%Y %a %I:%M:%S %p\")\n\n if snmp_up_down_time == '50001':\n device_status = \"Device Unreachable since \" + str(timer_val)\n device_status_image_path = \"images/temp/red_dot.png\"\n else:\n device_status = \"Device Reachable\"\n device_status_image_path = \"images/temp/green_dot.png\"\n\n# if snmp_up_down_time==\"\":\n# device_status = \"Device Reachable\"\n# device_status_image_path = \"images/temp/green_dot.png\"\n# elif int(snmp_up_down_time) == 50001:\n# device_status = \"Device Unreachable since \"+str(timer_val)\n# device_status_image_path = \"images/temp/red_dot.png\"\n# else:\n# device_status = \"Device Reachable\"\n# device_status_image_path = \"images/temp/green_dot.png\"\n op_status = essential_obj.get_hoststatus(device_tuple[i][0])\n if op_status == None:\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[0]\n elif op_status == 0:\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[op_status]\n else:\n op_img = \"images/host_status1.png\"\n op_title = host_status_dic[op_status]\n\n if i == len(device_tuple) - 1:\n device_status_host_id += str(device_tuple[i][0])\n else:\n device_status_host_id += str(device_tuple[i][0]) + \",\"\n\n monitoring_status = \"<a target=\\\"main\\\" href=\\\"%s?host_id=%s&device_type=%s&device_list_state=%s\\\"><img src=\\\"images/new/info.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Current Device Status\\\" class=\\\"imgbutton n-reconcile w-reconcile\\\"/></a>\" % ('sp_status_profiling.py',\n\n device_tuple[i][0], device_tuple[i][6], device_list_state) if device_tuple[i][6] == \"odu100\" else \"<img src=\\\"images/new/info1.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Current Device Status\\\" class=\\\"imgbutton n-reconcile\\\"/>\"\n\n live_monitoring = \"&nbsp;&nbsp;<a target=\\\"main\\\" href=\\\"live_monitoring.py?host_id=%s&device_type=%s\\\"><img src=\\\"images/new/star-empty.png\\\" title=\\\"Live Monitoring\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\" /></a>\" % (device_tuple[i][0], device_tuple[i][6])\\\n if device_tuple[i][6] == \"odu100\" else \"&nbsp;&nbsp;<img src=\\\"images/new/star-empty.png\\\" title=\\\"Live Monitoring Not Available\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\" />\"\n\n device_list.append(\n [\"<center><img id=\\\"device_status\\\" name=\\\"device_status\\\" src=\\\"%s\\\" title=\\\"%s\\\" style=\\\"width:8px;height:8px;\\\" class=\\\"imgbutton w-reconcile\\\" original-title=\\\"%s\\\" /></center>&nbsp;&nbsp;\" % (device_status_image_path, device_status, device_status), device_tuple[i][1], device_tuple[i][2], device_tuple[i][3], device_tuple[i][4], \"-\" if device_tuple[i][5] == \" \" else device_tuple[i][5], master_slave, 
slave_data,\n \"<ul class=\\\"button_group\\\" style=\\\"width:80px;\\\">\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.ruConfTable.adminstate\\\" name=\\\"ru.ruConfTable.adminstate\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.ruConfTable.adminstate')\\\">RU</a>\\\n </li>\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.syncClock.syncConfigTable.adminStatus\\\" name=\\\"ru.syncClock.syncConfigTable.adminStatus\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.syncClock.syncConfigTable.adminStatus')\\\">SYN</a>\\\n </li>\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.ra.raConfTable.raAdminState\\\" name=\\\"ru.ra.raConfTable.raAdminState\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.ra.raConfTable.raAdminState')\\\">RA</a>\\\n </li>\\\n </ul>\"\n % (\n \"red\" if ru_state == 0 else \"green\", image_ru_title, ru_state, device_tuple[\n i][0], device_tuple[i][6],\n \"red\" if sync_state == 0 else \"green\", image_sync_title, sync_state, device_tuple[\n i][0], device_tuple[i][6],\n \"red\" if ra_state == 0 else \"green\", image_ra_title, ra_state, device_tuple[i][0], device_tuple[i][6]),\n \"<a target=\\\"main\\\" href=\\\"odu_profiling.py?host_id=%s&device_type=%s&device_list_state=%s\\\">\\\n <img id=\\\"%s\\\" src=\\\"images/new/edit.png\\\" title=\\\"Edit Profile\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;&nbsp;\\\n <a target=\\\"main\\\" href=\\\"%s?host_id=%s&device_type=%s&device_list_state=%s\\\"><img src=\\\"images/new/graph.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Performance Monitoring\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;\\\n <a target=\\\"main\\\" href=\\\"status_snmptt.py?ip_address=%s-\\\"><img src=\\\"images/new/alert.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Device Alarm\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;\\\n <a target=\\\"main\\\" href=\\\"javascript:apFormwareUpdate('%s','%s','%s');\\\"><img src=\\\"images/new/update.png\\\" title=\\\"Firmware Upgrade\\\" class=\\\"imgbutton n-reconcile\\\"/ ></a>&nbsp;\\\n <img src=\\\"%s\\\" title=\\\"Reconciliation %s%% Done\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile imgEditodu16\\\" onclick=\\\"imgReconcile(this,'%s','%s','odu16_','imgodu16'); state_rec=0\\\"/>\\\n %s&nbsp;%s&nbsp;\\\n <img src=\\\"images/new/cong_download.png\\\" title=\\\"Configuration Download \\\" class=\\\"imgbutton n-reconcile\\\" onclick=\\\"cnfigurationReportDownload(%s);\\\" />\\\n %s\"\n % (\n device_tuple[\n i][\n 0], device_tuple[\n i][6], device_list_state,\n device_tuple[i][0], 'sp_dashboard_profiling.py' if device_tuple[i][\n 6] == \"odu100\" else 'sp_dashboard_profiling.py',\n device_tuple[i][\n 0], device_tuple[\n i][\n 6], device_list_state, device_tuple[i][3],\n device_tuple[i][0], device_tuple[i][\n 6], device_list_state, images, device_tuple[i][7], device_tuple[i][0], device_tuple[i][6],\n live_monitoring, monitoring_status, device_tuple[\n i][0],\n \"<input type=\\\"hidden\\\" value=\\\"%s\\\" name=\\\"host_id\\\" id=\\\"host_id\\\" />\" % (device_status_host_id) if i == len(device_tuple) - 1 else \"\"), \"<center><img id=\\\"operation_status\\\" name=\\\"operation_status\\\" src=\\\"%s\\\" title=\\\"%s\\\" style=\\\"width:12px;height:12px;\\\"class=\\\"imgbutton n-reconcile\\\" original-title=\\\"%s\\\"/></center>&nbsp;&nbsp;\" % (op_img, op_title, 
op_title)])\n\n device_dict[\"aaData\"] = device_list\n sqlalche_obj.sql_alchemy_db_connection_close()\n return device_dict\n # try block ends\n # href=\\\"javascript:apFormwareUpdate('%s','%s','%s');\n # exception starts\n except Exception as e:\n\n # return device_list\n sqlalche_obj.sql_alchemy_db_connection_close()\n output2 = {\n \"sEcho\": 1,\n \"iTotalRecords\": 10,\n \"iTotalDisplayRecords\": 10,\n \"aaData\": [],\n \"query\": str(e)\n }\n return output2\n finally:\n sqlalche_obj.sql_alchemy_db_connection_close()", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None", "def _ovs_dpdk_init_configured(self):\n cmd = 
'ovs-vsctl get open-vswitch . other_config:dpdk-init'\n for unit in zaza.model.get_units(self.application_name):\n result = zaza.utilities.juju.remote_run(\n unit.name,\n cmd,\n model_name=self.model_name,\n fatal=True).rstrip()\n assert result == '\"true\"', (\n 'DPDK not configured on {}'.format(unit.name))", "def main():\n\n args = TrafficScriptArg(['tx_src_ip', 'tx_dst_ip'])\n\n tx_if = args.get_arg('tx_if')\n rx_if = args.get_arg('rx_if')\n\n rxq = RxQueue(rx_if)\n txq = TxQueue(tx_if)\n\n tx_src_ip = args.get_arg('tx_src_ip')\n tx_dst_ip = args.get_arg('tx_dst_ip')\n\n sent_packets = []\n\n dhcp_discover = Ether(dst=\"ff:ff:ff:ff:ff:ff\") / \\\n IP(src=tx_src_ip, dst=tx_dst_ip) / \\\n UDP(sport=UDP_SERVICES.bootpc, dport=UDP_SERVICES.bootps) / \\\n BOOTP(op=1,) / \\\n DHCP(options=[(\"message-type\", \"discover\"),\n \"end\"])\n\n sent_packets.append(dhcp_discover)\n txq.send(dhcp_discover)\n\n for _ in range(10):\n dhcp_discover = rxq.recv(2)\n if is_discover(dhcp_discover):\n break\n else:\n raise RuntimeError(\"DHCP DISCOVER Rx timeout\")\n\n sys.exit(0)", "def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False", "def _validate_change_when_exposing_in_dhcp(self):\n if self.pk and settings.DHCP_ENTRY_FORBID_CHANGE:\n from ralph.networks.models import IPAddress\n old_obj = self.__class__._default_manager.get(pk=self.pk)\n try:\n if old_obj.ipaddress.dhcp_expose:\n if old_obj.mac != self.mac:\n raise ValidationError(\n 'Cannot change MAC when exposing in DHCP'\n )\n except IPAddress.DoesNotExist:\n pass", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def parse_dhcp_opt(options):\n char_found = False\n print(\" - DHCP -\")\n for option in options:\n warn = False\n if type(option) is tuple:\n opt_name = option[0]\n opt_value = format(option[1])\n if any((char in WARNCHARS) for char in opt_value):\n char_found, warn = True, True\n if warn is True:\n print(BOLD_RED + b' {}: {} {}'.format(opt_name, opt_value, WARNING) + END)\n else:\n print(b' {}: {}'.format(opt_name, opt_value))\n return char_found", "async def test_device_tracker_registered(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n device_tracker_watcher = dhcp.DeviceTrackerRegisteredWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await 
device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n async_dispatcher_send(\n hass,\n CONNECTED_DEVICE_REGISTERED,\n {\"ip\": \"192.168.210.56\", \"mac\": \"b8b7f16db533\", \"host_name\": \"connect\"},\n )\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()", "async def test_discovered_by_dhcp_connection_fails(\n hass: HomeAssistant, source, data\n) -> None:\n with patch(\n \"homeassistant.components.wiz.wizlight.getBulbConfig\",\n side_effect=WizLightTimeOutError,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"cannot_connect\"", "def get_device_list_old(ip_address, mac_address, selected_device, i_display_start, i_display_length, s_search, sEcho, sSortDir_0, iSortCol_0, userid=None, html_var={}): # ,sSortDir_0,iSortCol_0\n # This is a empty list variable used for storing the device list\n device_list = []\n master_slave_select = []\n master_slave = \"\"\n device_dict = {}\n device_type = selected_device\n l = 0\n if device_type == '' or device_type == None:\n device_type = 'odu'\n device_list_state = \"enabled\"\n global sqlalche_obj\n # try block starts\n try:\n # here we create the session of sqlalchemy\n\n # this is the query which returns the multidimensional array of hosts table and store in device_tuple\n# device_tuple = sqlalche_obj.session.query(Hosts.host_id,Hosts.host_alias,Hosts.ip_address,Hosts.mac_address,Hosts.device_type_id,Hosts.reconcile_health,Hosts.config_profile_id).\\\n# filter(and_(Hosts.is_deleted == 0,Hosts.ip_address.like('%s%%'%(ip_address)),\\\n# Hosts.mac_address.like('%s%%'%(mac_address)),Hosts.device_type_id.like('%s%%'%(device_type)),UsersGroups.user_id=='%s'%(userid),\\\n# UsersGroups.group_id==HostgroupsGroups.group_id,HostsHostgroups.hostgroup_id==HostgroupsGroups.hostgroup_id,Hosts.host_id==HostsHostgroups.host_id))\\\n# .order_by(Hosts.host_alias).order_by(Hosts.ip_address).all()\n\n device_dict = data_table_data_sqlalchemy(\n ip_address, mac_address, selected_device, i_display_start, i_display_length, s_search, sEcho, sSortDir_0, iSortCol_0, userid, html_var)\n # return device_dict\n device_tuple = device_dict[\"aaData\"]\n index = int(device_dict[\"i_display_start\"])\n sqlalche_obj.sql_alchemy_db_connection_open()\n device_status_host_id = \"\"\n global host_status_dic\n global essential_obj\n slave_data = \"-\"\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[0]\n ru_op_state = 1\n ra_op_state = 1\n sync_op_state = 1\n #[36, \"172.22.0.111\", \"Default\", \"172.22.0.111\", \"FF:FF:FF:FF:FF:FF\", \" \", \"odu16\", 0, 304]\n # this loop create a mutildimesional list of host\n for i in range(0, len(device_tuple)):\n if device_tuple[i][6] == \"odu16\":\n master_slave_select = sqlalche_obj.session.query(\n GetOdu16_ru_conf_table.default_node_type).filter(GetOdu16_ru_conf_table.host_id == device_tuple[i][0]).all()\n if len(master_slave_select) > 0:\n if int(master_slave_select[0][0]) == 0 or 
int(master_slave_select[0][0]) == 2:\n slave_data = \"-\"\n master_slave = \"RM18 (M)\"\n else:\n slave_data = \"\"\n master_host_id = sqlalche_obj.session.query(MasterSlaveLinking.master).filter(\n MasterSlaveLinking.slave == device_tuple[i][0]).all()\n if len(master_host_id) > 0:\n host_data = sqlalche_obj.session.query(Hosts.host_alias, Hosts.host_asset_id).filter(\n and_(Hosts.host_id == master_host_id[0][0], Hosts.is_deleted == 0)).all()\n host_alias = host_data[0].host_alias\n else:\n host_alias = \"\"\n## host_asset_data = sqlalche_obj.session.query(HostAssets.ra_mac).filter(HostAssets.host_asset_id==host_data[0].host_asset_id).all()\n# master_mac = str(host_asset_data[0].ra_mac if len(host_asset_data)>0\n# else \"\")\n peer_status = sqlalche_obj.session.query(GetOdu16PeerNodeStatusTable.sig_strength, GetOdu16PeerNodeStatusTable.link_status).\\\n filter(GetOdu16PeerNodeStatusTable.host_id == device_tuple[i][0]).order_by(\n desc(GetOdu16PeerNodeStatusTable.timestamp)).limit(1).all()\n if len(peer_status) > 0:\n if peer_status[0].sig_strength == None:\n slave_data = str(host_alias) + \" ( )\"\n elif int(peer_status[0].sig_strength) == 1111111:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n\n else:\n if peer_status[0].link_status == 1:\n slave_data = str(\n host_alias) + \"( Link Disconnected )\"\n else:\n slave_data = str(host_alias) + \" (\" + str(\n peer_status[0].sig_strength) + \"dBm)\"\n else:\n if host_alias != \"\" and host_alias != None:\n slave_data = str(host_alias) + \"(-)\"\n master_slave = \"RM18 (S)\"\n else:\n master_slave = \"RM18 (-)\"\n ru_data = sqlalche_obj.session.query(SetOdu16RUConfTable.adminstate).filter(\n SetOdu16RUConfTable.config_profile_id == device_tuple[i][8]).all()\n ra_data = sqlalche_obj.session.query(SetOdu16RAConfTable.raAdminState).filter(\n SetOdu16RAConfTable.config_profile_id == device_tuple[i][8]).all()\n sync_data = sqlalche_obj.session.query(SetOdu16SyncConfigTable.adminStatus).filter(\n SetOdu16SyncConfigTable.config_profile_id == device_tuple[i][8]).all()\n else:\n master_slave_select = sqlalche_obj.session.query(Odu100RuConfTable.defaultNodeType).filter(\n Odu100RuConfTable.config_profile_id == device_tuple[i][8]).all()\n if len(master_slave_select) > 0:\n if master_slave_select[0][0] == 0 or master_slave_select[0][0] == 2:\n slave_data = \"-\"\n master_slave = \"RM (M)\"\n else:\n # slave_data = \"0\"\n slave_data = \"\"\n master_host_id = sqlalche_obj.session.query(MasterSlaveLinking.master).filter(\n MasterSlaveLinking.slave == device_tuple[i][0]).all()\n if len(master_host_id) > 0:\n host_data = sqlalche_obj.session.query(\n Hosts.host_alias, Hosts.host_asset_id).filter(Hosts.host_id == master_host_id[0][0]).all()\n host_alias = host_data[0].host_alias\n else:\n host_alias = \"\"\n## host_asset_data = sqlalche_obj.session.query(HostAssets.ra_mac).filter(HostAssets.host_asset_id==host_data[0].host_asset_id).all()\n## master_mac = str(host_asset_data[0].ra_mac if len(host_asset_data)>0 else \"\")+\",\"\n## peer_status = sqlalche_obj.session.query(Odu100PeerNodeStatusTable.sigStrength1).\\\n# filter(and_(Odu100PeerNodeStatusTable.host_id==device_tuple[i][0],or_(Odu100PeerNodeStatusTable.peerMacAddr==master_mac,Odu100PeerNodeStatusTable.sigStrength1==1))).order_by(desc(Odu100PeerNodeStatusTable.timestamp)).limit(1).all()\n peer_status = sqlalche_obj.session.query(Odu100PeerNodeStatusTable.sigStrength1, Odu100PeerNodeStatusTable.linkStatus).\\\n filter(Odu100PeerNodeStatusTable.host_id == device_tuple[i][0]\n 
).order_by(desc(Odu100PeerNodeStatusTable.timestamp)).limit(1).all()\n if len(peer_status) > 0:\n if peer_status[0].sigStrength1 == None:\n slave_data = str(host_alias) + \"()\"\n elif int(peer_status[0].sigStrength1) == 1111111:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n elif int(peer_status[0].sigStrength1) == 1:\n slave_data = str(\n host_alias) + \" (Device Unreachable)\"\n else:\n if int(peer_status[0].linkStatus) == 1:\n slave_data = str(\n host_alias) + \" ( Link Disconnected )\"\n else:\n slave_data = str(host_alias) + \" (\" + str(\n peer_status[0].sigStrength1) + \"dBm)\"\n else:\n if host_alias != \"\" and host_alias != None:\n slave_data = str(host_alias) + \"(-)\"\n## else:\n## slave_data = \"(-)\"\n master_slave = \"RM (S)\"\n else:\n master_slave = \"RM (-)\"\n ru_data = sqlalche_obj.session.query(Odu100RuConfTable.adminstate).filter(\n Odu100RuConfTable.config_profile_id == device_tuple[i][8]).all()\n ra_data = sqlalche_obj.session.query(Odu100RaConfTable.raAdminState).filter(\n Odu100RaConfTable.config_profile_id == device_tuple[i][8]).all()\n sync_data = sqlalche_obj.session.query(Odu100SyncConfigTable.adminStatus).filter(\n Odu100SyncConfigTable.config_profile_id == device_tuple[i][8]).all()\n ru_status = sqlalche_obj.session.query(Odu100RuStatusTable.ruoperationalState).filter(\n Odu100RuStatusTable.host_id == device_tuple[i][0]).all()\n ra_status = sqlalche_obj.session.query(Odu100RaStatusTable.raoperationalState).filter(\n Odu100RaStatusTable.host_id == device_tuple[i][0]).order_by(desc(Odu100RaStatusTable.timestamp)).all()\n sync_status = sqlalche_obj.session.query(Odu100SynchStatusTable.syncoperationalState).filter(\n Odu100SynchStatusTable.host_id == device_tuple[i][0]).order_by(desc(Odu100SynchStatusTable.timestamp)).all()\n if len(ru_status) > 0:\n if ru_status[0].ruoperationalState == None:\n ru_op_state = 1\n else:\n ru_op_state = ru_status[0].ruoperationalState\n else:\n ru_op_state = 1\n\n if len(ra_status) > 0:\n if ra_status[0].raoperationalState == None:\n ra_op_state = 1\n else:\n ra_op_state = ra_status[0].raoperationalState\n else:\n ra_op_state = 1\n\n if len(sync_status) > 0:\n if sync_status[0].syncoperationalState == None:\n sync_op_state = 1\n else:\n sync_op_state = sync_status[0].syncoperationalState\n else:\n sync_op_state = 1\n if device_tuple[i][6] == \"odu100\":\n if len(ru_data) > 0:\n if ru_data[0][0] == None:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n else:\n if int(ru_data[0][0]) == 0:\n ru_state = 0\n image_ru_title = \"RU State Locked\"\n else:\n if int(ru_op_state) == 0:\n ru_state = 0\n image_ru_title = \"RU State UnLocked\"\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n\n if len(ra_data) > 0:\n if ra_data[0][0] == None:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n\n else:\n if int(ra_data[0][0]) == 0:\n ra_state = 0\n image_ra_title = \"RA State Locked\"\n\n else:\n if int(ra_op_state) == 0:\n ra_state = 0\n image_ra_title = \"RA State Unlocked\"\n else:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n else:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n\n if len(sync_data) > 0:\n if sync_data[0][0] == None:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n\n else:\n if int(sync_data[0][0]) == 0:\n sync_state = 0\n image_sync_title = \"SYNC State Locked\"\n\n else:\n if int(sync_op_state) == 0:\n sync_state = 0\n image_sync_title = \" SYNC State Unlocked\"\n else:\n 
sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n else:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n else:\n if len(ru_data) > 0:\n if ru_data[0][0] == None:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n\n else:\n if int(ru_data[0][0]) == 0:\n ru_state = 0\n image_ru_title = \"RU State Locked\"\n\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n\n else:\n ru_state = 1\n image_ru_title = \"RU State UnLocked\"\n\n if len(ra_data) > 0:\n if ra_data[0][0] == None:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n\n else:\n if int(ra_data[0][0]) == 0:\n ra_state = 0\n image_ra_title = \"RA State Locked\"\n\n else:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n\n else:\n ra_state = 1\n image_ra_title = \"RA State Unlocked\"\n\n if len(sync_data) > 0:\n if sync_data[0][0] == None:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n\n else:\n if int(sync_data[0][0]) == 0:\n sync_state = 0\n image_sync_title = \"SYNC State Locked\"\n\n else:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n\n else:\n sync_state = 1\n image_sync_title = \" SYNC State Unlocked\"\n\n if device_tuple[i][7] <= 35:\n images = 'images/new/r-red.png'\n elif device_tuple[i][7] <= 90:\n images = 'images/new/r-black.png'\n else:\n images = 'images/new/r-green.png'\n\n## admin_dic = {'ru_admin':1 if ru_data[0][0]==None else int(ru_data[0][0]) if len(ru_data)>0 else 1,\\\n## 'ra_admin':1 if ra_data[0][0]==None else int(ra_data[0][0]) if len(ra_data)>0 else 1,\\\n## 'sync_admin':1 if sync_data[0][0]==None else int(sync_data[0][0]) if len(sync_data)>0 else 1}\n\n snmp_up_time_data = sqlalche_obj.db.execute(\n \"select trap_event_id,timestamp from system_alarm_table where host_id='%s' order by timestamp desc limit 1\" % (device_tuple[i][0]))\n snmp_up_down_time = \"\"\n for row in snmp_up_time_data:\n snmp_up_down_time = row['trap_event_id']\n timer_val = datetime.strftime(\n row['timestamp'], \"%d-%b-%Y %a %I:%M:%S %p\")\n if snmp_up_down_time == \"\":\n device_status = \"Device Reachable\"\n device_status_image_path = \"images/temp/green_dot.png\"\n elif int(snmp_up_down_time) == 50001:\n device_status = \"Device Unreachable since \" + str(timer_val)\n device_status_image_path = \"images/temp/red_dot.png\"\n else:\n device_status = \"Device Reachable\"\n device_status_image_path = \"images/temp/green_dot.png\"\n\n op_status = essential_obj.get_hoststatus(device_tuple[i][0])\n if op_status == None:\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[0]\n elif op_status == 0:\n op_img = \"images/host_status0.png\"\n op_title = host_status_dic[op_status]\n else:\n op_img = \"images/host_status1.png\"\n op_title = host_status_dic[op_status]\n\n if i == len(device_tuple) - 1:\n device_status_host_id += str(device_tuple[i][0])\n else:\n device_status_host_id += str(device_tuple[i][0]) + \",\"\n\n monitoring_status = \"<a target=\\\"main\\\" href=\\\"%s?host_id=%s&device_type=%s&device_list_state=%s\\\"><img src=\\\"images/new/info.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Current Device Status\\\" class=\\\"imgbutton n-reconcile w-reconcile\\\"/></a>\" % ('sp_status_profiling.py',\n\n device_tuple[i][0], device_tuple[i][6], device_list_state) if device_tuple[i][6] == \"odu100\" else \"<img src=\\\"images/new/info1.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Current Device Status\\\" class=\\\"imgbutton n-reconcile\\\"/>\"\n\n live_monitoring = \"&nbsp;&nbsp;<a target=\\\"main\\\" 
href=\\\"live_monitoring.py?host_id=%s&device_type=%s\\\"><img src=\\\"images/new/star-empty.png\\\" title=\\\"Live Monitoring\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\" /></a>\" % (device_tuple[i][0], device_tuple[i][6])\\\n if device_tuple[i][6] == \"odu100\" else \"&nbsp;&nbsp;<img src=\\\"images/new/star-empty.png\\\" title=\\\"Live Monitoring Not Available\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\" />\"\n\n device_list.append(\n [\"<center><img id=\\\"device_status\\\" name=\\\"device_status\\\" src=\\\"%s\\\" title=\\\"%s\\\" style=\\\"width:8px;height:8px;\\\" class=\\\"imgbutton w-reconcile\\\" original-title=\\\"%s\\\" /></center>&nbsp;&nbsp;\" % (device_status_image_path, device_status, device_status), device_tuple[i][1], device_tuple[i][2], device_tuple[i][3], device_tuple[i][4], \"-\" if device_tuple[i][5] == \" \" else device_tuple[i][5], master_slave, slave_data,\n \"<ul class=\\\"button_group\\\" style=\\\"width:80px;\\\">\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.ruConfTable.adminstate\\\" name=\\\"ru.ruConfTable.adminstate\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.ruConfTable.adminstate')\\\">RU</a>\\\n </li>\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.syncClock.syncConfigTable.adminStatus\\\" name=\\\"ru.syncClock.syncConfigTable.adminStatus\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.syncClock.syncConfigTable.adminStatus')\\\">SYN</a>\\\n </li>\\\n <li>\\\n <a class=\\\"%s n-reconcile\\\" id=\\\"ru.ra.raConfTable.raAdminState\\\" name=\\\"ru.ra.raConfTable.raAdminState\\\" title=\\\"%s\\\" state=\\\"%s\\\" onclick=\\\"adminStateCheck(event,this,'%s','%s','ru.ra.raConfTable.raAdminState')\\\">RA</a>\\\n </li>\\\n </ul>\"\n % (\n \"red\" if ru_state == 0 else \"green\", image_ru_title, ru_state, device_tuple[\n i][0], device_tuple[i][6],\n \"red\" if sync_state == 0 else \"green\", image_sync_title, sync_state, device_tuple[\n i][0], device_tuple[i][6],\n \"red\" if ra_state == 0 else \"green\", image_ra_title, ra_state, device_tuple[i][0], device_tuple[i][6]),\n \"<a target=\\\"main\\\" href=\\\"odu_profiling.py?host_id=%s&device_type=%s&device_list_state=%s\\\">\\\n <img id=\\\"%s\\\" src=\\\"images/new/edit.png\\\" title=\\\"Edit Profile\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;&nbsp;\\\n <a target=\\\"main\\\" href=\\\"%s?host_id=%s&device_type=%s&device_list_state=%s\\\"><img src=\\\"images/new/graph.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Performance Monitoring\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;\\\n <a target=\\\"main\\\" href=\\\"status_snmptt.py?ip_address=%s-\\\"><img src=\\\"images/new/alert.png\\\" style=\\\"width:16px;height:16px;\\\" title=\\\"Device Alarm\\\" class=\\\"imgbutton n-reconcile\\\"/></a>&nbsp;\\\n <a target=\\\"main\\\" href=\\\"javascript:apFormwareUpdate('%s','%s','%s');\\\"><img src=\\\"images/new/update.png\\\" title=\\\"Firmware Upgrade\\\" class=\\\"imgbutton n-reconcile\\\"/ ></a>&nbsp;\\\n <img src=\\\"%s\\\" title=\\\"Reconciliation %s%% Done\\\" style=\\\"width:16px;height:16px;\\\" class=\\\"imgbutton n-reconcile imgEditodu16\\\" onclick=\\\"imgReconcile(this,'%s','%s','odu16_','imgodu16'); state_rec=0\\\"/>\\\n %s&nbsp;%s\\\n %s\"\n % (\n device_tuple[\n i][\n 0], device_tuple[\n i][6], device_list_state,\n device_tuple[i][0], 'sp_dashboard_profiling.py' if 
device_tuple[i][\n 6] == \"odu100\" else 'sp_dashboard_profiling.py',\n device_tuple[i][\n 0], device_tuple[\n i][\n 6], device_list_state, device_tuple[i][3],\n device_tuple[i][0], device_tuple[i][\n 6], device_list_state, images, device_tuple[i][7], device_tuple[i][0], device_tuple[i][6],\n live_monitoring, monitoring_status,\n \"<input type=\\\"hidden\\\" value=\\\"%s\\\" name=\\\"host_id\\\" id=\\\"host_id\\\" />\" % (device_status_host_id) if i == len(device_tuple) - 1 else \"\"), \"<center><img id=\\\"operation_status\\\" name=\\\"operation_status\\\" src=\\\"%s\\\" title=\\\"%s\\\" style=\\\"width:12px;height:12px;\\\"class=\\\"imgbutton n-reconcile\\\" original-title=\\\"%s\\\"/></center>&nbsp;&nbsp;\" % (op_img, op_title, op_title)])\n\n device_dict[\"aaData\"] = device_list\n sqlalche_obj.sql_alchemy_db_connection_close()\n return device_dict\n # try block ends\n # href=\\\"javascript:apFormwareUpdate('%s','%s','%s');\n # exception starts\n except Exception as e:\n\n # return device_list\n sqlalche_obj.sql_alchemy_db_connection_close()\n output2 = {\n \"sEcho\": 1,\n \"iTotalRecords\": 10,\n \"iTotalDisplayRecords\": 10,\n \"aaData\": [],\n \"query\": str(e)\n }\n return output2\n finally:\n sqlalche_obj.sql_alchemy_db_connection_close()", "def test_get_pci_device_list(self):\n pass", "def setup(hass: HomeAssistant, base_config: ConfigType) -> bool: # noqa: C901\n\n hass.data[DOMAIN] = {}\n\n # Parse configuration into a dict of device name to physical address\n # represented as a list of four elements.\n device_aliases = {}\n devices = base_config[DOMAIN].get(CONF_DEVICES, {})\n _LOGGER.debug(\"Parsing config %s\", devices)\n device_aliases.update(parse_mapping(devices))\n _LOGGER.debug(\"Parsed devices: %s\", device_aliases)\n\n platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)\n\n loop = (\n # Create own thread if more than 1 CPU\n hass.loop\n if multiprocessing.cpu_count() < 2\n else None\n )\n host = base_config[DOMAIN].get(CONF_HOST)\n display_name = base_config[DOMAIN].get(CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)\n if host:\n adapter = TcpAdapter(host, name=display_name, activate_source=False)\n else:\n adapter = CecAdapter(name=display_name[:12], activate_source=False)\n hdmi_network = HDMINetwork(adapter, loop=loop)\n\n def _adapter_watchdog(now=None):\n _LOGGER.debug(\"Reached _adapter_watchdog\")\n event.call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n if not adapter.initialized:\n _LOGGER.info(\"Adapter not initialized; Trying to restart\")\n hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)\n adapter.init()\n\n _adapter_watchdog_job = HassJob(_adapter_watchdog, cancel_on_shutdown=True)\n\n @callback\n def _async_initialized_callback(*_: Any):\n \"\"\"Add watchdog on initialization.\"\"\"\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n\n hdmi_network.set_initialized_callback(_async_initialized_callback)\n\n def _volume(call: ServiceCall) -> None:\n \"\"\"Increase/decrease volume and mute/unmute system.\"\"\"\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n _process_volume(KEY_VOLUME_UP, att)\n elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown 
command %s\", cmd)\n\n def _process_volume(cmd, att):\n if isinstance(att, (str,)):\n att = att.strip()\n if att == CMD_PRESS:\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n elif att == CMD_RELEASE:\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n else:\n att = 1 if att == \"\" else int(att)\n for _ in range(0, att):\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n\n def _tx(call: ServiceCall) -> None:\n \"\"\"Send CEC command.\"\"\"\n data = call.data\n if ATTR_RAW in data:\n command = CecCommand(data[ATTR_RAW])\n else:\n src = data.get(ATTR_SRC, ADDR_UNREGISTERED)\n dst = data.get(ATTR_DST, ADDR_BROADCAST)\n if ATTR_CMD in data:\n cmd = data[ATTR_CMD]\n else:\n _LOGGER.error(\"Attribute 'cmd' is missing\")\n return\n if ATTR_ATT in data:\n if isinstance(data[ATTR_ATT], (list,)):\n att = data[ATTR_ATT]\n else:\n att = reduce(lambda x, y: f\"{x}:{y:x}\", data[ATTR_ATT])\n else:\n att = \"\"\n command = CecCommand(cmd, dst, src, att)\n hdmi_network.send_command(command)\n\n def _standby(call: ServiceCall) -> None:\n hdmi_network.standby()\n\n def _power_on(call: ServiceCall) -> None:\n hdmi_network.power_on()\n\n def _select_device(call: ServiceCall) -> None:\n \"\"\"Select the active device.\"\"\"\n if not (addr := call.data[ATTR_DEVICE]):\n _LOGGER.error(\"Device not found: %s\", call.data[ATTR_DEVICE])\n return\n if addr in device_aliases:\n addr = device_aliases[addr]\n else:\n entity = hass.states.get(addr)\n _LOGGER.debug(\"Selecting entity %s\", entity)\n if entity is not None:\n addr = entity.attributes[\"physical_address\"]\n _LOGGER.debug(\"Address acquired: %s\", addr)\n if addr is None:\n _LOGGER.error(\n \"Device %s has not physical address\", call.data[ATTR_DEVICE]\n )\n return\n if not isinstance(addr, (PhysicalAddress,)):\n addr = PhysicalAddress(addr)\n hdmi_network.active_source(addr)\n _LOGGER.info(\"Selected %s (%s)\", call.data[ATTR_DEVICE], addr)\n\n def _update(call: ServiceCall) -> None:\n \"\"\"Update if device update is needed.\n\n Called by service, requests CEC network to update data.\n \"\"\"\n hdmi_network.scan()\n\n def _new_device(device):\n \"\"\"Handle new devices which are detected by HDMI network.\"\"\"\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )\n\n def _shutdown(call):\n hdmi_network.stop()\n\n def _start_cec(callback_event):\n \"\"\"Register services and start HDMI network to watch for devices.\"\"\"\n hass.services.register(\n DOMAIN, SERVICE_SEND_COMMAND, _tx, SERVICE_SEND_COMMAND_SCHEMA\n )\n hass.services.register(\n DOMAIN, SERVICE_VOLUME, _volume, schema=SERVICE_VOLUME_SCHEMA\n )\n hass.services.register(\n DOMAIN,\n SERVICE_UPDATE_DEVICES,\n _update,\n schema=SERVICE_UPDATE_DEVICES_SCHEMA,\n )\n hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)\n hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)\n hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, _select_device)\n\n hdmi_network.set_new_device_callback(_new_device)\n hdmi_network.start()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)\n return True", "async def test_aiodiscover_finds_new_hosts_after_interval(hass: HomeAssistant) -> None:\n with 
patch.object(hass.config_entries.flow, \"async_init\") as mock_init, patch(\n \"homeassistant.components.dhcp.DiscoverHosts.async_discover\",\n return_value=[],\n ):\n device_tracker_watcher = dhcp.NetworkWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 0\n\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init, patch(\n \"homeassistant.components.dhcp.DiscoverHosts.async_discover\",\n return_value=[\n {\n dhcp.DISCOVERY_IP_ADDRESS: \"192.168.210.56\",\n dhcp.DISCOVERY_HOSTNAME: \"connect\",\n dhcp.DISCOVERY_MAC_ADDRESS: \"b8b7f16db533\",\n }\n ],\n ):\n async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(minutes=65))\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )", "def write_option_67_to_dhcp_server(ssh_conn_obj, data):\n option_67_config = \"option bootfile-name\"\n if data.type == \"http\":\n config_json_url = \"http://{}{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"tftp\":\n config_json_url = \"tftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"ftp\":\n config_json_url = \"ftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n option_67_config_string = '{} \"{}\";'.format(option_67_config, config_json_url)\n if not basic_obj.write_update_file(ssh_conn_obj, option_67_config,\n option_67_config_string, data.dhcp_config_file):\n st.log(\"Written content in file {} not found\".format(data.dhcp_config_file))\n st.report_fail(\"content_not_found\")\n basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)\n if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):\n st.log(\"{} service not running\".format(data.dhcp_service_name))\n st.report_fail(\"service_not_running\", data.dhcp_service_name)", "def hsdpa_physical_downlink_settings_carrier2(self):\r\r\n carrier = 2\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 2)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -11\r\r\n self.set_pcpich_code_level(carrier=carrier, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -18.0\r\r\n hssch_level_2 = -18.0\r\r\n self.set_hssch_level(hssch_num=1, carrier=carrier, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=carrier, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=carrier, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=carrier, codeNum=7)\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % 
(\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=carrier)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=carrier, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n self.hsdsch_unsched_frames(carrier=carrier, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n hsdsch_level = -1.6\r\r\n self.set_hsdsch_level(carrier=carrier, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(carrier=carrier, code=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line", "def post_process(self, packet: 'dict[str, Any]') -> 'SMFIdentificationBasedDPDOption':\n ret = super().post_process(packet) # type: SMFIdentificationBasedDPDOption\n ret.mode = Enum_SMFDPDMode.I_DPD\n return ret", "async def async_setup(self) -> None:\n await self.hass.async_add_executor_job(self._setup)\n\n # set already known devices to away instead of unavailable\n device_registry = dr.async_get(self.hass)\n devices = dr.async_entries_for_config_entry(device_registry, self.entry_id)\n for device_entry in devices:\n if device_entry.via_device_id is None:\n continue # do not add the router itself\n\n device_mac = dict(device_entry.connections).get(dr.CONNECTION_NETWORK_MAC)\n self.devices[device_mac] = {\n \"mac\": device_mac,\n \"name\": device_entry.name,\n \"active\": False,\n \"last_seen\": dt_util.utcnow() - timedelta(days=365),\n \"device_model\": None,\n \"device_type\": None,\n \"type\": None,\n \"link_rate\": None,\n \"signal\": None,\n \"ip\": None,\n }\n\n await self.async_update_device_trackers()\n self.entry.async_on_unload(\n async_track_time_interval(\n self.hass, self.async_update_device_trackers, SCAN_INTERVAL\n )\n )\n\n async_dispatcher_send(self.hass, self.signal_device_new)", "def hardwareconfig():\r\n print('''\\n%s at %s acting as user %s\r\n\\nHardware Configuration Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Model\r\n 2 - Serial Number\r\n 3 - Hardware Generation\r\n 4 - Firmware version\r\n 5 - API Level\r\n 6 - Temperature and Fans\r\n 7 - ID LED Status\r\n 8 - ID LED on/off\r\n 9 - OS and CPU Load Averages\r\n 10 - TCAM Flows\r\n 11 - Memory Usage\r\n 12 - CCH Server Revision\r\n 13 - Device OpenFlow Datapath ID\r\n 14 - Set Vitrum License\r\n 15 - Device Label and Notes Submenu\r\n 16 - IP Configuration Submenu\r\n 17 - DNS Configuration Submenu\r\n 18 - Port Configuration Submenu\r\n 19 - Telnet service submenu\r\n 20 - Webserver Submenu\r\n 21 - Controller Submenu\r\n 22 - Reboot Packetmaster\r\n 23 - Back\r\n 24 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n hardwareconfig()\r\n execute = {1: PACKETMASTER.device_model,\r\n 2: PACKETMASTER.serial_number,\r\n 3: PACKETMASTER.hardware_generation,\r\n 4: PACKETMASTER.firmware_version,\r\n 5: PACKETMASTER.api_level,\r\n 6: PACKETMASTER.env_info,\r\n 7: PACKETMASTER.id_led,\r\n 8: PACKETMASTER.set_id_led_guided,\r\n 9: PACKETMASTER.load_info,\r\n 10: PACKETMASTER.tcam,\r\n 11: PACKETMASTER.mem_free,\r\n 12: 
PACKETMASTER.server_revision,\r\n 13: PACKETMASTER.get_dpid,\r\n 14: PACKETMASTER.set_license_guided,\r\n 15: notesmenu,\r\n 16: ipconfig,\r\n 17: dns,\r\n 18: portconfig,\r\n 19: telnet,\r\n 20: web,\r\n 21: controller,\r\n 22: PACKETMASTER.reboot,\r\n 23: manage,\r\n 24: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n hardwareconfig()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n hardwareconfig()", "def dhcp_release(ifname):\n\n logging.debug('Releasing %s...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--release', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', '-r', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err" ]
[ "0.6585499", "0.6445782", "0.63871115", "0.62862945", "0.6191916", "0.61801624", "0.61206764", "0.60355914", "0.59601206", "0.5855561", "0.57844305", "0.5700581", "0.56996197", "0.56953347", "0.5656746", "0.5649535", "0.5625262", "0.5623599", "0.5604455", "0.5591835", "0.5564239", "0.5551833", "0.55373263", "0.55247784", "0.5512395", "0.546546", "0.54503095", "0.5440689", "0.5436789", "0.5429301", "0.5416953", "0.5404971", "0.5390362", "0.5338337", "0.53271365", "0.5314541", "0.53107816", "0.53071284", "0.53045106", "0.53028816", "0.5283281", "0.5269", "0.5267591", "0.5263794", "0.52516943", "0.5236098", "0.52212626", "0.5220192", "0.52026427", "0.5198342", "0.5195314", "0.5188257", "0.51818484", "0.5177039", "0.51685655", "0.516573", "0.5161953", "0.5125724", "0.5124528", "0.5121159", "0.5096832", "0.5088833", "0.5086179", "0.5075421", "0.50698274", "0.50636965", "0.50621885", "0.50592715", "0.5057174", "0.5055564", "0.50551933", "0.50496405", "0.5045912", "0.50374746", "0.50374746", "0.5029077", "0.50053716", "0.49977928", "0.4997618", "0.49899057", "0.49866474", "0.49862492", "0.49766326", "0.49734688", "0.4970855", "0.49698734", "0.49696147", "0.49693078", "0.4964357", "0.49636853", "0.49607703", "0.49573666", "0.4954419", "0.4949133", "0.49412003", "0.49386227", "0.49339038", "0.49251574", "0.4925104", "0.49245086", "0.4915651" ]
0.0
-1
Support the following DHCP DeviceManager calls. self.plugin.release_dhcp_port(network.id, self.get_device_id(network))
def release_dhcp_port(self, network_id, device_id):
    LOG.debug("release_dhcp_port: %s %s", network_id, device_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def dhcp_release(ifname):\n\n logging.debug('Releasing %s...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--release', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', '-r', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def create_dhcp_port(self, port):\n LOG.debug(\"create_dhcp_port: %s\", port)\n port['port']['id'] = port['port']['network_id']\n\n # The following MAC address will be assigned to the Linux dummy\n # interface that\n # networking_calico.agent.linux.interface.RoutedInterfaceDriver\n # creates. Therefore it will never actually be used or involved in the\n # sending or receiving of any real data. Hence it should not matter\n # that we use a hardcoded value here, and the same value on every\n # networking-calico compute host. The '2' bit of the first byte means\n # 'locally administered', which makes sense for a hardcoded value like\n # this and distinguishes it from the space of managed MAC addresses.\n port['port']['mac_address'] = '02:00:00:00:00:00'\n port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP\n return dhcp.DictModel(port['port'])", "def get_dhcp_port(self, network_id, device_id):\n return DictModel(self.call(self.context,\n self.make_msg('get_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic))", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def setup_dhcp_config(self, board_config):\n raise NotImplementedError", "def disable_dhcp_helper(self, network_id):\n network = self.cache.get_network_by_id(network_id)\n if network:\n if self.call_driver('disable', network):\n self.cache.remove(network)", "def dhcp(self, dhcp):\n\n self._dhcp = dhcp", "def adb_down(self, port):\n self.adb_transport = None\n self.check_adb([\"disconnect\", \"localhost:%d\" % port])\n\n # Wait until QEMU's forward has expired\n CONNECT_MAX_TRIES = 15\n connect_tries = 0\n while True:\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"localhost\", port))\n sock.close()\n connect_tries += 1\n if connect_tries >= CONNECT_MAX_TRIES:\n raise Timeout(\"Wait for port forward to go away\",\n CONNECT_MAX_TRIES)\n time.sleep(1)\n except IOError:\n break", "def port_nic_remove(switch, port):\n client.port.detach_nic(switch, port)", "def dhcp_free(self, dhcp_free):\n\n self._dhcp_free = 
dhcp_free", "def delete_port_postcommit(self, mech_context):\n\n LOG.debug(\"delete_port_postcommit: called\")\n port = mech_context.current\n port_id = port['id']\n network_id = port['network_id']\n tenant_id = port['tenant_id']\n host_id = mech_context._binding.host\n context = mech_context._plugin_context\n\n try:\n network = seamicro_db.get_network(context, network_id)\n except Exception:\n LOG.exception(\n _LE(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n raise Exception(\n _(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n\n vlan_id = network['vlan']\n\n switch_ip, server_id, nics = _get_switch_info(self._switch, host_id)\n if switch_ip is not None and server_id is not None and nics is not None:\n try:\n interfaces = self.client[switch_ip].interfaces.list()\n for interface in interfaces:\n interface.remove_tagged_vlan(vlan_id)\n\n server = self.client[switch_ip].servers.get(server_id)\n if nics:\n server.unset_tagged_vlan(vlan_id, nics=nics)\n else:\n server.unset_tagged_vlan(vlan_id)\n except seamicro_client_exception.ClientException as ex:\n LOG.exception(\n _LE(\"SeaMicro driver: failed to delete port\"\n \" with the following error: %(error)s\"),\n {'error': ex.message})\n raise Exception(\n _(\"SeaMicro Mechanism: delete_port_postcommit failed\"))\n\n LOG.info(\n _LI(\"delete port (postcommit): port_id=%(port_id)s\"\n \" network_id=%(network_id)s tenant_id=%(tenant_id)s\"\n \" switch_ip=%(switch_ip)s server_id=%(server_id)s\"),\n {'port_id': port_id,\n 'network_id': network_id, 'tenant_id': tenant_id,\n 'switch_ip': switch_ip, 'server_id': server_id})", "def dhcp_agent_network_remove(self, dhcp_net_info):\n self.turn_on_dhcp_check()", "def plug_port_into_network(self, device_id, host_id, port_id,\n net_id, tenant_id, port_name, device_owner,\n sg, orig_sg, vnic_type, segments=None,\n switch_bindings=None, vlan_type=None):", "def _unplug_interface(self, context, tenant_id, net_id, port_id):\n LOG.debug(_(\"QuantumRestProxyV2: _unplug_interface() called\"))\n\n # delete from network ctrl. 
Remote error on delete is ignored\n try:\n resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)\n ret = self.servers.delete(resource)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote port: \"\n \"%s\"), e.message)", "def disconnect_port(self, iface):\n self.iface_config(iface, adminMode='Down')", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def dhcp_callback(self, state, target_mac=None, target_ip=None, exception=None):\n self.record_result('dhcp', info=target_mac, ip=target_ip, state=state, exception=exception)\n self.target_mac = target_mac\n self.target_ip = target_ip\n if exception:\n self._state_transition(_STATE.ERROR, _STATE.DHCP)\n self.runner.target_set_error(self.port_set, exception)\n else:\n self._state_transition(_STATE.BASE, _STATE.DHCP)", "def handle_link_down (self, port):\n for dest in self.hosts.keys():\n currPort = self.hosts[dest][0]\n if currPort == port:\n del self.hosts[dest]\n \n deleteDests = set()\n for dest in self.routesToDest:\n currPort = self.routesToDest[dest][0]\n \n if currPort == port:\n\n if dest in self.hosts:\n self.routesToDest[dest] = self.hosts[dest]\n packet = basics.RoutePacket(dest, self.routesToDest[dest][1])\n self.send(packet, self.routesToDest[dest][0], True)\n else:\n self.sendPoison(dest)\n deleteDests.add(dest)\n\n\n for dest in deleteDests:\n del self.routesToDest[dest]\n\n del self.neighbours[port]", "def _close(self):\n \n # Close device\n logger.debug(\"%s: UDP port closing started...\" % \\\n self.__class__.__name__)\n self._router = None\n self._platform = None\n self._udp_socket.close()\n logger.debug(\"%s: ...UDP port closing complete.\" % \\\n self.__class__.__name__)", "def exit(self):\n if self._dbus_iface is None:\n raise Exception('Method invoked on non existing D-Bus interface')\n self._dbus_iface.Release(reply_handler = self._exitUnlock, error_handler = self._exitUnlock) # Call Exit() but ignore whether it gets acknowledged or not... this is because slave process may terminate before even acknowledge\n self._exit_unlock_event.wait(timeout = 5) # Give 5s for slave to acknowledge the Exit() D-Bus method call... 
otherwise, ignore and continue\n # Once we have instructed the slave to send a Release, we can stop our own D-Bus loop (we won't communicate with the slave anymore)\n # Stop the dbus loop\n if not self._dbus_loop is None:\n self._dbus_loop.quit()\n \n self._dbus_loop = None\n \n logger.debug('Sending Exit() to remote DHCP client')\n self._exit_unlock_event.clear()", "def setup_dhcp_env(device):\n raise NotImplementedError", "def unplug(self, bridge):\n ovsdb = self.bridge.ovsdb\n with ovsdb.transaction() as txn:\n txn.add(ovsdb.del_port(self.patch_port_int_name,\n bridge.br_name))", "def create_port(self, context, port):\n LOG.debug(_(\"NeutronRestProxyV2: create_port() called\"))\n\n # Update DB in new session so exceptions rollback changes\n with context.session.begin(subtransactions=True):\n port[\"port\"][\"admin_state_up\"] = False\n dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])\n new_port = super(NeutronRestProxyV2, self).create_port(context,\n port)\n if (portbindings.HOST_ID in port['port']\n and 'id' in new_port):\n host_id = port['port'][portbindings.HOST_ID]\n porttracker_db.put_port_hostid(context, new_port['id'],\n host_id)\n self._process_port_create_extra_dhcp_opts(context, new_port,\n dhcp_opts)\n new_port = self._extend_port_dict_binding(context, new_port)\n net = super(NeutronRestProxyV2,\n self).get_network(context, new_port[\"network_id\"])\n\n if self.add_meta_server_route:\n if new_port['device_owner'] == 'network:dhcp':\n destination = METADATA_SERVER_IP + '/32'\n self._add_host_route(context, destination, new_port)\n\n # create on network ctrl\n mapped_port = self._map_state_and_status(new_port)\n self.servers.rest_create_port(net, mapped_port)\n\n # connect device to network, if present\n device_id = port[\"port\"].get(\"device_id\")\n if device_id:\n try:\n self.servers.rest_plug_interface(net[\"tenant_id\"], net[\"id\"],\n new_port, device_id)\n except RemoteRestError:\n with excutils.save_and_reraise_exception():\n port_update = {\"port\": {\"status\": \"ERROR\"}}\n super(NeutronRestProxyV2, self).update_port(\n context,\n new_port[\"id\"],\n port_update\n )\n # Set port state up and return that port\n port_update = {\"port\": {\"admin_state_up\": True}}\n new_port = super(NeutronRestProxyV2, self).update_port(context,\n new_port[\"id\"],\n port_update)\n return self._extend_port_dict_binding(context, new_port)", "def _RunDHCPCD(self, **kwargs):\n del kwargs\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n # -K: Don't receive link messages for carrier status. You should\n # only have to use this with buggy device drivers or running\n # dhcpcd through a network manager.\n # -c: Location to the hooks file. If the default location happens to be\n # empty, dhcpcd will fail. 
So we set the hooks file to /dev/null.\n dhcp_command = ('dhcpcd -K -t {timeout} -c /dev/null {interface}').format(\n timeout=self._dhcp_timeout,\n interface=self.interface)\n dhcp_timeout_command = 'timeout {timeout} {cmd}'.format(\n timeout=self._dhcp_timeout,\n cmd=dhcp_command)\n force_kill_command = 'pgrep dhcpcd | xargs -r kill -9'\n\n logging.info('Killing any existing dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhcpcd...')\n self._device.CheckCall(dhcp_timeout_command)\n\n logging.info('Verifying IP address...')\n ip = self._LeasedIP()\n if not ip:\n self._device.Call(force_kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Killing any remaining dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n yield # We have released the IP.", "def renew_dhcp_lease(self):\n\t\tresponse = os.system(\"/sbin/dhclient -r;/sbin/dhclient\")\n\t\tif response != 0:\n\t\t\tprint \"Network restart failed. DHCP Lease failed.\"", "def del_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()", "def disconnect(self, mac):\n self._servicer.process_port_assign(mac, None)", "def Disconnect(self):\n if not self._auth_process or not self._dhcp_process:\n raise WiFiError('Must connect before disconnecting')\n\n self.ip = None\n dhcp_process, self._dhcp_process = self._dhcp_process, None\n auth_process, self._auth_process = self._auth_process, None\n next(dhcp_process)\n next(auth_process)\n\n # Remove temporary directory.\n if not self._user_tmp_dir:\n self._tmp_dir_handle.__exit__(None, None, None)\n self._tmp_dir = None", "def unplug(self, bridge):\n ovsdb = self.bridge.ovsdb\n with ovsdb.transaction() as txn:\n if self.bridge.exists():\n txn.add(ovsdb.del_port(self.patch_port_trunk_name,\n self.bridge.br_name))\n txn.add(ovsdb.del_port(self.patch_port_int_name,\n bridge.br_name))", "def teardown_logical_port_connectivity(self, context, port_db):\n pass", "def detach_from_server(openstack_resource, device_id):\n port_id = openstack_resource.resource_id\n # Check if the port is provided or not\n if not device_id:\n raise NonRecoverableError(\n 'Unable to attach port to device {0},'\n ' `device_id` is missing'.format(\n device_id)\n )\n # Unlink port connection from server\n # No need to detach floating ip from the port because when delete port\n # with floating ip assigned to port it can removed without any issue\n _update_port_association(openstack_resource.client_config,\n port_id)", "def test_add_remove_network_from_dhcp_agent(self):\n # The agent is now bound to the network, we can free the port\n self.ports_client.delete_port(self.port['id'])\n agent = dict()\n agent['agent_type'] = None\n body = self.admin_agents_client.list_agents()\n agents = body['agents']\n for a in agents:\n if a['agent_type'] == 'DHCP agent':\n agent = a\n break\n self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '\n 'DHCP agent in agent list though dhcp_agent_scheduler'\n ' is enabled.')\n network = self.create_network()\n network_id = network['id']\n if self._check_network_in_dhcp_agent(network_id, agent):\n self._remove_network_from_dhcp_agent(network_id, agent)\n self._add_dhcp_agent_to_network(network_id, agent)\n else:\n self._add_dhcp_agent_to_network(network_id, agent)\n 
self._remove_network_from_dhcp_agent(network_id, agent)", "def launch (no_flow = False,\n network = \"192.168.0.0/24\", # Address range\n first = 1, last = None, count = None, # Address range\n ip = \"192.168.0.254\",\n router = (), # Auto\n dns = (), # Auto\n dpid = None, # All\n ports = None, # All\n __INSTANCE__ = None):\n def fixint (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n return int(i)\n def fix (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n if i == '()': return ()\n return i\n first,last,count = map(fixint,(first,last,count))\n router,dns = map(fix,(router,dns))\n\n if ports is not None:\n ports = ports.split(\",\")\n ports = set(int(p) if p.isdigit() else p for p in ports)\n\n pool = SimpleAddressPool(network = network, first = first, last = last,\n count = count)\n\n inst = DHCPD(install_flow = not no_flow, pool = pool,\n ip_address = ip, router_address = router,\n dns_address = dns, dpid = dpid, ports = ports)\n\n if __INSTANCE__[0] == 0:\n # First or only instance\n core.register(inst)\n\n log.debug(\"DHCP serving a%s\", str(pool)[2:-1])", "def detach(openstack_resource, port_id):\n device_id = openstack_resource.resource_id\n # Check if the port is provided or not\n if not port_id:\n raise NonRecoverableError(\n 'Unable to attach port to device {0},'\n ' `port_id` is missing'.format(\n device_id)\n )\n # Unlink port connection from server\n # No need to detach floating ip from the port because when delete port\n # with floating ip assigned to port it can removed without any issue\n _update_port_association(openstack_resource.client_config,\n port_id)", "def port_delete(switch, port):\n client.port.delete(switch, port)", "def delete_port_mac(self, context, port):\n self._get_driver_for_provider(constants.l2gw\n ).delete_port_mac(context, port)", "def stop_dhcp_response(self, mac):\n self.change_dhcp_response_time(mac, -1)", "def _delete_internal_port(self, port_id):\n with self.client_plugin('neutron').ignore_not_found:\n self.client('neutron').delete_port(port_id)\n\n self._data_update_ports(port_id, 'delete')", "def test_dhcp_bind_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|bind_id', dhcp_bind.delete,\n {'bind': {}},\n delete_args=['dhcpStaticBindingID'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'esg_id', 'bindingID': 'bind_id'}\n }\n )", "def _get_net_dhcp_relay(self, context, net_id):\n pass", "def del_port(bridge, port):\n _run('ovs-vsctl', 'del-port', bridge, port)", "def stop(self):\n\n if not self._dhcp_client_ctrl is None:\n self._dhcp_client_ctrl.exit()\n if not self._slave_dhcp_process is None:\n self._slave_dhcp_process.kill()\n logger.debug('DHCP client stopped on ' + self._ifname)\n \n self._new_lease_event.clear()\n self._dhcp_client_ctrl = None # Destroy the control object\n self._slave_dhcp_process = None # Destroy the slave DHCP object", "def dhcp_renew(ifname):\n\n logging.debug('Renewing %s DHCP lease...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--rebind', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err", "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def ReleasePort(self, *args, **kwargs):\n # type: (*Any, 
**Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('releasePort', payload=payload, response_object=None)", "def _close(self):\n \n # Close device\n logger.debug(\"%s: UDP port closing started...\" % \\\n self.__class__.__name__)\n self._udp_socket.close()\n self._socket = None\n logger.debug(\"%s: ...UDP port closing complete.\" % \\\n self.__class__.__name__)", "def allocate_hosting_port(self, context, router_id, port_db, network_type,\n hosting_device_id):\n pass", "def close(self):\n self.port.send_command(\"atz\")\n self.port.close()\n self.port = None", "def answerDHCP(self, shouldAnswer):\n assert False, \"Deriving class must implement\"", "def elAddNetworkConfigurationWithDhcp(self, device):\n commandSection = self.sectionByName(\"command\")\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n deviceMatch = re.match(r\"([^0-9]+)([0-9])\", device)\n if deviceMatch:\n # e.g. \"eth0\"\n devicePrefix = deviceMatch.group(1)\n deviceNumber = deviceMatch.group(2)\n deviceNumber = int(deviceNumber)\n for i in range(8, deviceNumber - 1, -1):\n deviceI = devicePrefix + str(i)\n deviceIPlus1 = devicePrefix + str(i + 1)\n # move up by one device each network configuration\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--device[ \\t]*(?:=|[ \\t])[ \\t]*)\" + re.escape(deviceI) + r\"(.*)$\",\n r\"\\g<1>\" + deviceIPlus1 + r\"\\g<2>\",\n commandSection.string)\n # not --noipv6\n networkConfiguration = \"network --device=\" + device + \" --bootproto=dhcp --onboot=yes --activate\"\n if deviceMatch and deviceNumber == 0:\n # having configuration of eth0 first appears to be more conducive to overall success,\n # and also, per http://fedoraproject.org/wiki/Anaconda/Kickstart#network, supposedly\n # \"... in installer environment. Device of the first network command is activated if network is required,\n # e.g. 
in case of network installation ...\",\n commandSection.string = networkConfiguration + \"\\n\" \\\n + \"#\\n\" \\\n + commandSection.string\n else:\n commandSection.string = commandSection.string \\\n + \"#\\n\" \\\n + networkConfiguration + \"\\n\"", "def create_dhcp_relay(self, iface_name='global', server_ip=None, fwd_iface_name=None):\n pass", "def del_host(self, ipv4, rem_dpid, rem_port):\n assert(ipv4 is not None)\n assert(rem_dpid is not None)\n assert(rem_port is not None)\n LOG.info(\"Try to del host=%s -> (%s:%d)\" % (ipv4, rem_dpid, rem_port))\n\n ip_ = convert_ipv4_to_int(ipv4)\n self.del_link(ip_, 0, rem_dpid, rem_port)\n self.del_link(rem_dpid, rem_port, ip_, 0)\n self.del_node(ip_)", "def get_device_id(self, network):\n # There could be more than one dhcp server per network, so create\n # a device id that combines host and network ids\n\n host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())\n return 'dhcp%s-%s' % (host_uuid, network.id)", "def cleanup_dpdk_framework(node, if1, if2):\n if node[u\"type\"] == NodeType.DUT:\n pci_address1 = Topology.get_interface_pci_addr(node, if1)\n pci_address2 = Topology.get_interface_pci_addr(node, if2)\n # We are not supporting more than one driver yet.\n nic_driver = Topology.get_interface_driver(node, if1)\n\n command = f\"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}\"\\\n f\"/entry/cleanup_dpdk.sh \" \\\n f\"{nic_driver} {pci_address1} {pci_address2}\"\n message = u\"Cleanup the DPDK failed!\"\n exec_cmd_no_error(node, command, timeout=1200, message=message)", "def test_delete_network_from_dhcp_agent(self):\n network_id = self._create_and_prepare_network_for_agent(\n self.agent['id'])\n self.agents_client.add_dhcp_agent_to_network(\n self.agent['id'], network_id=network_id)\n # Clean up is not necessary and might result in 409 being raised.\n\n with self.override_role():\n self.agents_client.delete_network_from_dhcp_agent(\n self.agent['id'], network_id=network_id)", "def port_delete_end(self, payload):\n port = self.cache.get_port_by_id(payload['port_id'])\n if port:\n network = self.cache.get_network_by_id(port.network_id)\n self.cache.remove_port(port)\n self.call_driver('reload_allocations', network)", "def delete_port(self, port):\n try:\n self.client.delete_nic(port.vm.backend_id, port.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def _RunDHCPClient(self, dhclient_script_path=None, **kwargs):\n del kwargs\n PID_FILE = os.path.join(self._tmp_dir, 'dhclient.pid')\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n dhcp_command = ('echo \"\" | ' # dhclient expects STDIN for some reason\n 'dhclient -4 ' # only run on IPv4\n '-nw ' # immediately daemonize\n '-pf {pid_file} '\n '-sf {dhclient_script} '\n '-lf /dev/null ' # don't keep a leases file\n '-v {interface}'.format(\n pid_file=PID_FILE,\n dhclient_script=dhclient_script_path,\n interface=self.interface))\n kill_command = 'cat {pid_file} | xargs -r kill; rm {pid_file}'.format(\n pid_file=PID_FILE)\n force_kill_command = 'pgrep dhclient | xargs -r kill -9'\n\n logging.info('Killing any existing dhclient processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhclient...')\n self._device.CheckCall(dhcp_command)\n\n logging.info('Waiting to lease an IP...')\n ip = sync_utils.WaitFor(self._LeasedIP, self._dhcp_timeout)\n if not ip:\n 
self._device.Call(kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Stopping dhclient...')\n self._device.Call(kill_command)\n self._device.Call(force_kill_command)\n self._device.Call(clear_ifconfig_command)\n\n yield # We have released the IP.", "def remote_destroyTunnel(self, name, targetIP):\r\n if name not in self._bridges:\r\n raise InternalError('Bridge does not exist.')\r\n\r\n key = (name, targetIP)\r\n\r\n if key not in self._uid:\r\n raise InternalError('Tunnel deos not exist.')\r\n\r\n return execute(('/usr/bin/ovs-vsctl', 'del-port',\r\n 'gre-{0}'.format(self._uid.pop(key))),\r\n reactor=self._reactor)", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def run(self, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n\n self.neutron.delete_port(port[\"id\"])", "def _stop(self):\n\n if self._daemon_id:\n pyro_proxy_name = 'PySwitchLib.' + self._daemon_id\n uri = None\n\n try:\n with Pyro4.locateNS(host='localhost', port=self._pyro_ns_port) as ns:\n try:\n uri = ns.lookup(pyro_proxy_name)\n except:\n pass\n\n if uri:\n ns.remove(pyro_proxy_name)\n except:\n pass\n finally:\n ns_daemon_dict = ConfigFileUtil().read(filename=pyswitchlib_ns_daemon_file)\n\n if self._daemon_id in ns_daemon_dict:\n uri = ns_daemon_dict[self._daemon_id]\n del ns_daemon_dict[self._daemon_id]\n\n if len(ns_daemon_dict):\n ConfigFileUtil().write(filename=pyswitchlib_ns_daemon_file, conf_dict=ns_daemon_dict, do_merge=False)\n else:\n try:\n os.unlink(pyswitchlib_ns_daemon_file)\n except:\n pass\n\n if uri:\n try:\n with Pyro4.Proxy(uri) as pyro_proxy:\n pyro_proxy.shutdown()\n pyro_proxy._pyroRelease()\n except:\n pass\n\n super(PySwitchLibApiDaemonRunner, self)._stop()", "def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def enable_dhcp_helper(self, network_id):\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n if not network.admin_state_up:\n return\n\n for subnet in network.subnets:\n if subnet.enable_dhcp:\n if self.call_driver('enable', network):\n self.cache.put(network)\n break", "def create_port(self, context, port):\n LOG.debug(_(\"QuantumRestProxyV2: create_port() called\"))\n\n # Update DB\n port[\"port\"][\"admin_state_up\"] = False\n new_port = super(QuantumRestProxyV2, self).create_port(context, port)\n net = super(QuantumRestProxyV2,\n self).get_network(context, new_port[\"network_id\"])\n\n # create on networl ctrl\n try:\n resource = PORT_RESOURCE_PATH % (net[\"tenant_id\"], net[\"id\"])\n data = {\n \"port\": {\n \"id\": new_port[\"id\"],\n \"state\": \"ACTIVE\",\n }\n }\n ret = self.servers.post(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n\n # connect device to network, if present\n if port[\"port\"].get(\"device_id\"):\n self._plug_interface(context,\n net[\"tenant_id\"], net[\"id\"],\n new_port[\"id\"], new_port[\"id\"] + \"00\")\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to create remote port: \"\n \"%s\"), e.message)\n super(QuantumRestProxyV2, 
self).delete_port(context,\n new_port[\"id\"])\n raise\n\n # Set port state up and return that port\n port_update = {\"port\": {\"admin_state_up\": True}}\n return super(QuantumRestProxyV2, self).update_port(context,\n new_port[\"id\"],\n port_update)", "def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)", "def handle_link_down(self, port):\n # Update the latency for this port to something greater than INFINITY\n self.ports_to_latencies[port] = INFINITY + 1\n\n for dest in self.hosts_to_unused_ports:\n\n #Determine the prior distance\n\n prior_distance = INFINITY\n if dest in self.hosts_to_unused_ports:\n prior_distance = self.hosts_to_ports[dest].latency\n\n #Take out all the entries that have the same port as the one being passed in\n self.hosts_to_unused_ports[dest] = [host for host in self.hosts_to_unused_ports[dest] if host.port != port] \n self.hosts_to_ports[dest] = self.find_minium_latency_unused_ports(self.hosts_to_unused_ports[dest])\n\n if prior_distance != self.get_latency(dest):\n # if self.get_latency(dest) < INFINITY:\n\n distance_vector = self.hosts_to_ports[dest]\n\n # Send normal route packet\n packet = basics.RoutePacket(dest, distance_vector.latency)\n self.send(packet, port)\n\n # Send poison packet if POISON_MODE is true\n if self.POISON_MODE == True:\n poison_packet = basics.RoutePacket(dest, INFINITY)\n self.send(poison_packet, port) \n\n # elif self.POISON_MODE == True:\n # self.advertise_route_to_neighbors(destination)", "def dhcp_used(self, dhcp_used):\n\n self._dhcp_used = dhcp_used", "def _closeLink(self, element):\n to_connect = []\n dynamic = False\n templates = element.get_pad_template_list()\n for template in templates:\n if not template.direction == gst.PAD_SRC:\n continue\n if template.presence == gst.PAD_ALWAYS:\n pad = element.get_pad(template.name_template)\n to_connect.append(pad)\n elif template.presence == gst.PAD_SOMETIMES:\n pad = element.get_pad(template.name_template)\n if pad:\n to_connect.append(pad)\n else:\n dynamic = True\n else:\n self.log(\"Template %s is a request pad, ignoring\" % pad.name_template)\n\n if dynamic:\n self.debug(\"%s is a dynamic element\" % element.get_name())\n self._controlDynamicElement(element)\n\n for pad in to_connect:\n self._closePadLink(element, pad, pad.get_caps())", "def one_stack_port_down(self, dpid, dp_name, port):\n self.set_port_down(port, dpid, wait=False)\n self.wait_for_stack_port_status(dpid, dp_name, port, 4)", "def port_nic():", "def run(self):\n self.network_ctrl.connect_with_remote_system()\n cmd = self.create_command(self.on_or_off, self.port)\n self.network_ctrl.send_command(cmd)\n\n check = self._port_status(self.port)\n result = self.network_ctrl.send_command(check)\n result = result[0]\n if self.on_or_off:\n if result == \"1\":\n self.router.mode = Mode.normal\n 
logging.info(\"[+] Successfully switched on port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching on port \" + str(self.port))\n else:\n if result == \"0\":\n self.router.mode = Mode.off\n logging.info(\"[+] Successfully switched off port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching off port \" + str(self.port))\n\n self.network_ctrl.exit()", "def test_dhcp_pool_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|pool_id', dhcp_pool.delete,\n {'pool': {}},\n delete_args=['dhcpPoolID'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'esg_id', 'poolID': 'pool_id'}\n }\n )", "def remove_network_adapter(self, network_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network_obj\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def setup_dhcp6_config(self, board_config):\n raise NotImplementedError", "def terminate(self):\n set_sysctl(self, 'net.ipv4.ip_forward', 0)\n set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)\n super(LinuxRouter, self).terminate()", "def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()", "def cleanup(self):\n byteblower_instance = ByteBlower.InstanceGet()\n if self.port_1:\n self.server.PortDestroy(self.port_1)\n self.port_1 = None\n\n if self.port_2:\n self.server.PortDestroy(self.port_2)\n self.port_2 = None\n\n if self.server is not None:\n byteblower_instance.ServerRemove(self.server)\n self.server = None", "def test_port_update_is_host_aware(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n # Create a bound port with no IP address (since there is no subnet)\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n port = self.deserialize(self.fmt, response)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network,\n segment_id=segment['segment']['id']) as subnet:\n self._validate_l2_adjacency(network['network']['id'],\n is_adjacent=False)\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # Since port is bound and there is a mapping to segment, it succeeds.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def _delete_current_gw_port(self, context, router_id, router, new_network):\n LOG.debug(\"OVNL3RouterPlugin::_delete_current_gw_port\")\n #check if port requires delete or not.\n 
port_requires_deletion = (\n router.gw_port and\n (router.gw_port['network_id'] != new_network)\n )\n if not port_requires_deletion:\n return\n # delete gw_port and db.\n gw_port_id = router.gw_port['id']\n provnet_gw_ip = self.get_subnet_gateway_ips(context, router.gw_port['fixed_ips'])[0]\n super(OVNL3RouterPlugin, self)._delete_current_gw_port(context, router_id, router, new_network)\n # delete gw router and transit network resources.\n self.delete_gw_router_port_in_ovn(router_id, gw_port_id, provnet_gw_ip)", "def test_create_ports_in_vsd_managed_l2domain_dhcp_unmanaged_neg(self):\n # Given I have a VSD-L2-Unmanaged subnet\n vsd_l2_domain_template, vsd_l2_domain = \\\n self._given_vsd_l2_dhcp_disabled_domain()\n\n # create Openstack IPv4 subnet on Openstack based on VSD l2domain\n net_name = data_utils.rand_name('network-')\n network = self.create_network(network_name=net_name)\n ipv4_subnet = self.create_subnet(\n network,\n cidr=self.cidr4, mask_bits=self.mask_bits4_unsliced,\n gateway=None, enable_dhcp=False,\n nuagenet=vsd_l2_domain.id, net_partition=Topology.def_netpartition)\n self.assertEqual(\n str(next(IPNetwork(self.cidr4).subnet(self.mask_bits4_unsliced))),\n ipv4_subnet['cidr'])\n\n # shall not create a port with fixed-ip IPv6 in ipv4 subnet\n port_args = {'fixed_ips':\n [{'subnet_id': ipv4_subnet['id'],\n 'ip_address': IPAddress(self.cidr6.first + 21)}]}\n self.assertRaisesRegex(\n tempest_exceptions.BadRequest,\n \"IP address %s is not a valid IP for the specified subnet\" %\n (IPAddress(self.cidr6.first + 21)),\n self.create_port,\n network,\n **port_args)\n\n # create Openstack IPv6 subnet\n ipv6_subnet = self.create_subnet(\n network,\n ip_version=6,\n cidr=self.cidr6, mask_bits=self.cidr6.prefixlen,\n gateway=vsd_l2_domain_template.ipv6_gateway, enable_dhcp=False,\n nuagenet=vsd_l2_domain.id, net_partition=Topology.def_netpartition)\n\n # shall not create port with IP already in use\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'],\n 'ip_address':\n IPAddress(self.cidr4.first + 10)},\n {'subnet_id': ipv6_subnet['id'],\n 'ip_address':\n IPAddress(self.cidr6.first + 10)}]}\n\n valid_port = self.create_port(network, **port_args)\n self.assertIsNotNone(valid_port)\n\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr4.first + 11)},\n {'subnet_id': ipv6_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr6.first + 10)}]}\n\n self.assertRaisesRegex(\n tempest_exceptions.Conflict,\n 'IP address {} already allocated in '\n 'subnet {}'.format(IPAddress(self.cidr6.first + 10),\n ipv6_subnet['id']),\n self.create_port,\n network,\n **port_args)\n\n # shall not create port with fixed ip in outside cidr\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr4.first + 12)},\n {'subnet_id': ipv6_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr6.first - 20)}]}\n self.assertRaisesRegex(\n tempest_exceptions.BadRequest,\n \"IP address %s is not a valid IP for the specified subnet\" %\n (IPAddress(self.cidr6.first - 20)),\n self.create_port,\n network,\n **port_args)", "def change_adp(self, network: str):\r\n self.ip = network\r\n self.adp = self.ipv4_adp[network]\r\n self.mac = self.ipv4_mac[network].replace('-', ':')\r\n # print(self.adp, self.ip, self.mac)\r", "def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n 
wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')", "def update_cmts_isc_dhcp_config(self, board_config):\n self.setup_dhcp_config(board_config)\n self.setup_dhcp6_config(board_config)\n raise NotImplementedError", "def stop(self):\n # remove all tap interfaces\n for i in range(self._vport_id):\n tapx = 'tap' + str(i)\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tapx, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger, 'Deleting ' + tapx, False)\n self._vport_id = 0\n\n # remove datapath before vswitch shutdown\n dpctl = DPCtl()\n dpctl.del_dp()\n\n super(OvsVanilla, self).stop()\n\n # give vswitch time to terminate before modules are removed\n time.sleep(5)\n self._module_manager.remove_modules()", "def execute_closure_methods():\n user_account = get_user_account_data()\n # PORT FORWARDING\n if user_account and user_account.upnp():\n # Account exists and UPnP was approved\n delete_port_mapping()", "def _unset_dns_mitm(self, device_name: str) -> None:\n device = getattr(self.dev_mgr, device_name)\n host = f\"{device_name}.boardfarm.com\"\n if device:\n self.dev_mgr.wan.modify_dns_hosts(\n {host: device.dns.dnsv4[host] + device.dns.dnsv6[host]}\n )\n self.mitm_dns_active.remove(device_name)\n else:\n logger.warning(f\"Device {device_name} is not found in device manager.\")", "def delete(clients, context):\n port_id = context['port_id']\n logger.info(\"Taking action port.delete {}\".format(port_id))\n neutron = clients.get_neutron()\n neutron.delete_port(port_id)", "def delete_ports(module, system):\n changed = False\n\n host = system.hosts.get(name=module.params['host'])\n for wwn_port in module.params['wwns']:\n wwn = WWN(wwn_port)\n if system.hosts.get_host_by_initiator_address(wwn) == host:\n if not module.check_mode:\n host.remove_port(wwn)\n changed = True\n for iscsi_port in module.params['iqns']:\n iscsi_name = make_iscsi_name(iscsi_port)\n if system.hosts.get_host_by_initiator_address(iscsi_name) == host:\n if not module.check_mode:\n host.remove_port(iscsi_name)\n changed = True\n return changed", "def update_port(self, context, port_id, port):\n LOG.debug(_(\"NeutronRestProxyV2: update_port() called\"))\n\n self._warn_on_state_status(port['port'])\n\n # Validate Args\n orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)\n with context.session.begin(subtransactions=True):\n # Update DB\n new_port = super(NeutronRestProxyV2,\n self).update_port(context, port_id, port)\n self._update_extra_dhcp_opts_on_port(context, port_id, port,\n new_port)\n if (portbindings.HOST_ID in port['port']\n and 'id' in new_port):\n host_id = port['port'][portbindings.HOST_ID]\n porttracker_db.put_port_hostid(context, new_port['id'],\n host_id)\n new_port = self._extend_port_dict_binding(context, new_port)\n\n # update on networl ctrl\n mapped_port = self._map_state_and_status(new_port)\n self.servers.rest_update_port(orig_port[\"tenant_id\"],\n orig_port[\"network_id\"],\n mapped_port, port_id)\n\n if (new_port.get(\"device_id\") != orig_port.get(\"device_id\") and\n orig_port.get(\"device_id\")):\n try:\n self.servers.rest_unplug_interface(orig_port[\"tenant_id\"],\n orig_port[\"network_id\"],\n orig_port[\"id\"])\n device_id = new_port.get(\"device_id\")\n if device_id:\n self.rest_plug_interface(new_port[\"tenant_id\"],\n new_port[\"network_id\"],\n new_port, device_id)\n\n 
except RemoteRestError:\n with excutils.save_and_reraise_exception():\n port_update = {\"port\": {\"status\": \"ERROR\"}}\n super(NeutronRestProxyV2, self).update_port(\n context,\n new_port[\"id\"],\n port_update\n )\n\n # return new_port\n return new_port", "def del_record(self, args):\n\n mac = MacAddress(args.mac)\n desc = self.dhcp_client_state[mac.as_redis_key()]\n print(\"Deleted mac %s with DHCP rec %s\" % (str(mac), desc))\n self.dhcp_client_state[mac.as_redis_key()] = None", "def _close(self):\n \n # Close device\n logger.debug(\"%s: TCP port closing started...\" % \\\n self.__class__.__name__)\n self._router = None\n self._platform = None\n self._tcp_socket.close()\n logger.debug(\"%s: ...TCP port closing complete.\" % \\\n self.__class__.__name__)", "def clean_rebind_test(**kwargs):\n if 'verify_traffic' not in kwargs:\n kwargs['verify_traffic'] = False\n prepare_subscriber_traffic(**kwargs)\n device_id = kwargs.get('device_id', bbe.get_devices(device_tags='dut', id_only=True)[0])\n switch_id = kwargs.get('switch_id', 'r1')\n switch_handle = t.get_handle(switch_id)\n switch_access_intf = bbe.get_interfaces(switch_id, interfaces='access')\n status = True\n for iteration in range(1, int(kwargs.get('iteration', 1)) + 1):\n t.log(\"disable access ports in switch in iteration #{}\".format(iteration))\n port_command_list = []\n status = True\n for access in switch_access_intf:\n port_command_list.append(\"set interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n t.log(\"verify access ports in down state\")\n for access in switch_access_intf:\n resp = switch_handle.pyez('get_interface_information', level_extra='terse',\n interface_name=access.interface_pic).resp\n if resp.findtext('physical-interface/admin-status') == 'down' and resp.findtext(\n 'physical-interface/oper-status') == 'down':\n t.log(\"interface {} is in down state\".format(access.interface_pic))\n else:\n t.log('WARN', \"interface {} is in state {}\".format(access.interface_pic, resp))\n status = False\n\n if not status:\n for access in switch_access_intf:\n port_command_list.append(\"delete interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n raise Exception(\"some interfaces failed to be in down state after disable\")\n base_time = time.time()\n while time.time() - base_time < 1800:\n router_count = get_router_sub_summary(device_id)['client']\n tester_count = get_rt_subs_info()['rt_sessions_up']\n if router_count == 0 and tester_count == 0:\n duration = time.time() - base_time\n t.log(\"all subscribers cleared from tester and router after {}s in iteration #{}\".format(duration,\n iteration))\n break\n t.log(\"sleep 30s , waiting for clients cleared\")\n time.sleep(30)\n\n result = get_router_sub_summary(device_id)\n\n if result['client'] != 0 or 'terminated' in result or 'terminating' in result or 'init' in result:\n status = False\n t.log('WARN', 'some subscribers stuck in unexpected state in iteration #{}'.format(iteration))\n\n for access in switch_access_intf:\n port_command_list.append(\"delete interfaces {} disable\".format(access.interface_pic))\n switch_handle.config(command_list=port_command_list)\n switch_handle.commit()\n time.sleep(10)\n t.log(\"verify access ports in up state in iteration {}\".format(iteration))\n for access in switch_access_intf:\n resp = switch_handle.pyez('get_interface_information', level_extra='terse',\n 
interface_name=access.interface_pic).resp\n if resp.findtext('physical-interface/admin-status') == 'up' and resp.findtext(\n 'physical-interface/oper-status') == 'up':\n t.log(\"interface {} is in up state\".format(access.interface_pic))\n else:\n t.log('WARN', \"interface {} is in state {}\".format(access.interface_pic, resp))\n status = False\n\n if not status:\n raise Exception(\"clean test failed\")\n ##set the rt subscriber state to stopped, since it is not teared down by actions\n t.log(\"login subscriber and verify traffic after restore the connection in iteration #{}\".format(iteration))\n prepare_subscriber_traffic(**kwargs)", "def _update_port_association(client_config, port_id, device_id=''):\n # Check if the port is provided or not\n if not port_id:\n raise NonRecoverableError(\n 'Unable to attach port to device {0},'\n ' `port_id` is missing'.format(\n device_id)\n )\n # Prepare the port instance to attach/detach server from/to the current\n # port\n port_resource = OpenstackPort(client_config=client_config,\n logger=ctx.logger)\n\n # Set port id\n port_resource.resource_id = port_id\n\n # Update port\n port_resource.update({'device_id': device_id})", "def close(self):\n self._relaypid = None\n self._portoffset = None" ]
[ "0.8054723", "0.71377176", "0.6566923", "0.6155076", "0.5985578", "0.59473217", "0.58076227", "0.5672177", "0.5659102", "0.5577378", "0.5557", "0.55073285", "0.5491355", "0.54843926", "0.5454663", "0.5447936", "0.5415151", "0.54091704", "0.5385533", "0.53768826", "0.53333277", "0.5326683", "0.5321701", "0.5321672", "0.5281091", "0.5274553", "0.5266224", "0.5262475", "0.52518713", "0.5204779", "0.5199943", "0.5185985", "0.5181357", "0.51648414", "0.5158935", "0.5158245", "0.5148604", "0.5136463", "0.5114741", "0.51131845", "0.509418", "0.50856423", "0.50750864", "0.5068624", "0.5044844", "0.5035676", "0.50294304", "0.5015343", "0.5004674", "0.4992267", "0.49922448", "0.49885392", "0.49695176", "0.4964907", "0.4964766", "0.49314165", "0.48994693", "0.48571497", "0.48505512", "0.48409525", "0.4836767", "0.4818481", "0.48163798", "0.48146313", "0.48102584", "0.48087057", "0.48085228", "0.47833657", "0.47736838", "0.47645202", "0.47604674", "0.47598895", "0.47524536", "0.4744842", "0.47279504", "0.47068423", "0.47049773", "0.47040737", "0.47031617", "0.46981934", "0.46877468", "0.4678796", "0.46784735", "0.46697056", "0.4662666", "0.46596676", "0.46573293", "0.4651234", "0.46509144", "0.46508637", "0.46489078", "0.46456477", "0.4640521", "0.46317658", "0.46273822", "0.46256942", "0.4624016", "0.4622054", "0.46189797", "0.46164715" ]
0.84403765
0
Construct and return an empty network model.
def empty_network(network_id=NETWORK_ID):
    return make_net_model({"id": network_id,
                           "subnets": [],
                           "ports": [],
                           "tenant_id": "calico",
                           "mtu": neutron_constants.DEFAULT_NETWORK_MTU})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def _create_nn(self):\n with tf.name_scope('policy_network'):\n with tf.variable_scope(\"policy_network\"):\n model = tf.keras.Sequential(name='policy_network_model')\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[0], activation=tf.nn.relu,\n input_shape=(1, self.neurons_in_each_layer[0])))\n for num_neurons in self.neurons_in_each_layer[1:-1]:\n model.add(tf.keras.layers.Dense(num_neurons, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[-1], name='policy_output_layer'))\n\n return model", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def init_model(self) -> keras.Model:\n model_input = keras.Input(shape=(self.num_classes, self.nun_models))\n\n layer_out = Conv1D(64, kernel_size=self.num_classes, activation=\"sigmoid\")(\n model_input\n )\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Flatten()(layer_out)\n\n layer_out = 
Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n output = Dense(self.num_classes, activation=\"softmax\")(layer_out)\n\n return keras.Model(inputs=model_input, outputs=output)", "def initModel(self):\n input_shape = (self.params[\"nb_features\"],)\n x = input_tensor = Input(input_shape)\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n for i in range(2, self.params[\"nb_layers\"] + 1):\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n if self.params[\"dropout\"]:\n x = Dropout(self.params[\"dropout\"])(x)\n x = output_tensor = Dense(4)(x)\n model = Model(input_tensor, output_tensor)\n return model", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def _build_network(self):\n pass", "def initialise_network(self):\n raise NotImplementedError", "def build_empty_graph(input_dim, output_dim, num_intermediate):\n from .models import DAG\n num_emit, num_rec = num_intermediate + input_dim, num_intermediate + output_dim\n activations = torch.zeros(num_rec, dtype=torch.long)\n connections = torch.zeros(num_rec, num_emit, dtype=torch.long)\n\n return DAG(input_dim, output_dim, num_intermediate, connections, activations, check_valid=True)", "def empty_instance():\n from weighted_graph import Graph\n return Graph()", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def make_model():\n # create the base pre-trained model\n base_model = efn.EfficientNetB0(input_shape=(img_width, img_height, 3), include_top=False)\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(num_classes, activation=\"softmax\")(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n return base_model, model", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, 
self.optim_classifier, self.sched_classifier)", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def create_network(model_file=DEFAULT_MODEL_FILE, pretrained=DEFAULT_PRETRAINED, *args, **kwargs):\n net = imagenet_classifier(*args,**kwargs)\n net.set_phase_test()\n net.set_mode_cpu()\n return net", "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def create_model(project_parameters):\n model = Net(project_parameters=project_parameters)\n if project_parameters.checkpoint_path is not None:\n model = load_checkpoint(model=model, num_classes=project_parameters.num_classes,\n use_cuda=project_parameters.use_cuda, checkpoint_path=project_parameters.checkpoint_path)\n return model", "def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self", "def create_simple_model():\n\n input_shape = (160, 320, 3)\n \n m = Sequential()\n\n # 1. Add Normalization\n m.add(Lambda(lambda x: x/255.0 - 0.5,\n input_shape=input_shape,\n ))\n\n # 2. Flatten + 1 fully connected layer\n m.add(Flatten())\n m.add(Dense(10, activation='relu', init=my_init))\n \n # 3. 
Output Layer is a Dense layer with no activation function\n m.add(Dense(1))\n \n return m", "def create_nueral_network(X, y, epochs=8):\n model = Sequential()\n model.add(layers.Dense(500, input_dim=X.shape[1]))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(128, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(32, activation='relu'))\n model.add(layers.Dense(5,activation='softmax'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])\n print(model.summary())\n model.fit(X, y, epochs=epochs, batch_size=500)\n return model", "def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def create_model():\n\n # Create a sequential model (a simple NN is created) adding a softmax activation at the end with 10 units:\n model = Sequential()\n model.add(Dense(units=128, activation=\"relu\", input_shape=(784,)))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=10, activation=\"softmax\"))\n\n # Compile the model using the loss function \"categorical_crossentropy\" and Stocastic Gradient Descent optimizer:\n model.compile(optimizer=SGD(0.001), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # Return the created model\n return model", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model", "def network_initial(request, SPIC_group, SPIC_id):\n SPIC_obj = get_object_or_404(SPIC, group=SPIC_group, local_id=SPIC_id)\n network_obj, created = Network.objects.get_or_create(user_id=request.user.pk, SPIC=SPIC_obj, local_id=0, deleted=False)\n\n if created is True:\n # Check if prototype exists\n prototype = get_object_or_404(Network, user_id=0, SPIC=SPIC_obj)\n network_obj.nodes_json = prototype.nodes_json\n network_obj.links_json = prototype.links_json\n network_obj.save()\n\n return network(request, SPIC_group, SPIC_id, 0)", "def create_network():\n net = ln.models.TinyYolo(CLASSES, CONF_THRESH, NMS_THRESH)\n\n net.load(args.weight)\n net.eval()\n net.postprocess.append(ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))\n net = net.to(device)\n return net", "def create_model(self):\n self.model = None\n pass", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id 
not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def build(obs_space: Box, action_space: Box, spec: Spec) -> MLPModel:\n model = MLPModel(obs_space, action_space, spec.network)\n model.initialize_parameters(spec.initializer)\n if spec.residual:\n model = ResidualStochasticModel(model)\n return model", "def construct_private_model(input_size, model):\n # get rank of current process\n rank = comm.get().get_rank()\n dummy_input = torch.empty(input_size)\n\n # party 0 always gets the actual model; remaining parties get dummy model\n if rank == 0:\n model_upd = model\n else:\n model_upd = LeNet()\n private_model = crypten.nn.from_pytorch(model_upd, dummy_input).encrypt(src=0)\n return private_model", "def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network", "def _get_model(self):\n layers = []\n\n # inner / hidden network layers + non-linearities\n for l in self.network_layers:\n layers.append(Dense(l))\n layers.append(Relu)\n\n # output layer (no non-linearity)\n layers.append(Dense(self.output_dimension))\n \n # make jax stax object\n model = stax.serial(*layers)\n\n return model", "def network(self):\n inp = Input((self.env_dim))\n # #\n # x = Dense(256, activation='relu')(inp)\n # x = GaussianNoise(1.0)(x)\n # #\n # x = Flatten()(x)\n # x = Dense(128, activation='relu')(x)\n # x = GaussianNoise(1.0)(x)\n # #\n # out = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n # out = Lambda(lambda i: i * self.act_range)(out)\n # #\n\n x = conv_block(inp, 32, (2, 2), 8)\n x = conv_block(x, 64, (2, 2), 4)\n x = conv_block(x, 64, (2, 2), 3)\n x = Flatten()(x)\n x = Dense(256, activation='relu')(x)\n\n x = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(x)\n\n return Model(inp, out)", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def build_network(config):\n network_cfg = config['network']\n\n network_name = network_cfg['name']\n\n network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]\n\n args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]\n\n try:\n model = eval('{}({})'.format(network_name, ', '.join(args)))\n except:\n raise ValueError('Can\\'t load network.')\n\n return model.to(device='cuda')", "def build_simple_model():\n model = keras.Sequential()\n\n model.add(Flatten(input_shape=(32, 32, 1)))\n model.add(Dense(128, activation='relu'))\n model.add(Dense(256, activation='relu'))\n model.add(Dense(43, activation='softmax'))\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def create_network(normalized_input, n_vocab):\n \n # Create sequential Keras model\n model = Sequential()\n model.add(CuDNNLSTM(256,\n input_shape=(normalized_input.shape[1], normalized_input.shape[2]),\n return_sequences=True))\n 
model.add(Dropout(0.3))\n model.add(CuDNNLSTM(256))\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.3))\n model.add(Dense(n_vocab, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n \n # Load the weights to each node\n model.load_weights('weights/weights_final.hdf5')\n\n return model", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def create_model(self) -> Sequential:\r\n model = Sequential()\r\n\r\n model.add(Conv2D(32, kernel_size=(1, 16), strides=(1, 8),\r\n input_shape=self.OBSERVATION_SPACE_VALUES))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Conv2D(64, kernel_size=(1, 8), strides=(1, 4)))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Conv2D(32, kernel_size=(1, 4), strides=(1, 2)))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Flatten())\r\n model.add(Dense(256, activation='relu'))\r\n\r\n model.add(Dense(len(self.actions), activation='linear'))\r\n model.compile(loss=\"mse\", optimizer=RMSprop(lr=0.00025, momentum=0.95),\r\n metrics=['accuracy'])\r\n model.summary()\r\n return model", "def createNet(hyper, rescaleParameter, full_dim, nettype):\n\n\tshape = hyper[\"shape\"]\n\tnodes = hyper[\"nodes\"]\n\tlayer = hyper[\"layer\"]\n\tactiv = hyper[\"activationFunction\"]\n\n\tnetshape, nodesTotal = getNodesPerLayer(shape, nodes, layer, full_dim)\n\n\tif nettype == 'regression':\n\t\tmodel = Net_reg(netshape, activ)\n\telif nettype == 'classification':\n\t\tmodel = Net_cla(netshape, activ)\n\t\n\treturn model", "def _model(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n\t\tmodel.add(Dense(units=32, activation=\"relu\"))\n\t\tmodel.add(Dense(units=16, activation=\"relu\"))\n\t\tmodel.add(Dense(units=8, activation=\"relu\"))\n\t\tmodel.add(Dense(self.action_size, activation=\"linear\"))\n\t\tmodel.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n\n\t\treturn model", "def create_basic_cnn_model(num_classes: int):\n model = Sequential()\n\n # Convolutional + spooling layers\n model.add(Conv2D(64, (5, 5), input_shape=(config.ROI_IMG_SIZE['HEIGHT'], config.ROI_IMG_SIZE['WIDTH'], 1)))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Conv2D(32, (5, 5), padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Flatten())\n\n # Dropout\n model.add(Dropout(0.5, seed=config.RANDOM_SEED, name=\"Dropout_1\"))\n\n # FC\n model.add(Dense(1024, activation='relu', name='Dense_2'))\n\n # Output\n if num_classes == 2:\n model.add(Dense(1, activation='sigmoid', kernel_initializer=\"random_uniform\", name='Output'))\n else:\n model.add(Dense(num_classes, activation='softmax', kernel_initializer=\"random_uniform\", name='Output'))\n\n # Print model details if running in debug mode.\n if config.verbose_mode:\n print(model.summary())\n\n return model", "def create_model():\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(1024, 1024, 1)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.25))\n\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.25))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(2, activation='softmax'))\n\n model.summary()\n\n model.compile(optimizer='adam', 
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n return model", "def create_net(self, shape1, shape2, op, precision, ir_version, opset=None):\n\n #\n # Create ONNX model\n #\n\n from onnx import helper\n from onnx import TensorProto\n\n if op not in ['Add', 'Sub', 'Mul', 'Div']:\n raise ValueError(\"Operation has to be either Add or Mul or Sub or Div\")\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape1)\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape1)\n\n min_val = 1 if op == 'Div' else -127\n if shape2:\n const = np.random.randint(min_val, 127, shape2).astype(float)\n else:\n const = np.random.randint(min_val, 127, 1).astype(float)\n # TODO: add check when MO remove redundant layer (as Add/Sub if const = 0 or Mul/Div if const = 1)\n if const in [0, 1]:\n const = np.array([2], dtype=float)\n\n node_const_def = helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=const.shape,\n vals=const.flatten(),\n ),\n )\n\n node_def = helper.make_node(\n op,\n inputs=['input', 'const'],\n outputs=['output']\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_def],\n 'test_model',\n [input],\n [output],\n )\n\n # Create the model (ModelProto)\n args = dict(producer_name='test_model')\n if opset:\n args['opset_imports'] = [helper.make_opsetid(\"\", opset)]\n onnx_net = helper.make_model(graph_def, **args)\n\n # Create reference IR net\n if op == 'Div':\n const = np.power(const, -1)\n elif op == 'Sub':\n const = -const\n\n ref_net = None\n\n return onnx_net, ref_net", "def basic_network(cm=False):\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 1, 0],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 0],\n ])\n if cm is False:\n cm = np.array([\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n else:\n cm = None\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def get_network(network: str, config):\n using_spatial = False # If true input is fed as patches.\n using_attention = False\n patch_return_size = 1\n\n if network == 'cohen':\n model = CohenMLP(seq_len=config.seq_len)\n elif network == 'oksuz_rnn':\n model = OksuzRNN(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional)\n elif network == 'hoppe':\n spatial_pooling = None if config.spatial_pooling.lower() == 'none' else config.spatial_pooling.lower()\n using_spatial = True if spatial_pooling is not None else False\n model = Hoppe(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional, spatial_pooling=spatial_pooling,\n patch_size=config.patch_size)\n elif network == 'rnn_attention':\n using_attention = True\n model = RNNAttention(input_size=config.rnn_input_size, 
hidden_size=config.rnn_hidden_size,\n batch_size=config.batch_size, seq_len=config.seq_len,\n num_layers=config.rnn_num_layers, bidirectional=config.rnn_bidirectional)\n elif network == 'song':\n using_attention=True\n model = Song(seq_len=config.seq_len)\n elif network == 'soyak':\n using_spatial = True\n patch_return_size = config.patch_size - 2\n model = Soyak(patch_size=config.patch_size, seq_len=config.seq_len)\n elif network == 'patch_size':\n using_spatial = True\n model = PatchSizeTest(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'balsiger':\n using_spatial = True\n model = Balsiger(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'rca_unet':\n using_spatial = True\n patch_return_size = config.patch_size\n using_attention = config.rcab_attention\n model = RCAUNet(seq_len=config.seq_len, patch_size=config.patch_size,\n temporal_features=config.num_temporal_features, attention=config.rcab_attention)\n elif network == 'r2plus1d':\n using_spatial = True\n using_attention = True if config.non_local_level > 0 else False\n model = R2Plus1D(patch_size=config.patch_size, seq_len=config.seq_len, factorise=config.factorise,\n dimensionality_reduction_level=config.dimensionality_reduction_level,\n non_local_level=config.non_local_level)\n elif network == 'r1d':\n model = R1D(seq_len=config.seq_len)\n else:\n import sys # Should not be able to reach here as we provide a choice.\n print(\"Invalid network. Exiting...\")\n sys.exit(1)\n\n return model, using_spatial, using_attention, patch_return_size", "def create_empty_node():\n from linked_list import Node\n return Node()", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n 
group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def build_net(model_file, weight_file):\n if not os.path.exists(model_file):\n raise ValueError('cannot find model file: {}'.format(model_file))\n if not os.path.exists(weight_file):\n raise ValueError('cannot find weight file: {}'.format(weight_file))\n\n net = caffe.Net(model_file, weight_file, caffe.TEST)\n return net", "def init_model(self):\n cxnlib.CXNNetInitModel(self.handle)", "def build_model(self, model_def_path: Optional[str] = None) -> 'nn.Module':\n cfg = self.cfg\n model = cfg.model.build(\n num_classes=cfg.data.num_classes,\n in_channels=cfg.data.img_channels,\n save_dir=self.modules_dir,\n hubconf_dir=model_def_path,\n img_sz=cfg.data.img_sz)\n return model", "def build_model(self) -> nn.Module:\n pass", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def net(self):\n model = self.get('forward','model')\n weights = self.get('forward','weights')\n return caffe.Net(model, weights, caffe.TEST)", "def _get_default_model(num_concepts, num_hidden_acts):\n\n return tf.keras.models.Sequential([\n tf.keras.layers.Dense(\n 500,\n input_dim=num_concepts,\n activation='relu'\n ),\n tf.keras.layers.Dense(\n num_hidden_acts,\n activation=None,\n ),\n ])", "def build_model(self):\n self.model = models.Sequential()\n self.model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=['mae'])\n self.model.add(layers.Flatten())\n self.model.add(layers.Dense(64, activation='relu'))\n self.model.add(layers.Dense(10, activation='softmax'))\n self.model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "def network_with_devices():\n new_names = Names()\n new_devices = Devices(new_names)\n new_network = Network(new_names, new_devices)\n\n [SW1_ID, SW2_ID, OR1_ID] = 
new_names.lookup([\"Sw1\", \"Sw2\", \"Or1\"])\n\n # Add devices\n new_devices.make_device(SW1_ID, new_devices.SWITCH, 0)\n new_devices.make_device(SW2_ID, new_devices.SWITCH, 0)\n new_devices.make_device(OR1_ID, new_devices.OR, 2)\n\n return new_network", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def __build_model(self) -> Sequential:\n self.__name = 'Training model'\n input_dim, *hidden_dims, output_dim = parameters.ANET_DIMENSIONS\n\n model = Sequential()\n model.add(Input(shape=(input_dim,)))\n\n for dimension in hidden_dims:\n model.add(Dense(dimension, activation=self.__activation_function))\n\n model.add(Dense(output_dim, activation=softmax))\n\n model.compile(\n optimizer=(self.__optimizer(learning_rate=self.__learning_rate) if self.__learning_rate is not None else self.__optimizer()),\n loss=self.__loss_function\n )\n model.summary()\n return model", "def create(fpath):\n model_info = json.load(open(fpath))\n\n model_shape = model_info['model']\n model_settings = model_info['config']\n dropout_chance = model_info['config']['dropout_chance']\n\n nn = NeuralNetwork(model_shape, model_settings, dropout_probability=dropout_chance)\n return nn", "def create_model(window, input_shape, num_actions,\n model_name='q_network'):\n if model_name == 0:\n model = linear_model(window, input_shape, num_actions)\n elif model_name == 1:\n model = deep_model(window, input_shape, num_actions)\n elif model_name == 2:\n model = dueling_deep(window, input_shape, num_actions)\n else:\n print(\"No suitable models found.\")\n exit()\n print(model.summary())\n return model", "def build_nn(dropout: float=0.3,verbosity: int=0):\n model = Sequential()\n model.add(Dense(1024, input_shape=(1024,), activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1, activation='sigmoid'))\n \n if verbosity > 0:\n model.summary()\n return model", "def test_empty_networkx(self):\n g = nx.DiGraph()\n GraphData.from_networkx(g)", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n 
model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def initialize_default_model(config: BareConfig, model_class) -> torch.nn.Module:\n model = model_class()\n default_model_path = f\"{config.get_default_model_folder_path()}/{model_class.__name__}.model\"\n model.load_state_dict(torch.load(default_model_path))\n return model", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['potential'] = DM()\n content['potential']['key'] = self.potential_key\n content['potential']['id'] = self.potential_id\n content['implementation'] = DM()\n content['implementation']['key'] = self.potential_LAMMPS_key\n content['implementation']['id'] = self.potential_LAMMPS_id\n\n for subset in self.subsets:\n subset.build_model(content)\n\n self._set_model(model)\n return model", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def __init__(self, network=None, additional_info=None): # noqa: E501 # noqa: E501\n self._network = None\n self._additional_info = None\n self.discriminator = None\n self.network = network\n self.additional_info = additional_info", "def MLP_model(self):\n print(\"Building model..\")\n self.model = Sequential()\n\n # first hidden layer (0)\n self.model.add(Dense(self.h_nodes0, input_dim=self.input_size, use_bias=True))\n self.model.add(Activation(self.activation0))\n self.model.add(Dropout(self.dropout0))\n\n # second hidden layer (1)\n if self.h_nodes1 != None:\n self.model.add(Dense(self.h_nodes1, use_bias=True))\n self.model.add(Activation(self.activation1))\n self.model.add(Dropout(self.dropout1))\n\n # third hidden layer (2)\n if self.h_nodes2 != None:\n self.model.add(Dense(self.h_nodes2, use_bias=True))\n self.model.add(Activation(self.activation2))\n self.model.add(Dropout(self.dropout2))\n\n #output layer\n self.model.add(Dense(self.output_size))\n self.model.add(Activation(self.activation_out))\n\n #compile model\n self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[R_squared])\n\n return self.model", "def _build_model(self, name, obs_dim, action_dim, action_bounds):\n with tf.variable_scope(name):\n network = TFNetwork(name)\n num_layers = len(self.hidden_nodes)\n\n x = tf.placeholder(dtype=tf.float32, shape=[None, obs_dim], name='observation')\n network.add_layer(x)\n h = x\n\n # Set layer_func to Fully-Connected or Batch-Normalization layer\n layer_func = fc_layer\n if self.batch_norm:\n layer_func = bn_layer\n\n # Hidden layers\n for i in range(num_layers):\n h, h_weights = layer_func(h, self.hidden_nodes[i], tf.nn.relu, layer_idx=i, phase=self.is_training)\n network.add_layer(h, h_weights)\n\n # Output layer\n n_in = h.get_shape().as_list()[1]\n w_init = tf.random_uniform([n_in, action_dim], minval=-3e-3, maxval=3e-3)\n output, output_weights = fc_layer(h, action_dim, tf.nn.tanh, w_init=w_init, name='mu', phase=self.is_training)\n network.add_layer(output, output_weights)\n scaled_output = tf.multiply(output, action_bounds, name='mu_scaled')\n network.add_layer(scaled_output)\n\n return scaled_output, x, network", "def build_model(hyperparameters):\r\n model = keras.Sequential()\r\n\r\n model.add(layers.BatchNormalization(input_shape=[hyperparameters['input_size']]))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='relu'))\r\n model.add(layers.BatchNormalization())\r\n 
model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='sigmoid'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='relu'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='sigmoid'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(5, activation='softmax'))\r\n\r\n model.compile(optimizer=keras.optimizers.Adam(learning_rate=hyperparameters['learning_rate']),\r\n loss='categorical_crossentropy',\r\n metrics=['categorical_accuracy'])\r\n\r\n return model", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def _create_network(self):\n self.z_mean, self.z_log_sigma_sq = self._recognition_network()\n tf.add_to_collection(\"outputs\", self.z_mean)\n\n # Draw one sample z from Gaussian distribution\n eps = tf.random_normal((self.batch_size, self.output_size), 0, 1, dtype=tf.float32)\n # z = mu + sigma*epsilon\n self.z_latent = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))\n tf.add_to_collection(\"latents\", self.z_latent)\n\n # Use generator to determine mean of\n # Bernoulli distribution of reconstructed input\n self.x_decoded = self._generator_network()\n tf.add_to_collection(\"generators\", self.x_decoded)\n tf.add_to_collection(\"targets\", tf.zeros([self.batch_size], dtype=tf.int32))", "def empty_model() -> Model:\n yield Model()", "def MakeModel(self):\n pass", "def build_model(path_to_network_model, path_to_weights):\n\n # with tf.device('/gpu:0'):\n json_file = open(path_to_network_model, 'r')\n model_json = json_file.read()\n json_file.close()\n # custom_objects={\"backend\": K, \"tf\": tf}\n model = model_from_json(model_json, custom_objects={\"tf\": tf})\n model.compile(\n loss='logcosh',\n optimizer='adam')\n model.load_weights(path_to_weights)\n return model", "def get_unet_model(self):\n # create optimizer instance\n config = {\n 'class_name': self.optimizer,\n 'config': self.optimizer_params}\n optimizer = get_optimizer(config)\n\n self.model = unet(optimizer=optimizer,\n loss=self.loss,\n metrics=self.metrics,\n input_size=self.input_size,\n pretrained_weights=self.pretrained_weights)", "def create_neural_network():\n model = Sequential()\n model.add(LSTM(32, input_shape=(4, 45))) # 4 time-steps and 45 features\n model.add(Dense(64))\n model.add(Activation('tanh'))\n model.add(Dense(units=45)) # 45 is the number of class\n model.add(Activation('softmax')) # Output the density of probability\n\n model.compile(optimizer=adam(lr=0.001, decay=1e-6),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n model.summary()\n print(\"Creation of the Neural Network is finished.\")\n return model", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def create_model(self, input_state, layer1=450, layer2=350):\n # create the DQN\n self.model = Sequential()\n self.model.add(Dense(units=layer1, input_dim=input_state.nn_input.size))\n self.model.add(Activation('relu'))\n\n 
self.model.add(Dense(units=layer2))\n self.model.add(Activation('relu'))\n\n self.model.add(Dense(units=(input_state.size_graph+1)))\n self.model.add(Activation('linear'))\n\n self.model.compile(optimizer='rmsprop', loss='mse')\n\n self.model.predict(input_state.nn_input.reshape(1, input_state.nn_input.size), batch_size=1)", "def make_model():\n model = Sequential()\n model.add(Dense(1000, input_shape=(INPUT_SIZE,), activation='relu'))\n model.add(Dense(1000, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', metrics=['accuracy'])\n return model", "def create_model():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convulation Layers identical to paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def test_build_basic(self):\n # Get the components for a network\n data = array([[0, 1], [1, 0]])\n cdata = CData(data)\n encoder = BinaryEncoding(cdata)\n layer = ProductAnsatz(2)\n measure = Measurement(2, [0])\n\n # Make the network\n qnn = Network([encoder, layer, measure], computer=\"2q-qvm\")\n\n # Build each circuit for the network\n net0 = qnn._build(0)\n net1 = qnn._build(1)\n\n # Check that each circuit is a BaseAnsatz\n self.assertEqual(type(net0), BaseAnsatz)\n self.assertEqual(type(net1), BaseAnsatz)", "def create_model():\n input_shape = (768,)\n input = keras.Input(shape=input_shape)\n x = layers.Dense(64, activation=\"sigmoid\")(input)\n x = layers.Dense(64, activation=\"sigmoid\")(x)\n x = layers.Dense(64, activation=\"sigmoid\")(x)\n x = layers.Dense(64, activation=\"sigmoid\")(x)\n out = layers.Dense(1, activation='sigmoid')(x)\n model = keras.Model(input, out)\n model.summary(line_length=200)\n return model", "def test_ctor(self):\r\n # the network model itself\r\n model = densenet.DenseNet(\r\n depth=40,\r\n Block=densenet.BasicBlock,\r\n growth_rate=12,\r\n compression_rate=1.0,\r\n mask=True,\r\n num_classes=100,\r\n )\r\n num_params = model_utils.get_model_num_params(model)\r\n\r\n self.assertAlmostEqual(num_params, 1.06, places=1) # around 1.7\r\n self.assertEqual(model_utils.get_num_conv2d_layers(model), 40)", "def build_model(\n model_purpose: str,\n name: str,\n init_w: str,\n input_shape: np.ndarray,\n classes: int,\n dropout_rate: np.float32,\n) -> keras.Model:\n\n if model_purpose.startswith(\"segmentation\"):\n seg_builder = sm.Seg_model_builder(name, input_shape, classes, dropout_rate)\n model = seg_builder.get_model()\n\n elif model_purpose == \"inversion\":\n reg_builder = rm.Reg_model_builder(name, input_shape, classes, init_w)\n model = reg_builder.get_model()\n\n elif model_purpose == \"pixel_concentration_retrieval\":\n model = pwrm.Unet_2(input_shape, 
classes)\n\n return model" ]
[ "0.703458", "0.6730677", "0.6650045", "0.66165984", "0.65974486", "0.65902346", "0.6558993", "0.645672", "0.6456341", "0.6397473", "0.6397053", "0.63881767", "0.6345971", "0.6332582", "0.63210064", "0.62721264", "0.62575936", "0.6252461", "0.62491995", "0.6246717", "0.62378824", "0.62359154", "0.6229708", "0.62017465", "0.62000144", "0.6160766", "0.6146506", "0.6130534", "0.6127554", "0.61217946", "0.61188704", "0.61159086", "0.61044776", "0.60940176", "0.60825825", "0.60718596", "0.606202", "0.6038196", "0.60324806", "0.6025846", "0.5991802", "0.5982826", "0.59779143", "0.5975849", "0.5972481", "0.59506005", "0.5948161", "0.59433407", "0.593299", "0.5922563", "0.59186816", "0.59055364", "0.5905115", "0.5899333", "0.5895974", "0.5894768", "0.588473", "0.5884538", "0.5882863", "0.5879232", "0.587649", "0.58764565", "0.5875903", "0.5873595", "0.58620197", "0.5854353", "0.5848448", "0.5848289", "0.58434916", "0.5842572", "0.58344895", "0.58325124", "0.58224916", "0.5812933", "0.5809845", "0.58095413", "0.57989234", "0.5798196", "0.5788265", "0.57811946", "0.57792264", "0.57771534", "0.57712567", "0.5768404", "0.5763731", "0.576295", "0.5756421", "0.57562417", "0.5752865", "0.57474947", "0.57390934", "0.57377887", "0.573717", "0.57270527", "0.5718238", "0.5713715", "0.57108486", "0.5709307", "0.5707794", "0.56920046" ]
0.749267
0
Construct and return a copy of an existing network model.
def copy_network(source_net): return make_net_model({"id": source_net.id, "subnets": source_net.subnets, "ports": source_net.ports, "tenant_id": source_net.tenant_id, "mtu": source_net.mtu})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def clone(self):\n return _libsbml.Model_clone(self)", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def clone(self):\n return _libsbml.ModelDefinition_clone(self)", "def copy(self):\n kopy = self.__class__()\n # Copy the source net\n kopy.source_net = nx.DiGraph(self.source_net)\n return kopy", "def copy_model(self, tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def copy (self):\n copy = NFFG(id=self.id, name=self.name, version=self.version,\n mode=self.mode, metadata=self.metadata.copy(),\n status=self.status)\n copy.network = self.network.copy()\n return copy", "def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone", "def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone", "def clone(self, camera = None, light = None):\r\n newModel = Model(file_string = \"__clone__\", x=self.unif[0], y=self.unif[1], z=self.unif[2],\r\n rx=self.unif[3], ry=self.unif[4], rz=self.unif[5], sx=self.unif[6], sy=self.unif[7], sz=self.unif[8],\r\n cx=self.unif[9], cy=self.unif[10], cz=self.unif[11])\r\n newModel.buf = self.buf\r\n newModel.vGroup = self.vGroup\r\n newModel.shader = self.shader\r\n newModel.textures = self.textures\r\n return newModel", "def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN", "def clone(self):\r\n obj = CylinderModel()\r\n obj.params = copy.deepcopy(self.params)\r\n return obj", "def copy(self, new_name):\n new_model = dill.loads(dill.dumps(self.model))\n model_fn = lambda: new_model\n return self.__class__(new_name, model_fn)", "def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model 
is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def clone_keras_model(target, custom_objects=None):\n new_model = model_from_json(target.to_json(),custom_objects)\n new_model.set_weights(target.get_weights())\n return new_model", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def copy(self):\n model = LBM(\n n_row_clusters=self.n_row_clusters,\n n_column_clusters=self.n_column_clusters,\n max_iter=self.max_iter,\n n_init=self.n_init,\n n_init_total_run=self.n_init_total_run,\n n_iter_early_stop=self.nb_iter_early_stop,\n rtol=self.rtol,\n atol=self.atol,\n verbosity=self.verbosity,\n use_gpu=self.use_gpu,\n gpu_index=self.gpu_index,\n )\n model._nb_rows = self._nb_rows\n model._nb_cols = self._nb_cols\n model.loglikelihood_ = self.loglikelihood_\n model._np = self._np\n model._cupyx = self._cupyx\n model.trained_successfully_ = self.trained_successfully_\n model.pi_ = copy.copy(self.pi_)\n model.alpha_1_ = copy.copy(self.alpha_1_)\n model.alpha_2_ = copy.copy(self.alpha_2_)\n model.tau_1_ = copy.copy(self.tau_1_)\n model.tau_2_ = copy.copy(self.tau_2_)\n return model", "def clone(self):\n\n clone = self.__class__.__new__(self.__class__)\n clone._graph_state = self._graph_state\n clone._molecule_state = self._molecule_state\n return clone", "def clone(self):\n return _libsbml.Submodel_clone(self)", "def clone_model(model, input_tensors=None):\n if isinstance(model, Sequential):\n return _clone_sequential_model(model, input_tensors=input_tensors)\n else:\n return _clone_functional_model(model, input_tensors=input_tensors)", "def clone(self):\n return _libsbml.ExternalModelDefinition_clone(self)", "def make_non_parallel_copy(model):\n def replace_data_parallel(container):\n for name, module in container.named_children():\n if isinstance(module, nn.DataParallel):\n setattr(container, name, module.module)\n if has_children(module):\n replace_data_parallel(module)\n\n # Make a copy of the model, because we're going to change it\n new_model = deepcopy(model)\n if isinstance(new_model, nn.DataParallel):\n new_model = new_model.module\n replace_data_parallel(new_model)\n\n return new_model", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def __deepcopy__(self, memodict={}):\n nodes = [deepcopy(n) for n in self.nodes]\n return Network(nodes)", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def copy(self):\n \n \n G = DiGraph()\n G.node_set = copy.deepcopy(self.node_set)\n G.prefix = copy.deepcopy(self.prefix)\n G.suffix = copy.deepcopy(self.suffix)\n G.num_node 
= copy.deepcopy(self.num_node)\n G.edges = copy.deepcopy(self.edges)\n \n return G", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def clone(self,\n from_model: entities.Model,\n model_name: str,\n dataset: entities.Dataset = None,\n configuration: dict = None,\n status=None,\n scope=None,\n project_id: str = None,\n labels: list = None,\n description: str = None,\n tags: list = None,\n train_filter: entities.Filters = None,\n validation_filter: entities.Filters = None,\n ) -> entities.Model:\n from_json = {\"name\": model_name,\n \"packageId\": from_model.package_id,\n \"configuration\": from_model.configuration,\n \"metadata\": from_model.metadata,\n \"outputType\": from_model.output_type,\n \"inputType\": from_model.input_type}\n if project_id is None:\n project_id = self.project.id\n from_json['projectId'] = project_id\n if dataset is not None:\n if labels is None:\n labels = list(dataset.labels_flat_dict.keys())\n from_json['datasetId'] = dataset.id\n if labels is not None:\n from_json['labels'] = labels\n # if there are new labels - pop the mapping from the original\n _ = from_json['configuration'].pop('id_to_label_map', None)\n _ = from_json['configuration'].pop('label_to_id_map', None)\n if configuration is not None:\n from_json['configuration'].update(configuration)\n if description is not None:\n from_json['description'] = description\n if tags is not None:\n from_json['tags'] = tags\n if scope is not None:\n from_json['scope'] = scope\n if status is not None:\n from_json['status'] = status\n\n metadata = self._set_model_filter(metadata=from_model.metadata,\n train_filter=train_filter,\n validation_filter=validation_filter)\n if metadata['system']:\n from_json['metadata'] = metadata\n success, response = self._client_api.gen_request(req_type='post',\n path='/ml/models/{}/clone'.format(from_model.id),\n json_req=from_json)\n if not success:\n raise exceptions.PlatformException(response)\n new_model = entities.Model.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n package=from_model._package)\n\n if new_model._dataset is not None and new_model._dataset.readonly is False:\n logger.warning(\n \"Model is using an unlocked dataset {!r}. 
Make it readonly for training reproducibility\".format(\n new_model.dataset.name))\n\n return new_model", "def copy_model_state(model):\n model_state = deepcopy(model.state_dict())\n return model_state", "def _try_clone_model(model):\n try:\n return copy.deepcopy(model)\n except Exception:\n warnings.warn(\n \"Failed to clone model. Model state might be mutated during verification.\"\n )\n return model", "def clone(self) -> \"Graph\":\n return Graph(seed=self.seed,\n layout=self.layout,\n community_n=self.community_n,\n community_size_mean=self.community_size_mean,\n community_size_std=self.community_size_std,\n community_p_in=self.community_p_in,\n community_p_out=self.community_p_out,\n considered_immune_threshold=self.considered_immune_threshold)", "def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def copy(self) -> \"Pipeline\":\n model = PipelineModel(self._config.as_dict(), vocab=copy.deepcopy(self.vocab))\n config = copy.deepcopy(self._config)\n\n pipeline_copy = Pipeline(model, config)\n pipeline_copy._model.load_state_dict(self._model.state_dict())\n\n return pipeline_copy", "def clone(self) -> 'State':\n return State(self.sim, state=self.get_state().copy())", "def copy(self):\n node_new = Node(self.state.copy(), self.parent, self.children.copy(), self.RRT, self.path_length)\n node_new.vs = self.vs.copy()\n node_new.RRT = self.RRT\n node_new.observed = self.observed\n node_new.observation_node = self.observation_node\n node_new.observation_area = self.observation_area\n\n return node_new", "def copy(self):\n G = WeightedGraph(self.V, self.edges.copy(), self.weights.copy())\n return G", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def clone(self):\n return self.__class__(self.name, *self)", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def construct_private_model(input_size, model):\n # get rank of current process\n rank = comm.get().get_rank()\n dummy_input = torch.empty(input_size)\n\n # party 0 always gets the actual model; remaining parties get dummy model\n if rank == 0:\n model_upd = 
model\n else:\n model_upd = LeNet()\n private_model = crypten.nn.from_pytorch(model_upd, dummy_input).encrypt(src=0)\n return private_model", "def clone(self) -> 'BoardGraph':\n return self.__class__(self.board_class)", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def __copy__(self):\n #new = MCTS(copy=True) # don't run _predict() twice\n new = MCTS(self.env, copy=True) # don't set pi and Q twice\n new.env = self.env.__copy__()\n # can't use __dict__.update() without effecting env __copy__()\n # in theory, you don't need to copy the env. just use one copy for simulating, and restore it to root\n # since _Q() evaluates the env.done() of children, you need self.done = env.done() in __init__()\n # same for env.winner\n new.pi = []\n new. Q = 0\n new.net = self.net\n new.t = self.t\n new.expl = self.expl\n new.children = []\n new.parent = None\n return new", "def create_original_model():\n model = Sequential()\n model.add(Embedding(max_features,\n embedding_dims,\n input_length=maxlen))\n model.add(Dropout(0.2))\n model.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Dropout(0.2))\n model.add(Activation('relu'))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model", "def deepcopy(self):\n return ModelFile(self._key)", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def clone(self):\n return self", "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def create_model(project_parameters):\n model = Net(project_parameters=project_parameters)\n if project_parameters.checkpoint_path is not None:\n model = load_checkpoint(model=model, num_classes=project_parameters.num_classes,\n use_cuda=project_parameters.use_cuda, checkpoint_path=project_parameters.checkpoint_path)\n return model", "def copy(self):\n out = type(self).__new__(self.__class__)\n out.__dict__.update(self.__dict__)\n # make sure the copy has its own unique random number generator\n seed_seq = self.rng._bit_generator._seed_seq.spawn(1)[0]\n out.__dict__['rng'] = get_generator(seed_seq)\n return out", "def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def _clone_sequential_model(model, input_tensors=None):\n if not isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a `Sequential` model instance, '\n 'but got:', model)\n\n def clone(layer):\n return layer.__class__.from_config(layer.get_config())\n\n layers = [clone(layer) for layer in model.layers]\n if input_tensors is None:\n return Sequential(layers=layers, name=model.name)\n else:\n if len(to_list(input_tensors)) != 1:\n raise ValueError('To clone a `Sequential` model, we expect '\n ' at most one tensor '\n 'as part of `input_tensors`.')\n x = to_list(input_tensors)[0]\n if K.is_keras_tensor(x):\n origin_layer = x._keras_history[0]\n if isinstance(origin_layer, InputLayer):\n return 
Sequential(layers=[origin_layer] + layers,\n name=model.name)\n else:\n raise ValueError('Cannot clone a `Sequential` model on top '\n 'of a tensor that comes from a Keras layer '\n 'other than an `InputLayer`. '\n 'Use the functional API instead.')\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + str(x.name))\n input_layer = input_tensor._keras_history[0]\n return Sequential(layers=[input_layer] + layers, name=model.name)", "def clone(self):\n cloned = Graph()\n for v in self.vertices:\n cloned.vertices[v] = self.vertices[v].clone()\n return cloned", "def copy(self):\n new = object.__new__(type(self))\n new.bot = self.bot\n new.description = self.description\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = 0\n new.name = self.name\n return new", "def clone(self):\n return self.copy()", "def copy(self, shareWeights):\n newNode = SparseLinear(self.inputDim, self.outputDim, self.stdv)\n #newNode.receiveGradFrom = self.receiveGradFrom[:]\n #newNode.receiveInputFrom = self.receiveInputFrom[:]\n if shareWeights:\n newNode.weight = self.weight\n newNode.gradWeight = self.gradWeight\n newNode.bias = self.bias\n newNode.gradBias = self.gradBias\n return newNode", "def copy(self):\n new_ann = ForwardArtificialNeuralNectwork(self.dim_in-1, self.dim_hid, self.dim_out)\n new_ann.weight[:,:] = self.weight\n new_ann.connectivity[:,:] = self.connectivity\n new_ann.hidden[:] = self.hidden\n return new_ann", "def clone(self):\n cloned = org_copy.deepcopy(self)\n return cloned", "def clone(self):\n return _libsbml.CompModelPlugin_clone(self)", "def _clone(self):\n c = self.__class__(\n model=self.model,\n query=self.query.chain(),\n using=self._db,\n hints=self._hints,\n )\n c._sticky_filter = self._sticky_filter\n c._for_write = self._for_write\n c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n c._known_related_objects = self._known_related_objects\n c._iterable_class = self._iterable_class\n c._fields = self._fields\n return c", "def copy_state(model):\n copy_dict = OrderedDict()\n state_dict = model.state_dict()\n for k, v in state_dict.items():\n copy_dict[k] = v.cpu() if v.is_cuda else v.clone()\n\n return copy_dict", "def copy(self, new=True):\n return UndirectedGraph(self._nodes, self._edges)", "def clone(self) -> Self:\n return clone(self, safe=True)", "def copy(self):\n return AncestralGraph(self.nodes, self.directed, self.bidirected, self.undirected)", "def clone(self):\n return attr.evolve(self)", "def copy(self):\n try:\n return self.__class__(self, copy=True)\n except TypeError:\n new = self.__class__(copy.deepcopy(self))\n return new", "def clone(self):\n return _libsbml.ListOfSubmodels_clone(self)", "def clone(self):\n return _libsbml.Port_clone(self)", "def copy(self):\n return self.__class__(self.name, list(self.gRNAs))", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def clone(self):\n return copy.deepcopy(self)", "def clone(self):\n return attr.evolve(self, env=dict(self._env))", "def copy(self):\n\t\t\n\t\taCopy = self.__class__()\n\t\taCopy.mip = self.mip\n\t\taCopy.msg = self.msg\n\t\taCopy.options = self.options\n\t\treturn aCopy", "def get_copy_of_graph(self):\r\n return deepcopy(self)", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, 
activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def duplicate(self):\n\t\treturn Graph(self.vertices[:], self.edges[:])", "def clone(self):\r\n cp = self.__class__(self.type, self.data, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def copy(self):\n edges, weights = [], []\n \n # This is a microoptimization\n edges_append = edges.append\n weights_append = weights.append\n \n for edge, weight in self.edges(and_weights=True):\n edges_append(edge)\n weights_append(weight)\n \n return type(self)(edges, weights)", "def clone(self):\r\n #return copy(self)\r\n cp = self.__class__(self.type, None, None, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp", "def copy(self):\n return object.__new__(type(self))", "def copy(self):\n return self.__class__(**vars(self))", "def __deepcopy__(self, memodict={}):\n return Node(deepcopy(self.location), self.weight)", "def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp", "def clone(self):\n return self.__class__(self.delegate.clone(), self.collection)", "def clone(self):\n return shallow_clone(self)", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def clone(self):\n raise NotImplementedError" ]
[ "0.7690615", "0.7457661", "0.7384238", "0.7347668", "0.69865394", "0.6961353", "0.6951828", "0.6855561", "0.6832441", "0.6813686", "0.6811424", "0.6808021", "0.66637117", "0.66387904", "0.66021425", "0.6570278", "0.65329057", "0.65179616", "0.6502068", "0.64940655", "0.64485437", "0.64023626", "0.63872206", "0.6369688", "0.6359906", "0.63584393", "0.6280947", "0.6265574", "0.6226902", "0.6220534", "0.6214386", "0.6200075", "0.6185579", "0.61837447", "0.6178532", "0.6150662", "0.61157596", "0.6077799", "0.605563", "0.6048957", "0.60427624", "0.60367316", "0.60360956", "0.6032158", "0.6023458", "0.5996656", "0.59867984", "0.5959252", "0.5958771", "0.5954634", "0.5952735", "0.59199864", "0.591918", "0.59171534", "0.5909947", "0.59026074", "0.5898734", "0.58915806", "0.5887723", "0.58825964", "0.58686876", "0.58613306", "0.58416736", "0.5840346", "0.5816456", "0.5801067", "0.57937706", "0.5790909", "0.57903516", "0.57884276", "0.57870966", "0.57870215", "0.5786387", "0.5786174", "0.5780456", "0.5779673", "0.5776724", "0.57755965", "0.57666796", "0.5763765", "0.57607776", "0.57580465", "0.57548153", "0.57427496", "0.57427496", "0.57416064", "0.57416064", "0.57416064", "0.57416064", "0.574043", "0.5738774", "0.57266444", "0.5726547", "0.57258224", "0.571573", "0.5713639", "0.5709865", "0.57071644", "0.5703763", "0.5701001" ]
0.68326706
8
Handler for endpoint creations and updates.
def on_endpoint_set(self, response, name): try: hostname, orchestrator, workload_id, endpoint_id = \ split_endpoint_name(name) except ValueError: # For some reason this endpoint's name does not have the expected # form. Ignore it. LOG.warning("Unexpected form for endpoint name: %s", name) return if hostname != self.hostname: LOG.info("Endpoint not on this node: %s", name) return # Get the endpoint spec. endpoint = etcdutils.safe_decode_json(response.value, 'endpoint') if not (isinstance(endpoint, dict) and 'spec' in endpoint and isinstance(endpoint['spec'], dict) and 'interfaceName' in endpoint['spec'] and 'ipNetworks' in endpoint['spec'] and 'mac' in endpoint['spec']): # Endpoint data is invalid; treat as deletion. LOG.warning("Invalid endpoint data: %s => %s", response.value, endpoint) self.on_endpoint_delete(None, name) return annotations = endpoint.get('metadata', {}).get('annotations', {}) endpoint = endpoint['spec'] # If the endpoint has no ipNetworks, treat as deletion. This happens # when a resync from the mechanism driver overlaps with a port/VM being # deleted. if not endpoint['ipNetworks']: LOG.info("Endpoint has no ipNetworks: %s", endpoint) self.on_endpoint_delete(None, name) return # Construct NetModel port equivalent of Calico's endpoint data. fixed_ips = [] dns_assignments = [] fqdn = annotations.get(datamodel_v3.ANN_KEY_FQDN) network_id = annotations.get(datamodel_v3.ANN_KEY_NETWORK_ID) allowedIps = [e.split('/')[0] for e in endpoint.get('allowedIps', [])] for addrm in endpoint['ipNetworks']: ip_addr = addrm.split('/')[0] if ip_addr in allowedIps: continue subnet_id = self.subnet_watcher.get_subnet_id_for_addr( ip_addr, network_id ) or self.v1_subnet_watcher.get_subnet_id_for_addr( ip_addr, network_id ) if subnet_id is None: LOG.warning("Missing subnet data for one of port's IPs") continue fixed_ips.append({'subnet_id': subnet_id, 'ip_address': ip_addr}) if fqdn: dns_assignments.append({'hostname': fqdn.split('.')[0], 'ip_address': ip_addr, 'fqdn': fqdn}) if not fixed_ips: LOG.warning("Endpoint has no DHCP-served IPs: %s", endpoint) return extra_dhcp_opts = [] mtu = self.mtu_watcher.get_mtu(endpoint['interfaceName']) self.mtu_watcher.watch_port(endpoint_id, endpoint['interfaceName']) if mtu: extra_dhcp_opts.append(self.get_mtu_option(mtu)) port = {'id': endpoint_id, 'device_owner': 'calico', 'device_id': endpoint['interfaceName'], 'fixed_ips': fixed_ips, 'mac_address': endpoint['mac'], # FIXME: Calico currently does not handle extra DHCP # options, other than MTU, but there might be use cases # where it should handle further options. # https://bugs.launchpad.net/networking-calico/+bug/1553348 'extra_dhcp_opts': extra_dhcp_opts} if fqdn: port['dns_assignment'] = dns_assignments # Ensure that the cache includes the network and subnets for this port, # and set the port's network ID correctly. try: port['network_id'] = self._ensure_net_and_subnets(port) except SubnetIDNotFound: LOG.warning("Missing data for one of port's subnets") return # Report this at INFO level if it is a new port. Note, we # come through this code periodically for existing ports also, # because of how we watch the etcd DB for changes. if endpoint_id not in self.local_endpoint_ids: LOG.info("New port: %s", port) self.local_endpoint_ids.add(endpoint_id) else: LOG.debug("Refresh already known port: %s", port) # Add this port into the NetModel. 
self.agent.cache.put_port(dhcp.DictModel(port)) # If we have seen the TAP interface, schedule updating Dnsmasq; # otherwise wait until we do see the TAP interface, whereupon # _update_dnsmasq will be called again. Dnsmasq updates can # take a little time, and they run in series, so it's best to # wait if we don't have the information we need yet, to avoid # delaying the correct Dnsmasq update that we really want. if mtu: self._update_dnsmasq(port['network_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_handler(event, context):\n return update_endpoint(event)", "def endpoint_create(self, endpoint_name=None, config=None):\n if config is None:\n raise Exception(\"Config required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint', 'PUT', body=config)\n else:\n self.request('/v1.1/endpoints/%s' % endpoint_name, 'PUT', body=config)", "def endpoint_update(self, endpoint_name=None, config=None):\n if config is None:\n raise Exception(\"Config required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint', 'POST', body=config)\n else:\n self.request('/v1.1/endpoints/%s' % endpoint_name, 'POST', body=config)", "def post(self, endpoint):\n context = pecan.request.context\n endpoint_dict = endpoint.as_dict()\n\n endpoint_dict['project_id'] = context.project_id\n endpoint_dict['user_id'] = context.user_id\n\n if endpoint_dict.get('name') is None:\n endpoint_dict['name'] = None\n if endpoint_dict.get('url') is None:\n endpoint_dict['url'] = None\n if endpoint_dict.get('desc') is None:\n endpoint_dict['desc'] = None\n\n endpoint = objects.Endpoint(context, **endpoint_dict)\n\n endpoint.create()\n\n # pecan.request.rpcapi.function_create(function, function_create_timeout=1000)\n\n # Set the HTTP Location Header\n # pecan.response.location = link.build_url('functions',\n # function.id)\n return Endpoint.convert_with_links(endpoint)", "def poll_create(event, context):\n endpoint_name = get_endpoint_name(event)\n logger.info('Polling for update of endpoint: %s', endpoint_name)\n return is_endpoint_ready(endpoint_name)", "def register_endpoint(self, **kwargs):\n self._database.update('endpoint', kwargs, kwargs, upsert=True)", "def create_endpoint(self, endpoint_id, endpoint_ref):\n raise exception.NotImplemented() # pragma: no cover", "def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, endpoint_data['name'])", "def endpoint_present(\n name,\n publicurl=None,\n internalurl=None,\n adminurl=None,\n region=None,\n profile=None,\n url=None,\n interface=None,\n **connection_args\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n _api_version(profile=profile, **connection_args)\n\n 
endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n\n def _changes(desc):\n return ret.get(\"comment\", \"\") + desc + \"\\n\"\n\n def _create_endpoint():\n if _OS_IDENTITY_API_VERSION > 2:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n url=url,\n interface=interface,\n profile=profile,\n **connection_args\n )\n else:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl,\n profile=profile,\n **connection_args\n )\n\n if endpoint and \"Error\" not in endpoint and endpoint.get(\"region\") == region:\n\n if _OS_IDENTITY_API_VERSION > 2:\n\n change_url = False\n change_interface = False\n\n if endpoint.get(\"url\", None) != url:\n ret[\"comment\"] = _changes(\n 'URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"url\", None), url\n )\n )\n change_url = True\n\n if endpoint.get(\"interface\", None) != interface:\n ret[\"comment\"] = _changes(\n 'Interface changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"interface\", None), interface\n )\n )\n change_interface = True\n\n if __opts__.get(\"test\") and (change_url or change_interface):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n return ret\n\n if change_url:\n ret[\"changes\"][\"url\"] = url\n\n if change_interface:\n ret[\"changes\"][\"interface\"] = interface\n\n else:\n change_publicurl = False\n change_adminurl = False\n change_internalurl = False\n\n if endpoint.get(\"publicurl\", None) != publicurl:\n change_publicurl = True\n\n ret[\"comment\"] = _changes(\n 'Public URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"publicurl\", None), publicurl\n )\n )\n\n if endpoint.get(\"adminurl\", None) != adminurl:\n change_adminurl = True\n ret[\"comment\"] = _changes(\n 'Admin URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"adminurl\", None), adminurl\n )\n )\n\n if endpoint.get(\"internalurl\", None) != internalurl:\n change_internalurl = True\n ret[\"comment\"] = _changes(\n 'Internal URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"internalurl\", None), internalurl\n )\n )\n\n if __opts__.get(\"test\") and (\n change_publicurl or change_adminurl or change_internalurl\n ):\n ret[\"result\"] = None\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n return ret\n\n if change_publicurl:\n ret[\"changes\"][\"publicurl\"] = publicurl\n\n if change_adminurl:\n ret[\"changes\"][\"adminurl\"] = adminurl\n\n if change_internalurl:\n ret[\"changes\"][\"internalurl\"] = internalurl\n\n if ret[\"comment\"]: # changed\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n _create_endpoint()\n ret[\"comment\"] += 'Endpoint for service \"{}\" has been updated'.format(name)\n\n else:\n # Add new endpoint\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be created\"\n ret[\"comment\"] = 'Endpoint for service \"{}\" will be added'.format(name)\n return ret\n _create_endpoint()\n ret[\"comment\"] = 'Endpoint for service \"{}\" has been added'.format(name)\n\n if ret[\"comment\"] == \"\": # => no changes\n ret[\"comment\"] = 'Endpoint for service \"{}\" already 
exists'.format(name)\n return ret", "def update_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.delete_endpoint(endpoint)\n self.add_endpoint(endpoint)", "def _construct_endpoints(self):\n # Functions\n async def get_function_list_data(request: web.Request):\n entrypoints = [elm.to_dict() for elm in self._function_manager.definitions.values()]\n return web.json_response(entrypoints)\n\n async def get_function_list_text(request: web.Request):\n rows = []\n for definition in self._function_manager.definitions.values():\n rows.append(definition.function_name)\n rows.append(' URL:')\n rows.append(f' async api: /{definition.function_name}')\n rows.append(f' block api: /{definition.function_name}/keep-connection')\n rows.append(f' Max Concurrency: {definition.max_concurrency}')\n rows.append(' Description:')\n rows.append(f' {definition.description}')\n if len(definition.arg_definitions) == 0:\n rows.append(' No Args')\n else:\n rows.append(' Args')\n for arg in definition.arg_definitions:\n rows.append(f' {arg.name} {arg.type.name} {\"Requiered\" if arg.is_required else \"NOT-Required\"}')\n if arg.description != '':\n rows.append(f' {arg.description}')\n rows.append(f' Timeout: {definition.timeout} sec')\n rows.append('\\n')\n\n return web.Response(text='\\n'.join(rows))\n\n # function\n async def get_function_definition(request: web.Request):\n function_name = request.match_info['function_name']\n\n if function_name not in self._function_manager.definitions:\n raise web.HTTPNotFound()\n\n return web.json_response(self._function_manager.definitions[function_name].to_dict())\n\n async def get_function_running_count(request: web.Request):\n function_name = request.match_info['function_name']\n\n ret = self._function_manager.get_current_number_of_execution(function_name)\n if ret is None:\n raise web.HTTPNotFound()\n\n return web.json_response(ret)\n\n # Tasks\n async def get_task_info(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.to_dict())\n\n async def get_task_done(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.is_done())\n\n async def get_task_result(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n return web.json_response(task_info.result)\n\n async def get_task_list(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n tasks = self._function_manager.list_task_info(function_name)\n if tasks is None:\n raise web.HTTPNotFound()\n\n return web.json_response([elm.to_dict() for elm in tasks])\n\n # Termination\n async def post_terminate_function(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n self._function_manager.terminate_function(function_name)\n return 
web.json_response({})\n\n async def post_terminate_task(request: web.Request, task_id: str):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n self._function_manager.terminate_task(task_id)\n\n return web.json_response({})\n\n api_list = [\n web.get('/function/list/data', get_function_list_data),\n web.get('/function/list/text', get_function_list_text),\n web.get(r'/function/definition/{function_name}', get_function_definition),\n web.get(r'/function/running-count/{function_name}', get_function_running_count),\n web.get(r'/task/info/{task_id}', get_task_info),\n web.get(r'/task/done/{task_id}', get_task_done),\n web.get(r'/task/result/{task_id}', get_task_result),\n web.get(r'/task/list/{function_name}', get_task_list),\n web.post(r'/terminate/function/{function_name}', post_terminate_function),\n web.post(r'/terminate/task/{task_id}', post_terminate_task),\n ]\n\n async def index(request: web.Request):\n return web.Response(text='\\n'.join([elm.path for elm in api_list])+'\\n')\n\n self._app.add_routes([*api_list, web.get('/', index)])", "def add_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if not exists:\n self.endpoints.append((endpoint, now()))", "def update_endpoint(EndpointName=None, EndpointConfigName=None):\n pass", "def update(self, request, phone):\n try:\n attrs = self.flatten_dict(request.POST)\n #if self.exists(**attrs):\n #return rc.DUPLICATE_ENTRY\n #else:\n endpoint = Endpoint.objects.get(uid__exact=phone, site__name__exact=request.user)\n if attrs.get('effective_caller_id_name'):\n endpoint.effective_caller_id_name = attrs.get('effective_caller_id_name')\n if attrs.get('password'):\n endpoint.password = attrs.get('password')\n if attrs.get('description'):\n endpoint.description = attrs.get('description')\n if attrs.get(\"enabled\") == \"false\":\n endpoint.enable = False\n elif attrs.get(\"enabled\") == \"true\":\n endpoint.enable = True\n if attrs.get(\"enable\") == \"false\":\n endpoint.enable = False\n elif attrs.get(\"enable\") == \"true\":\n endpoint.enable = True\n endpoint.save()\n return endpoint\n except:\n return rc.NOT_HERE", "def _route_get(self):\n if self.path == '/status':\n self._create_status()\n else:\n self._create_method_not_allowed()", "def register():\n\n print(\"Request: \", request)\n print(\"foo: \", request.app.ep_mapping)\n print(json.load(request.body))\n endpoint_details = json.load(request.body)\n print(endpoint_details)\n\n # Here we want to start an executor client.\n # Make sure to not put anything into the client, until after an interchange has\n # connected to avoid clogging up the pipe. 
Submits will block if the client has\n # no endpoint connected.\n endpoint_id = str(uuid.uuid4())\n fw = spawn_forwarder(request.app.address, endpoint_id=endpoint_id)\n connection_info = fw.connection_info\n ret_package = {'endpoint_id': endpoint_id}\n ret_package.update(connection_info)\n print(\"Ret_package : \", ret_package)\n\n print(\"Ep_id: \", endpoint_id)\n request.app.ep_mapping[endpoint_id] = ret_package\n return ret_package", "def get_endpoint(self, *args):\n\t\traise NotImplementedError", "def create_endpoint(*args):\n endpoint = ''\n for arg in args:\n endpoint = endpoint + str(arg) + \"/\"\n endpoint = endpoint[:-1]\n endpoint = endpoint + \".json\"\n return endpoint", "def post(self):\n new_uuid = str(uuid.uuid4())\n data = request.json\n data[\"_id\"] = new_uuid\n data[\"created_at\"] = time.time() # unix epoch\n data[\"tenants\"] = []\n data[\"shared\"][\"sharing_list\"] = {}\n\n for field in self.req_fields:\n try:\n _ = data[field]\n except KeyError:\n return f\"Error: Required fields: {self.req_fields}\", 400\n\n # Check that the Function location is registered\n location_id = request.json[\"location\"].lower()\n request.json[\"location\"] = location_id\n location = mongoUtils.find(\"location\", {\"id\": location_id})\n if not location:\n return f\"Location {location_id} is not registered. Please add the location first\", 400\n location[\"functions\"].append(data[\"id\"])\n\n try:\n new_uuid = mongoUtils.add(\"func\", data)\n except pymongo.errors.DuplicateKeyError:\n return f\"Network Function with id {data['id']} already exists\", 400\n mongoUtils.update(\"location\", location[\"_id\"], location)\n return new_uuid, 201", "def _send_to_endpoint(self, events):\n raise NotImplementedError('Please implement _send_to_endpoint().')", "def associate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'POST')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' % (endpoint_name, instance_id), 'POST')", "def make_new_handler(self, *args, **kwargs):", "def update_endpoint(self, endpoint_id, endpoint_ref):\n raise exception.NotImplemented() # pragma: no cover", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))", "def add_endpoint(self, endpoint, **kwargs):\n endpoint.arguments = kwargs\n self.endpoints.append(endpoint)", "def delete_handler(event, context):\n delete_endpoint_config(event)", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def endpoint_changed(self, endpoint_event, matched_filter):\n # type: (EndpointEvent, Any) -> None\n event_type = endpoint_event.get_type()\n ed = 
endpoint_event.get_endpoint_description()\n ed_id = ed.get_id()\n\n if event_type == EndpointEvent.ADDED:\n # if it's an add event, we call handle_endpoint_added\n imported_reg = self._import_added_endpoint(ed)\n # get exception from ImportRegistration\n exc = imported_reg.get_exception()\n # if there was exception on import, print out messages\n if exc:\n _logger.exception(\n \"BasicTopologyManager import failed for endpoint.id=%s\",\n ed_id,\n )\n else:\n _logger.debug(\n \"BasicTopologyManager: service imported! \"\n \"endpoint.id=%s, service_ref=%s\",\n ed_id,\n imported_reg.get_reference(),\n )\n elif event_type == EndpointEvent.REMOVED:\n self._unimport_removed_endpoint(ed)\n _logger.debug(\n \"BasicTopologyManager: endpoint removed. endpoint.id=%s\", ed_id\n )\n elif event_type == EndpointEvent.MODIFIED:\n self._update_imported_endpoint(ed)\n _logger.debug(\n \"BasicTopologyManager: endpoint updated. endpoint.id=%s\", ed_id\n )", "def add_endpoint(self, endpoint):\n name = endpoint.get_name()\n self._calls[name] = endpoint", "def custom_service_endpoint(self) -> global___Snippet.ClientInitialization.ServiceEndpoint:", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))", "def on_create(self, payload):\n pass", "def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def add_endpoint(self, resource, method, data):\n self.endpoints.setdefault(resource, {})[method] = data", "def create_endpoint(EndpointName=None, EndpointConfigName=None, Tags=None):\n pass", "def post_route_target_create(self, resource_dict):\n pass", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg2'))", "def create_new_config():\n\n print (\"\")\n log_app.debug(\"create_new_config\")\n log_app.debug(\"create_new_config / method : %s\", request.method )\n\n req_json = request.get_json()\n log_app.debug(\"create_new_config / req_json : \\n%s\", 
pformat(req_json) )\n\n new_uuid = req_json['model_uuid']\n new_admin_email = req_json['model_admin_email']\n new_admin_name = req_json['model_admin_name']\n new_admin_surname = req_json['model_admin_surname']\n\n new_endpoint_data = req_json.get('new_endpoint_data', None)\n\n ### target right config collection\n allowedCollections = [\"global\" , \"footer\", \"navbar\", \"tabs\", \"endpoints\" , \"styles\" , \"routes\", \"socials\" ]\n \n if request.method == 'POST':\n\n log_app.debug(\"config app route / POST\" )\n \n ### check if model can be used\n modelCheck = get_config_model(new_uuid, returnDict=True, noRemap=True)\n log_app.debug(\"create_new_config / modelCheck : \\n%s\", pformat(modelCheck) )\n\n ### check if uuid is new and not already used\n globalColl = mongoConfigColls['global']\n usedUuids = globalColl.distinct('apiviz_front_uuid')\n log_app.debug(\"create_new_config / usedUuids : \\n%s\", pformat(usedUuids) )\n \n modelIsUsed = new_uuid in usedUuids\n # modelIsUsed = False ### only for debugging\n log_app.debug(\"create_new_config / modelIsUsed : %s\", modelIsUsed )\n\n canUseModel = modelIsUsed == False and modelCheck != None \n log_app.debug(\"create_new_config / canUseModel : %s\", canUseModel )\n\n ### start copying documents if allowed\n if canUseModel : \n\n query = {'apiviz_front_uuid' : new_uuid}\n \n # loop collections and copy paste docs\n for coll in allowedCollections : \n \n # get corresponding documents without _id\n mongoColl = mongoConfigColls[coll]\n results = list(mongoColl.find(query, { '_id' : 0 } ))\n \n # replace 'apiviz_front_uuid' field's value by new_uuid\n # set 'is_default' field's value as False\n for doc in results :\n\n log_app.debug(\"create_new_config / coll-doc['field'] : %s-%s\" %(coll,doc['field']) )\n \n doc['apiviz_front_uuid'] = new_uuid\n doc['is_default'] = False\n\n # specific cases\n if doc['field'] == 'app_title' : \n doc['content'] = req_json['new_title']\n\n if doc['field'] == 'app_logo' : \n doc['url'] = req_json['new_logoUrl']\n\n # save documents list back in collection\n mongoColl.insert(results)\n\n print()\n\n msg = 'your new website is ready'\n resp_code = 200\n resp_msgCode = 'instane_created_1a'\n \n # create new default uuid_auth\n new_uuid_auth_doc = uuid_auth_model.copy()\n\n new_uuid_auth_doc[\"apiviz_front_uuid\"] = new_uuid\n new_uuid_auth_doc[\"apiviz_front_name\"] = req_json['new_title']\n\n new_uuid_auth_doc[\"date_added\"] = datetime.datetime.now()\n new_uuid_auth_doc[\"added_by\"][\"name\"] = new_admin_name\n new_uuid_auth_doc[\"added_by\"][\"surname\"] = new_admin_surname\n new_uuid_auth_doc[\"added_by\"][\"email\"] = new_admin_email\n\n ### setting admin list\n new_uuid_auth_doc[\"auth_role_users\"][\"admin_list\"] = [new_admin_email]\n \n uuidsAuthColl = mongoConfigColls['uuids_auth']\n uuidsAuthColl.insert(new_uuid_auth_doc)\n\n\n else : \n msg = 'errors : '\n if modelIsUsed :\n msg += 'your current uuid is already used... '\n resp_msgCode = 'uuid_is_current_1a'\n if modelCheck == None : \n msg += \"you can't use this model.. 
\"\n resp_msgCode = 'instance_used_1a'\n resp_code = 400\n log_app.debug(\"create_new_config / msg : %s\", msg )\n\n return jsonify({\n 'msg' : msg,\n 'status' : resp_code,\n 'msg_code' : resp_msgCode,\n 'uuid' : new_uuid,\n 'request' : req_json,\n })\n\n\n\n\n ### TO DO \n elif request.method == 'DELETE' : \n\n log_app.debug(\"config app route / DELETE\" )\n \n return jsonify({\n 'msg' : 'your apiviz instance has been deleted',\n 'status' : 200,\n 'msg_code' : 'instane_deleted_1a',\n 'uuid' : new_uuid,\n 'request' : req_json,\n })\n\n\n # not a POST nor a DELETE request\n else : \n return jsonify({\n 'msg' : 'there was an error during the process (method not allowed)',\n 'status' : 500,\n 'msg_code' : 'error_1a',\n 'uuid' : new_uuid,\n 'request' : req_json,\n }), 500", "def add_endpoint_hit(db_session, endpoint, time, test, version, job_id):\n endpoint_id = db_session.query(Endpoint.id).filter(Endpoint.name == endpoint).first().id\n test_id = db_session.query(Test.id).filter(Test.name == test).first().id\n db_session.add(TestEndpoint(endpoint_id=endpoint_id, test_id=test_id, duration=time, app_version=version,\n travis_job_id=job_id))", "def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(4)\n\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def create_endpoint_template_v2(manager, region, service, publicurl, adminurl,\n internalurl):\n service_id = manager.resolve_service_id(service)\n for ep in [e._info for e in manager.api.endpoints.list()]:\n if ep['service_id'] == service_id and ep['region'] == region:\n log(\"Endpoint template already exists for '%s' in '%s'\"\n % (service, region))\n\n up_to_date = True\n for k in ['publicurl', 'adminurl', 'internalurl']:\n if ep.get(k) != locals()[k]:\n up_to_date = False\n\n if up_to_date:\n return\n else:\n # delete endpoint and recreate if endpoint urls need updating.\n log(\"Updating endpoint template with new endpoint urls.\")\n manager.api.endpoints.delete(ep['id'])\n\n manager.create_endpoints(region=region,\n service_id=service_id,\n publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl)\n log(\"Created new endpoint template for '%s' in '%s'\" % (region, service),\n level=DEBUG)", "def build_endpoint(self, **kwargs):\n\n raise NotImplementedError()", "def post_routing_instance_create(self, 
resource_dict):\n pass", "def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n time.sleep(2)\n\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))", "def newEndpointDetected(self, endpoint):\n if self._printSWAP == True:\n print \"New endpoint with Reg ID = \" + str(endpoint.getRegId()) + \" : \" + endpoint.name", "def create_api_endpoints(app):\n manager = APIManager(app, flask_sqlalchemy_db=models.database)\n manager.create_api(models.State, results_per_page=0)\n manager.create_api(models.Party, results_per_page=0)\n manager.create_api(models.Candidate, results_per_page=0)\n manager.create_api(models.Election, results_per_page=0)\n manager.create_api(models.ElectoralCollege,\n results_per_page=0, collection_name='electoralcollege')\n manager.create_api(models.PartiesInvolved,\n results_per_page=0, collection_name='partiesinvolved')\n manager.create_api(models.ElectionsToState,\n results_per_page=0, collection_name='electionstostate')", "def post(self):\n\n try:\n creation_event_type_id = self.jbody[\"creationEventTypeId\"]\n follows_id = self.jbody.get(\"followsId\")\n for_creator = self.jbody.get(\"forCreator\", False)\n for_owner = self.jbody.get(\"forOwner\", True)\n description = self.jbody[\"description\"]\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\n \"FATE: Create fate (event-type {}, follows {}, for creator {}, \"\n \"for owner {}, desc {})\".format(\n creation_event_type_id, follows_id,\n for_creator, for_owner, description\n )\n )\n\n creation_event_type = (\n self.session.query(EventType).get(creation_event_type_id)\n )\n\n if creation_event_type is None:\n self.write_error(400, message=\"Bad creation event type\")\n return\n\n try:\n fate = Fate.create(\n self.session, creation_event_type,\n follows_id=follows_id, for_creator=for_creator,\n for_owner=for_owner,\n description=description\n )\n except IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n self.session.commit()\n\n json = fate.to_dict(self.href_prefix)\n json[\"href\"] = \"/api/v1/fates/{}\".format(fate.id)\n\n self.created(\"/api/v1/fates/{}\".format(fate.id), json)\n\n log.info(\n \"FATE: Created fate {} (event-type {}, follows {}, \"\n \"for creator {}, for owner {}, desc {})\".format(\n fate.id,\n creation_event_type_id, follows_id,\n for_creator, for_owner, description\n )\n )", "def __call__(self, req):\n if req.method == 'GET':\n product = self.produce(req.query_string, USER=authenticate(req))\n elif req.method == 'POST':\n product = self.replace(req.POST.get('old'), req.POST.get('new'),\n req.query_string, USER=authenticate(req))\n else:\n raise HTTPMethodNotAllowed()\n with self.db:\n format = product.format or accept(req.environ)\n headerlist = emit_headers(format, product)\n app_iter = list(emit(format, product))\n return Response(headerlist=headerlist, app_iter=app_iter)", "def add_service_endpoint(key, name, description, type, url, region):\n service_names = {service.name: service.id for 
service in key.services.list()}\n if name in service_names.keys():\n service_id = service_names[name]\n else:\n service = key.services.create(name=name, service_type=type, description=description)\n print(\"Created service '{}' of type '{}'\".format(name, type))\n service_id = service.id\n\n for endpoint in key.endpoints.list():\n if endpoint.service_id == service_id:\n if endpoint.publicurl == url and endpoint.adminurl == url and endpoint.internalurl == url:\n return True\n else:\n key.endpoints.delete(endpoint.id)\n\n key.endpoints.create(region=region, service_id=service_id, publicurl=url, adminurl=url, internalurl=url)\n print(\"Added service endpoint '{}' at '{}'\".format(name, url))\n return True", "def post_service_instance_create(self, resource_dict):\n pass", "def _exe(self, method):\n request_path = self.request.path\n path = request_path.split('/')\n services_and_params = list(filter(lambda x: x!='',path))\n\n # Get all function names configured in the class RestHandler\n functions = list(filter(lambda op: hasattr(getattr(self,op),'_service_name') == True and inspect.ismethod(getattr(self,op)) == True, dir(self)))\n # Get all http methods configured in the class RestHandler\n http_methods = list(map(lambda op: getattr(getattr(self,op),'_method'), functions))\n\n if method not in http_methods:\n raise tornado.web.HTTPError(405,'The service not have %s verb' % method)\n for operation in list(map(lambda op: getattr(self,op), functions)):\n service_name = getattr(operation,\"_service_name\")\n service_params = getattr(operation,\"_service_params\")\n # If the _types is not specified, assumes str types for the params\n services_from_request = list(filter(lambda x: x in path,service_name))\n\n if operation._method == self.request.method and service_name == services_from_request and len(service_params) + len(service_name) == len(services_and_params):\n try:\n params_values = self._find_params_value_of_url(service_name,request_path) + self._find_params_value_of_arguments(operation)\n p_values = self._convert_params_values(params_values)\n body = str(self.request.body,'utf-8')\n self.request_data = None\n if body:\n self.request_data = json.loads(body)\n response = operation(*p_values)\n self.request_data = None\n\n if response == None:\n return\n\n self.set_header(\"Content-Type\",'application/json')\n self.write(json.dumps(response))\n self.finish()\n except Exception as detail:\n self.request_data = None\n self.gen_http_error(500,\"Internal Server Error : %s\"%detail)\n raise", "def create(self, *args, **kwargs):\n pass", "def test_updated_handler_called(self):\n self.client.ensure_path(\"/services/db/1.1.1.1\")\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"1\"}))\n handler = Mock()\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf, handler)\n z.loop(2, timeout=self.TIMEOUT)\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\"}))\n z.loop(1, timeout=self.TIMEOUT)\n handler.assert_called_once_with()", "def post(self):\n\n # we need a unique tx number so we can look these back up again\n # as well as for logging\n # FIXME: how can we guarantee uniqueness here?\n tx = int(time.time() * 100000) + random.randrange(10000, 99999)\n\n log.info(\"EVENTS [{}]: Creating events\".format(tx))\n\n try:\n user = self.jbody[\"user\"]\n if not EMAIL_REGEX.match(user):\n user += \"@\" + self.domain\n event_type_id = self.jbody.get(\"eventTypeId\", None)\n category = self.jbody.get(\"category\", None)\n state = self.jbody.get(\"state\", 
None)\n note = self.jbody.get(\"note\", None)\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n if not event_type_id and (not category and not state):\n raise exc.BadRequest(\n \"Must specify an event type id or both category and state\"\n )\n\n if event_type_id:\n event_type = self.session.query(EventType).get(event_type_id)\n else:\n event_type = self.session.query(EventType).filter(\n and_(\n EventType.category == category,\n EventType.state == state\n )\n ).one()\n\n if event_type is None:\n self.write_error(400, message=\"Bad event type\")\n return\n\n category = event_type.category\n state = event_type.state\n\n hostnames = (\n [self.jbody.get(\"hostname\", None)]\n if self.jbody.get(\"hostname\", None) else []\n )\n\n if \"hostnames\" in self.jbody:\n hostnames.extend(self.jbody.get(\"hostnames\"))\n\n log.info(\n \"EVENTS [{}]: Will create event {} {}\".format(\n tx, category, state\n )\n )\n\n log.info(\n \"EVENTS [{}]: Hostnames specified: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a host query was specified, we need to talk to the external\n # query server to resolve this into a list of hostnames\n if \"hostQuery\" in self.jbody:\n query = self.jbody[\"hostQuery\"]\n log.info(\"EVENTS [{}]: Running query {}\".format(tx, query))\n response = PluginHelper.request_get(params={\"query\": query})\n if response.json()[\"status\"] == \"ok\":\n hostnames.extend(response.json()[\"results\"])\n log.info(\n \"EVENTS [{}]: Hostnames after query: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a quest Id was given, look up the labors in that quest and\n # get all the hostnames for those labors.\n if \"questId\" in self.jbody:\n log.info(\"EVENTS [{}]: Looking up quest {}\".format(\n tx, self.jbody[\"questId\"])\n )\n quest = self.session.query(Quest).filter_by(\n id=self.jbody[\"questId\"]\n ).scalar()\n if not quest:\n raise exc.NotFound(\"No such Quest {} found\".format(id))\n for labor in quest.labors:\n hostnames.append(labor.host.hostname)\n\n log.info(\n \"EVENTS [{}]: Hostnames after quest expansion: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # We need to create a list of hostnames that don't have a Host record\n new_hosts_needed = set(hostnames)\n hosts = (\n self.session.query(Host).filter(Host.hostname.in_(hostnames)).all()\n )\n\n for host in hosts:\n new_hosts_needed.remove(str(host.hostname))\n\n # if we need to create hosts, do them all at once\n if new_hosts_needed:\n log.info(\"EVENTS [{}]: Creating hosts {}\".format(\n tx, \", \".join(new_hosts_needed)\n ))\n Host.create_many(self.session, new_hosts_needed)\n hosts = (\n self.session.query(Host).filter(\n Host.hostname.in_(hostnames)\n ).all()\n )\n\n if not hosts:\n raise exc.BadRequest(\"No hosts found with given list\")\n\n try:\n if len(hosts) > 1:\n # if we are supposed to create many events,\n # we want to do them as a giant batch\n log.info(\"EVENTS [{}]: Creating multiple events\".format(tx))\n events_to_create = []\n for host in hosts:\n events_to_create.append({\n \"host_id\": host.id,\n \"user\": user,\n \"event_type_id\": event_type.id,\n \"note\": note,\n \"tx\": tx\n })\n Event.create_many(self.session, events_to_create, tx)\n else:\n # if we are just creating one event, do it the simple way\n log.info(\"EVENTS [{}]: Creating 1 event\".format(tx))\n event = Event.create(\n self.session, hosts[0], user, event_type, note=note\n )\n\n except 
IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\"EVENTS [{}]: Flushing and committing\".format(tx))\n self.session.flush()\n log.info(\"EVENTS [{}]: Flushed\".format(tx))\n self.session.commit()\n log.info(\"EVENTS [{}]: Committed\".format(tx))\n\n if len(hosts) == 1:\n json = event.to_dict(self.href_prefix)\n json[\"href\"] = \"/api/v1/events/{}\".format(event.id)\n self.created(\n \"/api/v1/events/{}\".format(event.id), json\n )\n else:\n # if we created many events, we need to look them up by the TX\n # number to figure out what they were since the were created in bulk\n created_events = self.session.query(Event).filter(Event.tx == tx).all()\n self.created(\n data={\n \"events\": (\n [event.to_dict(self.href_prefix) for event in created_events]\n ),\n \"totalEvents\": len(created_events)\n }\n )\n\n log.info(\"EVENTS [{}]: Created event {} {} for {}\".format(\n tx, category, state,\n \", \".join(hostnames)\n ))", "async def post(self):\n\n # decrypt the request\n payload = decrypt_message(self.request.body, self.fernet_secret)\n if not payload:\n raise tornado.web.HTTPError(status_code=401)\n\n # ignore all requests for echo to this handler\n if payload[\"request\"] == \"echo\":\n LOGGER.error(\"This handler can't echo things.\")\n raise tornado.web.HTTPError(status_code=400)\n\n # get the request ID\n reqid = payload.get(\"reqid\")\n if reqid is None:\n raise ValueError(\n \"No request ID provided. \" \"Ignoring this request.\"\n )\n\n # rate limit the request if this is turned on\n if self.ratelimits:\n # get the frontend client IP addr\n frontend_client_ipaddr = payload.get(\"client_ipaddr\")\n\n if not frontend_client_ipaddr:\n LOGGER.error(\n \"[%s] request: '%s' is missing a payload \"\n \"value: 'client_ipaddr' \"\n \"needed to calculate rate, dropping this request.\"\n % (reqid, payload[\"request\"])\n )\n raise tornado.web.HTTPError(status_code=400)\n\n self.ratelimit_request(\n reqid,\n payload[\"request\"],\n frontend_client_ipaddr,\n request_body=payload[\"body\"],\n )\n\n # if we successfully got past host, decryption, rate-limit validation,\n # then process the request\n try:\n\n #\n # dispatch the action handler function\n #\n\n # inject the request ID into the body of the request so the backend\n # function can report on it\n payload[\"body\"][\"reqid\"] = reqid\n\n # inject the PII salt into the body of the request as well\n payload[\"body\"][\"pii_salt\"] = self.pii_salt\n\n #\n # validate the request and choose the function to dispatch\n #\n handler_func, problems, validate_msgs = validate_and_get_function(\n payload[\"request\"], payload[\"body\"]\n )\n\n if handler_func is None:\n problems[\"failure_reason\"] = \"invalid request parameters\"\n response = {\n \"success\": False,\n \"response\": problems,\n \"messages\": [validate_msgs],\n }\n else:\n # inject the config object into the backend function call\n # this passes along any secrets or settings from environ\n # directly to those functions\n backend_func = partial(\n handler_func, payload[\"body\"], config=self.config\n )\n # run the function associated with the request type\n loop = tornado.ioloop.IOLoop.current()\n response = await loop.run_in_executor(\n self.executor,\n backend_func,\n )\n\n #\n # see if the request was one that requires an email and password. 
in\n # this case, we'll apply backoff to slow down repeated failed\n # passwords\n #\n passcheck_requests = {\"user-login\", \"user-passcheck-nosession\"}\n\n if (\n payload[\"request\"] in passcheck_requests\n and response[\"success\"] is False\n ):\n\n (\n failure_status,\n failure_count,\n failure_wait,\n ) = await self.handle_failed_logins(payload)\n\n # if the user is locked for repeated login failures, handle that\n if failure_status == \"locked\":\n response = await self.lockuser_repeated_login_failures(\n payload, unlock_after_seconds=self.config.userlocktime\n )\n elif failure_status == \"wait\":\n LOGGER.warning(\n \"[%s] User with email: %s is being rate-limited \"\n \"after %s failed login attempts. \"\n \"Current wait time: %.1f seconds.\"\n % (\n reqid,\n pii_hash(payload[\"body\"][\"email\"], self.pii_salt),\n failure_count,\n failure_wait,\n )\n )\n\n # reset the failed counter to zero for each successful attempt\n elif (\n payload[\"request\"] in passcheck_requests\n and response[\"success\"] is True\n ):\n\n self.failed_passchecks.pop(payload[\"body\"][\"email\"], None)\n\n #\n # trim the failed_passchecks dict\n #\n if len(self.failed_passchecks) > 1000:\n self.failed_passchecks.pop(self.failed_passchecks.keys()[0])\n\n #\n # form and send the response\n #\n await self.send_response(response, reqid)\n\n except Exception:\n\n LOGGER.exception(\"Failed to understand request.\")\n raise tornado.web.HTTPError(status_code=400)", "def add_endpoint(self, endpoint):\n self._endpoints.append(endpoint)", "def on_post(self, req, resp):\n # A map of supported actions to the handlers for tasks for those actions\n supported_actions = {\n 'validate_design': TasksResource.task_validate_design,\n 'verify_site': TasksResource.task_verify_site,\n 'prepare_site': TasksResource.task_prepare_site,\n 'verify_nodes': TasksResource.task_verify_nodes,\n 'prepare_nodes': TasksResource.task_prepare_nodes,\n 'deploy_nodes': TasksResource.task_deploy_nodes,\n 'destroy_nodes': TasksResource.task_destroy_nodes,\n 'relabel_nodes': TasksResource.task_relabel_nodes,\n }\n\n try:\n json_data = self.req_json(req)\n\n action = json_data.get('action', None)\n if supported_actions.get(action, None) is None:\n self.error(req.context, \"Unsupported action %s\" % action)\n self.return_error(resp,\n falcon.HTTP_400,\n message=\"Unsupported action %s\" % action,\n retry=False)\n else:\n supported_actions.get(action)(self, req, resp, json_data)\n except Exception as ex:\n self.error(\n req.context,\n \"Unknown error: %s\\n%s\" % (str(ex), traceback.format_exc()))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def __init__(self, endpoint_a, endpoint_b):\n self.endpoint_a = endpoint_a\n self.endpoint_b = endpoint_b", "def process_request(self, httpverb, endpoints, **kwargs):\n\n # check if endpoints starts with '/' and update if not present\n if not endpoints.startswith('/'):\n endpoints = '/' + endpoints\n\n # we have already added api to __url\n if endpoints.startswith('/api'):\n endpoints = endpoints.replace('/api','')\n\n # add endpoints to the base url\n url = self.__url + endpoints\n #logging.info('Processing method -> ' + httpverb + '-> url :' + url)\n\n payload = {}\n file_data = None\n raw_response = None\n\n # check for the payload and files in kwargs\n if len(list(kwargs.keys())) > 0:\n for key1 in kwargs.keys():\n if 'payload' in key1 or 'data' in key1:\n # convert python object to json string\n payload = json.dumps(kwargs[key1])\n elif 'file' in key1:\n # in 
case you want to post the file\n file_name = kwargs[key1]\n file_data = [('mapFileFormFile', (file_name, open(file_name, 'rb'), 'application/json'))]\n\n # process the http verb\n if httpverb.lower() == 'get':\n raw_response = self.__session.get(url)\n elif httpverb.lower() == 'put':\n self.__session.headers.update(self.__headers)\n raw_response = self.__session.put(url, data=payload)\n elif httpverb.lower() == 'post':\n raw_response = self.__session.post(url, data=payload, files=file_data)\n elif httpverb.lower() == 'delete':\n raw_response = self.__session.delete(url)\n else:\n # looks like method did not match, raise exception\n logging.error('ERROR : The HTTP-VERB ' + httpverb + ' not found. Must be \"GET PUT POST and DELETE\"')\n raise ValueError('ERROR : The HTTP-VERB ' + httpverb + ' not found. Must be \"GET PUT POST and DELETE\"')\n\n if not raw_response.ok:\n # ERROR handling\n try :\n raw_response.raise_for_status()\n except Exception as error_massage:\n error_content = raw_response.content\n logging.critical(str(error_massage) + ' ' + str(error_content))\n raise requests.HTTPError(error_massage, error_content)\n\n # process the response\n end_result = process_response(raw_response)\n return end_result", "def create_item(obj: endpoint_model):\n # should this error if exists?\n new_obj = db.save(obj)\n return new_obj", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def create(self):\n ...", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out1', 
'intersite-testsuite-app-epg'))\n\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def update_resolver_endpoint(ResolverEndpointId=None, Name=None):\n pass", "def update(cls, webhook_endpoint_id, url=None, events=None, status=None):\n data = {}\n if url:\n data['url'] = url\n if events:\n data['events'] = events\n if status:\n data['status'] = status\n return WebhookEndpoint(Requester.patch(cls.endpoint + '/' + webhook_endpoint_id, data=data))", "def put(self, endpoint: str, json: Any = None) -> Any:\n pass", "def update_endpoint(self, endpoint_id, service_id=None, interface=None,\n url=None, region=None, enabled=None, **kwargs):\n doc = common.Document()\n endpoint = common.Element(\"endpoint\")\n doc.append(endpoint)\n\n if service_id:\n endpoint.add_attr(\"service_id\", service_id)\n if interface:\n endpoint.add_attr(\"interface\", interface)\n if url:\n endpoint.add_attr(\"url\", url)\n if region:\n endpoint.add_attr(\"region\", region)\n\n if 'force_enabled' in kwargs:\n endpoint.add_attr(\"enabled\", kwargs['force_enabled'])\n elif enabled is not None:\n endpoint.add_attr(\"enabled\", str(enabled).lower())\n\n resp, body = self.patch('endpoints/%s' % str(endpoint_id), str(doc))\n body = self._parse_body(etree.fromstring(body))\n return resp, body", "def lambda_handler(event, context):\n\n try:\n created_item = create_new_table_item(event)\n return {\"statusCode\": 201, \"body\": json.dumps(f\"{created_item}\")}\n\n except BlankRequestBody as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(MISSING_PARAMETERS_MESSAGE)}\n\n except ValidationError as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(INCORRECT_PARAMETERS_MESSAGE)}\n\n except Exception as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 500, \"body\": json.dumps(\"Internal server error\")}", "def post(self, request, *args, **kwargs):\n return super().create(*args, **kwargs)", "def handle_request(self, t):\n\n # respect ur elders\n super(StoreServer,self).handle_request(t)\n\n # convert out tuple to a request\n request = StoreRequest(t)\n\n # come up w/ a response for them\n response = StoreResponse(key=request.key,\n url=self.get_url(),\n port=self.get_port())\n\n # now spin up a handler for that port\n handler = StoreHandler(self,(response.url,response.port))\n\n # send back our response\n self.bb_client.put(response)", "def _attach_endpoints(self):\n for name, endpoint in inspect.getmembers(self):\n is_class = inspect.isclass(endpoint)\n is_subclass = is_class and issubclass(endpoint, self.Endpoint)\n not_endpoint = endpoint is not self.Endpoint\n\n if is_subclass and not_endpoint:\n endpoint_instance = endpoint(self.session)\n setattr(self, name.lower(), endpoint_instance)", "def publish_endpoint_acls(self, endpoint_uuid, acls):\n log.info(\"Publishing ACL Update %s for %s\" % (acls, endpoint_uuid))\n update = {\"type\": \"ACLUPDATE\",\n \"issued\": time.time() * 1000,\n \"acls\": acls}\n self.pub_lock.acquire()\n self.pub_socket.send_multipart([endpoint_uuid.encode(\"utf-8\"),\n json.dumps(update).encode(\"utf-8\")])\n self.pub_lock.release()", "def endpoints(self, 
endpoints):\n\n self._endpoints = endpoints", "def post_route_table_create(self, resource_dict):\n pass", "def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)", "def post(self):\n data = request.json\n create_ue(data)\n return None, 201", "def handle(self, args):\n\n logger.debug('ARGS: %s', args)\n args = json.loads(args)\n\n try:\n logger.info('Handling %s request.' % args['method'])\n method = 'handle_' + args['method'].lower()\n if callable(getattr(self, method, None)):\n return operator.methodcaller(method, args)(self)\n else:\n return self.error('Invalid method for this endpoint', httplib.METHOD_NOT_ALLOWED)\n except ValueError as e:\n msg = 'ValueError: %s' % e.message\n return self.error(msg, httplib.BAD_REQUEST)\n except splunk.RESTException as e:\n return self.error('RESTexception: %s' % e, httplib.INTERNAL_SERVER_ERROR)\n except Exception as e:\n msg = 'Unknown exception: %s' % e\n logger.exception(msg)\n return self.error(msg, httplib.INTERNAL_SERVER_ERROR)", "def handle(self, *args, **options):\n self.create_indices()\n self.bulk()", "def create(self):\n\n pass", "def handler(event, context):\n response = None\n try:\n if 'Records' in event:\n record = testSerializer(event['Records'][0]['dynamodb']['NewImage'])\n type = record['type']\n if type == \"PostCreatedEvent\":\n addPost(record)\n elif type == \"PostUpdatedEvent\":\n updatePost(record)\n elif type == \"PostDeletedEvent\":\n deletePost(record)\n elif type == \"PostVoteEvent\":\n votePost(record)\n response = None\n else:\n if event['httpMethod'] == \"GET\":\n response = respond(None, getPost(event))\n except Exception as error:\n logger.exception(error)\n response = {\n 'status': 500,\n 'error': {\n 'type': type(error).__name__,\n 'description': str(error),\n },\n }\n finally:\n return response", "def on_post_resource(self, req, resp, *args, **params):\n instance = self.get_object(**params)\n self.save_object(req.params, req, resp, instance, **params)", "def post_routing_instance_update(self, resource_id, resource_dict):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create(self):", "def test_140_keystone_endpoint(self):\n u.log.debug('Checking keystone api endpoint data...')\n endpoints = self.keystone_v2.endpoints.list()\n admin_port = '35357'\n internal_port = public_port = '5000'\n expected = {\n 'id': u.not_null,\n 'region': 'RegionOne',\n 'adminurl': u.valid_url,\n 'internalurl': u.valid_url,\n 'publicurl': u.valid_url,\n 'service_id': 
u.not_null\n }\n ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,\n public_port, expected)\n if ret:\n amulet.raise_status(amulet.FAIL,\n msg='keystone endpoint: {}'.format(ret))", "def post(self, *args, **kwargs):\n json_data = request.get_json()\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n\n if 'data' not in json_data:\n raise BadRequest('/data', 'You must provide data with a \"data\" route node')\n if isinstance(json_data['data'], dict):\n if 'type' not in json_data['data']:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in json_data['data']:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if json_data['data']['type'] != related_type_:\n raise InvalidType('/data/type', 'The type field does not match the resource type')\n if isinstance(json_data['data'], list):\n for obj in json_data['data']:\n if 'type' not in obj:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in obj:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if obj['type'] != related_type_:\n raise InvalidType('/data/type', 'The type provided does not match the resource type')\n\n self.before_post(args, kwargs, json_data=json_data)\n\n obj_, updated = self._data_layer.create_relationship(json_data,\n model_relationship_field,\n related_id_field,\n kwargs)\n\n qs = QSManager(request.args, self.schema)\n includes = qs.include\n if relationship_field not in qs.include:\n includes.append(relationship_field)\n schema = compute_schema(self.schema, dict(), qs, includes)\n\n if updated is False:\n return '', 204\n\n result = schema.dump(obj_).data\n if result.get('links', {}).get('self') is not None:\n result['links']['self'] = request.path\n self.after_post(result)\n return result, 200", "def add_endpoint_set(self, other_endpoint_set):\n for fep in other_endpoint_set.get_flask_endpoints():\n other_endpoint = other_endpoint_set.get_endpoint(fep)\n self.add_endpoint(other_endpoint)\n return", "def addEndpoints(self, endpoints):\n self.endpoints.extend(endpoints)\n self._connectOrBind(endpoints)", "def _create_json_endpoint(self, endpoint, is_v1):\n json_endpoint = {}\n\n if endpoint.service_name:\n json_endpoint[\"serviceName\"] = endpoint.service_name\n elif is_v1:\n # serviceName is mandatory in v1\n json_endpoint[\"serviceName\"] = \"\"\n if endpoint.port and endpoint.port != 0:\n json_endpoint[\"port\"] = endpoint.port\n if endpoint.ipv4 is not None:\n json_endpoint[\"ipv4\"] = endpoint.ipv4\n if endpoint.ipv6 is not None:\n json_endpoint[\"ipv6\"] = endpoint.ipv6\n\n return json_endpoint", "def create_object_object_inst(self, endpoint_name, object_id, object_inst_id=None):\n \n if self.endpoint_dict.has_key(endpoint_name):\n endpoint = self.endpoint_dict[endpoint_name][\"object\"]\n if lwm2m_dict_objects.has_key(str(object_id)):\n object_multi_instance = lwm2m_dict_objects[str(object_id)][\"multiInst\"]\n if not object_multi_instance:\n self.logger.error(\"Multi-Instance of the Object %s is not allowed !\", object_id)\n message = \"Multi-Instance of the object is not allowed\"\n return message\n else:\n if object_inst_id is None:\n object_inst_id = 0\n object_id_object_inst_id = str(object_id) + \"_\" + str(object_inst_id)\n while True:\n if endpoint.objects_dict.has_key(object_id_object_inst_id):\n object_inst_id += 1\n object_id_object_inst_id = str(object_id) + \"_\" + str(object_inst_id)\n else:\n break\n object_id_object_inst_id = 
str(object_id) + \"_\" + str(object_inst_id)\n \n if not endpoint.objects_dict.has_key(object_id_object_inst_id): \n endpoint.objects_dict.update({\n object_id_object_inst_id : {\"object\" : None, \"object_id\" : object_id, \\\n \"object_inst_id\" : object_inst_id}\n })\n endpoint.objects_dict[object_id_object_inst_id][\"object\"] = DeviceMgmtObject(object_id_object_inst_id, endpoint.local_ip, endpoint.local_port)\n \n self.object_collection.process_object(endpoint.objects_dict[object_id_object_inst_id][\"object\"], object_id, endpoint.local_ip, endpoint.local_port)\n return int(object_inst_id)\n else:\n message = \"Object Instance already exists\"\n return message\n else:\n message = \"Endpoint doesn't exist\"\n return message", "def _register_view(self, app, resource, *urls, **kwargs):\n endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()\n self.endpoints.add(endpoint)\n\n if endpoint in getattr(app, 'view_class', {}):\n existing_view_class = app.view_functions[endpoint].__dict__['view_class']\n\n # if you override the endpoint with a different class, avoid the collision by raising an exception\n if existing_view_class != resource:\n raise ValueError('Endpoint {!r} is already set to {!r}.'\n .format(endpoint, existing_view_class.__name__))\n\n if not hasattr(resource, 'endpoint'): # Don't replace existing endpoint\n resource.endpoint = endpoint\n resource_func = self.output(resource.as_view(endpoint))\n\n for decorator in chain(kwargs.pop('decorators', ()), self.decorators):\n resource_func = decorator(resource_func)\n\n for url in urls:\n rule = self._make_url(url, self.blueprint.url_prefix if self.blueprint else None)\n\n # If this Api has a blueprint\n if self.blueprint:\n # And this Api has been setup\n if self.blueprint_setup:\n # Set the rule to a string directly, as the blueprint\n # is already set up.\n self.blueprint_setup.add_url_rule(self._make_url(url, None), view_func=resource_func, **kwargs)\n continue\n else:\n # Set the rule to a function that expects the blueprint\n # prefix to construct the final url. 
Allows deferment\n # of url finalization in the case that the Blueprint\n # has not yet been registered to an application, so we\n # can wait for the registration prefix\n rule = partial(self._make_url, url)\n else:\n # If we've got no Blueprint, just build a url with no prefix\n rule = self._make_url(url, None)\n # Add the url to the application or blueprint\n app.add_url_rule(rule, view_func=resource_func, **kwargs)", "def post(self):\r\n try:\r\n self.valid_args()\r\n data = json.loads(request.data)\r\n # Clean HATEOAS args\r\n data = self.hateoas.remove_links(data)\r\n inst = self.__class__(**data)\r\n self._update_object(inst)\r\n getattr(require, self.__class__.__name__.lower()).create(inst)\r\n db.session.add(inst)\r\n db.session.commit()\r\n return json.dumps(inst.dictize())\r\n except IntegrityError:\r\n db.session.rollback()\r\n raise\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='POST')", "def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')", "def post(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "def add_endpoint_to_sipserver(self, endpoint: str, password: str) -> None:", "def endpoint_schema(endpoint, extra_definitions={}):\n # load common schema template and update metadata\n schema = common.load_json(\"./templates/provider/endpoint.json\")\n schema[\"$id\"] = schema[\"$id\"].replace(\"endpoint.json\", f\"{endpoint}.json\")\n schema[\"title\"] = schema[\"title\"].replace(\"endpoint\", endpoint)\n\n # merge custom definitions with relevant common definitions\n definitions = common.load_definitions(\n \"string\",\n \"timestamp\",\n \"uuid\",\n \"version\",\n common.MDS_FEATURE_POINT\n )\n definitions.update(common.point_definition())\n definitions.update(extra_definitions)\n\n endpoint_schema = common.load_json(f\"./templates/provider/{endpoint}.json\")\n\n # for all but stops, merge standard vehicle info with items schema\n if endpoint not in [\"stops\"]:\n items = endpoint_schema[endpoint][\"items\"]\n vehicle = common.vehicle_definition()\n items[\"required\"] = vehicle[\"required\"] + items[\"required\"]\n items[\"properties\"] = { **vehicle[\"properties\"], **items[\"properties\"] }\n definitions.update(common.load_definitions(\"propulsion_type\", \"propulsion_types\", \"vehicle_type\"))\n\n # merge endpoint schema into the endpoint template\n data_schema = schema[\"properties\"][\"data\"]\n data_schema[\"required\"] = [endpoint]\n data_schema[\"properties\"] = endpoint_schema\n\n # insert definitions\n schema[\"definitions\"].update(definitions)\n\n return schema" ]
[ "0.73057497", "0.68444353", "0.633986", "0.6060341", "0.6045354", "0.59219885", "0.5871254", "0.5845486", "0.58109283", "0.5770383", "0.5706339", "0.5677897", "0.5664306", "0.5596975", "0.55959713", "0.5579243", "0.5576328", "0.554373", "0.553638", "0.55146235", "0.5514128", "0.5504019", "0.54895186", "0.5462709", "0.5450519", "0.5419087", "0.5416733", "0.5412604", "0.54013664", "0.53698355", "0.53534096", "0.534065", "0.5336596", "0.5310314", "0.53076833", "0.5305451", "0.5296533", "0.52907825", "0.5289336", "0.528179", "0.5279655", "0.5277406", "0.52705336", "0.52669823", "0.52587914", "0.5251832", "0.5230865", "0.52186507", "0.51971406", "0.5194352", "0.51907474", "0.51763093", "0.51714945", "0.516362", "0.5160597", "0.5158909", "0.51578206", "0.5154996", "0.5151277", "0.5122929", "0.51172173", "0.5116813", "0.5106787", "0.5101805", "0.5093639", "0.50904596", "0.50885856", "0.5086472", "0.5085663", "0.50850624", "0.5083623", "0.50797266", "0.5074042", "0.50677764", "0.50627154", "0.5059267", "0.50580627", "0.5056067", "0.5055966", "0.50454205", "0.5045044", "0.5044831", "0.5044733", "0.5044646", "0.50438017", "0.50438017", "0.50438017", "0.5043559", "0.5043458", "0.5043205", "0.5042026", "0.5041795", "0.5036587", "0.50337106", "0.50336903", "0.5033259", "0.50270534", "0.50244296", "0.50243515", "0.50109065" ]
0.6030018
5
Ensure that the cache has a NetModel and subnets for PORT.
def _ensure_net_and_subnets(self, port): # Gather the subnet IDs that we need for this port, and get the # NetModel if we already have it in the cache. needed_subnet_ids = set() net = None for fixed_ip in port['fixed_ips']: subnet_id = fixed_ip.get('subnet_id') if subnet_id: needed_subnet_ids.add(subnet_id) if not net: net = self.agent.cache.get_network_by_subnet_id(subnet_id) LOG.debug("Needed subnet IDs: %s", needed_subnet_ids) LOG.debug("Existing network model by subnet ID: %s", net) # For each subnet that we need, get its data from SubnetWatcher and # hold for adding into the cache. new_subnets = {} for subnet_id in needed_subnet_ids: # Get data for this subnet from the SubnetWatchers. subnet = (self.subnet_watcher.get_subnet(subnet_id) or self.v1_subnet_watcher.get_subnet(subnet_id)) if subnet is None: LOG.warning("No data for subnet %s", subnet_id) raise SubnetIDNotFound() new_subnets[subnet_id] = subnet if not net: # We don't already have a NetModel, so look for a cached NetModel # with the right network ID. (In this case we must have new # subnets to add into the cache, and the cached NetModel must have # subnets other than the ones that we're adding in this iteration; # otherwise we would have already found it when searching by # subnet_id above.) assert new_subnets network_id = list(new_subnets.values())[0]['network_id'] net = self.agent.cache.get_network_by_id(network_id) LOG.debug("Existing network model by network ID: %s", net) if not net: # We still have no NetModel for the relevant network ID, so create # a new one. In this case we _must_ be adding new subnets. assert new_subnets net = empty_network(network_id) LOG.debug("New network %s", net) elif new_subnets: # We have a NetModel that was already in the cache and are about to # modify it. Cache replacement only works if the new NetModel is a # distinct object from the existing one, so make a copy here. net = copy_network(net) LOG.debug("Copied network %s", net) if new_subnets: # Add the new subnets into the NetModel. assert net net.subnets = [s for s in net.subnets if s.id not in new_subnets] net.subnets += list(new_subnets.values()) # Add (or update) the NetModel in the cache. LOG.debug("Net: %s", net) _fix_network_cache_port_lookup(self.agent, net.id) self.agent.cache.put(net) return net.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_network_cache_port_lookup(agent, network_id):\n\n # If there is an existing NetModel for this network ID, ensure that all\n # its ports are in the port_lookup dict.\n if network_id in agent.cache.cache:\n for port in agent.cache.cache[network_id].ports:\n agent.cache.port_lookup[port.id] = network_id", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def test_port_update_with_fixed_ips_ok_if_no_binding_host(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Create a port with no IP address (since there is no subnet)\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network,\n segment_id=segment['segment']['id']) as subnet:\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # The IP is allocated since there is no binding host info any\n # subnet can be used for allocation.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)", "def check_port_validity(self):\n # Check if ports provided are already present in VPLEX\n if self.ports:\n LOG.info(\"Validating the ports\")\n for port in self.ports:\n obj = None\n try:\n obj = self.storageview.get_port(self.cl_name, port)\n except (utils.ApiException, ValueError, TypeError) as err:\n msg = \"Could not get port {0} details in {1} due to\"\n err_msg = msg.format(port, self.cl_name) + \" error {0}\"\n e_msg = utils.display_error(err_msg, err)\n LOG.error(\"%s\\n%s\\n\", e_msg, err)\n self.module.fail_json(msg=e_msg)\n\n if obj is None:\n msg = (\"Could not get port {0} details in {1}\"\n .format(port, self.cl_name))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass", "def _validatePortConfig(self):\n if config.BindHTTPPorts:\n if config.HTTPPort == 0:\n raise UsageError(\n \"HTTPPort required if BindHTTPPorts is not empty\"\n )\n elif config.HTTPPort != 0:\n config.BindHTTPPorts = [config.HTTPPort]\n if config.BindSSLPorts:\n if config.SSLPort == 0:\n raise UsageError(\n \"SSLPort required if BindSSLPorts is not empty\"\n )\n elif config.SSLPort != 0:\n config.BindSSLPorts = [config.SSLPort]", "def test_insufficient_space(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.64/25\"],\n requests=[25],\n expected=None,\n )", "def test_port_update_allocate_no_segments(self):\n with self.network() as network:\n pass\n\n # Create a bound port with no IP address (since there is not subnet)\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network) as subnet:\n # Try requesting an IP (but the only 
subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # Since port is bound and there is a mapping to segment, it succeeds.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def test_networking_project_network_update(self):\n pass", "def _check_port_available(hostname, port):\n for config_file in config_files:\n network_config = networkConfig(config_file)\n for name, host in network_config.hostDict.items():\n if port == host.port:\n return False\n\n return _check_socket_is_free(hostname, port)", "def _update_cachesize(self):\n san_res = self.san_interface\n _load = not self.san_interface.runmode\n if self.cachesize > 0:\n pvds = self._get_pvds()\n if len(pvds) < 1:\n # not suppposed to get here\n return (1,'Error no valid provider/path was found when setting cache')\n logger.eventlog.debug('in update cache for %s , cachedrdev: %s' % (str(self),str(self.cachedrdev)))\n # check if this is a single path case or replicated cache (multipath)\n if len(pvds) == 1 and len(self.cachepvds) < 2 and not self.cachedrdev:\n (e,pt) = ext2path(self,san_res.providers[pvds[0]])\n if e:\n return (e,'Error updating cache, '+pt)\n (e,r) = san_res.providers[pvds[0]].add_cache(pt,self.cachesize)\n if e:\n return (e,r)\n else:\n #\n # more than 1 path\n #\n\n # one path with cacheon and is running return ok\n for pt in self.paths():\n if pt.cacheon:\n if pt.state == ObjState.running:\n return (0,'Cache is ok')\n logger.eventlog.warning('cache for %s is ON but path is not running !' % str(self))\n\n # no running path with cache on\n self.cachepresent=False\n\n #\n cvolname=obj2volstr(self)\n cvolname=cvolname.replace(':',CACHESEP) # replace ':' with a legal volume char\n drname=CACHEPFX+cvolname\n cache_loadonly=False\n #\n\n # self.cachedrdev ?\n if self.san_interface.raids.has_key(drname):\n # found drbd dev for cache (fail-over or load??):\n # del tgt (old), remove cache (old), promote (new),\n # cache load (new), add targets (new)\n logger.eventlog.warning('Cache for %s is not on, while DR device is detected during update' % str(self))\n drdev = self.san_interface.raids[drname]\n if not drdev:\n logger.eventlog.error('cant update cache dr for %s , drdev not found' % (str(self)))\n return (1,'cant update Cache dr')\n if not drdev.provider:\n drdev.promote_one(checkluns=False)\n if not drdev.provider:\n logger.eventlog.error('cant update cache dr for %s , drdev provider not detected' % (str(self)))\n return (1,'cant update Cache dr')\n # debug\n #logger.eventlog.debug(\"cachepresent: %s\" % str(self.cachepresent))\n #for p in self.paths():\n # if p.provider==drdev.provider:\n # logger.eventlog.debug(\"p: %s\" % str(p))\n # logger.eventlog.debug(\"state: %s\" % str(p.state))\n # logger.eventlog.debug(\"cacheon: %s\" % str(p.cacheon))\n # end debug\n e,prim = ext2path(self,drdev.provider)\n if e:\n logger.eventlog.error('valid path not found for %s on %s in update' % (str(self),str(drdev.provider)))\n return (1,'valid path not found')\n #logger.eventlog.debug(\"prim: %s\" % str(prim))\n cache_loadonly=True\n else:\n if len(self.cachepvds)==1 or len(self.cachepvds)>2:\n # has only 1 cache LV (load, absent?) ?? 
or >2 (old ones redetected)\n logger.eventlog.error('Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n return (1,'Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n\n if len(self.cachepvds) == 2:\n # if has 2 cache LVs, no DR (load): create drbd, load cache\n (e1,path1) = ext2path(self,san_res.providers[self.cachepvds[0]])\n (e2,path2) = ext2path(self,san_res.providers[self.cachepvds[1]])\n print 'cache paths: ',str(path1),str(path2)\n if e1 or e2:\n logger.eventlog.error('valid paths not found for %s in update' % str(self))\n return (1,'valid path not found')\n vol1 = san_res.providers[self.cachepvds[0]].cachevg.volumes[cvolname]\n vol2 = san_res.providers[self.cachepvds[1]].cachevg.volumes[cvolname]\n cache_loadonly=True\n\n else:\n # else (new) : select 2 paths, create 2 LVs,\n # create & promote DRBD, Create cache on master\n\n e,path1,path2 = self._get_2_pvds_paths()\n if e:\n logger.eventlog.error(path1)\n return (1,path1)\n\n # create 2 cache LVs\n (e,vol1) = path1.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n tmp='cant create Cache LV1 for %s on %s in update: %s' % (self.name,path1.provider.name,vol1)\n logger.eventlog.error(tmp)\n return (1,tmp)\n (e,vol2) = path2.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n vol1.provider.cachevg.volumes.delete(vol1,force=True)\n tmp='cant create Cache LV2 for %s on %s in update: %s' % (self.name,path2.provider.name,vol2)\n logger.eventlog.error(tmp)\n return (1,tmp)\n #\n print 'cache vols: ',str(vol1),str(vol2)\n\n # create new drbd device\n drdev = san_res.raids.add(drname,SanRaidGrp(drname,None))\n if not drdev :\n logger.eventlog.error('failed to create/updare dr device for cache in %s' % str(self))\n return (1,'failed to create/updare dr device')\n drdev.raid=RaidLevel.dr\n drdev.iscachedr=True\n drdev.devices=[vol1,vol2]\n (e,txt)=drdev.update()\n print 'create dr device:',e,txt\n if e:\n logger.eventlog.error('cant create Cache dr for %s , %s' % (str(self),txt))\n return (1,'cant create Cache dr')\n if drdev.provider is path1.provider:\n prim=path1\n else:\n prim=path2\n\n logger.eventlog.debug('create cache on %s , loadonly: %s , drname: %s' % \\\n (drdev.provider.name, cache_loadonly, drname))\n #loadonly=(self.cachepvds<>[]) # check if we already had cache LVs\n\n # create CacheDev\n # on loadonly we also forcing devname update\n (e,r) = drdev.provider.create_cache(prim,drdev,cvolname,loadonly=cache_loadonly,force=cache_loadonly)\n logger.eventlog.debug('create cache response: %s %s' % (e,r))\n if e:\n return (e, 'error creating cache on %s: %s' % (drdev.provider.name,r))\n else:\n (e,r) = self._remove_cache()\n if e:\n return (e,'error removing cache on %s: %s' % (str(self),r))\n return (0,'')", "def test_port_update_deferred_allocation_no_ipam(self):\n with self.network() as network:\n with self.subnet(network=network):\n pass\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n fixed_ips=[],\n is_admin=True)\n port = self.deserialize(self.fmt, response)\n ips = port['port']['fixed_ips']\n self.assertEqual(0, len(ips))\n\n # Create the subnet and try to update the port to get an IP\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, 
response)\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def oci_load_balancer_attack_surface_open_tcp_port_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for loadbalancer in get_oci_load_balancers(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(loadbalancer,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n compartmentId = loadbalancer[\"compartment_id\"]\n loadBalancerId = loadbalancer[\"id\"]\n loadBalancerName = loadbalancer[\"display_name\"]\n lbLifecycleState = loadbalancer[\"lifecycle_state\"]\n createdAt = str(loadbalancer[\"time_created\"])\n\n # Create a list comprehension to check if any Public IPs are assigned to the LB - an empty list\n # means we should skip. In the event there are more than one Public IPs we will just take the first\n # as they all go to the same place after all...\n publicIps = [ip[\"ip_address\"] for ip in loadbalancer[\"ip_addresses\"] if ip[\"is_public\"] is True]\n if not publicIps:\n continue\n else:\n pubIp = publicIps[0]\n # Submit details to the scanner function\n scanner = scan_host(pubIp, loadBalancerName, \"OCI Load Balancer\")\n # NoneType returned on KeyError due to Nmap errors\n if scanner == None:\n continue\n else:\n # Loop the results of the scan - starting with Open Ports which require a combination of\n # a Public Instance, an open SG rule, and a running service/server on the host itself\n # use enumerate and a fixed offset to product the Check Title ID number\n for index, p in enumerate(scanner[pubIp][\"ports\"]):\n # Parse out the Protocol, Port, Service, and State/State Reason from NMAP Results\n checkIdNumber = str(int(index + 1))\n portNumber = int(p[\"portid\"])\n if portNumber == 8089:\n serviceName = 'SPLUNKD'\n elif portNumber == 10250:\n serviceName = 'KUBERNETES-API'\n elif portNumber == 5672:\n serviceName = 'RABBITMQ'\n elif portNumber == 4040:\n serviceName = 'SPARK-WEBUI'\n else:\n try:\n serviceName = str(p[\"service\"][\"name\"]).upper()\n except KeyError:\n serviceName = \"Unknown\"\n serviceStateReason = str(p[\"reason\"])\n serviceState = str(p[\"state\"])\n # This is a failing check\n if serviceState == \"open\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.LoadBalancer.{checkIdNumber}] Load Balancers should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Load Balancer {loadBalancerName} in Compartment {compartmentId} in {ociRegionName} is publicly reachable on port {portNumber} which corresponds to the {serviceName} service. 
When Services are successfully fingerprinted by the ElectricEye Attack Surface Management Auditor it means the load balancer is public (mapped 'ip_address` and 'is_public' is True within the list of IP Addresses), has an open Security List or Network Security Group, and a running service on the host which adversaries can also see. Refer to the remediation insturctions for an example of a way to secure OCI Load Balancers.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Load Balancers instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Network Security Groups section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/networksecuritygroups.htm#support\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"Oracle Cloud Load Balancer\",\n \"AssetComponent\": \"Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudLoadBalancerLoadBalancer\",\n \"Id\": loadBalancerId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": loadBalancerName,\n \"Id\": loadBalancerId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.LoadBalancer.{checkIdNumber}] Load Balancers should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Load Balancer {loadBalancerName} in Compartment {compartmentId} in {ociRegionName} is not publicly reachable on port {portNumber} which corresponds to the {serviceName} service due to {serviceStateReason}. OCI Load Balancers and their respective Security Lists and/or Network Security Groups should still be reviewed for minimum necessary access.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Load Balancers instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Network Security Groups section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/networksecuritygroups.htm#support\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"Oracle Cloud Load Balancer\",\n \"AssetComponent\": \"Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudLoadBalancerLoadBalancer\",\n \"Id\": loadBalancerId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": loadBalancerName,\n \"Id\": loadBalancerId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_port_update_deferred_allocation_no_segments_empty_alloc(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and update the port but specify no IPs\n with self.subnet(network=network):\n data = {'port': {\n portbindings.HOST_ID: 'fakehost',\n 'fixed_ips': []}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n # Since I specifically requested no IP addresses, I shouldn't get one.\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def test_get_networks(self):\n pass", "def test_patch_host_subnet(self):\n pass", "def test_read_host_subnet(self):\n pass", "def test_port_create_with_segment_subnets(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'])\n res = self.deserialize(self.fmt, response)\n # Don't allocate IPs in this case because we didn't give binding info\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def check_routable(self, from_subnets: List[Subnet], to_subnets: List[Subnet]) -> dict:\n # check what ports from subnets allow to any to subnets\n ports = {} # port: (to_subnet, from_subnet)\n for from_subnet in from_subnets:\n for to_subnet in to_subnets:\n # check if traffic from subnet is stopped by to subnet nacl\n if from_subnet.name in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n if 'all' in to_subnet.nacls[from_subnet.name]['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n elif 'None' in to_subnet.nacls[from_subnet.name]['in']:\n # If you don't have access to Enteprise network, you can't act on Operational Host\n # TODO refactor this hacky fix\n permission = self.check_for_enterprise_sessions()\n ports = {'all': (from_subnet.cidr, to_subnet.cidr)} if permission else {}\n return ports\n \n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls[from_subnet.name]['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n elif 'all' in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n # if all ports accepted out then use inbound rules only\n if 'all' in to_subnet.nacls['all']['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls['all']['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in 
ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n else:\n # this means that traffic cannot reach move between these 2 subnets\n continue\n\n return ports", "def test_networking_project_network_get(self):\n pass", "def ensure_mapping(self):\n if not self.host_mapping:\n self.get_interfaces()", "def check_model(self):\n layers_map = self.core.query_network(network=self.network,\n device_name=self.device)\n\n unsupported_layers = [\n l for l in self.network.layers.keys() if l not in layers_map\n ]\n\n if (unsupported_layers != []):\n sys.exit(\"Those mention layers in your model are not supported by OpenVino Inference Engine:\" \\\n \" \\n\\t\" + \"\\n\\t\".join(unsupported_layers))", "def test_network_too_small(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/25\",\n requests=[24],\n expected=None,\n )", "def test_network_full(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=None,\n )", "def test_port_without_ip_not_deferred_no_binding(self):\n with self.network() as network:\n pass\n\n # Create a unbound port with no IP address (since there is no subnet)\n response = self._create_port_and_show(network)\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_IMMEDIATE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def __init_cache__(self) -> None:\n try:\n self.cache = caches[CACHE_NAME]\n logging.info(\"GeoIP2 - successfully initialised cache\")\n except InvalidCacheBackendError as ex:\n raise MiddlewareNotUsed(f\"GeoIP2 - cache configuration error: {ex}\") from ex", "def check(self) -> None:\n # validate pruning config\n super().check()\n\n assert self.config[\"TRAIN_CONFIG\"][\"MODEL_NAME\"] in {\n \"densenet\",\n \"quant_densenet\",\n \"simplenet\",\n \"quant_simplenet\",\n }, f\"{self.config['TRAIN_CONFIG']['MODEL_NAME']} is not supported\"", "def _validate_ens_net_portsecurity(self, net_data):\n pass", "def test_port_create_with_no_fixed_ips_no_ipam(self):\n with self.network() as network:\n with self.subnet(network=network):\n pass\n\n # Create an unbound port requesting no IP addresses\n response = self._create_port_and_show(network, fixed_ips=[])\n\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_NONE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def test_update_network_external_ports(self):\n policies_ports = [(self.qos_policies[0].id, {self.ports[0].id})]\n self.ports[2].qos_policy_id = self.qos_policies[0].id\n self.ports[2].update()\n port_obj.PortBinding(self.ctx, port_id=self.ports[1].id, host='host',\n profile={}, vif_type='',\n vnic_type=portbindings_api.VNIC_DIRECT).create()\n with mock.patch.object(self.qos_driver._driver._nb_idl,\n 'get_lswitch_port') as mock_lsp:\n mock_lsp.side_effect = [\n mock.Mock(type=ovn_const.LSP_TYPE_LOCALNET),\n mock.Mock(type=ovn_const.LSP_TYPE_EXTERNAL)]\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network, reset=True)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(\n mock.ANY, self.ports[0].id, 
self.ports[0].network_id,\n qos_policy_id, None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()", "def test_aws_service_api_network_subnets_get(self):\n pass", "def test_port_without_ip_not_deferred(self):\n with self.network() as network:\n pass\n\n # Create a bound port with no IP address (since there is no subnet)\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n port = self.deserialize(self.fmt, response)\n request = self.new_show_request('ports', port['port']['id'])\n response = self.deserialize(self.fmt, request.get_response(self.api))\n\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_IMMEDIATE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def test_port_update_deferred_allocation_no_segments(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network):\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def test_get_unavailable_ports(self):\n self.assertEqual(set(self.cisco_sp_kadiweu.get_unavailable_ports()),\n set([self.port_sp_kadiweu_2, self.port_sp_kadiweu_1,\n self.port_sp_kadiweu_3, self.port_sp_kadiweu_4]))", "def test_02_verify_ipv6_network_redundant(self):\n\n self.createIpv6NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def check(self):\n badCachePath = list()\n badCacheNode = list()\n cacheIn = getCacheInfoFromMaya()\n cacheInScene = cacheIn.getCacheFromScene()\n # get the templates\n\n if not TYPE == 'MULTI':\n cachePublishTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_publish')\n mayaCachePublishTemplate = self.parent.app.get_template_by_name(\n 'maya_fx_cacheseq_shot_publish')\n else:\n cachePublishTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_publish')\n mayaCachePublishTemplate = self.parent.app.get_template_by_name(\n 'maya_fx_cacheseq_shot_publish')\n\n for cacheFrom, cacheVal in cacheInScene.iteritems():\n fileNode = cacheVal\n for nodes, nodeVal in cacheVal.iteritems():\n for cacheNumber, cacheVal in nodeVal.iteritems():\n filePath = cacheVal['path']\n\n if cachePublishTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n elif mayaCachePublishTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n else:\n badCachePath.append(pm.Path(filePath))\n badCacheNode.append(nodes)\n continue\n\n if not badCachePath:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = badCacheNode\n for node in badCachePath:\n self.addError(\"%s is not in the library\" % 
node)\n\n self.errorMessage = \"%s Cache not in library\" % (len(badCachePath))", "def _check_ip_port_split(self):\n if self._type == \"A\":\n formatted_value = self._value.split(':')\n self._ip = formatted_value[0]\n self._port = int(formatted_value[1])", "def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed, issue error\n self._display_semantic_error(\"network\")", "def test_port_create_with_binding_and_no_subnets(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n\n # No subnets, so no allocation. But, it shouldn't be an error.\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def test_get_network(self):\n pass", "def servicenow_sspm_performance_monitoring_ip_restriction_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"glide.custom.ip.authenticate.allow\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. 
Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue == (\"\" or \"NOT_CONFIGURED\"):\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.15] Instance should configure an IP restriction list to protect performance monitoring from unauthorized access\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not configure an IP restriction list to protect performance monitoring from unauthorized access. Use the 'glide.custom.ip.authenticate.allow' property to enable only a specified comma-separated list or a range of IP addresses access to stats.do, threads.do, and replication.do pages. If this property is not enabled, it is possible to access those types of pages from any IP address. Unnecessary exposure to the target instance on the internet should be restricted with the help of IP access controls functionality. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Performance monitoring IP restriction (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/performance-monitoring-ip-restriction.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.15] Instance should configure an IP restriction list to protect performance monitoring from unauthorized access\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} configures an IP restriction list to protect performance monitoring from unauthorized access.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Performance monitoring IP restriction (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/performance-monitoring-ip-restriction.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System 
Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def check(self):\n badCachePath = list()\n badCacheNode = list()\n cacheIn = getCacheInfoFromMaya()\n cacheInScene = cacheIn.getCacheFromScene()\n # get the templates\n\n if not TYPE == 'MULTI':\n cacheWorkTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_work')\n cachePublishTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_publish')\n mayaCachePublishTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_publish_cache_multi')\n mayaCacheWorkTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_work_cache_multi')\n else:\n cacheWorkTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_work')\n cachePublishTemplate = self.parent.app.get_template_by_name(\n 'fx_cacheseq_shot_publish')\n mayaCachePublishTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_publish_cache')\n mayaCacheWorkTemplate = self.parent.app.get_template_by_name(\n 'maya_asset_work_cache')\n\n for cacheFrom, cacheVal in cacheInScene.iteritems():\n\n fileNode = cacheVal\n for nodes, nodeVal in cacheVal.iteritems():\n for cacheNumber, cacheVal in nodeVal.iteritems():\n filePath = cacheVal['path']\n\n if cacheWorkTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n elif mayaCacheWorkTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n elif cachePublishTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n elif mayaCachePublishTemplate.validate(filePath, skip_keys=[\"SEQ\"]):\n continue\n\n else:\n badCachePath.append(pm.Path(filePath))\n badCacheNode.append(nodes)\n continue\n\n if not badCachePath:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = badCacheNode\n for node in badCachePath:\n self.addError(\"%s is not in the library\" % node)\n\n self.errorMessage = \"%s Cache not in library\" % (len(badCachePath))", "def check_init(self):\n if self.Nlayer > 1:\n raise Exception(\"Nlayer == 1 currently\")", "def setup_net(self):\n pass", "def test_nets(self):\n\n good_nets = self.good.nets[:]\n\n self.assertEqual(len(good_nets), 5)\n\n for net in self.actual.nets:\n for goodnet in good_nets:\n if set(net.points) == set(goodnet.points):\n good_nets.remove(goodnet)\n break\n else:\n raise Exception('bad net', net)\n\n self.assertEqual(good_nets, [])", "def is_sim_layer_cached(self):\n layers = 
[self._wrap_ns(self.setup_config[\"sim_layer\"])]\n input_nodes = self.get_layer_nodes_info(layers)\n return self.is_node_cached(input_nodes.values()[0])", "def test_port_update_deferred_allocation_no_ips(self):\n network, segments, subnets = self._create_test_segments_with_subnets(2)\n\n self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost2'),\n (segments[1]['segment']['id'], 'fakehost')])\n\n port = self._create_deferred_ip_port(network)\n\n # Update the subnet on the second segment to be out of IPs\n subnet_data = {'subnet': {'allocation_pools': []}}\n subnet_req = self.new_update_request('subnets',\n subnet_data,\n subnets[1]['subnet']['id'])\n subnet_response = subnet_req.get_response(self.api)\n res = self.deserialize(self.fmt, subnet_response)\n\n # Try requesting an IP (but the subnet ran out of ips)\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n res = self.deserialize(self.fmt, response)\n\n # Since port is bound and there is a mapping to segment, it succeeds.\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)\n self.assertEqual(n_exc.IpAddressGenerationFailure.__name__,\n res['NeutronError']['type'])", "def test_update_network_no_policy_change(self):\n for qos_policy_id in (self.qos_policies[0].id, None):\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': qos_policy_id}\n port_ids, fip_ids, router_ids = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(set([]), port_ids)\n self.assertEqual(set([]), fip_ids)\n self.assertEqual(set([]), router_ids)\n self.mock_rules.assert_not_called()", "def test_port_update_is_host_aware(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n # Create a bound port with no IP address (since there is no subnet)\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n port = self.deserialize(self.fmt, response)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network,\n segment_id=segment['segment']['id']) as subnet:\n self._validate_l2_adjacency(network['network']['id'],\n is_adjacent=False)\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # Since port is bound and there is a mapping to segment, it succeeds.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def test_port_create_with_no_fixed_ips_no_ipam_on_routed_network(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n with self.subnet(network=network,\n segment_id=segment['segment']['id']):\n pass\n\n # Create an 
unbound port requesting no IP addresses\n response = self._create_port_and_show(network, fixed_ips=[])\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_NONE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def test_replace_host_subnet(self):\n pass", "def test_list_host_subnet(self):\n pass", "def validate_nic_down(self): \n\n pool = WorkerPool()\n\n try: \n for nic, hostname in self.nic_to_address_map:\n address = self.nic_to_address_map[(nic, hostname)]\n cmd = Ping('ping validation', address, ctxt=REMOTE, remoteHost='localhost')\n pool.addCommand(cmd)\n pool.join()\n\n for cmd in pool.getCompletedItems():\n results = cmd.get_results()\n if results.rc == 0:\n return False\n finally:\n pool.haltWork()\n pool.joinWorkers()\n pool.join()\n\n tinctest.logger.info(\"Successfully brought down nics ...\") \n return True", "def internet_facing_clb_https_listener_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())\n for lb in describe_clbs(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(lb,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n clbName = lb[\"LoadBalancerName\"]\n clbArn = f\"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}\"\n dnsName = lb[\"DNSName\"]\n lbSgs = lb[\"SecurityGroups\"]\n lbSubnets = lb[\"Subnets\"]\n lbAzs = lb[\"AvailabilityZones\"]\n lbVpc = lb[\"VPCId\"]\n clbScheme = lb[\"Scheme\"]\n if clbScheme == \"internet-facing\":\n for listeners in lb[\"ListenerDescriptions\"]:\n if listeners[\"Listener\"][\"Protocol\"] != \"HTTPS\" or \"SSL\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-secure-listener-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.1] Classic load balancers that are internet-facing should use secure listeners\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not use a secure listener (HTTPS or SSL). 
Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on classic load balancer HTTPS listeners refer to the Create a Classic Load Balancer with an HTTPS Listener section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-2\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-11\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n ],\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\",\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-secure-listener-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.1] Classic load balancers that are internet-facing should use secure listeners\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" uses a secure listener (HTTPS or SSL).\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on classic load balancer HTTPS listeners refer to the Create a Classic Load Balancer with an HTTPS Listener section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n 
\"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-2\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-11\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n ],\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\",\n }\n yield finding\n else:\n continue", "def before_update(self, introspection_data, node_info, **kwargs):\n inventory = utils.get_inventory(introspection_data)\n\n ironic_ports = node_info.ports()\n\n for iface in inventory['interfaces']:\n if iface['name'] not in introspection_data['all_interfaces']:\n continue\n\n mac_address = iface['mac_address']\n port = ironic_ports.get(mac_address)\n if not port:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, matching port not found in Ironic.\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n # Determine the physical network for this port.\n # Port not touched in here.\n physnet = self.get_physnet(port, iface['name'], introspection_data)\n if physnet is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no physical network mapping\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n patch = self._get_physnet_patch(physnet, port)\n if patch is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no update required\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n try:\n node_info.patch_port(port, [patch])\n except exceptions.BadRequestException as e:\n LOG.warning(\"Failed to update port %(uuid)s: %(error)s\",\n {'uuid': port.id, 'error': e},\n node_info=node_info)", "def test_port_update_deferred_allocation_no_segments_manual_alloc(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network) as subnet:\n data = {'port': {\n portbindings.HOST_ID: 'fakehost',\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])\n\n # Do a show to be sure that only one IP is recorded\n port_req = self.new_show_request('ports', port_id)\n response = port_req.get_response(self.api)\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def clb_connection_draining_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n elb = session.client(\"elb\")\n # ISO Time\n iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())\n for lb in describe_clbs(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(lb,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n clbName = lb[\"LoadBalancerName\"]\n clbArn = f\"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}\"\n dnsName = lb[\"DNSName\"]\n lbSgs = lb[\"SecurityGroups\"]\n lbSubnets = 
lb[\"Subnets\"]\n lbAzs = lb[\"AvailabilityZones\"]\n lbVpc = lb[\"VPCId\"]\n clbScheme = lb[\"Scheme\"]\n # Get Attrs\n response = elb.describe_load_balancer_attributes(LoadBalancerName=clbName)\n connectionDrainCheck = str(\n response[\"LoadBalancerAttributes\"][\"ConnectionDraining\"][\"Enabled\"]\n )\n if connectionDrainCheck == \"False\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-connection-draining-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.4] Classic load balancers should have connection draining configured\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have connection draining configured. Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on connection draining refer to the Configure Connection Draining for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\",\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-connection-draining-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.4] Classic load balancers should have connection draining configured\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have connection draining configured.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on connection draining refer to the Configure Connection Draining for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\",\n }\n yield finding", "def _validate_update_network(self, context, net_id, original_net,\n net_data):\n extern_net = self._network_is_external(context, net_id)\n with_qos = validators.is_attr_set(\n net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not allow QoS on external networks\n if with_qos:\n if extern_net:\n raise nsx_exc.QoSOnExternalNet()\n self._validate_qos_policy_id(\n context, net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not support changing external/non-external networks\n if (extnet_apidef.EXTERNAL in net_data and\n net_data[extnet_apidef.EXTERNAL] != extern_net):\n err_msg = _(\"Cannot change the router:external flag of a network\")\n raise n_exc.InvalidInput(error_message=err_msg)\n\n is_ens_net = self._is_ens_tz_net(context, net_id)\n if is_ens_net:\n self._assert_on_ens_with_qos(net_data)", "def update_port_ip_address(self):\n leases = None\n req = dict(ip='0.0.0.0')\n instances = self.get_vms_for_this_req(**req)\n if instances is None:\n return\n\n for vm in instances:\n if not leases:\n # For the first time finding the leases file.\n leases = self._get_ip_leases()\n if not leases:\n # File does not exist.\n return\n\n for line in leases:\n if line.startswith('lease') and line.endswith('{\\n'):\n ip_addr = line.split()[1]\n if 'hardware ethernet' in line:\n if vm.mac == line.replace(';', '').split()[2]:\n LOG.info(_LI('Find IP address %(ip)s for %(mac)s'),\n {'ip': ip_addr, 'mac': vm.mac})\n try:\n rule_info = dict(ip=ip_addr, mac=vm.mac,\n port=vm.port_id,\n status='up')\n self.neutron_event.update_ip_rule(str(vm.host),\n str(rule_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to update\"\n \"rules.\"))\n else:\n params = dict(columns=dict(ip=ip_addr))\n self.update_vm_db(vm.port_id, **params)\n\n # Send update to the agent.\n vm_info = dict(status=vm.status, vm_mac=vm.mac,\n segmentation_id=vm.segmentation_id,\n host=vm.host, port_uuid=vm.port_id,\n net_uuid=vm.network_id,\n oui=dict(ip_addr=ip_addr,\n vm_name=vm.name,\n vm_uuid=vm.instance_id,\n gw_mac=vm.gw_mac,\n fwd_mod=vm.fwd_mod,\n oui_id='cisco'))\n try:\n self.neutron_event.send_vm_info(vm.host,\n str(vm_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE('Failed to send VM info to '\n 'agent.'))", "def test_01_verify_ipv6_network(self):\n\n self.createIpv6NetworkOffering()\n self.createIpv6NetworkOfferingForUpdate()\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()", "def _validate_port_can_commit(self, res_port, req_port,\n session=None):\n switchport_ids = [p[\"id\"] for p in res_port[\"switch:ports\"]]\n\n if not switchport_ids:\n msg = (\"Cannot attach, no switchports found\")\n raise exc.InvalidInput(error_message=msg)\n\n bound_port_ids = []\n if switchport_ids:\n # Fetch all existing networks we are attached to.\n portbindings = 
db.filter_switchport_bindings_by_switch_port_ids(\n switchport_ids, session=session)\n portbindings = list(portbindings)\n bound_port_ids = set([pb.port_id for pb in portbindings])\n\n # We can't attach to a non-trunked network if the port is already\n # attached to another network.\n if bound_port_ids and (res_port[\"trunked\"] is False):\n msg = (\"Cannot attach non-trunked network, port \"\n \"already bound to network(s) %s\" % (bound_port_ids))\n raise exc.InvalidInput(error_message=msg)\n\n for bound_port_id in bound_port_ids:\n # We can't attach a trunked network if we are already attached\n # to a non-trunked network.\n port_ext = db.get_port_ext(bound_port_id, session=session)\n if not port_ext.trunked:\n msg = (\"Already attached via non-trunked \"\n \"port %s\" % (bound_port_id))\n raise exc.InvalidInput(error_message=msg)", "def _validate_instance_internal_ips(internal_ips, settings):\n if not internal_ips:\n raise serializers.ValidationError(\n {'internal_ips_set': _('Instance should be connected to at least one network.')})\n subnets = [internal_ip.subnet for internal_ip in internal_ips]\n for subnet in subnets:\n if subnet.settings != settings:\n message = _('Subnet %s does not belong to the same service settings as service project link.') % subnet\n raise serializers.ValidationError({'internal_ips_set': message})\n duplicates = [subnet for subnet, count in collections.Counter(subnets).items() if count > 1]\n if duplicates:\n raise serializers.ValidationError(_('It is impossible to connect to subnet %s twice.') % duplicates[0])", "def test_port_create_fixed_ips_with_segment_subnets_no_binding_info(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n fixed_ips=[\n {'subnet_id': subnet['subnet']['id']}\n ])\n res = self.deserialize(self.fmt, response)\n # We gave fixed_ips, allocate IPs in this case despite no binding info\n self._validate_immediate_ip_allocation(res['port']['id'])", "def check_ip_fwd(duthosts, all_cfg_facts, nbrhosts, tbinfo):\n for porttype in [\"ethernet\", \"portchannel\"]:\n for version in [4, 6]:\n\n ports = pick_ports(duthosts, all_cfg_facts, nbrhosts, tbinfo, port_type_a=porttype, version=version)\n\n for ttl, size in [(2, 64), (1, 1450)]:\n # local interfaces\n check_packet(sonic_ping, ports, 'portB', 'portA', size=size, ttl=ttl, ttl_change=0)\n\n # local neighbors\n check_packet(sonic_ping, ports, 'portA', 'portA',\n dst_ip_fld='nbr_ip', size=size, ttl=ttl, ttl_change=0)\n\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n\n check_packet(eos_ping, ports, 'portD', 'portA', dst_ip_fld='my_lb4096_ip', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n\n # loopbacks\n check_packet(sonic_ping, ports, 'portA', 'portA', dst_ip_fld='nbr_lb', size=size, ttl=ttl, ttl_change=0)\n\n # inband\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='inband', size=size, ttl=ttl, ttl_change=0)\n\n # DUT loopback\n # these don't decrement ttl\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='my_ip', size=size,\n ttl=ttl, ttl_change=0)\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='nbr_ip', size=size,\n ttl=ttl, ttl_change=0)\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='nbr_lb', size=size,\n ttl=ttl, ttl_change=0)\n\n vm_host_to_A = 
nbrhosts[ports['portA']['nbr_vm']]['host']\n check_packet(eos_ping, ports, 'portA', 'portA', dst_ip_fld='my_lb4096_ip', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl, ttl_change=0)\n\n # end to end\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n check_packet(eos_ping, ports, 'portB', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n check_packet(eos_ping, ports, 'portC', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n check_packet(eos_ping, ports, 'portD', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)", "def check_port_connections(self):\n all_ports = crest.get_all_ports(self.model)\n influences_to_target = {p: [] for p in all_ports}\n updates_to_target = {p: [] for p in all_ports}\n actions_to_target = {p: [] for p in all_ports}\n\n # fill data stores\n for inf in crest.get_all_influences(self.model):\n influences_to_target[inf.target].append(inf)\n\n for up in crest.get_all_updates(self.model):\n updates_to_target[up.target].append(up)\n\n for action in crest.get_all_actions(self.model):\n actions_to_target[action.target].append(action)\n\n for port in all_ports:\n assert not (len(influences_to_target[port]) > 0 and (\n len(updates_to_target[port]) > 0 or len(actions_to_target[port]) > 0)\n ), f\"There are [influences and (updates or actions)] writing to port {port._name} (entity: {port._parent._name})\"\n\n assert len(influences_to_target[port]) < 2, f\"There are two influences writing to {port._name}\"\n\n states = [update.state for update in updates_to_target[port]]\n assert len(states) == len(set(states)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple updates linked to the same state\"\n\n transitions = [action.transition for action in actions_to_target[port]]\n assert len(transitions) == len(set(transitions)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple actions linked to the same transition\"", "def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id", "def port_update_end(self, payload):\n port = DictModel(payload['port'])\n network = self.cache.get_network_by_id(port.network_id)\n if network:\n self.cache.put_port(port)\n self.call_driver('reload_allocations', network)", "def test_networking_project_network_service_get(self):\n pass", "def get_physnet(self, port, iface_name, introspection_data):", "def update_resources_for_this_host(cache, db):\n free_cpu, free_mem = get_resources()\n my_ip = cache[\"ip\"]\n\n logger.info(\"UPDATING\", extra = {\"cpu\": free_cpu, \"mem\": free_mem, \"ip\": my_ip})\n try:\n db.hset(my_ip, mapping={\"cpu\": free_cpu, \"mem\": free_mem})\n except Exception as e:\n logger.error(e)\n raise e", "def network_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e 
= {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n self.client = docker.from_env()\n try:\n networks = self.client.networks.list(**kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n continue\n\n if len(networks) == 0:\n Console.info(\"No network exist\" + host['Ip'])\n continue\n\n for networkm in networks:\n network = networkm.__dict__['attrs']\n network['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(network)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n r = Rest.delete('Network', filter)\n r = Rest.post('Network', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def servicenow_sspm_ip_allowlist_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"com.snc.ipauthenticator\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue != \"Active\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": 
\"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.18] Instance should restrict access to specific IP ranges\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not restrict public access to specific IP ranges. Use the com.snc.ipauthenticator property to restrict access to specific IP ranges. Unless public access is intended for the instance, administrators should limit access to their assigned IP net blocks. Before setting this property, you must activate the IP Range Based Authentication com.snc.ipauthenticator plugin. To learn more, see IP range based authentication and in the Steps to configure section (below). Unnecessary exposure to the target instance on the internet should be restricted with the help of IP access controls functionality. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Restrict access to specific IP ranges (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/restrict-access-to-specific-ip-ranges.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 
4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.18] Instance should restrict access to specific IP ranges\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does restrict public access to specific IP ranges.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Restrict access to specific IP ranges (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/restrict-access-to-specific-ip-ranges.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 
4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def oci_compute_attack_surface_open_tcp_port_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for instance in get_oci_compute_instances(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(instance,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n instanceId = instance[\"id\"]\n instanceName = instance[\"display_name\"]\n compartmentId = instance[\"compartment_id\"]\n imageId = instance[\"image_id\"]\n shape = instance[\"shape\"]\n lifecycleState = instance[\"lifecycle_state\"]\n # Get the VNIC info\n instanceVnic = get_compute_instance_vnic(ociTenancyId, ociUserId, ociRegionName, ociUserApiKeyFingerprint, compartmentId, instanceId)\n # Skip over instances that are not public\n pubIp = instanceVnic[\"public_ip\"]\n if instanceVnic[\"public_ip\"] is None:\n continue\n # Submit details to the scanner function\n scanner = scan_host(pubIp, instanceName, \"OCI Cloud Compute instance\")\n # NoneType returned on KeyError due to Nmap errors\n if scanner == None:\n continue\n else:\n # Loop the results of the scan - starting with Open Ports which require a combination of\n # a Public Instance, an open SG rule, and a running service/server on the host itself\n # use enumerate and a fixed offset to product the Check Title ID number\n for index, p in enumerate(scanner[pubIp][\"ports\"]):\n # Parse out the Protocol, Port, Service, and State/State Reason from NMAP Results\n checkIdNumber = str(int(index + 1))\n portNumber = int(p[\"portid\"])\n if portNumber == 8089:\n serviceName = 'SPLUNKD'\n elif portNumber == 10250:\n serviceName = 'KUBERNETES-API'\n elif portNumber == 5672:\n serviceName = 'RABBITMQ'\n elif portNumber == 4040:\n serviceName = 'SPARK-WEBUI'\n else:\n try:\n serviceName = str(p[\"service\"][\"name\"]).upper()\n except KeyError:\n serviceName = \"Unknown\"\n serviceStateReason = str(p[\"reason\"])\n serviceState = str(p[\"state\"])\n # This is a failing check\n if serviceState == \"open\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.ComputeInstance.{checkIdNumber}] Cloud Compute instances should not be publicly reachable on 
{serviceName}\",\n \"Description\": f\"Oracle Cloud Compute instance {instanceName} in Compartment {compartmentId} in {ociRegionName} is publicly reachable on port {portNumber} which corresponds to the {serviceName} service. When Services are successfully fingerprinted by the ElectricEye Attack Surface Management Auditor it means the instance is public (mapped 'public_ip` in the associated vNIC), has an open Security List or Network Security Group, and a running service on the host which adversaries can also see. Refer to the remediation insturctions for an example of a way to secure OCI Cloud Compute instances.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Cloud Compute instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Public IP Addresses section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm#Public_IP_Addresses\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"Oracle Cloud Compute\",\n \"AssetComponent\": \"Instance\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudComputeInstance\",\n \"Id\": instanceId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": instanceName,\n \"Id\": instanceId,\n \"ImageId\": imageId,\n \"Shape\": shape,\n \"LifecycleState\": lifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.ComputeInstance.{checkIdNumber}] Cloud Compute instances should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Cloud Compute instance {instanceName} in Compartment {compartmentId} in {ociRegionName} is not publicly reachable on port {portNumber} which corresponds to the {serviceName} service due to {serviceStateReason}. OCI Cloud Compute instances and their respective Security Lists and/or Network Security Groups should still be reviewed for minimum necessary access.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Cloud Compute instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Public IP Addresses section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm#Public_IP_Addresses\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"Oracle Cloud Compute\",\n \"AssetComponent\": \"Instance\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudComputeInstance\",\n \"Id\": instanceId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": instanceName,\n \"Id\": instanceId,\n \"ImageId\": imageId,\n \"Shape\": shape,\n \"LifecycleState\": lifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_port_update_deferred_allocation_no_host_mapping(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n\n port = self._create_deferred_ip_port(network)\n self._validate_deferred_ip_allocation(port['port']['id'])\n\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n res = self.deserialize(self.fmt, response)\n\n # Gets conflict because it can't map the host to a segment\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)\n self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,\n res['NeutronError']['type'])", "def initialize_network_los() -> bool:\n return True", "def servicenow_sspm_strict_ip_restriction_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"glide.ip.authenticate.strict\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. 
Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue != \"true\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.22] Instance should implement strict IP restriction for ServiceNow employee access to the instance\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not implement strict IP restriction for ServiceNow employee access to the instance. Use the 'glide.ip.authenticate.strict' property to enable a strict set of IP addresses, such as DC and secure VPN, to access this instance. If this property is not enabled, ServiceNow employees can access the customer's instance through all the IP ranges. Enabling the property restricts access to a secure set of IP ranges (Secure VPN, DC). Not setting this causes unecessary exposure of instance access to wider group of people. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Strict IP restriction (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/strict-ip-restriction.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.22] Instance should implement strict IP restriction for ServiceNow employee access to the instance\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does implement strict IP restriction for ServiceNow employee access to the instance.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Strict IP restriction (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/strict-ip-restriction.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n 
\"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def __len__ (self):\n return len(self.network)", "def test_create_host_subnet(self):\n pass", "def test_db_got_error_without_cache(self):\n mock_method_path = ('dbtobindzone.fetcher.host_data_fetcher'\n '.HostDataFetcher.is_fetch_success')\n with mock.patch(mock_method_path) as mock_method:\n mock_method.return_value = False\n self.host_updater.refresh_cache()\n self.assertEqual(self.host_updater.data, [])", "def test_parameter_net_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self._fail_network_list = True\n self.configuration.hgst_net = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self._fail_network_list = False", "def __init__(self, checker, port):\r\n self._checker = checker\r\n self._port = port\r\n\r\n self._network = Network()\r\n self._balancer = LoadBalancer()\r\n self._distributor = Distributor()\r\n\r\n self._users = {}\r\n self._pendingContainer = {}", "def test_aws_service_api_networks_get(self):\n pass", "def sagemaker_model_network_isolation_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n sagemaker = session.client(\"sagemaker\")\n # loop through sagemaker models\n response = sagemaker.list_models()\n mySageMakerModels = response[\"Models\"]\n for models in mySageMakerModels:\n modelName = str(models[\"ModelName\"])\n modelArn = str(models[\"ModelArn\"])\n response = sagemaker.describe_model(ModelName=modelName)\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(response,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n networkIsolationCheck = str(response[\"EnableNetworkIsolation\"])\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n if networkIsolationCheck == \"False\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": modelArn + \"/sagemaker-model-network-isolation-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": modelArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n 
\"Confidence\": 99,\n \"Title\": \"[SageMaker.5] SageMaker models should have network isolation enabled\",\n \"Description\": \"SageMaker model \"\n + modelName\n + \" does not have network isolation enabled. Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on SageMaker model network isolation and how to configure it refer to the Training and Inference Containers Run in Internet-Free Mode section of the Amazon SageMaker Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/sagemaker/latest/dg/mkt-algo-model-internet-free.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Machine Learning\",\n \"AssetService\": \"Amazon SageMaker\",\n \"AssetComponent\": \"Model\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsSagemakerModel\",\n \"Id\": modelArn,\n \"Partition\": \"aws\",\n \"Region\": awsRegion,\n \"Details\": {\"Other\": {\"ModelName\": modelName}},\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-10\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": modelArn + \"/sagemaker-model-network-isolation-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": modelArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SageMaker.5] SageMaker models should have network isolation enabled\",\n \"Description\": \"SageMaker model \" + modelName + \" has network isolation enabled.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on SageMaker model network isolation and how to configure it refer to the Training and Inference Containers Run in Internet-Free Mode section of the Amazon SageMaker Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/sagemaker/latest/dg/mkt-algo-model-internet-free.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Machine Learning\",\n \"AssetService\": \"Amazon SageMaker\",\n \"AssetComponent\": \"Model\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsSagemakerModel\",\n \"Id\": modelArn,\n \"Partition\": \"aws\",\n \"Region\": awsRegion,\n \"Details\": {\"Other\": {\"ModelName\": modelName}},\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-10\",\n \"NIST SP 800-53 Rev. 
4 SC-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def _validate_create_network(self, context, net_data):\n external = net_data.get(extnet_apidef.EXTERNAL)\n is_external_net = validators.is_attr_set(external) and external\n with_qos = validators.is_attr_set(\n net_data.get(qos_consts.QOS_POLICY_ID))\n\n if with_qos:\n self._validate_qos_policy_id(\n context, net_data.get(qos_consts.QOS_POLICY_ID))\n if is_external_net:\n raise nsx_exc.QoSOnExternalNet()", "def __fillCache(self):\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add them to cache\n for rawInfo in infoList:\n modelInfo = _NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))", "def fetch(self) -> None:\n self.__networks__.clear()\n networks = process_google_rr_ranges(self.__address_list_record__, self.loader_class)\n for network in networks:\n self.__networks__.append(network)\n self.updated = datetime.now()\n self.__networks__.sort(key=attrgetter('version', 'cidr'))", "def filter_update_port_attributes(cls, port, context):\n cls.add_security_groups(port, context)\n try_del(port, ['network_id', 'id', 'status', 'mac_address',\n 'tenant_id', 'fixed_ips'])", "def test_create_network_settings_not_in_config(*args):\n NEUTRON.create_subnet = mock.MagicMock(\n return_value={\"subnet\": SUBNETS}\n )\n sub = OSSubnet('12', CONFIG, CONN)\n subs = sub.get_or_create()\n assert subs.name is not None\n assert subs.cidr is not None", "def load_networks(self, which_epoch):\n for name in self.model_names:\n if isinstance(name, str):\n filename = '%s_net_%s.pth' % (which_epoch, name)\n path = os.path.join(self.save_dir, filename)\n net = getattr(self, 'net_' + name)\n try:\n state_dict = torch.load(path)\n state_dict = {name.replace('module.', '', 1) : param for name, param in state_dict.items()}\n # net.load_state_dict(torch.load(path))\n net.load_state_dict(state_dict)\n except:\n pretrained_dict = torch.load(path)\n model_dict = net.state_dict()\n try:\n pretrained_dict = {k:v for k,v in pretrained_dict.items() if k in model_dict}\n net.load_state_dict(pretrained_dict)\n print('Pretrained network %s has excessive layers; Only loading layers that are used' % name)\n except:\n print('Pretrained network %s has fewer layers; The following are not initialized:' % name)\n not_initialized = set()\n 
for k, v in pretrained_dict.items():\n if v.size() == model_dict[k].size():\n model_dict[k] = v\n\n for k, v in model_dict.items():\n if k not in pretrained_dict or v.size() != pretrained_dict[k].size():\n not_initialized.add(k.split('.')[0])\n print(sorted(not_initialized))\n net.load_state_dict(model_dict)\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n net.cuda()\n if not self.isTrain:\n net.eval()", "def _validate_router_tz(self, context, tier0_uuid, subnets):\n pass", "def _getusableport():\r\n port_found = False\r\n port_min = 63000\r\n port_max = 63150\r\n port_iter = port_min\r\n local_addr = getmyip()\r\n\r\n while not port_found:\r\n if port_iter > port_max:\r\n raise Exception(\"Network restriction error! Unable to find a free port!\")\r\n try:\r\n udp_test_socket = recvmess(local_addr, port_iter, _dummy_function)\r\n stopcomm(udp_test_socket)\r\n port_found = True\r\n except Exception, e:\r\n port_iter += 1\r\n\r\n return port_iter", "def test_networking_project_network_list(self):\n pass", "def validate(self):\n if self.swarm_size is None or self.swarm_size < 0:\n raise ConfigError(\"swarm_size must be >= 0\")", "def test_update_network(self):\n policies_ports = [\n (None, {self.ports[0].id}),\n (self.qos_policies[1].id, {self.ports[0].id})]\n\n self.ports[1].qos_policy_id = self.qos_policies[0].id\n self.ports[1].update()\n self.ports[2].qos_policy_id = self.qos_policies[1].id\n self.ports[2].update()\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(mock.ANY, self.ports[0].id,\n self.ports[0].network_id, qos_policy_id,\n None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()", "def gen_port_resources(self, server, ports):\n if (self.SuppressServerStatuses is False):\n print \"\\t* Adding all the port interface resources\"\n data = {}\n port_idx = \"0\"\n for idx, port in enumerate(ports):\n\n # get fixedips\n fixed_ip = port._info[\"fixed_ips\"]\n fixed_ip_address = fixed_ip[0][\"ip_address\"]\n\n # filter all_nets by subnet_id\n net_data = []\n fip = None\n for x in self.all_nets:\n for fip in fixed_ip:\n if x[0][\"id\"] in fip[\"subnet_id\"]:\n net_data.append(x)\n\n if len(net_data) > 0:\n net = net_data[0][1]\n subnet = net_data[0][2]\n\n networkID = [netw['id'] for netw in self.neutronclient.list_networks()['networks'] if netw['name'] == net][0]\n networkIsShared = self.neutronclient.show_network(networkID)['network']['shared']\n\n if networkIsShared is True:\n port_properties_ = {\n \"network_id\": networkID,\n \"fixed_ips\": [\n {\"subnet_id\": fip[\"subnet_id\"]}\n ]\n }\n else:\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": [\n {\"subnet_id\": {\"get_resource\": subnet}}\n ]\n }\n if self.staticips:\n fixed_ips = []\n for address in server.addresses:\n server_ip_address = server.addresses[address][0]['addr']\n if server_ip_address == fixed_ip_address:\n fixed_ips.append({\"ip_address\": server_ip_address})\n\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": fixed_ips\n }\n data = {\"type\": \"OS::Neutron::Port\",\"properties\": port_properties_}\n else:\n print \"!!Probable error grabbing port information for server %s!!\" % (server.name)\n 
data = {\"type\": \"OS::Neutron::Port\"}\n\n self.compute_data[\"resources\"][\"%s_port%s\" % (server.name, port_idx)] = data\n if len(ports) >= 1:\n port_idx = str(1 + idx)", "def test_init_skips_touch_bucket_if_local_network_id_is_key(self):\n self.node.routing_table.touch_bucket = mock.MagicMock()\n Lookup(FindNode, self.node.network_id, self.node, self.event_loop)\n self.assertEqual(self.node.routing_table.touch_bucket.call_count, 0)", "def validate_input(self, IP, Port):\n exception = ServerInitError\n try:\n if IP != None:\n if re.match(IPRegex, IP):\n self.IP = IP\n else:\n exception = InavlidIPError\n raise\n\n if Port != None:\n if int(Port) in range(0,65535):\n self.Port = Port\n else:\n exception = InvalidPortError\n raise\n except:\n traceback_print_exc()", "def _ports(self):\n try:\n return self._graph.node[self.node_id][\"_ports\"]\n except KeyError:\n log.debug(\"No interfaces initialised for %s\" % self)\n return", "def port_in_use(port_num):\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('0.0.0.0', port_num))\n except OSError:\n return True\n else:\n return False", "def _build_network(self):\n pass" ]
[ "0.5711651", "0.5685266", "0.5486034", "0.5470297", "0.53793174", "0.53505814", "0.52206916", "0.520188", "0.50624466", "0.50617975", "0.5060805", "0.5031961", "0.5029634", "0.50271976", "0.5011506", "0.5004499", "0.5002426", "0.49931684", "0.49833202", "0.4974392", "0.4967064", "0.49566424", "0.49401653", "0.49341184", "0.49323916", "0.4928432", "0.4922822", "0.49191308", "0.49160984", "0.49144766", "0.4861575", "0.48592365", "0.48500496", "0.48487937", "0.4846527", "0.48427024", "0.48366636", "0.4830815", "0.48127478", "0.48102164", "0.4804601", "0.48009098", "0.4795434", "0.47859907", "0.47704768", "0.4767341", "0.4766239", "0.47624674", "0.4759715", "0.47456443", "0.47309345", "0.47262526", "0.47211504", "0.47086525", "0.4702078", "0.46925458", "0.4690221", "0.46855238", "0.4683646", "0.46689627", "0.4667406", "0.46645787", "0.46617988", "0.4660609", "0.4658483", "0.4656547", "0.46497434", "0.4649276", "0.4648573", "0.4636364", "0.4635374", "0.46278152", "0.4627149", "0.46268132", "0.46237856", "0.46196267", "0.46156523", "0.4611833", "0.46014288", "0.45842212", "0.4584126", "0.45811567", "0.4579401", "0.4578829", "0.45754308", "0.45746362", "0.45692345", "0.45687887", "0.45649195", "0.45506907", "0.45481446", "0.45479026", "0.45382395", "0.4535068", "0.45344645", "0.45307624", "0.45231804", "0.45141366", "0.45128518", "0.4503059" ]
0.7904054
0
Start/stop/restart Dnsmasq for NETWORK_ID.
def _update_dnsmasq(self, network_id):
    # Check whether we should really do the following processing.
    if self.suppress_dnsmasq_updates:
        LOG.debug("Don't update dnsmasq yet;"
                  " must be processing a snapshot")
        self.dirty_networks.add(network_id)
        return
    self.dnsmasq_updater.update_network(network_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startServices():\n # dnsmasq\n out_dnsmasq = subprocess.run([\"systemctl\", \"restart\", \"dnsmasq\"], stdout=subprocess.PIPE)\n if out_dnsmasq.returncode == 0:\n logging.info(\"dnsmasq service started/restarted successfully\")\n else:\n logging.error(\"dnsmasq service start restart error\")\n # 3proxy\n out_3proxy = subprocess.run([\"systemctl\", \"restart\", \"3proxy\"], stdout=subprocess.PIPE)\n if out_3proxy.returncode == 0:\n logging.info(\"3proxy service started/restarted successfully\")\n else:\n logging.error(\"3proxy service start restart error\")", "def launch (no_flow = False,\n network = \"192.168.0.0/24\", # Address range\n first = 1, last = None, count = None, # Address range\n ip = \"192.168.0.254\",\n router = (), # Auto\n dns = (), # Auto\n dpid = None, # All\n ports = None, # All\n __INSTANCE__ = None):\n def fixint (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n return int(i)\n def fix (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n if i == '()': return ()\n return i\n first,last,count = map(fixint,(first,last,count))\n router,dns = map(fix,(router,dns))\n\n if ports is not None:\n ports = ports.split(\",\")\n ports = set(int(p) if p.isdigit() else p for p in ports)\n\n pool = SimpleAddressPool(network = network, first = first, last = last,\n count = count)\n\n inst = DHCPD(install_flow = not no_flow, pool = pool,\n ip_address = ip, router_address = router,\n dns_address = dns, dpid = dpid, ports = ports)\n\n if __INSTANCE__[0] == 0:\n # First or only instance\n core.register(inst)\n\n log.debug(\"DHCP serving a%s\", str(pool)[2:-1])", "def enable_dhcp_helper(self, network_id):\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n if not network.admin_state_up:\n return\n\n for subnet in network.subnets:\n if subnet.enable_dhcp:\n if self.call_driver('enable', network):\n self.cache.put(network)\n break", "def startservers():\n try:\n dns = subprocess.Popen(['python', FAKE_LOC, '-c', DNS_LOC])\n except IOError:\n sys.exit('>> Unable to locate FakeDns')\n\n try:\n httpd = MyTCPServer(('', 80), MyHandler)\n except socket.error:\n dns.kill()\n sys.exit('>> Port 80 already in use')\n try:\n print '>> Starting HTTP Server...'\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.shutdown()\n httpd.server_close()\n dns.kill()\n sys.exit()", "def restartHTTPd(htconf):\n parentpid = pidHTTPd(htconf)\n if parentpid <= 1:\n return\n# hopefulle killing the parent proc. will do the trick\n print >> FileKeyUtils.WMSlog, 'restartHTTPd> kill parentpid:', parentpid\n os.system('kill -TERM '+repr(parentpid))\n apache = '/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf\n print >> FileKeyUtils.WMSlog, 'restartHTTPd> via:', apache\n time.sleep(0.5) # give it time to complete proc. 
termination\n os.system('/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf)", "def start_srv(start, process):\n if not \"conf_option\" in world.cfg:\n world.cfg[\"conf_option\"] = \"\"\n\n world.cfg['log_file'] = build_log_path()\n fabric_sudo_command('cat /dev/null >' + world.cfg['log_file'])\n world.cfg[\"dhcp_log_file\"] = world.cfg['log_file']\n\n log = \"local7\"\n if world.f_cfg.isc_dhcp_log_facility != \"\":\n log = world.f_cfg.isc_dhcp_log_facility\n\n world.cfg['log_facility'] = '''\\nlog-facility {log};\\n'''.format(**locals())\n\n add_defaults()\n cfg_write()\n log.debug(\"Start ISC-DHCP with generated config:\")\n convert_cfg_file(world.cfg[\"cfg_file\"])\n fabric_send_file(world.cfg[\"cfg_file\"] + '_processed', world.cfg[\"cfg_file\"] + '_processed')\n copy_configuration_file(world.cfg[\"cfg_file\"] + '_processed')\n remove_local_file(world.cfg[\"cfg_file\"])\n #set_ethernet_interface()\n stop_srv()\n\n world.cfg['leases'] = build_leases_path()\n\n #fabric_sudo_command('echo y |rm ' + world.cfg['leases'])\n fabric_sudo_command('touch ' + world.cfg['leases'])\n\n result = fabric_sudo_command('(' + os.path.join(world.f_cfg.software_install_path, 'sbin/dhcpd') + ' -cf server.cfg_processed'\n + ' -lf ' + world.cfg['leases']\n + '&); sleep ' + str(world.f_cfg.sleep_time_1) + ';')\n\n check_process_result(start, result, process)\n\n # clear configs in case we would like make couple configs in one test\n world.cfg[\"conf_time\"] = \"\"\n world.cfg[\"log_facility\"] = \"\"\n world.cfg[\"custom_lines\"] = \"\"\n world.cfg[\"conf_option\"] = \"\"\n world.cfg[\"conf_vendor\"] = \"\"", "def start_inetsim(self):\n\t\tif os.path.exists(\"/var/run/inetsim.pid\"):\n\t\t\tos.remove(\"/var/run/inetsim.pid\")\n\t\tcmd = [\"/usr/bin/pkill\",\"inetsim\"]\n\t\toutput = self.check_output_safe(cmd)\n\t\tself.log.info(output)\n\t\tcmd = [\"/usr/bin/inetsim\",\"--bind-address\",self.cfg.net_eth1,\"--config\",self.cfg.inetsim_cfg_path,\"--log-dir\",self.cfg.inetsim_log_dir,\"--data-dir\",self.cfg.inetsim_data_dir,\"--report-dir\",self.cfg.inetsim_log_report_dir]\n\t\tself.p_inetsim = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"inetsim starts, pid: %d\",self.p_inetsim.pid)", "def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)", "def start(self):\n if self.isRunning():\n raise Exception('DhcpClientAlreadyStarted')\n cmd = ['sudo', self._slave_dhcp_client_path, '-i', self._ifname, '-A', '-S']\n if self._logger is not None:\n self._logger.debug('Running command ' + str(cmd))\n #self._slave_dhcp_client_proc = robot.libraries.Process.Process()\n #self._slave_dhcp_client_proc.start_process('sudo', self._slave_dhcp_client_path, '-i', self._ifname, '-A', '-S')\n self._slave_dhcp_client_proc = subprocess.Popen(cmd)#, 
stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)\n self._slave_dhcp_client_pid = self._slave_dhcp_client_proc.pid\n self.addSlavePid(self._slave_dhcp_client_proc.pid) # Add the PID of the child to the list of subprocesses (note: we get sudo's PID here, not the slave PID, that we will get later on via D-Bus (see RemoteDhcpClientControl.getPid())", "def run(self):\n try:\n self.ssh_connection.connect()\n dns_response = self.query_dns_server()\n result = self.process_dns_response(dns_response)\n self.handle_result(result)\n\n except Exception as e:\n print(f\"Error: {str(e)}\")\n sys.exit(2)", "def sync_dns(self,):\n\n for server_name, server_ip in self.get_instances():\n self.dnsmanager.ensure_a_record(server_name, server_ip)", "def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def set_dhcp_conn(nic):\n nic.EnableDHCP()\n # After static DNS servers are specified to start using Dynamic Host\n # Configuration Protocol (DHCP) instead of static DNS servers,\n # you can call the method without supplying \"in\" parameters.\n nic.SetDNSServerSearchOrder()", "def _restart(self):\n\n daemon_prefix = ConfigUtil().get_prefix_for_daemon_id(daemon_id=self._daemon_id, conf_dict=self._pyswitchlib_conf)\n\n if daemon_prefix:\n if self._daemon_id in self._pyswitchlib_conf:\n daemon_prefixes = self._pyswitchlib_conf[self._daemon_id].split(':')\n\n if len(daemon_prefixes) > 1:\n daemon_prefixes.remove(daemon_prefix)\n daemon_prefixes.insert(0, daemon_prefix)\n\n self._pyswitchlib_conf[self._daemon_id] = ':'.join(daemon_prefixes)\n ConfigFileUtil().write(filename=pyswitchlib_conf_file, conf_dict=self._pyswitchlib_conf)\n\n super(PySwitchLibApiDaemonRunner, self)._restart()", "def disable_dhcp_helper(self, network_id):\n network = self.cache.get_network_by_id(network_id)\n if network:\n if self.call_driver('disable', network):\n self.cache.remove(network)", "def _RunDHCPClient(self, dhclient_script_path=None, **kwargs):\n del kwargs\n PID_FILE = os.path.join(self._tmp_dir, 'dhclient.pid')\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n dhcp_command = ('echo \"\" | ' # dhclient expects STDIN for some reason\n 'dhclient -4 ' # only run on IPv4\n '-nw ' # immediately daemonize\n '-pf {pid_file} '\n '-sf {dhclient_script} '\n '-lf /dev/null ' # don't keep a leases file\n '-v {interface}'.format(\n pid_file=PID_FILE,\n dhclient_script=dhclient_script_path,\n 
interface=self.interface))\n kill_command = 'cat {pid_file} | xargs -r kill; rm {pid_file}'.format(\n pid_file=PID_FILE)\n force_kill_command = 'pgrep dhclient | xargs -r kill -9'\n\n logging.info('Killing any existing dhclient processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhclient...')\n self._device.CheckCall(dhcp_command)\n\n logging.info('Waiting to lease an IP...')\n ip = sync_utils.WaitFor(self._LeasedIP, self._dhcp_timeout)\n if not ip:\n self._device.Call(kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Stopping dhclient...')\n self._device.Call(kill_command)\n self._device.Call(force_kill_command)\n self._device.Call(clear_ifconfig_command)\n\n yield # We have released the IP.", "def _set_nameserver(self, instance):\n ctxt = context.get_admin_context()\n ip = db.instance_get_fixed_address(ctxt, instance['id'])\n network = db.fixed_ip_get_network(ctxt, ip)\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--nameserver', network['dns'])\n if err:\n LOG.error(err)\n except Exception as err:\n LOG.error(err)\n raise exception.Error('Unable to set nameserver for %s' %\n instance['id'])", "def set_static_conn(nic, ip_addr, subnet_mask, default_gateway, dns_servers):\n if isinstance(ip_addr, str):\n ip_addr = [ip_addr,]\n if isinstance(subnet_mask, str):\n subnet_mask = [subnet_mask,]\n if isinstance(default_gateway, str):\n default_gateway = [default_gateway, ]\n\n # set defult gateway. return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.SetGateways(default_gateway)\n print 'Default Gateway updated (status %d)' % ret\n\n # Set IP adrress & subnet mask. 
return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.EnableStatic(IPAddress=ip_addr, SubnetMask=subnet_mask)\n print 'IP Address / Subnet Mask updated (status %d)' % ret\n\n # set dns servers\n if dns_servers:\n #assert 0 == nic.EnableDNS(DNSServerSearchOrder=dns_servers)\n # or \n ret = nic.SetDNSServerSearchOrder(dns_servers)\n print 'DNS Server updated (status %d)' % ret", "def start_maintenance(ServerName=None):\n pass", "def launch_vrouter_instance(self):\n # Add code to start vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"VTEST_ONLY_RETURN \" +\n str(self.vr_args['vtest_only']))\n return\n cpid = os.fork()\n if cpid == 0:\n vrouter_cmd_args = [\"taskset\", self.vr_args['taskset'],\n self.vr_args['vrouter_path'], \"--no-daemon\",\n \"--no-huge\", \"--vr_packet_sz\", \"2048\"]\n if self.vr_args['dpdk_args']:\n for dpdk_arg in self.vr_args['dpdk_args'].split(' '):\n vrouter_cmd_args.append(dpdk_arg)\n vrouter_cmd_args.extend([\"--vr_socket_dir\",\n self.vr_args['socket_dir']])\n os.execvp(\"taskset\", vrouter_cmd_args)\n else:\n self.logger.info(\n \"Running cmd - taskset %s %s --no-daemon --no-huge \"\n \"--vr_packet_sz 2048 --vr_socket_dir %s\" %\n (self.vr_args['taskset'],\n self.vr_args['vrouter_path'],\n self.vr_args['socket_dir']))\n self.logger.info(\"pid = \" + str(cpid))\n self.pid = cpid\n count = 0\n ret = 0\n while (count < 10):\n cmd = \"lsof \" + self.vr_args['socket_dir'] +\\\n \"/dpdk_netlink | wc -l\"\n self.logger.info(\"Running cmd - {}\".format(cmd))\n try:\n ret = subprocess.check_output(cmd, shell=True)\n # check if the netlink is up using the ret value\n if (ret == \"2\\n\"):\n break\n else:\n time.sleep(1)\n count += 1\n except Exception as e:\n self.logger.error(e)\n time.sleep(1)\n count += 1\n if (ret != \"2\\n\"):\n self.logger.error(\"Failed to bringup vrouter\")\n return -1\n else:\n return 0", "def Restart(self, udp=False):\n self.Stop()\n self.Start(udp)", "def start_ddos_wall():\n\n if Setup.parse_options()['setup'] or Setup.parse_options()['reset']:\n Setup.write_firewall_script()\n httpd = SocketServer.ThreadingTCPServer(('', Setup.parse_options()['port']), Proxy)\n print('Proxy is running on port ', Setup.parse_options()['port'])\n monitor = Monitoring()\n monitor.start()\n httpd.serve_forever()", "def run(self):\n factory = FoghornDNSServerFactory(\n clients=[self.foghorn, client.Resolver(resolv='/etc/resolv.conf')]\n )\n protocol = dns.DNSDatagramProtocol(controller=factory)\n\n # Pylint can't seem to find these methods.\n # pylint: disable=E1101\n reactor.listenUDP(self.settings.dns_port, protocol)\n reactor.listenTCP(self.settings.dns_port, factory)\n reactor.run()\n self.foghorn.save_state()", "def restart_nginx():\n run_command_on_selected_server(_restart_nginx)", "def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def restart_local(drain=False):\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"server\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-L\")\n\n if drain:\n cmd 
= cmd + [\"--drain\"]\n\n return _subprocess(cmd)", "def start(self, ifname = None):\n \n if not self._slave_dhcp_process is None:\n raise Exception('DhcpClientAlreadyStarted')\n \n if not ifname is None:\n self._ifname = ifname\n \n if self._ifname is None:\n raise Exception('NoInterfaceProvided')\n \n self._slave_dhcp_process = SlaveDhcpClientProcess(dhcp_client_daemon_exec_path=self._dhcp_client_daemon_exec_path, ifname=self._ifname, logger=logger)\n self._slave_dhcp_process.start()\n self._new_lease_event.clear()\n self._dhcp_client_ctrl = RemoteDhcpClientControl(ifname=self._ifname) # Create a RemoteDhcpClientControl object that symbolizes the control on the remote process (over D-Bus)\n self._dhcp_client_ctrl.notifyNewLease(self._got_new_lease) # Ask underlying RemoteDhcpClientControl object to call self._new_lease_retrieved() as soon as we get a new lease \n logger.debug('DHCP client started on ' + self._ifname)\n slave_pid = self._dhcp_client_ctrl.getRemotePid()\n if slave_pid is None:\n logger.warn('Could not get remote process PID')\n raise('RemoteCommunicationError')\n else:\n logger.debug('Slave has PID ' + str(slave_pid)) \n self._slave_dhcp_process.addSlavePid(slave_pid)\n\n self._dhcp_client_ctrl.sendDiscover()", "def enable_dns_management(self):\n self._request({\"enable-dns-management\": True})", "def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. 
\"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")", "def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def start_nanny():\n global listener\n log.info(\"%s %s.\", settings.MUD_NAME_FULL, __version__)\n listener = TelnetServer(address=settings.BIND_ADDRESS,\n port=settings.BIND_PORT,\n timeout=0,\n create_client=False)\n channels.subscribe(**{\"server-reload-request\": _handle_reload_request})\n server = ServerProcess()\n listener.on_connect = _on_connect\n server.start()\n servers[server.pid] = server\n try:\n while True:\n dead_servers = []\n for server in servers.values():\n if not server.alive:\n log.debug(\"Process %s finished with code %s.\",\n server.pid, server.exit_code)\n dead_servers.append(server)\n for server in dead_servers:\n del servers[server.pid]\n if not servers:\n log.info(\"No servers running, goodbye.\")\n break\n listener.poll()\n channels.get_message()\n sleep(0.1)\n except KeyboardInterrupt: # pragma: no cover\n pass\n finally:\n listener.stop()\n channels.unsubscribe() # pragma: no cover", "def flush_dns_cache():\n\n print(\"Flushing the DNS cache to utilize new hosts file...\")\n print(\n \"Flushing the DNS cache requires administrative privileges. 
You might need to enter your password.\"\n )\n\n dns_cache_found = False\n\n if platform.system() == \"Darwin\":\n if subprocess.call(SUDO + [\"killall\", \"-HUP\", \"mDNSResponder\"]):\n print_failure(\"Flushing the DNS cache failed.\")\n elif os.name == \"nt\":\n print(\"Automatically flushing the DNS cache is not yet supported.\")\n print(\n \"Please copy and paste the command 'ipconfig /flushdns' in \"\n \"administrator command prompt after running this script.\"\n )\n else:\n nscd_prefixes = [\"/etc\", \"/etc/rc.d\"]\n nscd_msg = \"Flushing the DNS cache by restarting nscd {result}\"\n\n for nscd_prefix in nscd_prefixes:\n nscd_cache = nscd_prefix + \"/init.d/nscd\"\n\n if os.path.isfile(nscd_cache):\n dns_cache_found = True\n\n if subprocess.call(SUDO + [nscd_cache, \"restart\"]):\n print_failure(nscd_msg.format(result=\"failed\"))\n else:\n print_success(nscd_msg.format(result=\"succeeded\"))\n\n centos_file = \"/etc/init.d/network\"\n centos_msg = \"Flushing the DNS cache by restarting network {result}\"\n\n if os.path.isfile(centos_file):\n if subprocess.call(SUDO + [centos_file, \"restart\"]):\n print_failure(centos_msg.format(result=\"failed\"))\n else:\n print_success(centos_msg.format(result=\"succeeded\"))\n\n system_prefixes = [\"/usr\", \"\"]\n service_types = [\"NetworkManager\", \"wicd\", \"dnsmasq\", \"networking\"]\n restarted_services = []\n\n for system_prefix in system_prefixes:\n systemctl = system_prefix + \"/bin/systemctl\"\n system_dir = system_prefix + \"/lib/systemd/system\"\n\n for service_type in service_types:\n service = service_type + \".service\"\n if service in restarted_services:\n continue\n\n service_file = path_join_robust(system_dir, service)\n service_msg = (\n \"Flushing the DNS cache by restarting \" + service + \" {result}\"\n )\n\n if os.path.isfile(service_file):\n if 0 != subprocess.call(\n [systemctl, \"status\", service], stdout=subprocess.DEVNULL\n ):\n continue\n dns_cache_found = True\n\n if subprocess.call(SUDO + [systemctl, \"restart\", service]):\n print_failure(service_msg.format(result=\"failed\"))\n else:\n print_success(service_msg.format(result=\"succeeded\"))\n restarted_services.append(service)\n\n dns_clean_file = \"/etc/init.d/dns-clean\"\n dns_clean_msg = \"Flushing the DNS cache via dns-clean executable {result}\"\n\n if os.path.isfile(dns_clean_file):\n dns_cache_found = True\n\n if subprocess.call(SUDO + [dns_clean_file, \"start\"]):\n print_failure(dns_clean_msg.format(result=\"failed\"))\n else:\n print_success(dns_clean_msg.format(result=\"succeeded\"))\n\n if not dns_cache_found:\n print_failure(\"Unable to determine DNS management tool.\")", "def start():\n if env.latest:\n if env.python3:\n sudo('/bin/systemctl start demo-latest-py3', shell=False)\n else:\n sudo('/bin/systemctl start demo-latest.service', shell=False)\n else:\n with cd(env.directory):\n sudo('./bin/supervisorctl start zeoserver', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient1', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient2', user=env.deploy_user)", "def dnsgate(ctx, no_restart_dnsmasq, backup):\n config = configparser.ConfigParser()\n if 'dnsgate configure' not in ' '.join(sys.argv):\n if 'dnsgate.py configure' not in ' '.join(sys.argv):\n try:\n with open(CONFIG_FILE, 'r') as cf:\n config.read_file(cf)\n except FileNotFoundError:\n eprint(\"No configuration file found, run \" +\n \"\\\"dnsgate configure --help\\\". 
Exiting.\", level=LOG['ERROR'])\n quit(1)\n\n mode = config['DEFAULT']['mode']\n\n try:\n output_path = config['DEFAULT']['output']\n except KeyError:\n eprint('ERROR: ' + CONFIG_FILE + ' has no \"output\" defined. ' +\n \"run 'dnsgate configure --help' to fix. Exiting.\",\n level=LOG['ERROR'])\n quit(1)\n assert isinstance(output_path, str)\n if not os.path.exists(os.path.dirname(output_path)):\n eprint(\"ERROR: dnsgate is configured for 'mode = dnsmasq' in \" +\n CONFIG_FILE + \" but dnsmasq_config_file is not set. \" +\n \"run 'dnsgate configure --help' to fix. Exiting.\",\n level=LOG['ERROR'])\n\n quit(1)\n\n block_at_psl = config['DEFAULT'].getboolean('block_at_psl')\n dest_ip = config['DEFAULT']['dest_ip'] # todo validate ip or False/None\n if dest_ip == 'False':\n dest_ip = None\n sources = ast.literal_eval(config['DEFAULT']['sources']) # configparser has no .getlist()?\n if mode == 'dnsmasq':\n try:\n dnsmasq_config_file = \\\n click.open_file(config['DEFAULT']['dnsmasq_config_file'], 'w',\n atomic=True, lazy=True)\n dnsmasq_config_file.close() # it exists and is writeable\n except KeyError:\n eprint(\"ERROR: dnsgate is configured for 'mode = dnsmasq' in \" +\n CONFIG_FILE + \" but dnsmasq_config_file is not set. \" +\n \"run 'dnsgate configure --help' to fix. Exiting.\",\n level=LOG['ERROR'])\n quit(1)\n\n ctx.obj = Dnsgate_Config(mode=mode, block_at_psl=block_at_psl,\n dest_ip=dest_ip, no_restart_dnsmasq=no_restart_dnsmasq,\n dnsmasq_config_file=dnsmasq_config_file, backup=backup,\n sources=sources, output=output_path)\n else:\n if not dest_ip:\n dest_ip = '0.0.0.0'\n ctx.obj = Dnsgate_Config(mode=mode, block_at_psl=block_at_psl,\n dest_ip=dest_ip, no_restart_dnsmasq=no_restart_dnsmasq,\n backup=backup, sources=sources, output=output_path)\n\n os.makedirs(CACHE_DIRECTORY, exist_ok=True)", "def renew_dhcp_lease(self):\n\t\tresponse = os.system(\"/sbin/dhclient -r;/sbin/dhclient\")\n\t\tif response != 0:\n\t\t\tprint \"Network restart failed. 
DHCP Lease failed.\"", "def dns_sync(self, args):\r\n dns = DNSManager(self.client)\r\n vsi = VSManager(self.client)\r\n\r\n vs_id = resolve_id(vsi.resolve_ids, args.get('<identifier>'), 'VS')\r\n instance = vsi.get_instance(vs_id)\r\n zone_id = resolve_id(dns.resolve_ids, instance['domain'], name='zone')\r\n\r\n def sync_a_record():\r\n \"\"\" Sync A record \"\"\"\r\n records = dns.get_records(\r\n zone_id,\r\n host=instance['hostname'],\r\n )\r\n\r\n if not records:\r\n # don't have a record, lets add one to the base zone\r\n dns.create_record(\r\n zone['id'],\r\n instance['hostname'],\r\n 'a',\r\n instance['primaryIpAddress'],\r\n ttl=args['--ttl'])\r\n else:\r\n recs = [x for x in records if x['type'].lower() == 'a']\r\n if len(recs) != 1:\r\n raise CLIAbort(\"Aborting A record sync, found %d \"\r\n \"A record exists!\" % len(recs))\r\n rec = recs[0]\r\n rec['data'] = instance['primaryIpAddress']\r\n rec['ttl'] = args['--ttl']\r\n dns.edit_record(rec)\r\n\r\n def sync_ptr_record():\r\n \"\"\" Sync PTR record \"\"\"\r\n host_rec = instance['primaryIpAddress'].split('.')[-1]\r\n ptr_domains = self.client['Virtual_Guest'].\\\r\n getReverseDomainRecords(id=instance['id'])[0]\r\n edit_ptr = None\r\n for ptr in ptr_domains['resourceRecords']:\r\n if ptr['host'] == host_rec:\r\n ptr['ttl'] = args['--ttl']\r\n edit_ptr = ptr\r\n break\r\n\r\n if edit_ptr:\r\n edit_ptr['data'] = instance['fullyQualifiedDomainName']\r\n dns.edit_record(edit_ptr)\r\n else:\r\n dns.create_record(\r\n ptr_domains['id'],\r\n host_rec,\r\n 'ptr',\r\n instance['fullyQualifiedDomainName'],\r\n ttl=args['--ttl'])\r\n\r\n if not instance['primaryIpAddress']:\r\n raise CLIAbort('No primary IP address associated with this VS')\r\n\r\n zone = dns.get_zone(zone_id)\r\n\r\n go_for_it = args['--really'] or confirm(\r\n \"Attempt to update DNS records for %s\"\r\n % instance['fullyQualifiedDomainName'])\r\n\r\n if not go_for_it:\r\n raise CLIAbort(\"Aborting DNS sync\")\r\n\r\n both = False\r\n if not args['--ptr'] and not args['-a']:\r\n both = True\r\n\r\n if both or args['-a']:\r\n sync_a_record()\r\n\r\n if both or args['--ptr']:\r\n sync_ptr_record()", "def start(self, iface='', network='', bootstrap=[], cb=None, name=None, nodeid=None):\n from urlparse import urlparse\n import socket\n _log.info(\"PROXY start\")\n o=urlparse(self.master_uri)\n fqdn = socket.getfqdn(o.hostname)\n self._server_node_name = fqdn.decode('unicode-escape')\n self.node.network.join([self.master_uri],\n callback=CalvinCB(self._start_link_cb, org_cb=cb),\n corresponding_server_node_names=[self._server_node_name])", "def set_daemon_running(self, status):\n if status:\n log.debug(\"The DHCP daemon is running\")\n else:\n log.debug(\"The DHCP daemon is NOT running\")\n\n self.daemon_running = status\n\n # XXX: write the network log\n\n return defer.succeed(None)", "def start(args):\n # Create the controller\n factory = ServerFactory(args)\n \n protocol = dns.DNSDatagramProtocol(controller=factory)\n \n reactor.listenUDP(args.port, protocol, args.addr)\n reactor.listenTCP(args.port, factory, 50, args.addr)\n\n _LOG.info(\"DNS server listening on %s:%d...\", args.addr, args.port)\n reactor.run()", "def restart():\n log('reiniciando servicos', yellow)\n nginx_stop()\n nginx_start()\n nginx_restart()\n nginx_reload()\n supervisor_stop()\n supervisor_start()", "def start_interface():\n\n last_ip = None\n\n while True:\n time.sleep(5)\n current_ips = get_local_ip().split()\n\n # check if a network address was found\n if len(current_ips) == 0:\n 
communication = interaction.Communication.instance()\n communication.lost_connection()\n continue\n elif len(current_ips) == 1:\n if not current_ips[0][:3] == \"192\":\n communication = interaction.Communication.instance()\n communication.lost_connection()\n continue\n else:\n current_ip = current_ips[0]\n else:\n if current_ips[0][:3] == \"192\":\n current_ip = current_ips[0]\n else:\n current_ip = current_ips[1]\n\n # restar webservers if the IP is new\n if not current_ip == last_ip:\n last_ip = current_ip\n print(f\"Found new ip: {current_ip}\")\n\n agent = Agent.instance()\n communication = interaction.Communication.instance()\n communication.set_local_ip(current_ip)\n driver = Driver.instance()\n sensor_manager = SensorManager.instance()\n action_manager = interaction.ActionManager.instance()\n\n interface = WebInterface(agent, driver, sensor_manager, action_manager)\n interface.start(current_ip)", "def restartFluidinfo():\n for port in range(9001, 9009):\n sudo('stop fluidinfo-api-node PORT=%d || true' % port)\n sudo('start fluidinfo-api-node PORT=%d' % port)\n with settings(warn_only=True):\n sudo('kill -USR1 $(cat /var/run/nginx.pid)')", "def main():\n try:\n if get_global_option('daemon'):\n daemon = DynamicDynamoDBDaemon(\n '{0}/dynamic-dynamodb.{1}.pid'.format(\n get_global_option('pid_file_dir'),\n get_global_option('instance')))\n\n if get_global_option('daemon') == 'start':\n logger.debug('Starting daemon')\n try:\n daemon.start()\n logger.info('Daemon started')\n except IOError as error:\n logger.error('Could not create pid file: {0}'.format(error))\n logger.error('Daemon not started')\n elif get_global_option('daemon') == 'stop':\n logger.debug('Stopping daemon')\n daemon.stop()\n logger.info('Daemon stopped')\n sys.exit(0)\n\n elif get_global_option('daemon') == 'restart':\n logger.debug('Restarting daemon')\n daemon.restart()\n logger.info('Daemon restarted')\n\n elif get_global_option('daemon') in ['foreground', 'fg']:\n logger.debug('Starting daemon in foreground')\n daemon.run()\n logger.info('Daemon started in foreground')\n\n else:\n print(\n 'Valid options for --daemon are start, '\n 'stop, restart, and foreground')\n sys.exit(1)\n else:\n if get_global_option('run_once'):\n execute()\n else:\n while True:\n execute()\n\n except Exception as error:\n logger.exception(error)", "def _daemon_loop(self, daemon_id='', daemon_prefix='', pyro_ns_port=None):\n\n if daemon_id:\n pyro_daemon, pyro_uri = self._get_configured_daemon(daemon_id=daemon_id, daemon_prefix=daemon_prefix)\n\n try:\n with Pyro4.locateNS(host='localhost', port=pyro_ns_port) as ns:\n ns.register(\"PySwitchLib.\" + daemon_id, pyro_uri)\n except:\n pass\n finally:\n pyro_daemon.requestLoop()\n pyro_daemon.close()", "def elReplaceStaticIP(self, ipaddress, netmask=\"255.255.255.0\", gateway=None, nameservers=None):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n # sanity check\n normalizedStaticIp = NetworkConfigurationStaticParameters.normalizeStaticIp(ipaddress, netmask, gateway, nameservers)\n commandSection = self.sectionByName(\"command\")\n # several set\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--ip[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.ipaddress + r\"\\g<2>\",\n commandSection.string)\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--netmask[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.netmask + r\"\\g<2>\",\n 
commandSection.string)\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--gateway[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.gateway + r\"\\g<2>\",\n commandSection.string)\n if normalizedStaticIp.nameservers:\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--nameserver[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + \",\".join(normalizedStaticIp.nameservers) + r\"\\g<2>\",\n commandSection.string)\n else:\n # remove option --nameserver\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*)--nameserver[ \\t]*(?:=|[ \\t])[ \\t]*[^\\s]+(.*)$\",\n r\"\\g<1>\" + r\"\\g<2>\",\n commandSection.string)\n return self", "def reconfigure(self):\n log.debug('Reconfiguring and restarting the DHCP daemon...')\n\n # Don't set the daemon running status here, but let the status\n # check take care of that.\n\n p = Properties(self.storage, CONFIG_SECTION)\n p.addCallback(self.changed).\\\n addCallback(lambda trigger: p.load()).\\\n addCallback(self.emit_config, p).\\\n addCallback(self.restart_daemon).\\\n addErrback(self.restart_error)", "def run_server(instance):\n cpu = ['9', '10'][instance] # on which cpu\n server_delay = [0, slow][instance]\n args = {\n 'bin': slow_receiver_exp,\n 'cpu': cpu,\n 'count_queue': count_queue,\n 'sysmod': 'bess' if sysmod == 'bess-bp' else sysmod,\n 'mode': 'server',\n 'inst': instance,\n 'delay': server_delay,\n 'source_ip': _server_ips[instance],\n 'bidi': 'false'\n }\n if PORT_TYPE == PMD:\n vdev = ['virtio_user0,path=/tmp/ex_vhost0.sock,queues='+str(count_queue),\n 'virtio_user2,path=/tmp/ex_vhost2.sock,queues='+str(count_queue)][instance]\n prefix = 'slow_receiver_server_{}'.format(instance)\n args['vdev'] = vdev\n args['file-prefix'] = prefix\n cmd = ('sudo {bin} --no-pci --lcores=\"{cpu}\" --file-prefix={file-prefix} '\n '--vdev=\"{vdev}\" --socket-mem=128 -- '\n 'bidi={bidi} {source_ip} {count_queue} {sysmod} {mode} {delay}').format(**args)\n else:\n vdev = ['ex_vhost0','ex_vhost2'][instance]\n prefix = 'bessd-dpdk-prefix'\n args['vdev'] = vdev\n args['file-prefix'] = prefix\n cmd = ('sudo {bin} --no-pci --lcores=\"{cpu}\" --file-prefix={file-prefix} '\n '--proc-type=secondary --socket-mem=128 -- '\n 'bidi={bidi} vport={vdev} {source_ip} {count_queue} '\n '{sysmod} {mode} {delay}').format(**args)\n\n print(\"=\" * 32)\n print(\" \" * 13 + \"server\")\n print(cmd)\n print(\"=\" * 32, end='\\n\\n')\n # Run in background\n if not DIRECT_OUTPUT:\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n else:\n p = subprocess.Popen(cmd, shell=True)\n return p", "def dhcp_renew(ifname):\n\n logging.debug('Renewing %s DHCP lease...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--rebind', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err", "def restart_scrapy_daemon():\n global REPO_BASE_PATH\n logger.info('Scrapy daemon restarting...')\n arguments = ['python'] + [REPO_BASE_PATH+'/deploy/sqs_ranking_spiders/scrapy_daemon.py'] + sys.argv[1:]\n if 'restarted' not in arguments:\n arguments += ['restarted']\n else:\n logger.error('Error while restarting scrapy daemon. 
'\n 'Already restarted.')\n return\n logging.info('Starting %s with args %s' % (sys.executable, arguments))\n os.execv(sys.executable, arguments)", "def start_sysdig(self):\n\t\ttarget_pid = self.info[\"target_pid\"]\n\t\tlog_file = os.path.join(self.cfg.file_log_dir,self.info[\"hash_md5\"]+\".scap\")\n\t\tself.info[\"sysdig_log_path\"] = log_file\n\t\tcmd = [\"/usr/bin/sysdig\",\"-n%d\"%(self.cfg.sysdig_limit),\"-w\"+self.info[\"sysdig_log_path\"] ]\n\t\tself.p_sysdig = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"sysdig starts, logfile:%s\",self.info[\"sysdig_log_path\"] )", "def run():\n server = current_server()\n server._auto_stop = True\n return start()", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def startNAT( root, inetIntf='eth0', subnet='10.0/8' ):\n \n # Identify the interface connecting to the mininet network\n localIntf = root.defaultIntf()\n \n # Flush any currently active rules\n root.cmd( 'iptables -F' )\n root.cmd( 'iptables -t nat -F' )\n \n # Create default entries for unmatched traffic\n root.cmd( 'iptables -P INPUT ACCEPT' )\n root.cmd( 'iptables -P OUTPUT ACCEPT' )\n root.cmd( 'iptables -P FORWARD DROP' )\n \n # Configure NAT\n root.cmd( 'iptables -I FORWARD -i', localIntf, '-d', subnet, '-j DROP' )\n root.cmd( 'iptables -A FORWARD -i', localIntf, '-s', subnet, '-j ACCEPT' )\n root.cmd( 'iptables -A FORWARD -i', inetIntf, '-d', subnet, '-j ACCEPT' )\n root.cmd( 'iptables -t nat -A POSTROUTING -o ', inetIntf, '-j MASQUERADE' )\n \n # Instruct the kernel to perform forwarding\n root.cmd( 'sysctl net.ipv4.ip_forward=1' )", "def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()", "def start(self):\n listener = eventlet.listen(cfg.CONF.dhcp_lease_relay_socket,\n family=socket.AF_UNIX)\n eventlet.spawn(eventlet.serve, listener, self._handler)", "def dvs_remote_sg_simple(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.show_step(4)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n 
os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(5)\n self.show_step(6)\n for sg in [sg1, sg2]:\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Create access_point to instances from SG1 and SG2\n _, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name, sg2.name])\n\n self.show_step(7)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n self.show_step(8)\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(9)\n self.show_step(10)\n for group in ips:\n ip_pair = dict.fromkeys(ips[group])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips[group] if key != value]\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, timeout=60 * 5)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = ips['SG2']\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, result_of_command=1, timeout=60 * 5)", "def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = 
os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)", "def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == 
default_net.id)", "def start():\n Networker.stop()\n Networker.Instance = Networker()", "def run(self):\n self.stopped = False\n # receives incoming 'host up' requests\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n serverSocket.bind(('255.255.255.255', Globals.BROADCAST_PORT))\n \n # wait for UDP broadcast, send TCP ACK\n while 1:\n \n # open a socket and listen for a message\n value,address = serverSocket.recvfrom(256)\n host,port = address\n \n # this actually prevents a seg fault ;( for some reason\n if self.stopped:\n return\n \n if value == 'host up':\n \n sendSocket = socket.socket(socket.AF_INET, \n socket.SOCK_STREAM, 0)\n sendSocket.connect((host, Globals.ACK_PORT))\n sendSocket.send('host up ack')\n sendSocket.close()\n sendSocket = None\n self._addHost(host)\n \n elif value.find('host down') == 0:\n self._removeHost(host)\n \n elif value.find('add group') == 0:\n self._postEvent(value)\n \n elif value.find('remove group') == 0:\n self._postEvent(value)\n \n elif value.find('group beat') == 0:\n self._postEvent(value)\n \n serverSocket.close()", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def run(server_class=HTTPServer, handler_class=CPUUsageHandler):\n server_address = (settings.HOST, settings.PORT)\n httpd = server_class(server_address, handler_class)\n print settings.START_MSG\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.socket.close()\n print settings.STOP_MSG\n except Exception:\n raise", "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def run(self, enable_snat=True, network_create_args=None,\n router_create_args=None):\n network_create_args = network_create_args or {}\n router_create_args = router_create_args or {}\n\n ext_net = self.neutron.create_network(**network_create_args)\n router = self.neutron.create_router(**router_create_args)\n self.neutron.add_gateway_to_router(router_id=router[\"id\"],\n network_id=ext_net[\"id\"],\n enable_snat=enable_snat)\n self.neutron.remove_gateway_from_router(router[\"id\"])", "def start(self):\r\n for srv in self._servers:\r\n srv.start()", "def main():\n\n args = TrafficScriptArg(['tx_src_ip', 'tx_dst_ip'])\n\n tx_if = args.get_arg('tx_if')\n rx_if = args.get_arg('rx_if')\n\n rxq = RxQueue(rx_if)\n txq = TxQueue(tx_if)\n\n tx_src_ip = args.get_arg('tx_src_ip')\n tx_dst_ip = args.get_arg('tx_dst_ip')\n\n sent_packets = []\n\n dhcp_discover = Ether(dst=\"ff:ff:ff:ff:ff:ff\") / \\\n IP(src=tx_src_ip, dst=tx_dst_ip) / \\\n UDP(sport=UDP_SERVICES.bootpc, dport=UDP_SERVICES.bootps) / \\\n BOOTP(op=1,) / \\\n DHCP(options=[(\"message-type\", \"discover\"),\n \"end\"])\n\n sent_packets.append(dhcp_discover)\n txq.send(dhcp_discover)\n\n for _ in range(10):\n dhcp_discover = rxq.recv(2)\n if is_discover(dhcp_discover):\n break\n else:\n raise RuntimeError(\"DHCP DISCOVER Rx timeout\")\n\n sys.exit(0)", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def disable_static_nat(self, ipaddressid): \n params = {'command':'disableStaticNat',\n 'ipaddressid':ipaddressid} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['disablestaticnatresponse']['jobid']\n self.logger.debug('Start job - disableStaticNat: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: 
%s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def start():\n\n start_server()", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def _RunDHCPCD(self, **kwargs):\n del kwargs\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n # -K: Don't receive link messages for carrier status. You should\n # only have to use this with buggy device drivers or running\n # dhcpcd through a network manager.\n # -c: Location to the hooks file. If the default location happens to be\n # empty, dhcpcd will fail. So we set the hooks file to /dev/null.\n dhcp_command = ('dhcpcd -K -t {timeout} -c /dev/null {interface}').format(\n timeout=self._dhcp_timeout,\n interface=self.interface)\n dhcp_timeout_command = 'timeout {timeout} {cmd}'.format(\n timeout=self._dhcp_timeout,\n cmd=dhcp_command)\n force_kill_command = 'pgrep dhcpcd | xargs -r kill -9'\n\n logging.info('Killing any existing dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhcpcd...')\n self._device.CheckCall(dhcp_timeout_command)\n\n logging.info('Verifying IP address...')\n ip = self._LeasedIP()\n if not ip:\n self._device.Call(force_kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Killing any remaining dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n yield # We have released the IP.", "def set_network_id(self, sNetworkId):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkId', self.handle, sNetworkId)", "def run_services():\n for service in (\"minvd\", \"httpd\", \"ntpd\"):\n sudo(\"service %s start\" % service)\n sudo(\"chkconfig %s on\" % service)", "def do_start(self, line):\n\n if not line:\n line = \"cortex\"\n\n # First, check that the name isn't already taken\n clients = self.registry.get_clients()\n if clients.has_key(line):\n print \"A server already exists with that name (%s)\" % line\n return False\n\n subprocess.Popen([\"python\", \"cortex.py\", line])\n # Wait for the system to init\n time.sleep(1)\n print \"Started server, connecting...\"\n return self.do_connect(line)", "def restart(self, _id):\n\n try:\n UpstartJob(_id).restart()\n except DBusException as e:\n raise ServiceOperationError(e)", "def startNAT( root, inetIntf='eth0', subnet='10.0/8' ):\n\n # Identify the interface connecting to the mininet network\n localIntf = root.defaultIntf()\n\n # Flush any currently active rules\n root.cmd( 'iptables -F' )\n root.cmd( 'iptables -t nat -F' )\n\n # Create default entries for unmatched traffic\n root.cmd( 'iptables -P INPUT ACCEPT' )\n root.cmd( 'iptables -P OUTPUT ACCEPT' )\n root.cmd( 'iptables -P FORWARD DROP' )\n\n # Configure NAT\n root.cmd( 'iptables -I FORWARD -i', localIntf, '-d', subnet, '-j DROP' )\n root.cmd( 'iptables -A FORWARD -i', localIntf, '-s', subnet, '-j ACCEPT' )\n root.cmd( 'iptables -A FORWARD -i', inetIntf, '-d', subnet, '-j ACCEPT' )\n root.cmd( 'iptables -t nat -A POSTROUTING -o ', inetIntf, '-j MASQUERADE' )\n\n # Instruct the kernel to perform forwarding\n root.cmd( 'sysctl net.ipv4.ip_forward=1' )", "def check_daemon(self):\n log.debug(\"Checking for the DHCP daemon status.\")\n\n d = defer.Deferred()\n args = ( self.initscript, 'status' )\n env = 
dict(os.environ)\n\n process = reactor.spawnProcess(OutputGrabber(d), self.initscript,\n args=args, env=env)\n\n def try_restart(ign, status):\n self.script_running = False\n\n if not status:\n log.debug(\"Scheduling for immediate restart.\")\n self.schedule('restart', 0)\n else:\n log.debug(\"Scheduling the next status check.\")\n self.schedule('check', self.daemon_check_interval)\n\n def complete(output, timer):\n try:\n timer.cancel()\n except:\n pass\n\n # Use heuristics to determine whether the script succeeded\n # or not.\n running = output.find('is running') != -1\n\n return defer.succeed(running).\\\n addCallback(self.set_daemon_running).\\\n addCallback(try_restart, running)\n\n def abort(process, d):\n log.error(\"Script timeout while checking DHCP daemon status. \" + \n \"Killing the script.\")\n try:\n process.signalProcess(SIGKILL)\n except:\n log.exception(\"Unable to kill the script process.\")\n\n # Don't call errback here, but let the process die and\n # complete function above handle the error.\n\n #d.errback(Failure(RuntimeError('Init script timeout (check)')))\n\n def error(failure):\n log.exception(failure)\n\n return defer.succeed(False).\\\n addCallback(self.set_daemon_running).\\\n addCallback(try_restart, False)\n\n timer = reactor.callLater(self.timeout, abort, process, d)\n\n return d.addCallback(complete, timer).\\\n addErrback(error)", "def set_dns_servers(self, hDnsServersList):\n\t\tcall_sdk_function('PrlVmDevNet_SetDnsServers', self.handle, conv_handle_arg(hDnsServersList))", "def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()", "def main():\n config = _config()\n\n resolver = Resolver()\n resolver.nameservers = config['initial_nameservers']\n LOG.debug(\"Resolving namdservers %s\", config['nameservers'])\n nameservers = [resolver.address(_) for _ in config['nameservers']]\n\n resolver.nameservers = nameservers\n\n addresses = {}\n for domain in config['domains']:\n addresses[domain] = resolver.address(domain)\n LOG.debug(\"Found addresses: %s\", addresses)\n\n account = Account(**config['credentials'])\n client = Client(account)\n domains = client.get_domains()\n\n for domain, address in addresses.items():\n if domain not in domains:\n raise ValueError(\"%s not in client list of domains\" % domain)\n current = client.get_records(domain)[0]['data']\n if current != address:\n LOG.info('updating %s (%s -> %s)', domain, current, address)\n client.update_record_ip(address, domain, '@', 'A')\n else:\n LOG.info('Record up-to-date %s (%s)', domain, address)\n LOG.debug(\"complete\")", "def run(self):\n self.network_ctrl.connect_with_remote_system()\n cmd = self.create_command(self.on_or_off, self.port)\n self.network_ctrl.send_command(cmd)\n\n check = self._port_status(self.port)\n result = self.network_ctrl.send_command(check)\n result = result[0]\n if self.on_or_off:\n if result == \"1\":\n self.router.mode = Mode.normal\n logging.info(\"[+] Successfully switched on port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching on port \" + str(self.port))\n else:\n if result == \"0\":\n self.router.mode = Mode.off\n logging.info(\"[+] Successfully switched off port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching off port \" + str(self.port))\n\n self.network_ctrl.exit()", "def restart_server(self, server_id):\n status, data, errors, messages = self._make_post_request(MCAPIRoutes.RESTART, extra_params={'id': server_id})\n \n 
if status == 200:\n return True\n elif status == 500:\n self._check_errors(errors, messages)", "def restart_nginx():\n sudo('/etc/init.d/nginx restart')", "def restart_openvpn():\n cmd = 'service openvpn restart'\n result = call_command(cmd)", "def runserver():\n local_addr = \"0.0.0.0:8000\"\n local(\"{} exec web python3 manage.py runserver {} {}\".format(\n dc, local_addr, settings))", "async def restart_node(request: web.Request) -> web.Response:\n\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n await director_v2_api.restart_dynamic_service(request.app, f\"{path_params.node_id}\")\n\n raise web.HTTPNoContent()", "def main():\n if configuration['global']['daemon']:\n pid_file = '/tmp/dynamic-dynamodb.{0}.pid'.format(\n configuration['global']['instance'])\n daemon = DynamicDynamoDBDaemon(pid_file)\n\n if configuration['global']['daemon'] == 'start':\n daemon.start(\n check_interval=configuration['global']['check_interval'])\n\n elif configuration['global']['daemon'] == 'stop':\n daemon.stop()\n\n elif configuration['global']['daemon'] == 'restart':\n daemon.restart()\n\n elif configuration['global']['daemon'] in ['foreground', 'fg']:\n daemon.run(\n check_interval=configuration['global']['check_interval'])\n\n else:\n print 'Valid options for --daemon are start, stop and restart'\n sys.exit(1)\n else:\n table_names = set()\n used_keys = set()\n configured_tables = configuration['tables'].keys()\n\n # Add regexp table names\n for table_name in core.dynamodb.list_tables():\n for key_name in configured_tables:\n if re.match(key_name, table_name):\n logger.debug(\"Table {0} match with config key {1}\".format(\n table_name, key_name))\n table_names.add((table_name, key_name))\n used_keys.add(key_name)\n\n # Remove used tables\n for table_name in used_keys:\n configured_tables.remove(table_name)\n\n # Ensure provisioning\n for table_name, key_name in sorted(table_names):\n core.ensure_provisioning(table_name, key_name)", "def restart_all():\n\n restart_nginx()\n restart_supervisor()", "def dnssd_servient():\n\n servient = Servient(\n catalogue_port=find_free_port(),\n dnssd_enabled=True,\n dnssd_instance_name=Faker().pystr())\n\n yield servient\n\n @tornado.gen.coroutine\n def shutdown():\n yield servient.shutdown()\n\n tornado.ioloop.IOLoop.current().run_sync(shutdown)", "def devices_discover_view(request):\n logger.info(\"Restarting device discovery daemon...\")\n supervisor.restart_program('device_discovery')", "def start_frida(self, daemonize=True, restart=False):\n if not self.available():\n return False\n\n if self.is_frida_running():\n if not restart:\n return True\n\n self.kill_frida()\n\n if not daemonize:\n if self._alternate_frida_name:\n result = self.su_cmd('frida-server &')\n else:\n result = self.su_cmd('frida &')\n else:\n # with nox it starts frida fine but keeps running\n # without return so it needs some timeout here\n if self._alternate_frida_name:\n result = self.su_cmd('frida-server -D', timeout=5)\n else:\n result = self.su_cmd('frida -D', timeout=5)\n\n if result and 'Unable to start server' in result:\n return False\n\n return self.is_frida_running()", "def start_nginx():\n sudo('/etc/init.d/nginx start')", "def run(self, floating_network=None):\n floating_network = self.neutron.find_network(floating_network,\n external=True)\n floating_ip = self.neutron.create_floatingip(\n floating_network=floating_network)\n\n private_network = self.neutron.create_network()\n subnet = self.neutron.create_subnet(network_id=private_network[\"id\"])\n 
port = self.neutron.create_port(network_id=private_network[\"id\"])\n\n router = self.neutron.create_router()\n self.neutron.add_gateway_to_router(\n router[\"id\"], network_id=floating_network[\"id\"])\n self.neutron.add_interface_to_router(\n subnet_id=subnet[\"id\"], router_id=router[\"id\"])\n\n self.neutron.associate_floatingip(\n floatingip_id=floating_ip[\"id\"], port_id=port[\"id\"])\n self.neutron.dissociate_floatingip(floatingip_id=floating_ip[\"id\"])", "def restart_django(restart_url=None):\n with env.cd(settings.PROJECT_PATH):\n env.run('touch rnacentral/rnacentral/wsgi.py')\n if restart_url:\n requests.get(restart_url)", "def start(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.start_server(server)\n return r", "def start_srv(self, address, family, proto=socket.IPPROTO_UDP):\n assert address\n assert address[0] # host\n assert address[1] # port\n assert family\n assert proto\n if family == socket.AF_INET6:\n if not socket.has_ipv6:\n raise NotImplementedError(\"[start_srv] IPv6 is not supported by socket {0}\"\n .format(socket))\n elif family != socket.AF_INET:\n raise NotImplementedError(\"[start_srv] unsupported protocol family {0}\".format(family))\n\n if proto == socket.IPPROTO_TCP:\n socktype = socket.SOCK_STREAM\n elif proto == socket.IPPROTO_UDP:\n socktype = socket.SOCK_DGRAM\n else:\n raise NotImplementedError(\"[start_srv] unsupported protocol {0}\".format(proto))\n\n if self.thread is None:\n self.thread = threading.Thread(target=self.query_io)\n self.thread.start()\n with self.condition:\n self.condition.wait()\n\n for srv_sock in self.srv_socks:\n if (srv_sock.family == family\n and srv_sock.getsockname()[:2] == address\n and srv_sock.proto == proto):\n return\n\n sock = socket.socket(family, socktype, proto)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Add address to interface when running from Deckard\n if self.if_manager is not None:\n if address[0] not in self.if_manager.added_addresses:\n self.if_manager.add_address(address[0])\n\n # A lot of addresses are added to the interface while runnning from Deckard in\n # the small amount of time which caused ocassional hiccups while binding to them\n # right afterwards in testing. Therefore, we retry a few times.\n final_ex = None\n for i in range(self.RETRIES_ON_BIND):\n try:\n sock.bind(address)\n break\n except OSError as ex:\n # Exponential backoff\n time.sleep((2 ** i) + random.random())\n final_ex = ex\n continue\n else:\n print(final_ex, address)\n raise final_ex\n\n if proto == socket.IPPROTO_TCP:\n sock.listen(5)\n self.srv_socks.append(sock)", "def run(self):\n httpd = HTTPServer((self.host, self.port), self._Handler)\n sa = httpd.socket.getsockname()\n serve_message = \"Serving HTTP on {host} port {port} (http://{host}:{port}/) ...\"\n print(serve_message.format(host=sa[0], port=sa[1]))\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n print(\"\\nKeyboard interrupt received, exiting.\")\n httpd.shutdown()" ]
[ "0.57506925", "0.5736145", "0.5705236", "0.5628683", "0.56053996", "0.55890465", "0.5588804", "0.5274913", "0.5227108", "0.5199843", "0.5196082", "0.5161446", "0.51592255", "0.51375973", "0.51361805", "0.51020324", "0.50229216", "0.49852252", "0.49827528", "0.49735984", "0.49720192", "0.49456844", "0.49299705", "0.49244526", "0.48847145", "0.48569202", "0.4825394", "0.48033556", "0.48005086", "0.47935653", "0.4789241", "0.47855633", "0.47824404", "0.4779794", "0.47789186", "0.47640413", "0.47605646", "0.47588825", "0.47565186", "0.47477555", "0.47356206", "0.47329116", "0.47268578", "0.47210723", "0.47206262", "0.47190815", "0.46968484", "0.46962082", "0.4690513", "0.4689", "0.46887276", "0.4686477", "0.46820447", "0.46738178", "0.46649352", "0.46589866", "0.46565947", "0.46546268", "0.4642126", "0.4636419", "0.463568", "0.46345866", "0.46284693", "0.46260098", "0.4625847", "0.46249852", "0.4622518", "0.4610944", "0.46098042", "0.46088484", "0.4599891", "0.45998663", "0.45879227", "0.4583871", "0.45728526", "0.45616156", "0.45597792", "0.45528698", "0.45515752", "0.45261076", "0.45216528", "0.45144016", "0.4514046", "0.45137447", "0.4512044", "0.45111674", "0.45081285", "0.4498203", "0.44947466", "0.4492106", "0.44894493", "0.44890592", "0.44777954", "0.44743958", "0.44612595", "0.44568476", "0.44526577", "0.44523033", "0.4451312", "0.44484073" ]
0.6410348
0
Handler for endpoint deletion.
def on_endpoint_delete(self, response_ignored, name):
    try:
        hostname, orchestrator, workload_id, endpoint_id = \
            split_endpoint_name(name)
    except ValueError:
        # For some reason this endpoint's name does not have the expected
        # form. Ignore it.
        LOG.warning("Unexpected form for endpoint name: %s", name)
        return

    # Remove endpoint ID from our cache. Note, it might not be
    # there because we haven't checked whether the endpoint just
    # deleted is a local one; hence 'discard' instead of 'remove'.
    self.local_endpoint_ids.discard(endpoint_id)

    # Find the corresponding port in the DHCP agent's cache.
    port = self.agent.cache.get_port_by_id(endpoint_id)
    if port:
        LOG.debug("deleted port: %s", port)
        self.mtu_watcher.unwatch_port(endpoint_id, port.device_id)
        self.agent.cache.remove_port(port)
        self._update_dnsmasq(port.network_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_handler(event, context):\n delete_endpoint_config(event)", "def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()", "def delete_endpoint(EndpointName=None):\n pass", "def delete_endpoint(self, endpoint_id):\n raise exception.NotImplemented() # pragma: no cover", "def delete_endpoint(self, endpoint_id):\n if EndpointService.delete_endpoint(endpoint_id) is None:\n abort(404)\n\n return {}", "def delete_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.endpoints.remove(exists)", "def on_delete(self, payload):\n pass", "def delete(self, pattern, handler):\n return self.route(Router.DELETE, pattern, handler)", "def delete(self, url_pattern):\n return self.route(url_pattern, methods=['DELETE'])", "def delete(self, endpoint: str) -> HorizonResponse:\n return HorizonResponse(\n self._session.delete(urljoin(base=self._root_url, url=endpoint)),\n )", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()", "async def delete(self, delete: TPayload) -> None:", "def handle_delete_response(self, response):\n\n self.handle_response(response)", "def delete(cls, webhook_endpoint_id):\n return Requester.delete(cls.endpoint + '/' + webhook_endpoint_id)", "def access_gemini_url_delete_method(context, endpoint):\n url = urljoin(context.gemini_api_url, endpoint)\n context.response = requests.delete(url)", "def delete(self, endpoint, ttl=5000, **kwargs):\n return self.__api_call('DELETE', endpoint, kwargs, ttl, True)", "def delete(self, *args, **kwargs) -> Any:\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, endpoint, content=None, params=None):\n\t\treturn self._call(\"DELETE\", endpoint, content, params)", "def _delete_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete(self):\n self.request().delete()", "def delete(self):\n self.method = \"DELETE\"\n self.send()", "def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')", "def delete(self, app_prefix, path):\n return self.handle_request('delete', app_prefix, path)", "def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)", "def handle_delete(self, api, command):\n return self._make_request_from_command('DELETE', command)", "def do_DELETE(self):\n global data_store\n logger.info(\"Received a DELETE request: {}\".format(self.path))\n data = self.parse_data()\n # Data should be the key name\n logger.info(\"Data received {}: {}\".format(type(data), data))\n path = self.strip_path()\n if path == SET_OP:\n logger.info(\"Running delete\")\n data_store.delete_data(data)\n response_code = 200\n else:\n logger.error(\"Invalid DELETE operation {} was received.\".format(path))\n response_code = 404\n self.send_response(response_code)", "def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )", "def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )", "def delete(self):\n ...", "def delete(self, call, params={}): \n # Build an endpoint using the parameters...\n endpoint = self._calls[call](params)\n url = '{}/{}'.format(str(self), str(endpoint))\n return self.deleter.respond(url)", "def 
event_delete(req):\n event_id = req.match_dict['event_id']\n try:\n db_conn.event_delete(event_id)\n json = {'deleted': True}\n except Exception as e:\n json = {'errors': [str(e)]}\n return req.Response(json=json)", "def do_DELETE(self,):\n self.http_method = 'DELETE'\n self.response()", "def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json", "def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def delete(self, endpoint, params):\n\n return self._call(requests.delete, endpoint, params=params)", "def delete_callback(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete():", "def delete(self, request, phone):\n attrs = self.flatten_dict(request.POST)\n try:\n endpoint = Endpoint.objects.get(uid__exact=phone, site__name__exact=request.user)\n np = NumberPlan.objects.get(phone_number=phone, site__name__exact=request.user)\n endpoint.enable=False\n np.status=2\n endpoint.save()\n np.save()\n # TODO add parking\n return rc.DELETED\n except:\n return rc.NOT_HERE", "def post_route_target_delete(self, resource_id, resource_dict):\n pass", "async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)", "def _delete_local_endpoint(self, resource, event, trigger, **kwargs):\n router_id = kwargs.get('router_id')\n # delete the local endpoint from the NSX\n local_ep_id = self._search_local_endpint(router_id)\n if local_ep_id:\n self._nsx_vpn.local_endpoint.delete(local_ep_id)\n # delete the neutron port with this IP\n ctx = n_context.get_admin_context()\n port = self._find_vpn_service_port(ctx, router_id)\n if port:\n self.l3_plugin.delete_port(ctx, port['id'], force_delete_vpn=True)", "def process_IN_DELETE(self, event):", "def delete(self, endpoint):\n\n try:\n while 1:\n response = requests.delete(\n f'https://{self.hostname}:{self.port}/{endpoint}',\n headers=self._headers,\n verify=False\n )\n\n if response.status_code >= 400:\n if 'SM_http_unauthorized' in str(response.content):\n self._refresh_connection()\n else:\n raise NimOSAPIError(response.json())\n else:\n break\n\n return response.json()\n\n except requests.exceptions.RequestException as error:\n logging.exception(error)\n raise ConnectionError(f\"Error communicating with {self.hostname}\")", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))", "def delete(self, *args, **kwargs):\n 
return 0", "def delete(self):\n return self.get_request_handler(request.headers).get_health()", "def test_handler():\n event = {\n 'pathParameters': {\n 'id': 'id'\n },\n }\n expected = {\n 'statusCode': 200,\n 'body': '\"Post deleted\"',\n 'headers': {\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Credentials': True\n }\n }\n with patch('src.delete_post.PostModel') as model:\n result = delete_post.handler(event, None)\n model().delete.assert_called_with('id')\n assert result == expected", "def post_routing_instance_delete(self, resource_id, resource_dict):\n pass", "def delete_endpoint_config(EndpointConfigName=None):\n pass", "def on_delete(self, req, resp, table, id):\n user = req.context['user']\n engine = user_db_engine(user)\n query = \"DELETE FROM {} WHERE id=:id\".format(table)\n\n with engine.new_session() as conn:\n result = conn.execute(query, { \"id\": id })\n\n resp.context['result'] = {'result': 'ok'}\n resp.status = falcon.HTTP_200", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete(self, _id):", "def delete(self, *args, **kwargs):\n raise NotImplementedError()", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_delete_entity_action(self):\n pass", "def delete(self, data):\r\n pass", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def do_DELETE(self):\n note_details = NoteDetails\n if self.path == '/note/api/delete':\n response_data=note_details.delete_data(self)\n Response(self).jsonResponse(status=200, data=response_data)", "def link_delete_callback(self):\n pass", "def delete(self, uuid):\n try:\n handle_delete(uuid)\n return make_response(200, {\"success\": True})\n except Exception, e:\n return make_response(400, {\"success\": False, \"error\": e.message})", "def delete(self, uuid):\n try:\n handle_delete(uuid)\n return make_response(200, {\"success\": True})\n except Exception, e:\n return make_response(400, {\"success\": False, \"error\": e.message})", "def test_delete():\n\n start_ln = len(routes.routes['DELETE'])\n\n @delete('/s/foo')\n def foo_route(request):\n return 200, ''\n\n for path, fn in routes.routes['DELETE']:\n if fn == foo_route:\n found = (path, fn)\n assert found\n routes.routes['DELETE'].remove(found)\n assert len(routes.routes['DELETE']) == start_ln", "def post_virtual_router_delete(self, resource_id, resource_dict):\n pass", "def delete(self, url):\n return self.request(url, \"DELETE\")", "def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def http_delete(self, **kwargs):\n return self.rabjcallable.delete(**kwargs)", "def test_delete_namespaced_route(self):\n pass", "def delete(self):\n raise NotImplementedError(\"Deleting not supported for servers\")", "def _delete(self, url):\n return self._request(url, method=\"DELETE\")", "def delete(self, method, uri, 
query_param, request_param, headers, **kwargs):\n raise NotImplementedError", "def do_DELETE(self): # pylint: disable=C0103\r\n if self.path == \"/del_config\" or self.path == \"/del_config/\":\r\n self.server.config = dict()\r\n self.log_message(\"Reset Server Configuration.\")\r\n self.send_response(200)\r\n else:\r\n self.send_response(404)", "def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def api_delete(self, *args, **kwargs):\n return self.api_delete_with_response(*args, **kwargs)[0]", "def delete(self, *route, **req_data):\n # Read the file ID from the request, with safety.\n try:\n file_id = UUID(req_data['file_id']).hex\n except ValueError:\n return Response(status='400 Bad Request')\n\n # Retrieve and delete the file.\n stored_files = StoredFile.collection()\n to_delete = stored_files.first(id=file_id)\n\n log_activity('%s deleted file %s'%(\n context.user.link, to_delete.filename\n ))\n\n stored_files.delete(to_delete)\n get_bucket().delete(to_delete.data_id)\n\n return Response(status='200 OK')", "def delete_model_endpoint(\n project: str,\n endpoint_id: str,\n ):\n model_endpoint_store = get_model_endpoint_store(\n project=project,\n secret_provider=mlrun.api.crud.secrets.get_project_secret_provider(\n project=project\n ),\n )\n\n model_endpoint_store.delete_model_endpoint(endpoint_id=endpoint_id)\n\n logger.info(\"Model endpoint table cleared\", endpoint_id=endpoint_id)", "def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))", "def cleanup(self):\n self.sagemaker.delete_endpoint(EndpointName=self.endpoint_name)\n self.sagemaker.delete_endpoint_config(EndpointConfigName=self.endpoint_name)", "def basemap_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "def delete(self):\r\n request = http.Request('DELETE', self.get_url())\r\n\r\n return request, parsers.parse_empty", "def delete(self, path):\n params = request.args.to_dict()\n if 
params.get(\"instances\"):\n int_list = params.get(\"instances\")\n return items_delete_response(path, int_list)\n abort(405)", "def delete():\n click.echo('delete was called.')", "def destroy(self, request, pk=None): #delete a specific object\n return Response({'http_method': 'DELETE'})", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))", "def delete(self):\n # type: () -> BoundAction\n return self._client.delete(self)", "def on_delete(key):\n pass", "def delete(self):\n raise NotImplementedError", "def delete(self, uri, where, selectionArgs):\n pass", "def pre_route_target_delete(self, resource_id):\n pass", "def route_removed(self, prefix, next_hop, as_path):", "def delete(self):\r\n self.require_collection()\r\n request = http.Request('DELETE', self.get_url())\r\n\r\n return request, parsers.parse_empty" ]
[ "0.8157214", "0.76216006", "0.74637145", "0.7360159", "0.73364997", "0.7239041", "0.70955694", "0.69749475", "0.6959527", "0.69571376", "0.6916459", "0.68548656", "0.67948025", "0.6789727", "0.6783907", "0.6775918", "0.6722421", "0.6706916", "0.6706916", "0.66402143", "0.66396976", "0.659712", "0.65657365", "0.65585935", "0.6557356", "0.6524605", "0.6501186", "0.6483698", "0.64659023", "0.64607936", "0.64607936", "0.64399666", "0.64388496", "0.6423408", "0.6401055", "0.6396331", "0.63955474", "0.63805944", "0.6368605", "0.636558", "0.63342375", "0.63342375", "0.63342375", "0.63342375", "0.6321038", "0.631985", "0.6311491", "0.6309869", "0.63073456", "0.6307214", "0.6297113", "0.62911403", "0.628542", "0.6280189", "0.62622625", "0.6254176", "0.6252909", "0.6231206", "0.6229568", "0.621741", "0.6211306", "0.62112886", "0.6207453", "0.6200041", "0.6197583", "0.6169595", "0.6165558", "0.61606395", "0.61606395", "0.61527765", "0.613184", "0.61280966", "0.61239535", "0.6123345", "0.6121458", "0.61113733", "0.6111353", "0.6105156", "0.60996765", "0.6099258", "0.60864323", "0.6084056", "0.60805476", "0.6077263", "0.6072413", "0.6066803", "0.60666215", "0.6062825", "0.6058774", "0.60574", "0.6051648", "0.6043221", "0.6035521", "0.6033773", "0.6032978", "0.6029533", "0.60230553", "0.6005973", "0.59959006", "0.5992608" ]
0.76158285
2
Called when a new snapshot is about to be read from etcdv3.
def _pre_snapshot_hook(self):
    # Add all current networks to the dirty set, so that we will stop their
    # Dnsmasqs if no longer needed. Also remove all port and subnet
    # information.
    LOG.debug("Reset cache for new snapshot")
    for network_id in list(self.agent.cache.get_network_ids()):
        self.dirty_networks.add(network_id)
        _fix_network_cache_port_lookup(self.agent, network_id)
        self.agent.cache.put(empty_network(network_id))

    # Suppress Dnsmasq updates until we've processed the whole snapshot.
    self.suppress_dnsmasq_updates = True
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.name = self.snapshot[0]\r\n self.size = self.snapshot[1]\r\n self.cells = self.snapshot[2]\r\n self.bucket_array.load_snapshot()", "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.gain = self.snapshot[0]\r\n self.block = self.snapshot[1]\r\n self.locked = self.snapshot[2]\r\n self.bucket_num = self.snapshot[3]", "def selectSnapshot(self, event):\r\n item = event.GetItem()\r\n try:\r\n nodeData = self._infoTree.GetItemPyData(item)\r\n snapshot = nodeData.getObject()\r\n snapshot.restore(self.restoreControllerParams())\r\n except AttributeError:\r\n event.Skip()", "def create_volume_from_snapshot(self, volume, snapshot):\n snap_name = self.get_snap_name(snapshot.id)\n view_name = self.get_view_name(volume.id)\n vol_name = self.get_volume_name(volume.id)\n cview = src_attach_info = dest_attach_info = None\n rpolicy = self.get_policy()\n properties = volume_utils.brick_get_connector_properties(\n self.configuration.use_multipath_for_image_xfer,\n self.configuration.enforce_multipath_for_image_xfer)\n LOG.debug(\"Searching for snapshot: %s in K2.\", snap_name)\n snap_rs = self.client.search(\"snapshots\", short_name=snap_name)\n if hasattr(snap_rs, 'hits') and snap_rs.total != 0:\n snap = snap_rs.hits[0]\n LOG.debug(\"Creating a view: %(view)s from snapshot: %(snap)s\",\n {'view': view_name, 'snap': snap_name})\n try:\n cview = self.client.new(\"snapshots\",\n short_name=view_name,\n source=snap, retention_policy=rpolicy,\n is_exposable=True).save()\n except Exception as ex:\n LOG.exception(\"Creating a view: %(view)s from snapshot: \"\n \"%(snap)s failed\", {\"view\": view_name,\n \"snap\": snap_name})\n raise KaminarioCinderDriverException(reason=ex)\n\n else:\n msg = _(\"Snapshot: %s search failed in K2.\") % snap_name\n LOG.error(msg)\n raise KaminarioCinderDriverException(reason=msg)\n\n try:\n conn = self.initialize_connection(cview, properties)\n src_attach_info = self._connect_device(conn)\n self.create_volume(volume)\n conn = self.initialize_connection(volume, properties)\n dest_attach_info = self._connect_device(conn)\n volume_utils.copy_volume(src_attach_info['device']['path'],\n dest_attach_info['device']['path'],\n snapshot.volume.size * units.Ki,\n self.configuration.volume_dd_blocksize,\n sparse=True)\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(volume, properties)\n self.terminate_connection(cview, properties)\n cview.delete()\n except Exception as ex:\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(cview, properties)\n self.terminate_connection(volume, properties)\n cview.delete()\n self.delete_volume(volume)\n LOG.exception(\"Copy to volume: %(vol)s from view: %(view)s \"\n \"failed\", {\"vol\": vol_name, \"view\": view_name})\n raise KaminarioCinderDriverException(reason=ex)", "def read_snapshot(self, fname):\n f = gzip.open(fname, 'rb')\n state = pickle.load(f)\n self._setstate(state)", "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.max_gain = self.snapshot[0]\r\n self.array = self.snapshot[1]\r\n self.free_cell_list = self.snapshot[2]", "def load_snapshot_done(self) -> None:\n self._phase = APIPhase.BEFORE_SHOULD_INIT", "def load_snapshot(args):\n html_doc = document.Document(get_code(args.file))\n snapshot = html_doc.load(args.index, date=args.edition, region=args.region)\n set_code(args.file, html_doc)\n print('Loaded snapshot {0!r:} - {1:%B} 
{1.day:2}, {1:%Y %l:%M:%S.%f %p}'.format(snapshot[1], snapshot[0]))", "def snapshot(self, snapshot):\n self._context[\"snapshot\"] = snapshot", "def take_snapshot(self):\r\n self.snapshot = self.gain, self.block, self.locked, self.bucket_num", "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.blockA = self.snapshot[0]\r\n self.blockB = self.snapshot[1]\r\n self.blockA_locked = self.snapshot[2]\r\n self.blockB_locked = self.snapshot[3]\r\n self.blockA_free = self.snapshot[4]\r\n self.blockB_free = self.snapshot[5]\r\n self.blockA_cells = self.snapshot[6]\r\n self.blockB_cells = self.snapshot[7]\r\n self.cut = self.snapshot[8]", "def snapshot(self):\n pass", "def deserialize_snapshot(self, serialized_snapshot):\n snapshot = list(serializers.deserialize(\n 'python', [serialized_snapshot]\n ))[0].object\n snapshot.__version__ = serialized_snapshot['version']\n snapshot.__extra_fields__ = serialized_snapshot['extra_fields']\n # override extra fields\n for name, value in serialized_snapshot['extra_fields'].items():\n if value:\n if isinstance(value, dict):\n value = self.deserialize_snapshot(value)\n setattr(snapshot, name, value)\n return snapshot", "def load_from_snapshot(self, when):\n self.state_manager_.restore(when)", "def test_volume_snapshot_create_get_list_delete(self):\n volume = self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def create_volume_from_snapshot(self, snapshot, volume, volume_db):\n self.authenticate_user()\n\n if self.configuration.vipr_emulate_snapshot == 'True':\n self.create_cloned_volume(volume, snapshot)\n return\n\n ctxt = context.get_admin_context()\n\n src_snapshot_name = None\n\n #src_snapshot_name = snapshot['display_name']\n src_vol_ref = volume_db.volume_get(ctxt, snapshot['volume_id'])\n new_volume_name = self._get_volume_name(volume)\n number_of_volumes = 1\n\n try:\n src_vol_name, src_vol_uri = self._get_vipr_volume_name(src_vol_ref, True)\n src_snapshot_name = self._get_vipr_snapshot_name(snapshot , src_vol_uri)\n\n (storageresType, storageresTypename) = self.volume_obj.get_storageAttributes(\n src_vol_name\n , None\n , src_snapshot_name)\n\n resource_id = self.volume_obj.storageResource_query(storageresType,\n src_vol_name,\n None,\n src_snapshot_name,\n self.configuration.vipr_project,\n self.configuration.vipr_tenant)\n\n self.volume_obj.clone(\n new_volume_name,\n number_of_volumes,\n resource_id,\n sync=True)\n\n except vipr_utils.SOSError as e:\n if(e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR):\n raise vipr_utils.SOSError(\n 
vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Snapshot \" +\n src_snapshot_name +\n \": clone failed\\n\" +\n e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(\n _(\"Snapshot : %s clone failed\") % src_snapshot_name)", "def take_snapshot(self):\r\n self.snapshot = self.name, self.size, copy.copy(self.cells)\r\n self.bucket_array.take_snapshot()", "def restore_from_snapshot(self, volume_id, snapshot_id):\r\n self.iscsi_svc.restoreFromSnapshot(snapshot_id, id=volume_id)", "def onReadNodeCreated():\n ...", "def restore_from_snapshot(SnapshotId=None):\n pass", "def test_snapshot(self):\n # Try to create a snapshot with a wrong machine_uuid.\n status = self.proxy.snapshot.create(\n PROVIDER_ID, \"Doesn't exist\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a snapshot with a wrong provider.\n status = self.proxy.snapshot.create(\n \"Doesn't exist\", self.machine_uuid\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Create a snapshot.\n status = self.proxy.snapshot.create(\n PROVIDER_ID, self.machine_uuid\n )\n self.check_xmlrpc_command_result(status)\n\n # Try to destroy snapshots with a wrong provider.\n status = self.proxy.snapshot.destroy(\n \"Doesn't exist\", self.machine_uuid\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Destroy snapshots.\n status = self.proxy.snapshot.destroy(\n PROVIDER_ID, self.machine_uuid\n )\n self.check_xmlrpc_command_result(status)", "def snapshot(self):\n self._client.snapshot()", "def snapshot(snapshot_type, result_q, time_delta):", "def create_volume_from_snapshot(self, volume, snapshot):\n snapshotname = huawei_utils.encode_name(snapshot['id'])\n\n snapshot_id = snapshot.get('provider_location', None)\n if snapshot_id is None:\n snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)\n if snapshot_id is None:\n err_msg = (_(\n 'create_volume_from_snapshot: Snapshot %(name)s '\n 'does not exist.')\n % {'name': snapshotname})\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)\n\n lun_info = self.create_volume(volume)\n\n tgt_lun_id = lun_info['ID']\n luncopy_name = huawei_utils.encode_name(volume['id'])\n\n LOG.info(_LI(\n 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '\n 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'),\n {'src_lun_id': snapshot_id,\n 'tgt_lun_id': tgt_lun_id,\n 'copy_name': luncopy_name})\n\n event_type = 'LUNReadyWaitInterval'\n\n wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,\n event_type)\n\n def _volume_ready():\n result = self.restclient.get_lun_info(tgt_lun_id)\n\n if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH\n and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):\n return True\n return False\n\n huawei_utils.wait_for_condition(self.xml_file_path,\n _volume_ready,\n wait_interval,\n wait_interval * 10)\n\n self._copy_volume(volume, luncopy_name,\n snapshot_id, tgt_lun_id)\n\n return {'ID': lun_info['ID'],\n 'lun_info': lun_info}", "def create_snapshot(self, context, volume_id, snapshot_id):\n\n context = context.elevated()\n snapshot_ref = self.db.snapshot_get(context, snapshot_id)\n display_name = snapshot_ref['display_name']\n display_description = snapshot_ref['display_description']\n LOG.info(_(\"snapshot %s: creating\"), snapshot_ref['id'])\n\n self._notify_about_snapshot_usage(\n context, snapshot_ref, \"create.start\")\n\n vol_ref = self.db.volume_get(context, volume_id)\n LOG.info(_(\"Cascade info: create snapshot while cascade 
id is:%s\"),\n vol_ref['mapping_uuid'])\n\n try:\n vol_ref = self.db.volume_get(context, volume_id)\n casecaded_volume_id = vol_ref['mapping_uuid']\n cinderClient = self._get_cinder_cascaded_user_client(context)\n bodyResponse = cinderClient.volume_snapshots.create(\n volume_id=casecaded_volume_id,\n force=False,\n name=display_name,\n description=display_description)\n\n LOG.info(_(\"Cascade info: create snapshot while response is:%s\"),\n bodyResponse._info)\n if bodyResponse._info['status'] == 'creating':\n self._heal_snapshot_mapping_cache(snapshot_id,\n bodyResponse._info['id'],\n \"add\")\n self.db.snapshot_update(\n context,\n snapshot_ref['id'],\n {'mapping_uuid': bodyResponse._info['id']})\n\n except Exception:\n with excutils.save_and_reraise_exception():\n self.db.snapshot_update(context,\n snapshot_ref['id'],\n {'status': 'error'})\n return\n\n self.db.snapshot_update(context,\n snapshot_ref['id'], {'status': 'available',\n 'progress': '100%'})\n# vol_ref = self.db.volume_get(context, volume_id)\n\n if vol_ref.bootable:\n try:\n self.db.volume_glance_metadata_copy_to_snapshot(\n context, snapshot_ref['id'], volume_id)\n except exception.CinderException as ex:\n LOG.exception(_(\"Failed updating %(snapshot_id)s\"\n \" metadata using the provided volumes\"\n \" %(volume_id)s metadata\") %\n {'volume_id': volume_id,\n 'snapshot_id': snapshot_id})\n raise exception.MetadataCopyFailure(reason=ex)\n\n LOG.info(_(\"Cascade info: snapshot %s, created successfully\"),\n snapshot_ref['id'])\n self._notify_about_snapshot_usage(context, snapshot_ref, \"create.end\")\n\n return snapshot_id", "def _AddSnapshot(self, snapshot):\n if self._history.count(snapshot) == 0:\n self._history.append(snapshot)", "def create_snapshot(store, dataset, snapshot, description_fields, snapshot_changes):\n validate_snapshot_name(store, dataset, snapshot)\n validate_datalad_config(store, dataset)\n update_description(store, dataset, description_fields)\n update_changes(store, dataset, snapshot, snapshot_changes)\n save_snapshot(store, dataset, snapshot)\n return get_snapshot(store, dataset, snapshot)", "def restore(self, snapshot):\n self.unit_name = snapshot[\"unit_name\"]", "def snapshots_created(self):\n # log.debug(\"Getting snaps created for volume {0}\".format(self.volume_id))\n snaps_info = []\n for snap in self._derived_snapshots:\n snap_info = {}\n try:\n if snap.volume_id == self.volume_id:\n snap.update()\n snap_info['snap_id'] = snap.id\n snap_info['snap_progress'] = snap.progress\n snap_info['snap_status'] = snap.status\n snap_info['snap_desc'] = snap.description\n snaps_info.append(snap_info)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n return snaps_info", "def show_volume_snapshot(self, snapshot, check=True):\n cmd = 'cinder snapshot-show ' + snapshot.id\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.SNAPSHOT_SHOW_TIMEOUT, check=check)\n\n snapshot_table = output_parser.table(stdout)\n show_result = {key: value for key, value in snapshot_table['values']}\n\n if check:\n assert_that(show_result['id'], is_(snapshot.id))\n if snapshot.name:\n assert_that(show_result['name'], is_(snapshot.name))\n if snapshot.description:\n assert_that(show_result['description'],\n is_(snapshot.description))", "def test_aws_service_api_snapshot_delete(self):\n pass", "def snapshot_info(self) -> MetaFile:\n raise NotImplementedError", "def 
test_csi_volumesnapshot_basic(set_random_backupstore, # NOQA\n volumesnapshotclass, # NOQA\n volumesnapshot, # NOQA\n client, # NOQA\n core_api, # NOQA\n volume_name, # NOQA\n csi_pv, # NOQA\n pvc, # NOQA\n pod_make, # NOQA\n volsnapshotclass_delete_policy, # NOQA\n backup_is_deleted): # NOQA\n\n csisnapclass = \\\n volumesnapshotclass(name=\"snapshotclass\",\n deletepolicy=volsnapshotclass_delete_policy)\n\n pod_name, pv_name, pvc_name, md5sum = \\\n prepare_pod_with_data_in_mb(client, core_api,\n csi_pv, pvc, pod_make,\n volume_name,\n data_path=\"/data/test\")\n\n # Create volumeSnapshot test\n csivolsnap = volumesnapshot(volume_name + \"-volumesnapshot\",\n \"default\",\n csisnapclass[\"metadata\"][\"name\"],\n \"persistentVolumeClaimName\",\n pvc_name)\n\n volume = client.by_id_volume(volume_name)\n\n for i in range(RETRY_COUNTS):\n snapshots = volume.snapshotList()\n if len(snapshots) == 2:\n break\n time.sleep(RETRY_INTERVAL)\n\n lh_snapshot = None\n snapshots = volume.snapshotList()\n for snapshot in snapshots:\n if snapshot[\"name\"] == \"snapshot-\" + csivolsnap[\"metadata\"][\"uid\"]:\n lh_snapshot = snapshot\n assert lh_snapshot is not None\n\n wait_for_volumesnapshot_ready(csivolsnap[\"metadata\"][\"name\"],\n csivolsnap[\"metadata\"][\"namespace\"])\n\n bv1, b = find_backup(client, volume_name, lh_snapshot[\"name\"])\n\n assert b[\"snapshotName\"] == lh_snapshot[\"name\"]\n\n restore_pvc_name = pvc[\"metadata\"][\"name\"] + \"-restore\"\n restore_pvc_size = pvc[\"spec\"][\"resources\"][\"requests\"][\"storage\"]\n\n restore_csi_volume_snapshot(core_api,\n client,\n csivolsnap,\n restore_pvc_name,\n restore_pvc_size)\n\n restore_pod = pod_make()\n restore_pod_name = restore_pod[\"metadata\"][\"name\"]\n restore_pod['spec']['volumes'] = [create_pvc_spec(restore_pvc_name)]\n\n create_and_wait_pod(core_api, restore_pod)\n restore_md5sum = \\\n get_pod_data_md5sum(core_api, restore_pod_name, path=\"/data/test\")\n assert restore_md5sum == md5sum\n\n # Delete volumeSnapshot test\n delete_volumesnapshot(csivolsnap[\"metadata\"][\"name\"], \"default\")\n\n if backup_is_deleted is False:\n find_backup(client, volume_name, b[\"snapshotName\"])\n else:\n wait_for_backup_delete(client, volume_name, b[\"name\"])", "def database_volume_snapshot_add(volume_snapshot_obj):\n db = database_get()\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n query = query.filter(model.VolumeSnapshot.uuid == volume_snapshot_obj.uuid)\n volume_snapshot = query.first()\n if not volume_snapshot:\n volume_snapshot = model.VolumeSnapshot()\n volume_snapshot.uuid = volume_snapshot_obj.uuid\n volume_snapshot.name = volume_snapshot_obj.name\n volume_snapshot.description = volume_snapshot_obj.description\n volume_snapshot.size_gb = volume_snapshot_obj.size_gb\n volume_snapshot.volume_uuid = volume_snapshot_obj.volume_uuid\n volume_snapshot.nfvi_volume_snapshot_data = \\\n json.dumps(volume_snapshot_obj.nfvi_volume_snapshot.as_dict())\n session.add(volume_snapshot)\n else:\n volume_snapshot.name = volume_snapshot_obj.name\n volume_snapshot.description = volume_snapshot_obj.description\n volume_snapshot.size_gb = volume_snapshot_obj.size_gb\n volume_snapshot.volume_uuid = volume_snapshot_obj.volume_uuid\n volume_snapshot.nfvi_volume_snapshot_data = \\\n json.dumps(volume_snapshot_obj.nfvi_volume_snapshot.as_dict())\n db.commit()", "def test_aws_service_api_snapshots_get(self):\n pass", "def create_volume_from_snapshot(snapshots, objects_created,\n wait_for_available=120):\n if type(snapshots) is 
not list:\n snapshots = [snapshots]\n v = []\n for snapshot in snapshots:\n command = 'cinder create --snapshot-id %s --name %s' % \\\n (snapshot['id'], snapshot['display_name'])\n volume_from_snapshot = parse_output(Popen(\n command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0])\n volume_from_snapshot['device'] = snapshot['device']\n volume_from_snapshot['bootable'] = snapshot['bootable']\n v.append(volume_from_snapshot)\n if wait_for_available > 0:\n wait = 0\n again = False\n while wait < wait_for_available:\n time.sleep(5)\n wait += 5\n again = False\n for volume in v:\n command = 'cinder show %s' % volume['id']\n status = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0]\n )['status']\n if status == 'error':\n # clean up and create volume again\n command = 'cinder delete %s' % volume['id']\n a = Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0]\n command = 'cinder create --snapshot-id %s' % \\\n volume['snapshot_id']\n volume_info = parse_output(Popen(\n command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n volume_info['bootable'] = volume['bootable']\n volume_info['device'] = volume['device']\n volume = volume_info\n again = True\n break\n elif status == 'creating':\n again = True\n break\n elif status == 'available':\n volume['status'] = status\n pass\n if again:\n continue\n else:\n break\n if again: # Loop ended due to timeout\n print 'Error creating volume from snapshot!'\n print 'The following entities were created in the process:'\n print_objects_created(objects_created)\n sys.exit(-1)\n return v", "def test_backup_restore_with_audit(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n audit_obj = audit(AUDITBACKUPID, self.backupset.cluster_host)\n status = audit_obj.getAuditStatus()\n self.log.info(\"Audit status on {0} is {1}\".format(self.backupset.cluster_host.ip, status))\n if not status:\n self.log.info(\"Enabling audit on {0}\".format(self.backupset.cluster_host.ip))\n audit_obj.setAuditEnable('true')\n self.backup_create()\n self.backup_cluster()\n field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='backup'))\n self.assertTrue(field_verified, \"One of the fields is not matching\")\n self.assertTrue(value_verified, \"Values for one of the fields is not matching\")\n audit_obj = audit(AUDITBACKUPID, self.backupset.restore_cluster_host)\n status = audit_obj.getAuditStatus()\n self.log.info(\"Audit status on {0} is {1}\".format(self.backupset.restore_cluster_host.ip, status))\n if not status:\n self.log.info(\"Enabling audit on {0}\".format(self.backupset.restore_cluster_host.ip))\n audit_obj.setAuditEnable('true')\n self.backup_restore()\n audit_obj = audit(AUDITRESTOREID, self.backupset.restore_cluster_host)\n field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='restore'))\n self.assertTrue(field_verified, \"One of the fields is not matching\")\n self.assertTrue(value_verified, \"Values for one of the fields is not matching\")", "def edit_snapshot(self) -> Generator[Snapshot, None, None]:\n with self.edit(Snapshot.type) as snapshot:\n if not isinstance(snapshot, Snapshot):\n raise RuntimeError(\"Unexpected snapshot type\")\n yield snapshot", "def xtest_snapshot_api(self):\n\n req = httplib2.Http(\".cache\")\n body = r\"\"\"{ \"snapshot\": { \"instanceId\": \"123\", \"name\": \"dbapi_test\" } 
}\"\"\"\n \n # Test creating an snapshot without a body in the request.\n LOG.info(\"* Creating an snapshot without a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test creating an snapshot with a malformed body.\n LOG.info(\"* Creating an snapshot with a malformed body\")\n bad_body = r\"\"\"{ \"snapshot\": {}]\"\"\"\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", bad_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n\n # Test listing all snapshots with a body in the request.\n LOG.info(\"* Listing all snapshots with a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test listing all snapshots for a specific instance with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\")\n resp, content = req.request(API_URL + \"snapshots?instanceId=\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test listing all snapshots for a specific tenant with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\") \n resp, content = req.request(API_URL + \"snapshots?tenantId=\" + TENANT_ID, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test getting a non-existent snapshot.\n LOG.info(\"* Getting dummy snapshot\")\n resp, content = req.request(API_URL + \"snapshots/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test creating a new instance from a dummy snapshot.\n instance_body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"102\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.1.2\"\n },\n \"databases\": [\n {\n \"name\": \"testdb\",\n \"character_set\": \"utf8\",\n \"collate\": \"utf8_general_ci\"\n },\n {\n \"name\": \"abcdefg\"\n }\n ],\n \"volume\":\n {\n \"size\": \"2\"\n }\n }\n }\"\"\"\n \n LOG.info(\"* Creating instance from dummy snapshot\")\n snap_body = json.loads(instance_body)\n snap_body['instance']['snapshotId'] = \"dummy\"\n snap_body = json.dumps(snap_body)\n resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n content = json.loads(content)\n self.assertEqual(500, resp.status)\n \n # This test is handled by the error handling in the API server\n# # Test creating a new instance from bad snapshot data in the body.\n# LOG.debug(\"* Creating instance from bad snapshot data in the body\")\n# snap_body = json.loads(instance_body)\n# snap_body['instance']['snapshotId'] = {}\n# snap_body = json.dumps(snap_body)\n# resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n# LOG.debug(resp)\n# LOG.debug(content)\n# content = json.loads(content)\n# self.assertEqual(500, resp.status) \n \n # Test deleting a non-existent snapshot.\n LOG.info(\"* Deleting dummy snapshot\")\n resp, content = req.request(API_URL + \"snapshots/dummy\", \"DELETE\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def snapshot(self, snapshot, 
keep=0):\n if keep:\n until = util.today() + datetime.timedelta(days=keep)\n snapshot = snapshot + \"-keep-until-\" + until.strftime(\"%Y%m%d\")\n if snapshot in [x.snapname for x in self.ceph.root.snapshots]:\n self.log.info(\"snapshot-exists\", snapshot=snapshot)\n return\n self.log.info(\"snapshot-create\", name=snapshot)\n with self.frozen_vm() as frozen:\n if frozen:\n self.ceph.root.snapshots.create(snapshot)\n else:\n self.log.error(\"snapshot-ignore\", reason=\"not frozen\")\n raise RuntimeError(\"VM not frozen, not making snapshot.\")", "def get_snapshot_object(session, key, snapshot=None):\n # type: (Session, Text, Optional[Text]) -> Any\n url_tail = \"/{}/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS,\n session.network,\n CoordConstsV2.RSC_SNAPSHOTS,\n session.get_snapshot(snapshot),\n CoordConstsV2.RSC_OBJECTS,\n )\n return _get_stream(session, url_tail, {CoordConstsV2.QP_KEY: key})", "def create_volume_from_snapshot(self, volume, snapshot):\n self._ensure_shares_mounted()\n\n snapshot_vol = self._get_snapshot_volume(snapshot)\n nfs_share = snapshot_vol['provider_location']\n volume['provider_location'] = nfs_share\n nms = self.share2nms[nfs_share]\n\n vol, dataset = self._get_share_datasets(nfs_share)\n snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],\n snapshot['name'])\n folder = '%s/%s' % (dataset, volume['name'])\n nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))\n\n try:\n self._share_folder(nms, vol, folder)\n except utils.NexentaException:\n try:\n nms.folder.destroy('%s/%s' % (vol, folder), '')\n except utils.NexentaException:\n LOG.warning(\"Cannot destroy cloned folder: \"\n \"%(vol)s/%(folder)s\",\n {'vol': vol, 'folder': folder})\n raise\n\n if self._get_nfs_server_version(nfs_share) < 4:\n sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,\n volume)\n self._ensure_share_mounted(sub_share, mnt_path)\n\n if (('size' in volume) and (\n volume['size'] > snapshot['volume_size'])):\n self.extend_volume(volume, volume['size'])\n\n return {'provider_location': volume['provider_location']}", "def test_snapshots(self):\n def get_snapshots(*_args, **_kwargs):\n return {\n 'items': [\n {'selfLink': 'url/snapshot'},\n ],\n }\n self.mock(snapshots.gce.Project, 'get_snapshots', get_snapshots)\n\n key = self.create_entity('project', 'name', ['key:value'])\n expected_urls = ['url/snapshot']\n urls = snapshots.fetch(key)\n self.assertItemsEqual(urls, expected_urls)", "def __init__(self, snapshot_file: FilePath = None, report_time: datetime = datetime.min):\n\n self.snapshot = TourneySnapshot.default_snapshot\n\n if snapshot_file is not None:\n self.snapshot = json.load(open(snapshot_file, 'r'))\n elif report_time != datetime.min:\n self._create_snapshot_from_tourney_state(report_time)\n self._compute_normalised_scores()\n else:\n raise NotImplementedError(\"Error: TourneySnapshot constructor must take one of {snapshot_file, datetime} \"\n \"as an argument\")", "def _ensure_snapshot(connection, volume, interval, name):\n if interval not in VALID_INTERVALS:\n logging.warning(kayvee.formatLog(\"ebs-snapshots\", \"warning\", \"invalid snapshotting interval\", {\n \"volume\": volume.id,\n \"interval\": interval\n }))\n return\n\n snapshots = connection.get_all_snapshots(filters={'volume-id': volume.id})\n\n # Create a snapshot if we don't have any\n if not snapshots:\n _create_snapshot(connection, volume, name)\n return\n\n min_delta = 3600 * 24 * 365 * 10 # 10 years :)\n for snapshot in snapshots:\n timestamp = 
datetime.datetime.strptime(\n snapshot.start_time,\n '%Y-%m-%dT%H:%M:%S.000Z')\n delta_seconds = int(\n (datetime.datetime.utcnow() - timestamp).total_seconds())\n\n if delta_seconds < min_delta:\n min_delta = delta_seconds\n\n logging.info(kayvee.formatLog(\"ebs-snapshots\", \"info\", 'The newest snapshot for {} is {} seconds old'.format(volume.id, min_delta)))\n\n if interval == 'hourly' and min_delta > 3600:\n _create_snapshot(connection, volume, name)\n elif interval == 'daily' and min_delta > 3600*24:\n _create_snapshot(connection, volume, name)\n elif interval == 'weekly' and min_delta > 3600*24*7:\n _create_snapshot(connection, volume, name)\n elif interval == 'monthly' and min_delta > 3600*24*30:\n _create_snapshot(connection, volume, name)\n elif interval == 'yearly' and min_delta > 3600*24*365:\n _create_snapshot(connection, volume, name)\n else:\n logging.info(kayvee.formatLog(\"ebs-snapshots\", \"info\", \"no snapshot needed\", {\"volume\": volume.id}))", "def test_backup_restore_with_update_notifications(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n rest = RestConnection(self.backupset.cluster_host)\n rest.update_notifications(\"true\")\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")", "def show_snapshot(self, snapshot_id):\n url = \"snapshots/%s\" % snapshot_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)", "def snapshot(self, snapshot_id):\r\n return self.connection.create_dbsnapshot(snapshot_id, self.id)", "def _load_changelog(self):\n\n changelog_json_file = self._project.get_changelog_path()\n if not os.path.isfile(changelog_json_file):\n logger.warning('Changelog File \"{}\" does not exists!'.format(changelog_json_file))\n return\n\n logger.warning('Loading Changelog from: \"{}\"'.format(changelog_json_file))\n\n with open(changelog_json_file, 'r') as f:\n if changelog_json_file.endswith('.json'):\n changelog_data = json.load(f, object_pairs_hook=OrderedDict)\n else:\n changelog_data = yaml.load(f, Loader=yamlordereddictloader.Loader)\n if not changelog_data:\n return\n\n changelog_versions = [key for key in changelog_data.keys()]\n ordered_versions = self._order_changelog_versions(changelog_versions)\n\n for version in reversed(ordered_versions):\n self._create_version(str(version), changelog_data[str(version)])\n\n last_version_item = self.version_accordion.item_at(0)\n last_version_item.set_collapsed(False)", "def create_snapshot(self, snapshot):\n LOG.info(_LI('Creating snapshot: %s'), snapshot['name'])\n lcfg = self.configuration\n snap_name = self._create_snapshot_name()\n self.zfssa.create_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share, snap_name)\n\n src_file = snap_name + '/' + snapshot['volume_name']\n\n try:\n self.zfssa.create_snapshot_of_volume_file(src_file=src_file,\n dst_file=\n snapshot['name'])\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.debug('Error thrown during snapshot: %s creation',\n snapshot['name'])\n finally:\n self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,\n lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share, snap_name)", "def test_snapshot(self):\n converter = InstructionToQobjConverter(PulseQobjInstruction, meas_level=2)\n instruction = Snapshot(name='label', 
snap_type='type')\n\n valid_qobj = PulseQobjInstruction(\n name='snapshot',\n t0=0,\n label='label',\n type='type'\n )\n\n self.assertEqual(converter(0, instruction), valid_qobj)", "def restore_snapshot(net, snapshot ):\n checkpoint = torch.load(snapshot, map_location=torch.device('cpu'))\n print(\"Checkpoint Load Compelete\")\n\n if 'state_dict' in checkpoint:\n net = forgiving_state_restore1(net, checkpoint['state_dict'])\n else:\n net = forgiving_state_restore1(net, checkpoint)\n\n return net", "def snapshot(self):\n return self.journal.create_checkpoint()", "def load_snapshot(base_path, snap_num, subvolumes, group, fields, matches):\n n_init = []\n\n snap_key = 'N{}_ThisFile_Redshift'.format('groups' if group == 'Haloprop' else 'subgroups')\n for subvolume in subvolumes: \n n_init.append(load_header(base_path, subvolume)[snap_key][snap_num])\n \n # initialize objects structure\n result = {}\n \n with h5py.File(file_path(base_path, subvolumes[0], 'subvolume'), 'r') as f:\n # galprop and haloprop both have a redshift quantity so we can use that to query for the snapshot we want\n filter_field = '{}Redshift'.format(group)\n \n if not fields:\n fields = list(f[group].keys())\n\n # make sure the redshift field is included in fields\n if filter_field not in fields:\n fields.append(filter_field) \n \n for field in fields:\n if field not in f[group].keys():\n raise Exception(\"Catalog does not have requested field [{}]!\".format(field))\n\n shape = list(f[group][field].shape)\n shape[0] = np.sum(n_init)\n\n # allocate within return dict\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n if matches:\n with h5py.File(file_path(base_path, subvolumes[0], 'matches'), 'r') as f:\n for field in f[group].keys():\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n header = load_header(base_path, subvolumes[0])\n filter_condition = header['Redshifts'][snap_num]\n\n offset = 0\n\n for subvolume in subvolumes:\n subvol_result = load_subvolume(base_path, subvolume, group, fields, matches, False)\n\n idx = subvol_result[filter_field][:] == filter_condition\n\n for field in subvol_result.keys():\n if len(subvol_result[field].shape) != 1:\n result[field][offset:offset+n_init[0], :] = subvol_result[field][idx]\n else:\n result[field][offset:offset+n_init[0]] = subvol_result[field][idx]\n\n offset += n_init[0]\n del n_init[0]\n \n return result", "async def new_snapshot(self, data, full=False):\n if ATTR_NAME not in data:\n # provide a default name if none was supplied.\n data[ATTR_NAME] = datetime.now(self._hass.config.time_zone).strftime(\n \"%A, %b %d, %Y\"\n )\n\n _LOGGER.debug(\"Creating snapshot %s\", data[ATTR_NAME])\n\n command = COMMAND_SNAPSHOT_FULL if full else COMMAND_SNAPSHOT_PARTIAL\n keep_days = data.pop(ATTR_KEEP_DAYS, None)\n backup_path = data.pop(ATTR_BACKUP_PATH, None)\n\n if full:\n # performing full backup.\n exclude = data.pop(ATTR_EXCLUDE, None)\n if exclude:\n # handle exclude config.\n command = COMMAND_SNAPSHOT_PARTIAL\n\n # append addons.\n addons = await self.get_addons()\n if addons:\n excluded_addons = await self._replace_addon_names(\n exclude[ATTR_ADDONS], addons\n )\n\n data[ATTR_ADDONS] = [\n addon[\"slug\"]\n for addon in addons\n if addon[\"slug\"] not in excluded_addons\n ]\n\n # append folders.\n excluded_folders = self._replace_folder_names(exclude[ATTR_FOLDERS])\n data[ATTR_FOLDERS] = [\n folder\n for folder in DEFAULT_SNAPSHOT_FOLDERS.values()\n if folder not in excluded_folders\n ]\n\n else:\n # performing partial backup.\n # replace 
addon names with their appropriate slugs.\n if ATTR_ADDONS in data:\n data[ATTR_ADDONS] = await self._replace_addon_names(data[ATTR_ADDONS])\n # replace friendly folder names.\n if ATTR_FOLDERS in data:\n data[ATTR_FOLDERS] = self._replace_folder_names(data[ATTR_FOLDERS])\n\n _LOGGER.debug(\n \"New snapshot; command: %s, keep_days: %s, data: %s, timeout: %s\",\n command,\n keep_days,\n data,\n self._backup_timeout,\n )\n\n # add to pending snapshots and update sensor.\n self._pending_snapshots += 1\n if self.update_sensor_callback:\n self.update_sensor_callback()\n\n # make request to create new snapshot.\n try:\n result = await self.send_command(\n command, payload=data, timeout=self._backup_timeout\n )\n\n _LOGGER.debug(\"Snapshot create result: %s\" % result)\n\n slug = result.get(\"data\", {}).get(\"slug\")\n if slug is None:\n error = \"There may be a backup already in progress.\"\n if data.get(\"message\"):\n error = f\"{error} {data.get('message')}\"\n raise HassioAPIError(error)\n\n # snapshot creation was successful\n _LOGGER.info(\n \"Snapshot created successfully; '%s' (%s)\", data[ATTR_NAME], slug\n )\n self._hass.bus.async_fire(\n f\"{DOMAIN}.snapshot_successful\", {\"name\": data[ATTR_NAME], \"slug\": slug}\n )\n\n if keep_days is not None:\n # set snapshot expiry\n self._snapshots_expiry[slug] = datetime.now(timezone.utc) + timedelta(\n days=float(keep_days)\n )\n # write snapshot expiry to storage\n await self._snapshots_store.async_save(self._snapshots_expiry)\n\n # copy snapshot to location if specified\n if backup_path:\n await self.copy_snapshot(data[ATTR_NAME], slug, backup_path)\n\n except HassioAPIError as err:\n _LOGGER.error(\"Error during backup. %s\", err)\n self._hass.bus.async_fire(\n f\"{DOMAIN}.snapshot_failed\",\n {\"name\": data[ATTR_NAME], \"error\": str(err)},\n )\n self.last_failure = data[ATTR_NAME]\n\n # remove from pending snapshots and update sensor.\n self._pending_snapshots -= 1\n if self.update_sensor_callback:\n self.update_sensor_callback()\n\n # purging old snapshots\n if self._auto_purge:\n await self.purge_snapshots()", "def get_snapshot(project: Optional[str] = None,\n snapshot: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSnapshotResult:\n __args__ = dict()\n __args__['project'] = project\n __args__['snapshot'] = snapshot\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:compute/alpha:getSnapshot', __args__, opts=opts, typ=GetSnapshotResult).value\n\n return AwaitableGetSnapshotResult(\n architecture=pulumi.get(__ret__, 'architecture'),\n auto_created=pulumi.get(__ret__, 'auto_created'),\n chain_name=pulumi.get(__ret__, 'chain_name'),\n creation_size_bytes=pulumi.get(__ret__, 'creation_size_bytes'),\n creation_timestamp=pulumi.get(__ret__, 'creation_timestamp'),\n description=pulumi.get(__ret__, 'description'),\n disk_size_gb=pulumi.get(__ret__, 'disk_size_gb'),\n download_bytes=pulumi.get(__ret__, 'download_bytes'),\n enable_confidential_compute=pulumi.get(__ret__, 'enable_confidential_compute'),\n guest_flush=pulumi.get(__ret__, 'guest_flush'),\n guest_os_features=pulumi.get(__ret__, 'guest_os_features'),\n kind=pulumi.get(__ret__, 'kind'),\n label_fingerprint=pulumi.get(__ret__, 'label_fingerprint'),\n labels=pulumi.get(__ret__, 'labels'),\n license_codes=pulumi.get(__ret__, 'license_codes'),\n licenses=pulumi.get(__ret__, 'licenses'),\n location_hint=pulumi.get(__ret__, 'location_hint'),\n 
max_retention_days=pulumi.get(__ret__, 'max_retention_days'),\n name=pulumi.get(__ret__, 'name'),\n satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),\n self_link=pulumi.get(__ret__, 'self_link'),\n self_link_with_id=pulumi.get(__ret__, 'self_link_with_id'),\n snapshot_encryption_key=pulumi.get(__ret__, 'snapshot_encryption_key'),\n snapshot_type=pulumi.get(__ret__, 'snapshot_type'),\n source_disk=pulumi.get(__ret__, 'source_disk'),\n source_disk_encryption_key=pulumi.get(__ret__, 'source_disk_encryption_key'),\n source_disk_for_recovery_checkpoint=pulumi.get(__ret__, 'source_disk_for_recovery_checkpoint'),\n source_disk_id=pulumi.get(__ret__, 'source_disk_id'),\n source_instant_snapshot=pulumi.get(__ret__, 'source_instant_snapshot'),\n source_instant_snapshot_id=pulumi.get(__ret__, 'source_instant_snapshot_id'),\n source_snapshot_schedule_policy=pulumi.get(__ret__, 'source_snapshot_schedule_policy'),\n source_snapshot_schedule_policy_id=pulumi.get(__ret__, 'source_snapshot_schedule_policy_id'),\n status=pulumi.get(__ret__, 'status'),\n storage_bytes=pulumi.get(__ret__, 'storage_bytes'),\n storage_bytes_status=pulumi.get(__ret__, 'storage_bytes_status'),\n storage_locations=pulumi.get(__ret__, 'storage_locations'),\n user_licenses=pulumi.get(__ret__, 'user_licenses'))", "def do_takesnapshot(self, str_arg):\n img = None\n fname = validateString(str_arg)\n try:\n # self.adbc.wake()\n printLog(self.threadName + 'taking snapshot (0,50,%d,%d) ...' %\n (self.scn_width, self.scn_height))\n img = self.adbc.takeSnapshot(reconnect=True)\n # PIL code\n img = img.crop((0, 50, self.scn_width, self.scn_height))\n img.save(fname, SNAPSHOT_IMAGE_FORMAT)\n # if self.scn_width>SNAPSHOT_WIDTH:\n # self.compressImage(fname)\n # os.remove(fname)\n # im.save(fname)\n printLog(self.threadName + 'snapshot saved as %s' % fname)\n except EnvironmentError:\n self.resultFlag = False\n if DEBUG:\n traceback.print_exc()\n finally:\n img = None", "def test_snapshot_unspecified(self):\n key = self.create_entity('project', None, [])\n urls = snapshots.fetch(key)\n self.failIf(urls)", "def _got_new_lease(self):\n self._new_lease_event.set()", "def create_volume_from_snapshot(self, volume, snapshot):\n\n free_size = self._get_spdk_lvs_free_space(\n self._get_spdk_lvs_uuid(\n self._get_spdk_volume_name(snapshot.name)))\n\n if free_size < volume.size:\n raise exception.VolumeBackendAPIException(\n data=_('Not enough space to create snapshot with SPDK'))\n\n return self._create_volume(volume, snapshot)", "async def load_snapshots_expiry(self):\n data = await self._snapshots_store.async_load()\n\n if data is not None:\n for slug, expiry in data.items():\n self._snapshots_expiry[slug] = datetime.fromisoformat(expiry)", "def update_snapshot_data(self, sSnapshotUuid, sNewName, sNewDescription = ''):\n\t\treturn Job(SDK.PrlVm_UpdateSnapshotData(self.handle, sSnapshotUuid, sNewName, sNewDescription)[0])", "def delete_snapshot(self, snapshot):\n aname = \"cinder_v%s.delete_snapshot\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volume_snapshots.delete(snapshot)\n bench_utils.wait_for_status(\n snapshot,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self._update_resource,\n timeout=CONF.openstack.cinder_volume_delete_timeout,\n check_interval=(CONF.openstack\n .cinder_volume_delete_poll_interval)\n )", "def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, 
namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return", "def test_snapshot_current_version(uvm_nano):\n vm = uvm_nano\n vm.start()\n\n version = get_firecracker_version_from_toml()\n # normalize to a snapshot version\n target_version = f\"{version.major}.{version.minor}.0\"\n snapshot = vm.snapshot_full(target_version=target_version)\n\n # Fetch Firecracker binary for the latest version\n fc_binary, _ = get_firecracker_binaries()\n # Verify the output of `--describe-snapshot` command line parameter\n cmd = [str(fc_binary)] + [\"--describe-snapshot\", str(snapshot.vmstate)]\n\n code, stdout, stderr = run_cmd(cmd)\n assert code == 0, stderr\n assert stderr == \"\"\n assert target_version in stdout", "def test_backup_restore_with_views(self):\n if \"ephemeral\" in self.input.param(\"bucket_type\", 'membase'):\n self.log.info(\"\\n****** view does not support on ephemeral bucket ******\")\n return\n rest_src = RestConnection(self.backupset.cluster_host)\n if \"community\" in self.cb_version:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])\n else:\n rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,\n self.servers[1].cluster_ip, services=['index', 'kv'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])\n rebalance.result()\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n default_map_func = \"function (doc) {\\n emit(doc._id, doc);\\n}\"\n default_view_name = \"test\"\n default_ddoc_name = \"ddoc_test\"\n prefix = \"dev_\"\n query = {\"full_set\": \"true\", \"stale\": \"false\", \"connection_timeout\": 60000}\n view = View(default_view_name, default_map_func)\n task = self.cluster.async_create_view(self.backupset.cluster_host,\n default_ddoc_name, view, \"default\")\n task.result()\n self.backup_cluster_validate()\n rest_target = RestConnection(self.backupset.restore_cluster_host)\n if self.input.clusters[0][1].ip != self.servers[1].ip:\n rest_target.add_node(self.input.clusters[0][1].rest_username,\n self.input.clusters[0][1].rest_password,\n self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n try:\n result = self.cluster.query_view(self.backupset.restore_cluster_host,\n prefix + default_ddoc_name,\n default_view_name, query, timeout=30)\n self.assertEqual(len(result['rows']), self.num_items,\n \"Querying view on restore cluster did not return expected number of items\")\n self.log.info(\"Querying view on restore cluster returned expected number of items\")\n except TimeoutError:\n self.fail(\"View could not be queried in restore cluster within timeout\")", "def test_restore_snapshot(self):\n if self.is_v4:\n self.skipTest('Restore snapshot by generation does '\n 'not work on the V4.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n snap_name = snapshot_info.get('name')\n self.replication.restore_snapshot(sg_id=sg_name,\n snap_name=snap_name, gen_num=0)\n snap_details 
= self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_restored=True)\n self.assertTrue('Restored' in snap_details.get('state'))", "def add_snapshot(self):\n\n\t\tself.mu_values = self.cvt_handler.mu_values\n\t\tdim_mu = self.mu_values.shape[1]\n\t\taux_snapshot = self.file_handler.parse(self.namefile_prefix + str(dim_mu-1) + self.file_format, self.output_name)\n\t\tsnapshot = aux_snapshot.reshape(aux_snapshot.shape[0],1)\n\t\tself.snapshots = np.append(self.snapshots, snapshot, 1)\n\t\t\n\t\tself.print_info()", "def format_snapshot(self, snapshot, user_id, data_path):\n snapshot_obj = Snapshot.FromString(snapshot)\n json_snapshot = {\n \"datetime\": self._format_datetime(snapshot_obj.datetime),\n \"depth_image\": self._depth_image_handler(snapshot_obj, user_id,\n data_path),\n \"color_image\": self._color_image_handler(snapshot_obj, user_id,\n data_path),\n \"feelings\": self._feeling_handler(snapshot_obj),\n \"pose\": self._pose_handler(snapshot_obj),\n }\n return json_snapshot", "def test_aws_service_api_snapshots_post(self):\n pass", "def test_create_volume_from_snapshot(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snap = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n volume = {'id': '2', 'name': 'volume2', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_volume_from_snapshot(volume, snap)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume2', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume2'}\n self.assertDictMatch(expected_pid, pid)", "def handle_read(self):\n data = self.recv(8192)\n meta = Metadata(time=datetime.now(), pid=None,\n comm=None, unit=self.unit)\n\n if meta.unit is not None:\n log = self.log_manager.get(meta.unit, self.logname)\n log.write(data, meta)", "def test_snapshot_listing(self):\n page_size = 5\n with mock.patch.object(TDRClient, 'page_size', page_size):\n paged_snapshots = self._public_tdr_client.snapshot_names_by_id()\n snapshots = self._public_tdr_client.snapshot_names_by_id()\n self.assertEqual(snapshots, paged_snapshots)", "def post_get_snapshot(self, response: pubsub.Snapshot) -> pubsub.Snapshot:\n return response", "def test_load_from_v3(self) -> None:\n self.save_new_valid_exploration(\n 'Exp1', '[email protected]', end_state_name='End')\n collection = collection_domain.Collection.from_yaml(\n 'cid', self.YAML_CONTENT_V3)\n self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)", "def castle_collection_snapshot(conn, coll):\n pycastle_log.debug(\"entering with conn = \"+str(conn)+\" coll = \"+str(coll))\n old_v = libcastle.castle_version_p()\n ret = libcastle.castle_collection_snapshot(conn, coll, old_v.cast())\n if ret != 0:\n raise CastleCollectionSnapshotException(ret)\n pycastle_log.debug(\"returning old_v = \"+str(old_v)+\", old_v.value() = \"+str(old_v.value()))\n pycastle_log.debug(\"Created snapshot of version \"+str(old_v.value()))\n return old_v.value()", "def data_snapshot(self) -> Dict[str, Any]:\n 
self.__logger.debug('Eva.data_snapshot called')\n return self.__http_client.data_snapshot()", "def create_snapshot(self, snapshot):\n vg_name = self.get_volume_group_name(snapshot.volume_id)\n snap_name = self.get_snap_name(snapshot.id)\n rpolicy = self.get_policy()\n try:\n LOG.debug(\"Searching volume_group: %s in K2.\", vg_name)\n vg = self.client.search(\"volume_groups\", name=vg_name).hits[0]\n LOG.debug(\"Creating a snapshot: %(snap)s from vg: %(vg)s\",\n {'snap': snap_name, 'vg': vg_name})\n self.client.new(\"snapshots\", short_name=snap_name,\n source=vg, retention_policy=rpolicy,\n is_auto_deleteable=False).save()\n except Exception as ex:\n LOG.exception(\"Creation of snapshot: %s failed.\", snap_name)\n raise KaminarioCinderDriverException(reason=ex)", "def __call__(\n self,\n request: pubsub.GetSnapshotRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Snapshot:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"get\",\n \"uri\": \"/v1/{snapshot=projects/*/snapshots/*}\",\n },\n ]\n request, metadata = self._interceptor.pre_get_snapshot(request, metadata)\n pb_request = pubsub.GetSnapshotRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Snapshot()\n pb_resp = pubsub.Snapshot.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_get_snapshot(resp)\n return resp", "def test_delete_snapshot(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snapshot = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'snap10'}\n self.driver.delete_snapshot(snapshot)\n expected = {'name': 'snap10'}\n self.assertDictMatch(expected, self.deleted)", "def snapshot_metadata(self, snapshot_metadata):\n if snapshot_metadata is None:\n raise ValueError(\"Invalid value for `snapshot_metadata`, must not be `None`\")\n\n self._snapshot_metadata = snapshot_metadata", "def get_state(self):\n try:\n json_data = open(self.state_file)\n data = json.load(json_data)\n self.state_timestamp = data[\"timestamp\"]\n json_data.close()\n\n except IOError:\n self.logger.info(\"'%s' not found: an initial state file will be create\" % \\\n self.state_file)\n data = {\"timestamp\": self.state_timestamp}\n with open(self.state_file, 'w') as out_file:\n json.dump(data, out_file, indent=4)\n out_file.close()", "def 
manage_existing_snapshot_get_size(self, snapshot, existing_ref):\n return self.manage_existing_get_size(snapshot, existing_ref)", "def manage(self, volume_id, ref, name=None, description=None,\n metadata=None):\n body = {'snapshot': {'volume_id': volume_id,\n 'ref': ref,\n 'name': name,\n 'description': description,\n 'metadata': metadata\n }\n }\n return self._create('/os-snapshot-manage', body, 'snapshot')", "def get_snapshot_children(self, snapshot):\n LOG.debug('get_snapshot_children starts.')\n pool_name = self.configuration.rbd_pool\n volume_name = \\\n 'volume-%s' % encodeutils.safe_encode(snapshot[\"volume_id\"])\n snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id'])\n children = list()\n children_on_snap = \\\n self._get_snapshot_children(pool_name, volume_name, snap_name)\n if children_on_snap is not None:\n for child in children_on_snap:\n item = dict()\n if len(child) == 2:\n item[\"pool_name\"] = child[0]\n item[\"volume_name\"] = child[1]\n if child[1].startswith(\"volume-\"):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1][len(\"volume-\"):]\n elif uuidutils.is_uuid_like(child[1]):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n children.append(item)\n\n LOG.debug('snapshot children: %s', children)\n LOG.debug('get_snapshot_children finished.')\n return children", "def load_snapshot(device, net, snapshot_name, optimizer=None):\n\ttry:\n\t\tcheckpoint = torch.load(snapshot_name+'.pth', map_location=device)\n\t\tnet.load_state_dict(checkpoint['model_state_dict'])\n\t\tif optimizer:\n\t\t\trestore_optimizer(optimizer, checkpoint)\n\texcept:\n\t\tcheckpoint = None\t\n\treturn checkpoint", "def pre_get_snapshot(\n self, request: pubsub.GetSnapshotRequest, metadata: Sequence[Tuple[str, str]]\n ) -> Tuple[pubsub.GetSnapshotRequest, Sequence[Tuple[str, str]]]:\n return request, metadata", "def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data", "def test_snapshot(model):\n with open(join(DATA_PATH, \"store.json\")) as file_handle:\n validator = Draft4Validator(json.load(file_handle))\n code, result = memote.test_model(\n model=model, results=True, pytest_args=[\"--tb\", \"no\"])\n assert validator.is_valid(result)\n config = memote.ReportConfiguration.load()\n report = memote.SnapshotReport(result=result, configuration=config)\n obj = report.render_json()\n assert validator.is_valid(obj)", "def test_csi_volumesnapshot_restore_existing_backup(set_random_backupstore, # NOQA\n client, # NOQA\n core_api, # NOQA\n volume_name, # NOQA\n csi_pv, # NOQA\n pvc, # NOQA\n pod_make, # NOQA\n volumesnapshotclass, # NOQA\n volumesnapshotcontent,\n volumesnapshot, # NOQA\n volsnapshotclass_delete_policy, # NOQA\n backup_is_deleted): # NOQA\n csisnapclass = \\\n volumesnapshotclass(name=\"snapshotclass\",\n deletepolicy=volsnapshotclass_delete_policy)\n\n pod_name, pv_name, pvc_name, md5sum = \\\n prepare_pod_with_data_in_mb(client, core_api,\n csi_pv, pvc, pod_make,\n volume_name,\n data_path=\"/data/test\")\n\n volume = client.by_id_volume(volume_name)\n snap = create_snapshot(client, volume_name)\n volume.snapshotBackup(name=snap.name)\n wait_for_backup_completion(client, volume_name, snap.name)\n bv, b = find_backup(client, volume_name, snap.name)\n\n csivolsnap_name = volume_name + \"-volumesnapshot\"\n csivolsnap_namespace = \"default\"\n\n volsnapcontent = \\\n volumesnapshotcontent(\"volsnapcontent\",\n 
csisnapclass[\"metadata\"][\"name\"],\n \"Delete\",\n \"bs://\" + volume_name + \"/\" + b.name,\n csivolsnap_name,\n csivolsnap_namespace)\n\n csivolsnap = volumesnapshot(csivolsnap_name,\n csivolsnap_namespace,\n csisnapclass[\"metadata\"][\"name\"],\n \"volumeSnapshotContentName\",\n volsnapcontent[\"metadata\"][\"name\"])\n\n restore_pvc_name = pvc[\"metadata\"][\"name\"] + \"-restore\"\n restore_pvc_size = pvc[\"spec\"][\"resources\"][\"requests\"][\"storage\"]\n\n restore_csi_volume_snapshot(core_api,\n client,\n csivolsnap,\n restore_pvc_name,\n restore_pvc_size)\n\n restore_pod = pod_make()\n restore_pod_name = restore_pod[\"metadata\"][\"name\"]\n restore_pod['spec']['volumes'] = [create_pvc_spec(restore_pvc_name)]\n\n create_and_wait_pod(core_api, restore_pod)\n restore_md5sum = \\\n get_pod_data_md5sum(core_api, restore_pod_name, path=\"/data/test\")\n\n assert restore_md5sum == md5sum\n\n # Delete volumeSnapshot test\n delete_volumesnapshot(csivolsnap[\"metadata\"][\"name\"], \"default\")\n\n if backup_is_deleted is False:\n find_backup(client, volume_name, b[\"snapshotName\"])\n else:\n wait_for_backup_delete(client, volume_name, b[\"name\"])", "def run(self):\n\n # Connection to the ES index\n conn = Connection(app.config.get(\"ELASTIC_SEARCH_HOST\"), index='_snapshot')\n\n try:\n client = ESSnapshotsClient(conn, app.config['ELASTIC_SEARCH_SNAPSHOT_REPOSITORY'])\n resp = client.request_snapshot()\n if resp.status_code == 200:\n job = self.background_job\n job.add_audit_message(\"ElasticSearch backup requested. Response: \" + resp.text)\n else:\n raise Exception(\"Status code {0} received from snapshots plugin.\".format(resp.text))\n\n except Exception as e:\n app_email.send_mail(\n to=[app.config.get('ADMIN_EMAIL', '[email protected]')],\n fro=app.config.get('SYSTEM_EMAIL_FROM', '[email protected]'),\n subject='Alert: DOAJ ElasticSearch backup failure',\n msg_body=\"The ElasticSearch snapshot could not requested. 
Error: \\n\" + str(e)\n )\n raise e", "def show_snapshot_metadata(self, snapshot_id):\n url = \"snapshots/%s/metadata\" % snapshot_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot_metadata, resp, body)\n return rest_client.ResponseBody(resp, body)", "def redo(self):\n if self._snapshot_index <= len(self._snapshots) - 2:\n snapshot = self._snapshots[self._snapshot_index + 1]\n for chunk_location in snapshot:\n dimension, cx, cz = chunk_location\n chunk = self._unserialise_chunk(dimension, cx, cz, 1)\n self._chunk_cache[chunk_location] = chunk\n self._snapshot_index += 1", "def create_snapshot(args, **_):\n\n volume_id = \\\n utils.get_external_resource_id_or_raise(\n 'create snapshot', ctx.instance)\n\n ctx.logger.info(\n 'Trying to create a snapshot of EBS volume {0}.'\n .format(volume_id))\n\n volume_object = _get_volumes_from_id(volume_id)\n\n if not args:\n snapshot_desc = \\\n unicode(datetime.datetime.now()) + \\\n ctx.instance.runtime_properties[constants.EXTERNAL_RESOURCE_ID]\n args = dict(description=snapshot_desc)\n\n try:\n new_snapshot = volume_object.create_snapshot(**args)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n ctx.logger.info(\n 'Created snapshot of EBS volume {0}.'.format(volume_id))\n\n if constants.VOLUME_SNAPSHOT_ATTRIBUTE not in \\\n ctx.instance.runtime_properties:\n ctx.instance.runtime_properties[\n constants.VOLUME_SNAPSHOT_ATTRIBUTE] = list()\n\n ctx.instance.runtime_properties[\n constants.VOLUME_SNAPSHOT_ATTRIBUTE].append(new_snapshot.id)", "async def async_snapshot(self, switchinput):\n if self._state == STATE_UNAVAILABLE:\n return\n\n if not self._slave_mode:\n self._snapshot_active = True\n self._snap_source = self._source\n self._snap_state = self._state\n self._snap_nometa = self._nometa\n self._snap_playing_mediabrowser = self._playing_mediabrowser\n self._snap_media_source_uri = self._media_source_uri\n self._snap_playhead_position = self._playhead_position\n\n if self._playing_localfile or self._playing_spotify or self._playing_webplaylist:\n if self._state in [STATE_PLAYING, STATE_PAUSED]:\n self._snap_seek = True\n\n elif self._playing_stream or self._playing_mediabrowser:\n if self._state in [STATE_PLAYING, STATE_PAUSED] and self._playing_mediabrowser:\n self._snap_seek = True\n\n _LOGGER.debug(\"For %s SNAPSHOT, source: %s, volume: %s, uri: %s, seek: %s, pos: %s\", self.name, self._source, self._snap_volume, self._media_uri_final, self._snap_seek, self._playhead_position)\n\n if self._source == \"Network\":\n self._snap_uri = self._media_uri_final\n \n\n if self._playing_spotify:\n if not switchinput:\n await self.async_preset_snap_via_upnp(str(self._preset_key))\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:stop\", None)\n else:\n self._snap_spotify_volumeonly = True\n self._snap_spotify = True\n self._snap_volume = int(self._volume)\n return\n\n elif self._playing_mass:\n await self.hass.services.async_call(\"mass\",\"queue_command\", service_data = {\"entity_id\": self.entity_id, \"command\": \"snapshot_create\"})\n self._snap_mass = True\n self._snap_volume = int(self._volume)\n\n elif self._state == STATE_IDLE:\n self._snap_volume = int(self._volume)\n\n elif switchinput and not self._playing_stream:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:switchmode:wifi\", None)\n await asyncio.sleep(0.2)\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:stop\", None)\n if 
value == \"OK\":\n await asyncio.sleep(2) # have to wait for the sound fade-in of the unit when physical source is changed, otherwise volume value will be incorrect\n await self.async_get_status()\n if self._player_statdata is not None:\n try:\n self._snap_volume = int(self._player_statdata['vol'])\n except ValueError:\n _LOGGER.warning(\"Erroneous JSON during snapshot volume reading: %s, %s\", self.entity_id, self._name)\n self._snap_volume = 0\n else:\n self._snap_volume = 0\n else:\n self._snap_volume = 0\n else:\n self._snap_volume = int(self._volume)\n if self._playing_stream:\n if self._fwvercheck(self._fw_ver) >= self._fwvercheck(FW_SLOW_STREAMS):\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:pause\", None)\n else:\n await self.async_call_linkplay_httpapi(\"setPlayerCmd:stop\", None)\n else:\n return\n #await self._master.async_snapshot(switchinput)", "def execute(self):\n\n c = self.config\n regions = dict((x.name, x) for x in boto.ec2.regions(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key']))\n connect = regions[c['region']].connect(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key'])\n volume = connect.get_all_volumes([c['volume_id']])[0]\n volume.create_snapshot(c['volume_id'])\n snapshots = {}\n for x in connect.get_all_snapshots():\n if x.volume_id == c['volume_id']:\n snapshots.update({x.id: x.start_time})\n snapshots = sorted(snapshots.items(), key=lambda (k, v): (v, k), reverse=True)\n for i in range(int(c['keep']), len(snapshots)):\n connect.delete_snapshot(snapshots[i][0])", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with 
cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def get_snapshot(self, name=None, snapshot_id=None):\n if snapshot_id:\n return self._search_snapshot(key=\"snapshot_id\", value=snapshot_id)\n elif name:\n return self._search_snapshot(key=\"name\", value=name)\n else:\n raise ValueError(\"name or snapshot_id must be provided\")", "def on_start(self): # noqa: D401\n logger.debug(\"on_start()\")\n\n state = self.read_state(self.statefile)\n if state:\n self.restore_state(state)", "async def _handle_new_logs(self) -> None:\n async for block in self._new_blocks():\n self._handle_block_data(block)\n logs = self._get_logs_from_block(block.number)\n self.logger.info(\n \"Eth1 Monitor got new eth1 block: %s, number of logs contained in the block: %s\",\n block,\n len(logs),\n )\n self._process_logs(logs, block.number)" ]
[ "0.61946166", "0.6155114", "0.57125986", "0.56532395", "0.5642027", "0.5526328", "0.55234843", "0.552134", "0.5452202", "0.54247737", "0.5324958", "0.52862036", "0.52843034", "0.5257525", "0.5253173", "0.5240661", "0.52345574", "0.520435", "0.5192152", "0.51732963", "0.50897086", "0.50894517", "0.50662565", "0.5057012", "0.50144756", "0.49632132", "0.4963196", "0.49578187", "0.49283838", "0.48711053", "0.48689735", "0.48606995", "0.48599488", "0.48562205", "0.48525786", "0.48448205", "0.48398063", "0.483662", "0.4833871", "0.482029", "0.4810714", "0.47980642", "0.47945148", "0.4775849", "0.47715923", "0.47706342", "0.47589335", "0.4757481", "0.4751973", "0.47499284", "0.47481376", "0.47455543", "0.47455066", "0.47453946", "0.47443163", "0.47432736", "0.47426578", "0.47254083", "0.47239342", "0.4722466", "0.47190166", "0.47057465", "0.4682043", "0.46804973", "0.4674887", "0.4672642", "0.46603984", "0.4657505", "0.46437252", "0.4638401", "0.46362704", "0.46321002", "0.46279985", "0.46246618", "0.46182245", "0.4618104", "0.46155187", "0.46146354", "0.4605426", "0.46036014", "0.46029574", "0.4598777", "0.45971787", "0.45961457", "0.45941514", "0.45893472", "0.4587272", "0.45834127", "0.45616752", "0.4559931", "0.45578626", "0.455393", "0.45476893", "0.45475492", "0.45414236", "0.45413068", "0.45407894", "0.4534737", "0.4532529", "0.453229" ]
0.49107116
29
Fix NetworkCache before removing or replacing a network. neutron.agent.dhcp.agent is bugged in that it adds the DHCP port into the cache without updating the cache's port_lookup dict, but then NetworkCache.remove() barfs if there is a port in network.ports but not in that dict... NetworkCache.put() implicitly does a remove() first if there is already a NetModel in the cache with the same ID. So a put() to update or replace a network also hits this problem. This method avoids that problem by ensuring that all of a network's ports are in the port_lookup dict. A caller should call this immediately before a remove() or a put().
def _fix_network_cache_port_lookup(agent, network_id): # If there is an existing NetModel for this network ID, ensure that all # its ports are in the port_lookup dict. if network_id in agent.cache.cache: for port in agent.cache.cache[network_id].ports: agent.cache.port_lookup[port.id] = network_id
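A minimal, self-contained sketch of the failure the helper above guards against, assuming a cache shaped like neutron's NetworkCache. FakeCache, Agent, Net and Port are invented stand-ins for illustration only, not the real neutron.agent.dhcp.agent classes; only _fix_network_cache_port_lookup mirrors the document above.

# Minimal sketch, assuming a cache with .cache, .port_lookup, .put() and .remove().
# FakeCache, Agent, Net and Port are hypothetical stand-ins, not neutron's classes.
from collections import namedtuple

Port = namedtuple("Port", "id")
Net = namedtuple("Net", "id ports")
Agent = namedtuple("Agent", "cache")


class FakeCache:
    def __init__(self):
        self.cache = {}        # network_id -> network model
        self.port_lookup = {}  # port_id -> network_id

    def put(self, net):
        # put() implicitly removes any existing model with the same ID.
        if net.id in self.cache:
            self.remove(self.cache[net.id])
        self.cache[net.id] = net
        for port in net.ports:
            self.port_lookup[port.id] = net.id

    def remove(self, net):
        del self.cache[net.id]
        for port in net.ports:
            # KeyError here if a port was added to net.ports but never
            # registered in port_lookup -- the bug described in the query.
            del self.port_lookup[port.id]


def _fix_network_cache_port_lookup(agent, network_id):
    # Same logic as the document above: re-register every cached port.
    if network_id in agent.cache.cache:
        for port in agent.cache.cache[network_id].ports:
            agent.cache.port_lookup[port.id] = network_id


agent = Agent(FakeCache())
net = Net("net-1", [Port("port-a")])
agent.cache.put(net)

# Simulate the buggy DHCP-agent path: a port appears in the cached model's
# ports list without a matching port_lookup entry.
net.ports.append(Port("dhcp-port"))

_fix_network_cache_port_lookup(agent, "net-1")  # repair the lookup first
agent.cache.put(Net("net-1", list(net.ports)))  # now safe to update/replace

Without the repair call, the final put() would raise KeyError inside remove() when it reaches the DHCP port that was never registered in port_lookup, which is why callers are told to invoke the helper immediately before a remove() or a put().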
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ensure_net_and_subnets(self, port):\n\n # Gather the subnet IDs that we need for this port, and get the\n # NetModel if we already have it in the cache.\n needed_subnet_ids = set()\n net = None\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip.get('subnet_id')\n if subnet_id:\n needed_subnet_ids.add(subnet_id)\n if not net:\n net = self.agent.cache.get_network_by_subnet_id(subnet_id)\n LOG.debug(\"Needed subnet IDs: %s\", needed_subnet_ids)\n LOG.debug(\"Existing network model by subnet ID: %s\", net)\n\n # For each subnet that we need, get its data from SubnetWatcher and\n # hold for adding into the cache.\n new_subnets = {}\n for subnet_id in needed_subnet_ids:\n # Get data for this subnet from the SubnetWatchers.\n subnet = (self.subnet_watcher.get_subnet(subnet_id) or\n self.v1_subnet_watcher.get_subnet(subnet_id))\n if subnet is None:\n LOG.warning(\"No data for subnet %s\", subnet_id)\n raise SubnetIDNotFound()\n new_subnets[subnet_id] = subnet\n\n if not net:\n # We don't already have a NetModel, so look for a cached NetModel\n # with the right network ID. (In this case we must have new\n # subnets to add into the cache, and the cached NetModel must have\n # subnets other than the ones that we're adding in this iteration;\n # otherwise we would have already found it when searching by\n # subnet_id above.)\n assert new_subnets\n network_id = list(new_subnets.values())[0]['network_id']\n net = self.agent.cache.get_network_by_id(network_id)\n LOG.debug(\"Existing network model by network ID: %s\", net)\n\n if not net:\n # We still have no NetModel for the relevant network ID, so create\n # a new one. In this case we _must_ be adding new subnets.\n assert new_subnets\n net = empty_network(network_id)\n LOG.debug(\"New network %s\", net)\n elif new_subnets:\n # We have a NetModel that was already in the cache and are about to\n # modify it. 
Cache replacement only works if the new NetModel is a\n # distinct object from the existing one, so make a copy here.\n net = copy_network(net)\n LOG.debug(\"Copied network %s\", net)\n\n if new_subnets:\n # Add the new subnets into the NetModel.\n assert net\n net.subnets = [s for s in net.subnets\n if s.id not in new_subnets]\n net.subnets += list(new_subnets.values())\n\n # Add (or update) the NetModel in the cache.\n LOG.debug(\"Net: %s\", net)\n _fix_network_cache_port_lookup(self.agent, net.id)\n self.agent.cache.put(net)\n\n return net.id", "def update_host_routes(self, config, cache):\n db = cache.get_or_create('host_routes', lambda: {})\n for net in config.networks:\n\n # For each subnet...\n for subnet in net.subnets:\n cidr = str(subnet.cidr)\n\n # determine the set of previously written routes for this cidr\n if cidr not in db:\n db[cidr] = set()\n\n current = db[cidr]\n\n # build a set of new routes for this cidr\n latest = set()\n for r in subnet.host_routes:\n latest.add((r.destination, r.next_hop))\n\n # If the set of previously written routes contains routes that\n # aren't defined in the new config, run commands to delete them\n for x in current - latest:\n if self._alter_route(net.interface.ifname, 'del', *x):\n current.remove(x)\n\n # If the new config contains routes that aren't defined in the\n # set of previously written routes, run commands to add them\n for x in latest - current:\n if self._alter_route(net.interface.ifname, 'add', *x):\n current.add(x)\n\n if not current:\n del db[cidr]\n\n cache.set('host_routes', db)", "def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id", "def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network {0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = 
set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def test_update_network_no_policy_change(self):\n for qos_policy_id in (self.qos_policies[0].id, None):\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': qos_policy_id}\n port_ids, fip_ids, router_ids = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(set([]), port_ids)\n self.assertEqual(set([]), fip_ids)\n self.assertEqual(set([]), router_ids)\n self.mock_rules.assert_not_called()", "def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. 
Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def network_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n self.client = docker.from_env()\n try:\n networks = self.client.networks.list(**kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n continue\n\n if len(networks) == 0:\n Console.info(\"No network exist\" + host['Ip'])\n continue\n\n for networkm in networks:\n network = networkm.__dict__['attrs']\n network['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(network)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n r = Rest.delete('Network', filter)\n r = Rest.post('Network', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def test_port_update_deferred_allocation_no_ipam(self):\n with self.network() as network:\n with self.subnet(network=network):\n pass\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n fixed_ips=[],\n is_admin=True)\n port = self.deserialize(self.fmt, response)\n ips = port['port']['fixed_ips']\n self.assertEqual(0, len(ips))\n\n # Create the subnet and try to update the port to get an IP\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def port_update_end(self, payload):\n port = DictModel(payload['port'])\n network = self.cache.get_network_by_id(port.network_id)\n if network:\n self.cache.put_port(port)\n self.call_driver('reload_allocations', network)", "def test_update_network(self):\n policies_ports = [\n (None, {self.ports[0].id}),\n (self.qos_policies[1].id, {self.ports[0].id})]\n\n self.ports[1].qos_policy_id = self.qos_policies[0].id\n self.ports[1].update()\n self.ports[2].qos_policy_id = self.qos_policies[1].id\n self.ports[2].update()\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n 
self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(mock.ANY, self.ports[0].id,\n self.ports[0].network_id, qos_policy_id,\n None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()", "def _internal_network_removed(self, ri, port, ex_gw_port):\n itfc_deleted = False\n driver = self.driver_manager.get_driver(ri.id)\n vrf_name = driver._get_vrf_name(ri)\n network_name = ex_gw_port['hosting_info'].get('network_name')\n if self._router_ids_by_vrf_and_ext_net.get(\n vrf_name, {}).get(network_name) and (\n ri.router['id'] in\n self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):\n # If this is the last port for this neutron router,\n # then remove this router from the list\n if len(ri.internal_ports) == 1 and port in ri.internal_ports:\n self._router_ids_by_vrf_and_ext_net[\n vrf_name][network_name].remove(ri.router['id'])\n\n # Check if any other routers in this VRF have this network,\n # and if not, set the flag to remove the interface\n if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(\n network_name):\n LOG.debug(\"++ REMOVING NETWORK %s\" % network_name)\n itfc_deleted = True\n del self._router_ids_by_vrf_and_ext_net[\n vrf_name][network_name]\n if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):\n del self._router_ids_by_vrf_and_ext_net[vrf_name]\n\n driver.internal_network_removed(ri, port,\n itfc_deleted=itfc_deleted)\n if ri.snat_enabled and ex_gw_port:\n driver.disable_internal_network_NAT(ri, port, ex_gw_port,\n itfc_deleted=itfc_deleted)", "def test_port_update_deferred_allocation_no_segments(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network):\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def test_update_network_external_ports(self):\n policies_ports = [(self.qos_policies[0].id, {self.ports[0].id})]\n self.ports[2].qos_policy_id = self.qos_policies[0].id\n self.ports[2].update()\n port_obj.PortBinding(self.ctx, port_id=self.ports[1].id, host='host',\n profile={}, vif_type='',\n vnic_type=portbindings_api.VNIC_DIRECT).create()\n with mock.patch.object(self.qos_driver._driver._nb_idl,\n 'get_lswitch_port') as mock_lsp:\n mock_lsp.side_effect = [\n mock.Mock(type=ovn_const.LSP_TYPE_LOCALNET),\n mock.Mock(type=ovn_const.LSP_TYPE_EXTERNAL)]\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network, reset=True)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(\n mock.ANY, self.ports[0].id, self.ports[0].network_id,\n qos_policy_id, None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()", "def update_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in 
CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.update_network(network)\n except:\n pass", "def reset_cache(self):\n if self.cache_address is not None:\n for add in self.cache:\n os.remove(add + \".cd\")\n os.remove(add + \".cl\")\n self.cache = [None] * len(self)", "def __fillCache(self):\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add them to cache\n for rawInfo in infoList:\n modelInfo = _NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))", "def sync_state(self):\n LOG.info(_('Synchronizing state'))\n known_networks = set(self.cache.get_network_ids())\n\n try:\n active_networks = set(self.plugin_rpc.get_active_networks())\n for deleted_id in known_networks - active_networks:\n self.disable_dhcp_helper(deleted_id)\n\n for network_id in active_networks:\n self.refresh_dhcp_helper(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Unable to sync network state.'))", "def test_port_update_with_fixed_ips_ok_if_no_binding_host(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Create a port with no IP address (since there is no subnet)\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network,\n segment_id=segment['segment']['id']) as subnet:\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # The IP is allocated since there is no binding host info any\n # subnet can be used for allocation.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)", "def test_update_external_network(self):\n network_policies = [(self.qos_policies[1].id,\n {self.fips[1].id},\n {self.router_fips.id}),\n (None,\n {self.fips[1].id},\n {self.router_fips.id})]\n\n self.fips[0].qos_policy_id = self.qos_policies[0].id\n self.fips[0].update()\n for qos_policy_id, ref_fips, ref_routers in network_policies:\n self.fips_network.qos_policy_id = qos_policy_id\n self.fips_network.update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n _, reviewed_fips_ids, reviewed_router_ids = (\n self.qos_driver.update_network(\n 
mock.Mock(), self.fips_network, original_network))\n self.assertEqual(ref_fips, reviewed_fips_ids)\n self.assertEqual(ref_routers, reviewed_router_ids)", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def test_port_update_deferred_allocation_no_host_mapping(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n\n port = self._create_deferred_ip_port(network)\n self._validate_deferred_ip_allocation(port['port']['id'])\n\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n res = self.deserialize(self.fmt, response)\n\n # Gets conflict because it can't map the host to a segment\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)\n self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,\n res['NeutronError']['type'])", "def alter_network(self, add=[], remove=[]):\n\n self.network.edges.add_many(add)\n self.network.edges.remove_many(remove)\n return self.score_network()", "def before_update(self, introspection_data, node_info, **kwargs):\n inventory = utils.get_inventory(introspection_data)\n\n ironic_ports = node_info.ports()\n\n for iface in inventory['interfaces']:\n if iface['name'] not in introspection_data['all_interfaces']:\n continue\n\n mac_address = iface['mac_address']\n port = ironic_ports.get(mac_address)\n if not port:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, matching port not found in Ironic.\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n # Determine the physical network for this port.\n # Port not touched in here.\n physnet = self.get_physnet(port, iface['name'], introspection_data)\n if physnet is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no physical network mapping\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n patch = self._get_physnet_patch(physnet, port)\n if patch is None:\n 
LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no update required\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n try:\n node_info.patch_port(port, [patch])\n except exceptions.BadRequestException as e:\n LOG.warning(\"Failed to update port %(uuid)s: %(error)s\",\n {'uuid': port.id, 'error': e},\n node_info=node_info)", "def _update_fixed_ips_config(port_config):\n fixed_ip_prop = ctx.node.properties.get('fixed_ip')\n if not (port_config.get('fixed_ips') or fixed_ip_prop):\n return\n\n elif not port_config.get('fixed_ips'):\n port_config['fixed_ips'] = []\n\n if fixed_ip_prop:\n for item in port_config['fixed_ips']:\n if item.get('ip_address') and item['ip_address'] == fixed_ip_prop:\n break\n else:\n port_config['fixed_ips'].append({'ip_address': fixed_ip_prop})", "def reset_cache(self):\n self.cache = [None] * self.n_layers\n self.offset = 0\n logger.debug('Reset cache.')", "def test_networking_project_network_update(self):\n pass", "def delete_networks(self):\n logging.debug(\"cleanup called\")\n # for network in self.networks.key():\n # self.networks[network].delete()\n for network in self.networks.values():\n logging.warn(\"Deleting network '%s'\" % network)\n print \"Deleting network '%s'\" % network\n # print self.networks[network]\n network.delete()\n self.networks = {}", "def filter_update_network_attributes(network, context):\n try_del(network, ['id', 'status', 'subnets', 'tenant_id'])", "def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def _update_cache(self):\n \n # Check if the model cache is full\n if self.__class__.cache_limit == len(self.__class__.model_cache):\n # Remove the oldest item from the cache if exceeding cache limit\n self.__class__.model_cache.popitem(last=False)\n \n # Remove the obsolete version of the model from the cache\n if self.model.name in self.__class__.model_cache:\n del self.__class__.model_cache[self.model.name]\n \n # Add the current model to the cache\n self.__class__.model_cache[self.model.name] = self.model\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(8)", "def reset_network(self, instance):\n LOG.debug(\"reset_network\")\n return", "def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)", "def test_add_remove_network_from_dhcp_agent(self):\n # The agent is now bound to the network, we can free the port\n self.ports_client.delete_port(self.port['id'])\n agent = dict()\n agent['agent_type'] = None\n body = self.admin_agents_client.list_agents()\n agents = body['agents']\n for a in agents:\n if a['agent_type'] == 'DHCP agent':\n agent = a\n break\n self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '\n 'DHCP agent in agent list though dhcp_agent_scheduler'\n ' is enabled.')\n network = self.create_network()\n network_id = network['id']\n if self._check_network_in_dhcp_agent(network_id, agent):\n self._remove_network_from_dhcp_agent(network_id, agent)\n self._add_dhcp_agent_to_network(network_id, agent)\n else:\n self._add_dhcp_agent_to_network(network_id, agent)\n self._remove_network_from_dhcp_agent(network_id, agent)", "def replace_node(self, network_node: Node, node: Node) -> None:\n index = self.network.index(network_node)\n 
self.network[index] = node", "def update_resources_for_this_host(cache, db):\n free_cpu, free_mem = get_resources()\n my_ip = cache[\"ip\"]\n\n logger.info(\"UPDATING\", extra = {\"cpu\": free_cpu, \"mem\": free_mem, \"ip\": my_ip})\n try:\n db.hset(my_ip, mapping={\"cpu\": free_cpu, \"mem\": free_mem})\n except Exception as e:\n logger.error(e)\n raise e", "def testPutNetworkLocalIp(self):\n models.System.objects.all().delete()\n self._saveSystem()\n old_count = models.Network.objects.count()\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"169.254.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count, models.Network.objects.count())\n\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"4.4.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count + 1, models.Network.objects.count())", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def fill_from_cache(self):\n move_count = min(\n len(self._replacement_cache),\n constants.K - len(self._contacts)\n )\n\n for _ in range(move_count):\n self.add_contact(self._replacement_cache.pop())", "def test_port_update_deferred_allocation_no_ips(self):\n network, segments, subnets = self._create_test_segments_with_subnets(2)\n\n self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost2'),\n (segments[1]['segment']['id'], 'fakehost')])\n\n port = self._create_deferred_ip_port(network)\n\n # Update the subnet on the second segment to be out of IPs\n subnet_data = {'subnet': {'allocation_pools': []}}\n subnet_req = self.new_update_request('subnets',\n subnet_data,\n subnets[1]['subnet']['id'])\n subnet_response = subnet_req.get_response(self.api)\n res = self.deserialize(self.fmt, subnet_response)\n\n # Try requesting an IP (but the subnet ran out of ips)\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n res = self.deserialize(self.fmt, response)\n\n # Since port is bound and there is a mapping to segment, it succeeds.\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)\n self.assertEqual(n_exc.IpAddressGenerationFailure.__name__,\n res['NeutronError']['type'])", "def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)", "def update(self, oid, name, network_id, fixed_ips, host_id=None, \n profile=None, vnic_type=None, device_owner=None, device_id=None,\n security_groups=None):\n data = {\n \"port\": {\n }\n }\n if network_id is not None:\n data['port']['network_id'] = network_id\n if name is not None:\n data['port']['name'] = name\n if fixed_ips is not None:\n data['port']['fixed_ips'] = fixed_ips\n if host_id is not None:\n data['port']['binding:host_id'] = host_id\n if profile is not None:\n data['port']['binding:profile'] = profile\n if host_id is not None:\n data['port']['binding:vnic_type'] = vnic_type\n if device_owner is not 
None:\n data['port']['device_owner'] = device_owner\n if device_id is not None:\n data['port']['device_id'] = device_id\n if security_groups is not None:\n data['port']['security_groups'] = security_groups\n \n path = '%s/ports/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack port: %s' % truncate(res))\n return res[0]['port']", "def update_fix(self, tn):\n new_dict = {}\n for k in self.fix_dict:\n if 'fix_to' in tn.network.nodes[k[0]]:\n c = tn.network.nodes[k[0]]['fix_to']\n new_k = (k[0], tuple(c))\n new_dict[new_k] = self.fix_dict[k]\n new_dict[new_k][0] = list(c)[0]\n else:\n new_dict[k] = self.fix_dict[k]\n self.fix_dict = new_dict\n self.preprocess = False", "def set_network(self, addr, netmask, value):\n\n if len(addr) == 4:\n ipset.ipmap_ipv4_set_network(self.map, addr, netmask, value)\n return\n\n elif len(addr) == 16:\n ipset.ipmap_ipv6_set_network(self.map, addr, netmask, value)\n return\n\n else:\n raise ValueError(\"Invalid address\")", "def test_port_update_deferred_allocation(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n port = self._create_deferred_ip_port(network)\n self._validate_deferred_ip_allocation(port['port']['id'])\n\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n # Port update succeeds and allocates a new IP address.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.networks_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def _set_rules_storage(self, gnp_config, network, host):\n\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # add rule to allow DHCP requests (dhcp-offer have src addr == 0.0.0.0)\n rule = self._get_dhcp_rule(host.personality, \"UDP\", ip_version)\n gnp_config[\"spec\"][\"ingress\"].append(rule)", "def test_port_update_with_fixed_ips_fail_if_host_not_on_segment(self):\n network, segments, subnets = self._create_test_segments_with_subnets(2)\n\n # Setup host mappings\n self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost')])\n\n # Create a port and validate immediate ip allocation\n res = self._create_port_and_show(network,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n self._validate_immediate_ip_allocation(res['port']['id'])\n\n # Try requesting an new IP, but the subnet does not match host segment\n port_id = res['port']['id']\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnets[1]['subnet']['id']}]}}\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # Port update fails.\n self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)", "async def 
fix_cache(self, ctx):\n self.initial_config(ctx.message.server.id)\n self.settings[server.id]['usercache'] = []\n self.save_json()", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def test_replace_cluster_network(self):\n pass", "def _validate_update_network(self, context, net_id, original_net,\n net_data):\n extern_net = self._network_is_external(context, net_id)\n with_qos = validators.is_attr_set(\n net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not allow QoS on external networks\n if with_qos:\n if extern_net:\n raise nsx_exc.QoSOnExternalNet()\n self._validate_qos_policy_id(\n context, net_data.get(qos_consts.QOS_POLICY_ID))\n\n # Do not support changing external/non-external networks\n if (extnet_apidef.EXTERNAL in net_data and\n net_data[extnet_apidef.EXTERNAL] != extern_net):\n err_msg = _(\"Cannot change the router:external flag of a network\")\n raise n_exc.InvalidInput(error_message=err_msg)\n\n is_ens_net = self._is_ens_tz_net(context, net_id)\n if is_ens_net:\n self._assert_on_ens_with_qos(net_data)", "def _update_cachesize(self):\n san_res = self.san_interface\n _load = not self.san_interface.runmode\n if self.cachesize > 0:\n pvds = self._get_pvds()\n if len(pvds) < 1:\n # not suppposed to get here\n return (1,'Error no valid provider/path was found when setting cache')\n logger.eventlog.debug('in update cache for %s , cachedrdev: %s' % (str(self),str(self.cachedrdev)))\n # check if this is a single path case or replicated cache (multipath)\n if len(pvds) == 1 and len(self.cachepvds) < 2 and not self.cachedrdev:\n (e,pt) = ext2path(self,san_res.providers[pvds[0]])\n if e:\n return (e,'Error updating cache, '+pt)\n (e,r) = san_res.providers[pvds[0]].add_cache(pt,self.cachesize)\n if e:\n return (e,r)\n else:\n #\n # more than 1 path\n #\n\n # one path with cacheon and is running return ok\n for pt in self.paths():\n if pt.cacheon:\n if pt.state == ObjState.running:\n return (0,'Cache is ok')\n logger.eventlog.warning('cache for %s is ON but path is not running !' 
% str(self))\n\n # no running path with cache on\n self.cachepresent=False\n\n #\n cvolname=obj2volstr(self)\n cvolname=cvolname.replace(':',CACHESEP) # replace ':' with a legal volume char\n drname=CACHEPFX+cvolname\n cache_loadonly=False\n #\n\n # self.cachedrdev ?\n if self.san_interface.raids.has_key(drname):\n # found drbd dev for cache (fail-over or load??):\n # del tgt (old), remove cache (old), promote (new),\n # cache load (new), add targets (new)\n logger.eventlog.warning('Cache for %s is not on, while DR device is detected during update' % str(self))\n drdev = self.san_interface.raids[drname]\n if not drdev:\n logger.eventlog.error('cant update cache dr for %s , drdev not found' % (str(self)))\n return (1,'cant update Cache dr')\n if not drdev.provider:\n drdev.promote_one(checkluns=False)\n if not drdev.provider:\n logger.eventlog.error('cant update cache dr for %s , drdev provider not detected' % (str(self)))\n return (1,'cant update Cache dr')\n # debug\n #logger.eventlog.debug(\"cachepresent: %s\" % str(self.cachepresent))\n #for p in self.paths():\n # if p.provider==drdev.provider:\n # logger.eventlog.debug(\"p: %s\" % str(p))\n # logger.eventlog.debug(\"state: %s\" % str(p.state))\n # logger.eventlog.debug(\"cacheon: %s\" % str(p.cacheon))\n # end debug\n e,prim = ext2path(self,drdev.provider)\n if e:\n logger.eventlog.error('valid path not found for %s on %s in update' % (str(self),str(drdev.provider)))\n return (1,'valid path not found')\n #logger.eventlog.debug(\"prim: %s\" % str(prim))\n cache_loadonly=True\n else:\n if len(self.cachepvds)==1 or len(self.cachepvds)>2:\n # has only 1 cache LV (load, absent?) ?? or >2 (old ones redetected)\n logger.eventlog.error('Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n return (1,'Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n\n if len(self.cachepvds) == 2:\n # if has 2 cache LVs, no DR (load): create drbd, load cache\n (e1,path1) = ext2path(self,san_res.providers[self.cachepvds[0]])\n (e2,path2) = ext2path(self,san_res.providers[self.cachepvds[1]])\n print 'cache paths: ',str(path1),str(path2)\n if e1 or e2:\n logger.eventlog.error('valid paths not found for %s in update' % str(self))\n return (1,'valid path not found')\n vol1 = san_res.providers[self.cachepvds[0]].cachevg.volumes[cvolname]\n vol2 = san_res.providers[self.cachepvds[1]].cachevg.volumes[cvolname]\n cache_loadonly=True\n\n else:\n # else (new) : select 2 paths, create 2 LVs,\n # create & promote DRBD, Create cache on master\n\n e,path1,path2 = self._get_2_pvds_paths()\n if e:\n logger.eventlog.error(path1)\n return (1,path1)\n\n # create 2 cache LVs\n (e,vol1) = path1.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n tmp='cant create Cache LV1 for %s on %s in update: %s' % (self.name,path1.provider.name,vol1)\n logger.eventlog.error(tmp)\n return (1,tmp)\n (e,vol2) = path2.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n vol1.provider.cachevg.volumes.delete(vol1,force=True)\n tmp='cant create Cache LV2 for %s on %s in update: %s' % (self.name,path2.provider.name,vol2)\n logger.eventlog.error(tmp)\n return (1,tmp)\n #\n print 'cache vols: ',str(vol1),str(vol2)\n\n # create new drbd device\n drdev = san_res.raids.add(drname,SanRaidGrp(drname,None))\n if not drdev :\n logger.eventlog.error('failed to create/updare dr device for cache in %s' % str(self))\n return (1,'failed to create/updare dr device')\n drdev.raid=RaidLevel.dr\n drdev.iscachedr=True\n drdev.devices=[vol1,vol2]\n 
(e,txt)=drdev.update()\n print 'create dr device:',e,txt\n if e:\n logger.eventlog.error('cant create Cache dr for %s , %s' % (str(self),txt))\n return (1,'cant create Cache dr')\n if drdev.provider is path1.provider:\n prim=path1\n else:\n prim=path2\n\n logger.eventlog.debug('create cache on %s , loadonly: %s , drname: %s' % \\\n (drdev.provider.name, cache_loadonly, drname))\n #loadonly=(self.cachepvds<>[]) # check if we already had cache LVs\n\n # create CacheDev\n # on loadonly we also forcing devname update\n (e,r) = drdev.provider.create_cache(prim,drdev,cvolname,loadonly=cache_loadonly,force=cache_loadonly)\n logger.eventlog.debug('create cache response: %s %s' % (e,r))\n if e:\n return (e, 'error creating cache on %s: %s' % (drdev.provider.name,r))\n else:\n (e,r) = self._remove_cache()\n if e:\n return (e,'error removing cache on %s: %s' % (str(self),r))\n return (0,'')", "def disable_duplicate_address_detection(self, network):\n # For non-external networks, duplicate address detection isn't\n # necessary (and it sometimes results in race conditions for services\n # that attempt to bind to addresses before they're ready).\n\n if network.network_type != network.TYPE_EXTERNAL:\n real_ifname = self.generic_to_host(network.interface.ifname)\n try:\n utils.execute([\n 'sysctl', '-w', 'net.ipv6.conf.%s.accept_dad=0'\n % real_ifname\n ], self.root_helper)\n except RuntimeError:\n LOG.debug(\n 'Failed to disable v6 dad on %s' % real_ifname\n )", "def filter_update_port_attributes(cls, port, context):\n cls.add_security_groups(port, context)\n try_del(port, ['network_id', 'id', 'status', 'mac_address',\n 'tenant_id', 'fixed_ips'])", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def reset_rpki_rtr_session(self, host, port):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute(\"DELETE FROM cache WHERE host = ? and port = ?\", (host, port))\n self.sql.commit()", "def test_port_update_deferred_allocation_no_segments_empty_alloc(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and update the port but specify no IPs\n with self.subnet(network=network):\n data = {'port': {\n portbindings.HOST_ID: 'fakehost',\n 'fixed_ips': []}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n # Since I specifically requested no IP addresses, I shouldn't get one.\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def _retry_delete_network(self, context, network_id):\n first_try = True\n while True:\n try:\n with db_api.CONTEXT_WRITER.using(context):\n self._process_l3_delete(context, network_id)\n return super(NsxPluginV3Base, self).delete_network(\n context, network_id)\n except n_exc.NetworkInUse:\n # There is a race condition in delete_network() that we need\n # to work around here. delete_network() issues a query to\n # automatically delete DHCP ports and then checks to see if any\n # ports exist on the network. 
If a network is created and\n # deleted quickly, such as when running tempest, the DHCP agent\n # may be creating its port for the network around the same time\n # that the network is deleted. This can result in the DHCP\n # port getting created in between these two queries in\n # delete_network(). To work around that, we'll call\n # delete_network() a second time if we get a NetworkInUse\n # exception but the only port(s) that exist are ones that\n # delete_network() is supposed to automatically delete.\n if not first_try:\n # We tried once to work around the known race condition,\n # but we still got the exception, so something else is\n # wrong that we can't recover from.\n raise\n first_try = False\n if self._has_active_port(context, network_id):\n # There is a port on the network that is not going to be\n # automatically deleted (such as a tenant created port), so\n # we have nothing else to do but raise the exception.\n raise", "def fetch(self) -> None:\n self.__networks__.clear()\n networks = process_google_rr_ranges(self.__address_list_record__, self.loader_class)\n for network in networks:\n self.__networks__.append(network)\n self.updated = datetime.now()\n self.__networks__.sort(key=attrgetter('version', 'cidr'))", "def alter_network(self, add=[], remove=[]):\n\n # make the required changes\n # NOTE: remove existing edges *before* adding new ones. \n # if edge e is in `add`, `remove` and `self.network`, \n # it should exist in the new network. (the add and remove cancel out.\n self.network.edges.remove_many(remove)\n self.network.edges.add_many(add) \n\n # check whether changes lead to valid DAG (raise error if they don't)\n affected_nodes = set(unzip(add, 1))\n if affected_nodes and not self.network.is_acyclic(affected_nodes):\n self.network.edges.remove_many(add)\n self.network.edges.add_many(remove)\n raise CyclicNetworkError()\n \n \n # accept changes: \n # 1) determine dirtynodes\n # 2) backup state\n # 3) score network (but only rescore dirtynodes)\n self.dirtynodes.update(set(unzip(add+remove, 1)))\n self._backup_state(add, remove)\n self.score = self._score_network_core()\n #print\"calculated score = \" + str(self.score)\n return self.score", "def test_port_update_deferred_allocation_no_segments_manual_alloc(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network) as subnet:\n data = {'port': {\n portbindings.HOST_ID: 'fakehost',\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])\n\n # Do a show to be sure that only one IP is recorded\n port_req = self.new_show_request('ports', port_id)\n response = port_req.get_response(self.api)\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}", "def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n 
self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass", "def _update_dnsmasq(self, network_id):\n\n # Check whether we should really do the following processing.\n if self.suppress_dnsmasq_updates:\n LOG.debug(\"Don't update dnsmasq yet;\"\n \" must be processing a snapshot\")\n self.dirty_networks.add(network_id)\n return\n\n self.dnsmasq_updater.update_network(network_id)", "def check_cacheable_attr___fix():\n from stalker import Asset\n from anima.dcc import mayaEnv\n\n cacheable_nodes = auxiliary.get_cacheable_nodes()\n cacheable_node = None\n\n if len(cacheable_nodes) == 0:\n # no cacheable node create cacheable attribute on the root node\n from anima.dcc.mayaEnv import rigging\n\n # assumes there is only one root node\n root_node = auxiliary.get_root_nodes()[0]\n rigging.Rigging.add_cacheable_attribute(root_node)\n cacheable_node = root_node\n elif len(cacheable_nodes) > 1:\n # there is more than one cacheable node,\n # keep the highest in the hierarchy\n # delete the other\n lowest_rank = 1e4\n lowest_rank_node = None\n for cacheable_node in cacheable_nodes:\n cacheable_node_rank = len(cacheable_node.getAllParents())\n if cacheable_node_rank < lowest_rank:\n lowest_rank_node = cacheable_node\n lowest_rank = cacheable_node_rank\n # now remove the cacheable attr on the lowest hierarchy items\n for cacheable_node in cacheable_nodes:\n if cacheable_node != lowest_rank_node:\n cacheable_node.deleteAttr(\"cacheable\")\n cacheable_node = lowest_rank_node\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n t = v.task\n asset_code = None\n if isinstance(t.parent, Asset):\n asset_code = t.parent.code\n\n if asset_code:\n if cacheable_node.name().lower().startswith(asset_code.lower()):\n cacheable_attr_value = cacheable_node.name()\n else:\n cacheable_attr_value = asset_code\n else:\n if cacheable_node.isReferenced() is True:\n cacheable_attr_value = str(cacheable_node.stripNamespace())\n else:\n cacheable_attr_value = cacheable_node.name()\n\n cacheable_node.setAttr(\"cacheable\", cacheable_attr_value.lower())", "def update_servers(self, req, closest_serv):\n\n\t\t# Request is a put --> Key, value insert in the cache of the closest server\n\t\tif req.type == 0:\n\t\t\t# Iterate through all servers\n\t\t\tfor serv in self.servers:\n\t\t\t\tif serv.ip != closest_serv.ip: # If Server has not been updated\n\t\t\t\t\tserv.cache.put(req.key, req.value) # Update server cache\n\n\t\t# Request is a get --> Value retrived from the cache of the closest server\n\t\telif req.type == 1:\n\t\t\t# Iterate through all servers\n\t\t\tfor serv in self.servers:\n\t\t\t\tif serv.ip != closest_serv.ip: # If server has not been updated\n\t\t\t\t\tserv.cache.get(req.key)", "def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()", "def test_port_update_fails_if_host_on_wrong_segment(self):\n network, segments, subnets = self._create_test_segments_with_subnets(2)\n\n self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost2'),\n (segments[1]['segment']['id'], 'fakehost')])\n\n # Create a bound port with an IP address\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n 
self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])\n port = self.deserialize(self.fmt, response)\n\n # Now, try to update binding to a host on the other segment\n data = {'port': {portbindings.HOST_ID: 'fakehost2'}}\n port_req = self.new_update_request('ports', data, port['port']['id'],\n as_admin=True)\n response = port_req.get_response(self.api)\n\n # It fails since the IP address isn't compatible with the new segment\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)", "def remove_network_adapter(self, network_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network_obj\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "async def _send_network_configuration_to_dynamic_sidecar(\n scheduler: DynamicSidecarsScheduler,\n project_id: ProjectID,\n new_networks_with_aliases: NetworksWithAliases,\n existing_networks_with_aliases: NetworksWithAliases,\n) -> None:\n\n # REMOVING\n to_remove_items: set[_ToRemove] = set()\n\n # if network no longer exist remove it from all nodes\n for new_network_name, node_ids_and_aliases in new_networks_with_aliases.items():\n if new_network_name not in existing_networks_with_aliases:\n for node_id in node_ids_and_aliases:\n to_remove_items.add(\n _ToRemove(\n project_id=project_id,\n node_id=node_id,\n network_name=new_network_name,\n )\n )\n # if node does not exist for the network, remove it\n # if alias is different remove the network\n for new_network_name, node_ids_and_aliases in new_networks_with_aliases.items():\n existing_node_ids_and_aliases = existing_networks_with_aliases.get(\n new_network_name, {}\n )\n for node_id, alias in node_ids_and_aliases.items():\n # node does not exist\n if node_id not in existing_node_ids_and_aliases:\n to_remove_items.add(\n _ToRemove(\n project_id=project_id,\n node_id=node_id,\n network_name=new_network_name,\n )\n )\n else:\n existing_alias = existing_networks_with_aliases[new_network_name][\n node_id\n ]\n # alias is different\n if existing_alias != alias:\n to_remove_items.add(\n _ToRemove(\n project_id=project_id,\n node_id=node_id,\n network_name=new_network_name,\n )\n )\n\n await logged_gather(\n *[\n scheduler.detach_project_network(\n node_id=UUID(to_remove.node_id),\n project_network=to_remove.network_name,\n )\n for to_remove in to_remove_items\n ]\n )\n\n # ADDING\n to_add_items: set[_ToAdd] = set()\n # all aliases which are different or missing should be added\n for new_network_name, node_ids_and_aliases in new_networks_with_aliases.items():\n existing_node_ids_and_aliases = existing_networks_with_aliases.get(\n new_network_name, {}\n )\n for node_id, alias in node_ids_and_aliases.items():\n existing_alias = existing_node_ids_and_aliases.get(node_id)\n if alias != existing_alias:\n to_add_items.add(\n _ToAdd(\n project_id=project_id,\n node_id=node_id,\n network_name=new_network_name,\n network_alias=alias,\n )\n )\n\n await logged_gather(\n *[\n scheduler.attach_project_network(\n node_id=UUID(to_add.node_id),\n project_network=to_add.network_name,\n network_alias=to_add.network_alias,\n )\n for to_add in to_add_items\n ]\n )", "def reset(self):\n print('Network reset to its original copy')\n self.net = self.copy.copy()\n self.current_threshold = None\n self.method = None", "def test_patch_cluster_network(self):\n pass", "def unset_ip_routing(self):\n os_type = os.getenv('server_os_type', 
None)\n if self.remote is not True and os_type not in ['Linux']:\n return\n self.log_output('Unsetting IP forwarding and iptables rules on {} host'.format(\n os_type))\n\n command = (\n \"echo '{0}' | sudo -S iptables -F && \"\n \"echo '{0}' | sudo -S iptables -X && \"\n \"echo '{0}' | sudo -S iptables -t nat -F && \"\n \"echo '{0}' | sudo -S iptables -t nat -X && \"\n \"echo '{0}' | sudo -S iptables -t mangle -F && \"\n \"echo '{0}' | sudo -S iptables -t mangle -X && \"\n \"echo '{0}' | sudo -S iptables -P INPUT ACCEPT && \"\n \"echo '{0}' | sudo -S iptables -P FORWARD ACCEPT && \"\n \"echo '{0}' | sudo -S iptables -P OUTPUT ACCEPT && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv4.ip_forward=0 && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv6.conf.all.forwarding=0 && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv4.conf.all.send_redirects=1\"\n )\n self.run_command(command.format(self.ssh_password))", "def _invalidate_http_cache(self):\n self._requests_cache = {}", "def _update_flip(context, flip_id, ip_type, requested_ports):\n # This list will hold flips that require notifications.\n # Using sets to avoid dups, if any.\n notifications = {\n 'ip.associate': set(),\n 'ip.disassociate': set()\n }\n\n context.session.begin()\n try:\n flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE)\n if not flip:\n if ip_type == ip_types.SCALING:\n raise q_exc.ScalingIpNotFound(id=flip_id)\n raise q_exc.FloatingIpNotFound(id=flip_id)\n current_ports = flip.ports\n\n # Determine what ports are being removed, being added, and remain\n req_port_ids = [request_port.get('port_id')\n for request_port in requested_ports]\n curr_port_ids = [curr_port.id for curr_port in current_ports]\n added_port_ids = [port_id for port_id in req_port_ids\n if port_id and port_id not in curr_port_ids]\n removed_port_ids = [port_id for port_id in curr_port_ids\n if port_id not in req_port_ids]\n remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)\n\n # Validations just for floating ip types\n if (ip_type == ip_types.FLOATING and curr_port_ids and\n curr_port_ids == req_port_ids):\n d = dict(flip_id=flip_id, port_id=curr_port_ids[0])\n raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)\n if (ip_type == ip_types.FLOATING and\n not curr_port_ids and not req_port_ids):\n raise q_exc.FloatingIpUpdateNoPortIdSupplied()\n\n port_fixed_ips = {}\n\n # Keep the ports and fixed ips that have not changed\n for port_id in remaining_port_ids:\n port = db_api.port_find(context, id=port_id, scope=db_api.ONE)\n fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)\n port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}\n\n # Disassociate the ports and fixed ips from the flip that were\n # associated to the flip but are not anymore\n for port_id in removed_port_ids:\n port = db_api.port_find(context, id=port_id, scope=db_api.ONE)\n flip = db_api.port_disassociate_ip(context, [port], flip)\n notifications['ip.disassociate'].add(flip)\n fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)\n if fixed_ip:\n flip = db_api.floating_ip_disassociate_fixed_ip(\n context, flip, fixed_ip)\n\n # Validate the new ports with the flip and associate the new ports\n # and fixed ips with the flip\n for port_id in added_port_ids:\n port = db_api.port_find(context, id=port_id, scope=db_api.ONE)\n if not port:\n raise n_exc.PortNotFound(port_id=port_id)\n if any(ip for ip in port.ip_addresses\n if (ip.get('address_type') == ip_types.FLOATING)):\n raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)\n if any(ip for ip in 
port.ip_addresses\n if (ip.get('address_type') == ip_types.SCALING)):\n raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)\n fixed_ip = _get_next_available_fixed_ip(port)\n LOG.info('new fixed ip: %s' % fixed_ip)\n if not fixed_ip:\n raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)\n port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}\n flip = db_api.port_associate_ip(context, [port], flip, [port_id])\n notifications['ip.associate'].add(flip)\n flip = db_api.floating_ip_associate_fixed_ip(context, flip,\n fixed_ip)\n\n flip_driver = registry.DRIVER_REGISTRY.get_driver()\n # If there are not any remaining ports and no new ones are being added,\n # remove the floating ip from unicorn\n if not remaining_port_ids and not added_port_ids:\n flip_driver.remove_floating_ip(flip)\n # If new ports are being added but there previously was not any ports,\n # then register a new floating ip with the driver because it is\n # assumed it does not exist\n elif added_port_ids and not curr_port_ids:\n flip_driver.register_floating_ip(flip, port_fixed_ips)\n else:\n flip_driver.update_floating_ip(flip, port_fixed_ips)\n context.session.commit()\n except Exception:\n context.session.rollback()\n raise\n\n # Send notifications for possible associate/disassociate events\n for notif_type, flip_set in notifications.iteritems():\n for flip in flip_set:\n billing.notify(context, notif_type, flip)\n\n # NOTE(blogan): ORM does not seem to update the model to the real state\n # of the database, so I'm doing an explicit refresh for now.\n context.session.refresh(flip)\n return flip", "def set(self, key, value):\n _log.debug(\"setting '%s' = '%s' on network\" % (key, value))\n dkey = digest(key)\n node = Node(dkey)\n\n def store(nodes):\n _log.debug(\"setting '%s' to %s on %s\" % (key, value, map(str, nodes)))\n # if this node is close too, then store here as well\n if (not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]) or\n dkey in self.storage):\n _log.debug(\"setting '%s' to %s locally\" % (key, value))\n self.storage[dkey] = value\n ds = [self.protocol.callStore(n, dkey, value) for n in nodes]\n return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n _log.warning(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find().addCallback(store)", "def randomize(self):\n for network in self.networks.values():\n network.database = []\n self.env = Environment(self.networks)", "def update_net(self) -> None:\n self.units.update_net()", "def _invalidate_branch_cache(self):\n self._cached_overlapping_branch_list = None", "def setNetwork(self, network):\n # type: (str)->None\n\n self._validator.validate_one(\n 'network', VALID_OPTS['network'], network)\n self._ifAttributes['network'] = network", "def set(self, key, value):\n self.log.debug(\"setting '%s' = '%s' on network\" % (key, value))\n dkey = digest(key)\n node = Node(dkey)\n\n def store(nodes):\n self.log.info(\"setting '%s' on %s\" % (key, list(map(str, nodes))))\n # if this node is close too, then store here as well\n if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):\n self.storage[dkey] = value\n ds = [self.protocol.callStore(n, dkey, value) for n in nodes]\n d = defer.DeferredList(ds)\n d.addCallback(self._anyRespondSuccess)\n d.addErrback(self.onError)\n return d\n\n 
nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n d = spider.find()\n d.addCallback(store)\n d.addErrback(self.onError)\n return d", "def replace(self, other_cache):\n # This is used in avant-idle to replace the content of the cache\n # in a process (where no storage normally takes place) by\n # that of another where the actual caching of the source is done.\n self.cache.clear()\n for key in other_cache:\n self.add(key, other_cache[key])", "def _set_rules_mgmt(self, gnp_config, network, host):\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # add rule to allow DHCP requests (dhcp-offer have src addr == 0.0.0.0)\n # worker/storage nodes request IP dynamically\n rule = self._get_dhcp_rule(host.personality, \"UDP\", ip_version)\n gnp_config[\"spec\"][\"ingress\"].append(rule)\n\n # copy the TCP rule and do the same for IGMP\n igmp_proto = 2\n igmp_egr_rule = copy.deepcopy(gnp_config[\"spec\"][\"egress\"][0])\n igmp_egr_rule[\"protocol\"] = igmp_proto\n igmp_egr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-egr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"egress\"].append(igmp_egr_rule)\n igmp_ingr_rule = copy.deepcopy(gnp_config[\"spec\"][\"ingress\"][0])\n igmp_ingr_rule[\"protocol\"] = igmp_proto\n igmp_ingr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-ingr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"ingress\"].append(igmp_ingr_rule)", "def truncate_cache(self, cache):\n if cache[0] is not None:\n for lth in range(self.n_layers):\n cache_size = self.cache_sizes[lth]\n if cache[lth]['input_san'].size(1) > cache_size:\n cache[lth]['input_san'] = cache[lth]['input_san'][:, -cache_size:]\n return cache", "def delete_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.delete_network(network)\n except:\n pass", "def deinstallnetifs(self):\n for n in sorted(self._objs.keys()):\n emanenode = self._objs[n]\n emanenode.deinstallnetifs()", "def update_port_precommit(self, port_context):\n # TODO(ijw): optimisation: the update port may leave the\n # binding state the same as before if someone updated\n # something other than the binding on the port, but this\n # way we always send it out and it's the far end's job to\n # ignore it. 
Doing less work is nevertheless good, so we\n # should in future avoid the send.\n\n # unbind port from old host, if already bound\n if port_context.original_binding_levels is not None:\n prev_bind = port_context.original_binding_levels[-1]\n\n if (prev_bind is not None and\n prev_bind.get(api.BOUND_DRIVER) == self.MECH_NAME and\n port_context.host != port_context.original_host):\n\n # Note that we skip this step if the change happens while\n # 'unbinding' and rebinding to the same host - it's probably\n # an update of extraneous detail and not really a request\n # that requires binding.\n\n self.communicator.unbind(port_context._plugin_context.session,\n port_context.original,\n port_context.original_host,\n prev_bind[api.BOUND_SEGMENT]\n )\n\n # (Re)bind port to the new host, if it needs to be bound\n if port_context.binding_levels is not None:\n current_bind = port_context.binding_levels[-1]\n\n if (current_bind is not None and\n current_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n\n binding_type = self.get_vif_type(port_context)\n # Remove port membership from any previously associated\n # security groups for updating remote_security_group_id ACLs\n self.communicator.unbind_port_from_remote_groups(\n port_context._plugin_context.session,\n port_context.original,\n port_context.current)\n\n self.communicator.bind(port_context._plugin_context.session,\n port_context.current,\n current_bind[api.BOUND_SEGMENT],\n port_context.host,\n binding_type)\n\n # TODO(ijW): The agent driver checks for a change of\n # host, but we're oddly seeing that the orig_host is\n # always set. Should confirm if this is a problem or\n # not.\n self._insert_provisioning_block(port_context)", "def cleanup_networks(self):\n for network in self.networks:\n try:\n network.remove()\n network.client.api.close()\n network.client.close()\n self.log_message(\n f'{dateutils.get_current_time()} '\n f'destroying docker network {network}'\n )\n except Exception:\n self.log_message(\n f'{dateutils.get_current_time()} ERROR: Could not remove docker '\n f'network {network}'\n )\n self.networks.clear()", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def _pre_snapshot_hook(self):\n\n # Add all current networks to the dirty set, so that we will stop their\n # Dnsmasqs if no longer needed. 
Also remove all port and subnet\n # information.\n LOG.debug(\"Reset cache for new snapshot\")\n for network_id in list(self.agent.cache.get_network_ids()):\n self.dirty_networks.add(network_id)\n _fix_network_cache_port_lookup(self.agent, network_id)\n self.agent.cache.put(empty_network(network_id))\n\n # Suppress Dnsmasq updates until we've processed the whole snapshot.\n self.suppress_dnsmasq_updates = True\n return None", "def disable_dhcp_helper(self, network_id):\n network = self.cache.get_network_by_id(network_id)\n if network:\n if self.call_driver('disable', network):\n self.cache.remove(network)", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.net_interfaces_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def test_port_update_deferred_allocation_binding_info_and_new_mac(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n port = self._create_deferred_ip_port(network)\n self._validate_deferred_ip_allocation(port['port']['id'])\n\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {portbindings.HOST_ID: 'fakehost',\n port_apidef.PORT_MAC_ADDRESS: '00:00:00:00:00:01'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n # Port update succeeds and allocates a new IP address.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def test_rebuild_with_wrong_shared_storage(self, mock_remove_allocs):\n with mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True) as mock_inst:\n self.assertRaises(exception.InvalidSharedStorage,\n lambda: self._rebuild(on_shared_storage=False))\n\n # Should remain on original host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], 
'fake_host_2')\n self.assertTrue(mock_inst.called)\n mock_remove_allocs.assert_called_once_with(\n mock.ANY, instance.uuid, self.rt.compute_nodes[NODENAME].uuid)", "def set_cache(self, cached):\n for i in range(self.num_layers):\n if hasattr(getattr(self, \"conv{}\".format(i+1)), \"set_cache\"):\n getattr(self, \"conv{}\".format(i+1)).set_cache(cached)" ]
[ "0.6243899", "0.5750837", "0.55582297", "0.5493871", "0.54649335", "0.53545606", "0.5274179", "0.52181244", "0.51568764", "0.51559347", "0.5134361", "0.51047474", "0.5044345", "0.5022658", "0.50088584", "0.50081223", "0.50078607", "0.49672914", "0.4941016", "0.49056178", "0.49003807", "0.48882106", "0.48862943", "0.48803425", "0.48673484", "0.48665342", "0.4862084", "0.48397473", "0.48308828", "0.4825883", "0.48119712", "0.48082116", "0.4807649", "0.47968215", "0.47879475", "0.47790226", "0.4770138", "0.47670585", "0.47580492", "0.47522336", "0.47302306", "0.47266653", "0.47158897", "0.47151047", "0.46987805", "0.4691087", "0.46896812", "0.4678337", "0.46694353", "0.4663509", "0.465867", "0.4651559", "0.46349278", "0.46342155", "0.46338078", "0.46230018", "0.46104252", "0.4608093", "0.46052969", "0.46033546", "0.46019205", "0.45996097", "0.4595952", "0.4585996", "0.45705616", "0.4569351", "0.45681807", "0.45605645", "0.4559627", "0.45541972", "0.45494112", "0.45491654", "0.454764", "0.4546074", "0.45441684", "0.4535711", "0.45319808", "0.45209056", "0.45165712", "0.4516129", "0.45000896", "0.44993523", "0.44937494", "0.44889453", "0.4477736", "0.4472863", "0.44679645", "0.44670382", "0.44644356", "0.44578633", "0.44574726", "0.44527853", "0.44516492", "0.44486627", "0.4444513", "0.44363606", "0.44335753", "0.44313097", "0.44309035", "0.44242752" ]
0.7541457
0
Handler for subnet creations and updates.
def on_subnet_set(self, response, subnet_id):
    LOG.debug("Subnet %s created or updated", subnet_id)
    subnet_data = etcdutils.safe_decode_json(response.value, 'subnet')
    if subnet_data is None:
        LOG.warning("Invalid subnet data %s", response.value)
        return
    if not (isinstance(subnet_data, dict) and
            'cidr' in subnet_data and
            'gateway_ip' in subnet_data):
        LOG.warning("Invalid subnet data: %s", subnet_data)
        return
    self.subnets_by_id[subnet_id] = subnet_data
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subnet_create_event(self, subnet_info):\n\n subnet = subnet_info.get('subnet')\n if subnet:\n self.create_subnet(subnet)\n else:\n # Check whether request is for subnets.\n subnets = subnet_info.get('subnets')\n if subnets:\n for subnet in subnets:\n self.create_subnet(subnet)", "def post_subnet_create(self, resource_dict):\n pass", "def create_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n subnet = self._get_subnet_info(context._subnet)\n if subnet is not None:\n try:\n self.rpc_handler.create_subnet(subnet)\n except:\n pass", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def create_subnet_postcommit(self, mech_context):\n LOG.debug(\"create_subnetwork_postcommit: called\")", "def pre_subnet_create(self, resource_dict):\n pass", "def post_subnet_update(self, resource_id, resource_dict):\n pass", "def update_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n subnet = self._get_subnet_info(context._subnet)\n if subnet is not None:\n try:\n self.rpc_handler.update_subnet(subnet)\n except:\n pass", "def create_subnet(self, body=None):\r\n return self.post(self.subnets_path, body=body)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.delete_subnet(subnet[\"id\"])", "def post_save_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.add_or_update_entry(subnet=str(instance.ip_network), net_name=instance.name)", "def run(self, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.get_subnet(subnet[\"id\"])", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()", "def create_subnet(self, network_name, subnet_name, cidr):\n _net_id = self.get_net_id(network_name)\n if not isinstance(_net_id, unicode):\n return\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _subnet_info = {\"subnet\":\n {\"ip_version\": 4,\n \"network_id\": _net_id,\n \"cidr\": cidr,\n \"name\": subnet_name}}\n\n _body = json.dumps(_subnet_info)\n\n LOG_OBJ.debug(\"Creating subnet in network %s 
of tenant %s.\"\n % (_net_id, self.project_info[\"project_id\"]))\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating subnet\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Creation of subnet Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Subnet details : %s \" % output['subnet'])\n return output['subnet']['id']", "def update_subnet_postcommit(self, mech_context):\n LOG.debug(\"update_subnet_postcommit: called\")", "def test_create_subnet(self):\n client_token = generate_client_token()\n subnet_name = 'test_subnet_name1' + client_token\n subnet_cidr = '192.168.0.64/26'\n self.assertEqual(\n type(self.the_client.create_subnet(subnet_name,\n 'cn-bj-a',\n subnet_cidr,\n vpc_id,\n client_token=client_token)),\n baidubce.bce_response.BceResponse)", "def subnet_create(request, network_id, **kwargs):\n LOG.debug(\"subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s\",\n {'network_id': network_id, 'kwargs': kwargs})\n body = {'subnet': {'network_id': network_id}}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnet'].update(kwargs)\n subnet = neutronclient(request).create_subnet(body=body).get('subnet')\n return Subnet(subnet)", "def subnet_update_end(self, payload):\n network_id = payload['subnet']['network_id']\n self.refresh_dhcp_helper(network_id)", "def pre_subnet_update(self, resource_id, resource_dict):\n pass", "def test_create_host_subnet(self):\n pass", "def post_subnet_read(self, resource_id, resource_dict):\n pass", "def create_subnet_precommit(self, mech_context):\n LOG.debug(\"create_subnetwork_precommit: called\")", "def filter_create_subnet_attributes(subnet, context):\n pass", "def create_namespaced_host_subnet(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def fusion_api_create_ipv4_subnet(self, body, sessionID=None, api=None, 
headers=None):\n return self.ipv4subnet.create(body, sessionID, api, headers)", "def _get_subnet_info(self, context):\n\n subnet = {}\n data = {}\n subnet_id = str(context.get('id', ''))\n data['subnet_id'] = subnet_id\n data['subnet_name'] = str(context.get('name', ''))\n data['tenant_id'] = str(context.get('tenant_id', ''))\n data['network_id'] = str(context.get('network_id', ''))\n data['ip_version'] = str(context.get('ip_version', ''))\n data['gateway_ip'] = str(context.get('gateway_ip', ''))\n ip_mask = str(context.get('cidr', ''))\n data['enable_dhcp'] = context.get('enable_dhcp', '')\n data['shared'] = context.get('shared', '')\n if subnet_id == '':\n LOG.error(_('Get creating subnet information failed'))\n return None\n data['network'], data['network_mask'] = ip_mask.split('/')\n\n context_str = json.dumps(data, sort_keys=True)\n data['md5sum'] = hashlib.md5(context_str).hexdigest()\n\n data['field_not_in_md5'] = ['md5sum']\n\n if subnet_id != '':\n subnet[subnet_id] = data\n return subnet", "def add_subnets(self, router_name, netname):\n for subnet in self.router_data['properties']['networks'].keys():\n resource = str(router_name + '_' + subnet)\n subnet_resource = OrderedDict({ \n resource: {\n 'type': 'OS::Neutron::Subnet',\n 'properties': {\n 'name': resource,\n 'network_id': { \n 'get_resource': netname, \n },\n 'cidr': { \n 'get_param': resource + '_net_cidr'\n },\n 'gateway_ip': { \n 'get_param': resource + '_net_gateway'\n },\n 'allocation_pools': [{\n 'start': { 'get_param': resource + '_net_pool_start' },\n 'end': { 'get_param': resource + '_net_pool_end' }\n }],\n }\n }\n })\n self.template['resources'].update(subnet_resource)\n cidr = self.set_cidr(subnet)\n gw = self.set_gatewayIP(subnet, cidr)\n self.template['parameters'].update(OrderedDict({\n resource + '_net_cidr': {\n 'type': 'string',\n 'default': cidr\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_gateway': {\n 'type': 'string',\n 'default': gw\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_pool_start': {\n 'type': 'string',\n 'default': self.set_dhcp_pools(cidr)[0]\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_pool_end': {\n 'type': 'string',\n 'default': self.set_dhcp_pools(cidr)[1]\n }}))", "def create(self, name, network_id, tenant_id, gateway_ip, cidr, \n allocation_pools=None, enable_dhcp=True, host_routes=None,\n dns_nameservers=['8.8.8.7', '8.8.8.8']):\n data = {\n \"subnet\": {\n \"name\": name,\n \"network_id\": network_id,\n \"tenant_id\": tenant_id,\n \"ip_version\": 4,\n \"cidr\": cidr,\n \"gateway_ip\": gateway_ip,\n }\n }\n if allocation_pools is not None:\n data['subnet']['allocation_pools'] = allocation_pools\n if host_routes is not None:\n data['subnet']['host_routes'] = host_routes\n if enable_dhcp is not None:\n data['subnet']['enable_dhcp'] = enable_dhcp\n if dns_nameservers is not None:\n data['subnet']['dns_nameservers'] = dns_nameservers\n\n path = '%s/subnets' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack subnet: %s' % truncate(res))\n return res[0]['subnet']", "def _precreate_network(self):\n # check cidr format\n net_cidr = CONF.azure.vnet_cidr\n subnet_cidr = CONF.azure.vsubnet_cidr\n if not (self._is_valid_cidr(net_cidr) and\n self._is_valid_cidr(subnet_cidr)):\n msg = 'Invalid network: %(net_cidr)s/subnet: %(subnet_cidr)s' \\\n ' CIDR' % dict(net_cidr=net_cidr, subnet_cidr=subnet_cidr)\n 
LOG.error(msg)\n raise exception.NetworkCreateFailure(reason=msg)\n # Creaet Network\n try:\n nets = self.network.virtual_networks.list(\n CONF.azure.resource_group)\n net_exist = False\n for i in nets:\n if i.name == CONF.azure.vnet_name:\n net_exist = True\n break\n if not net_exist:\n network_info = dict(location=CONF.azure.location,\n address_space=dict(\n address_prefixes=[net_cidr]))\n async_vnet_creation = \\\n self.network.virtual_networks.create_or_update(\n CONF.azure.resource_group,\n CONF.azure.vnet_name,\n network_info)\n async_vnet_creation.wait(CONF.azure.async_timeout)\n LOG.info(_LI(\"Create Network\"))\n except Exception as e:\n msg = six.text_type(e)\n ex = exception.NetworkCreateFailure(reason=msg)\n LOG.exception(msg)\n raise ex\n\n # Create Subnet\n try:\n # subnet can't recreate, check existing before create.\n subnets = self.network.subnets.list(\n CONF.azure.resource_group,\n CONF.azure.vnet_name)\n subnet_exist = False\n subnet_details = None\n for i in subnets:\n if i.name == CONF.azure.vsubnet_name:\n subnet_exist = True\n subnet_details = i\n break\n if not subnet_exist:\n subnet_info = {'address_prefix': subnet_cidr}\n async_subnet_creation = self.network.subnets.create_or_update(\n CONF.azure.resource_group,\n CONF.azure.vnet_name,\n CONF.azure.vsubnet_name,\n subnet_info\n )\n subnet_details = async_subnet_creation.result()\n except Exception as e:\n # delete network if subnet create fail.\n try:\n async_vm_action = self.network.virtual_networks.delete(\n CONF.azure.resource_group, CONF.azure.vnet_name)\n async_vm_action.wait(CONF.azure.async_timeout)\n LOG.info(_LI(\"Deleted Network %s after Subnet create \"\n \"failed.\"), CONF.azure.vnet_name)\n except Exception:\n LOG.error(_LE('Delete Network %s failed after Subnet create '\n 'failed.'), CONF.azure.vnet_name)\n msg = six.text_type(e)\n ex = exception.SubnetCreateFailure(reason=msg)\n LOG.exception(msg)\n raise ex\n CONF.set_override('vsubnet_id', subnet_details.id, 'azure')\n LOG.info(_LI(\"Create/Update Subnet: %s\"), CONF.azure.vsubnet_id)", "def create_subnet(\n self,\n network_name_or_id,\n cidr=None,\n ip_version=4,\n enable_dhcp=False,\n subnet_name=None,\n tenant_id=None,\n allocation_pools=None,\n gateway_ip=None,\n disable_gateway_ip=False,\n dns_nameservers=None,\n host_routes=None,\n ipv6_ra_mode=None,\n ipv6_address_mode=None,\n prefixlen=None,\n use_default_subnetpool=False,\n **kwargs,\n ):\n\n if tenant_id is not None:\n filters = {'tenant_id': tenant_id}\n else:\n filters = None\n\n network = self.get_network(network_name_or_id, filters)\n if not network:\n raise exc.OpenStackCloudException(\n \"Network %s not found.\" % network_name_or_id\n )\n\n if disable_gateway_ip and gateway_ip:\n raise exc.OpenStackCloudException(\n 'arg:disable_gateway_ip is not allowed with arg:gateway_ip'\n )\n\n if not cidr and not use_default_subnetpool:\n raise exc.OpenStackCloudException(\n 'arg:cidr is required when a subnetpool is not used'\n )\n\n if cidr and use_default_subnetpool:\n raise exc.OpenStackCloudException(\n 'arg:cidr must be set to None when use_default_subnetpool == '\n 'True'\n )\n\n # Be friendly on ip_version and allow strings\n if isinstance(ip_version, str):\n try:\n ip_version = int(ip_version)\n except ValueError:\n raise exc.OpenStackCloudException(\n 'ip_version must be an integer'\n )\n\n # The body of the neutron message for the subnet we wish to create.\n # This includes attributes that are required or have defaults.\n subnet = dict(\n {\n 'network_id': network['id'],\n 
'ip_version': ip_version,\n 'enable_dhcp': enable_dhcp,\n },\n **kwargs,\n )\n\n # Add optional attributes to the message.\n if cidr:\n subnet['cidr'] = cidr\n if subnet_name:\n subnet['name'] = subnet_name\n if tenant_id:\n subnet['tenant_id'] = tenant_id\n if allocation_pools:\n subnet['allocation_pools'] = allocation_pools\n if gateway_ip:\n subnet['gateway_ip'] = gateway_ip\n if disable_gateway_ip:\n subnet['gateway_ip'] = None\n if dns_nameservers:\n subnet['dns_nameservers'] = dns_nameservers\n if host_routes:\n subnet['host_routes'] = host_routes\n if ipv6_ra_mode:\n subnet['ipv6_ra_mode'] = ipv6_ra_mode\n if ipv6_address_mode:\n subnet['ipv6_address_mode'] = ipv6_address_mode\n if prefixlen:\n subnet['prefixlen'] = prefixlen\n if use_default_subnetpool:\n subnet['use_default_subnetpool'] = True\n\n return self.network.create_subnet(**subnet)", "def update(self, oid, name=None, network_id=None, tenant_id=None, \n gateway_ip=None, cidr=None, allocation_pools=None, \n enable_dhcp=None, host_routes=None, dns_nameservers=None):\n data = {\n \"subnet\": {\n }\n }\n \n if network_id is not None:\n data['subnet']['network_id'] = network_id\n if tenant_id is not None:\n data['subnet']['tenant_id'] = tenant_id\n if cidr is not None:\n data['subnet']['cidr'] = cidr\n if gateway_ip is not None:\n data['subnet']['gateway_ip'] = gateway_ip\n if name is not None:\n data['subnet']['name'] = name\n if allocation_pools is not None:\n data['subnet']['allocation_pools'] = allocation_pools\n if host_routes is not None:\n data['subnet']['host_routes'] = host_routes\n if enable_dhcp is not None:\n data['subnet']['enable_dhcp'] = enable_dhcp\n if dns_nameservers is not None:\n data['subnet']['dns_nameservers'] = dns_nameservers \n \n path = '%s/subnets/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack subnet: %s' % truncate(res))\n return res[0]['subnet']", "def add_to_subnet(self, subnet_properties: 'SubnetAffiliation'):\n self.subnets.append(subnet_properties)", "def post_subnet_delete(self, resource_id, resource_dict):\n pass", "def create_subnet ( vpc_conn,\n ec2_conn,\n vpc_id,\n subnet_cidr,\n zone_name,\n subnet_basename ) :\n subnet = vpc_conn.create_subnet( vpc_id, subnet_cidr, zone_name )\n aws_cmd( ec2_conn.create_tags, [ subnet.id,\n { \"Name\": subnet_basename + \"-\" + zone_name[-1].upper( ) + \"-Subnet\" } ] )\n return subnet", "def replace_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = 
params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def delete_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n try:\n self.rpc_handler.delete_subnet({str(context._subnet.get('id', '')): {}})\n except:\n pass", "def test_update_subnet(self):\n self.assertEqual(\n type(self.the_client.update_subnet(subnet_id, 'test_update_name1',\n 'test_update_description1')),\n baidubce.bce_response.BceResponse)", "def update_subnet(\n self,\n name_or_id,\n subnet_name=None,\n enable_dhcp=None,\n gateway_ip=None,\n disable_gateway_ip=None,\n allocation_pools=None,\n dns_nameservers=None,\n host_routes=None,\n ):\n subnet = {}\n if subnet_name:\n subnet['name'] = subnet_name\n if enable_dhcp is not None:\n subnet['enable_dhcp'] = enable_dhcp\n if gateway_ip:\n subnet['gateway_ip'] = gateway_ip\n if disable_gateway_ip:\n subnet['gateway_ip'] = None\n if allocation_pools:\n subnet['allocation_pools'] = allocation_pools\n if dns_nameservers:\n subnet['dns_nameservers'] = dns_nameservers\n if host_routes:\n subnet['host_routes'] = host_routes\n\n if not subnet:\n self.log.debug(\"No subnet data to update\")\n return\n\n if disable_gateway_ip and gateway_ip:\n raise exc.OpenStackCloudException(\n 'arg:disable_gateway_ip is not allowed with arg:gateway_ip'\n )\n\n curr_subnet = self.get_subnet(name_or_id)\n if not curr_subnet:\n raise exc.OpenStackCloudException(\n \"Subnet %s not found.\" % name_or_id\n )\n\n return self.network.update_subnet(curr_subnet, **subnet)", "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def patch_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_host_subnet`\")\n\n resource_path = 
'/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def fusion_api_edit_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.update(body, uri, api, headers)", "def update_subnet_precommit(self, mech_context):\n LOG.debug(\"update_subnet_precommit(self: called\")", "def subnet_delete_end(self, payload):\n subnet_id = payload['subnet_id']\n network = self.cache.get_network_by_subnet_id(subnet_id)\n if network:\n self.refresh_dhcp_helper(network.id)", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def test_patch_host_subnet(self):\n pass", "def fusion_api_patch_ipv4_subnet(self, body, uri, param='', api=None, headers=None):\n return self.ipv4subnet.patch(body, uri, param, api, headers)", "def ensure_subnet_created(vpc):\n return _ensure_subnet_created(\n vpc,\n Constants['SubnetAvailabilityZone'],\n Constants['SubnetName'],\n Constants['SubnetCidr'],\n '00'\n )", "def subnet_id(self, subnet_id):\n self._subnet_id = subnet_id", "def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for e in range(subnets_per_network):\n router = net_topo[\"routers\"][e]\n subnet = net_topo[\"subnets\"][e]\n self.neutron.remove_interface_from_router(subnet_id=subnet[\"id\"],\n router_id=router[\"id\"])\n self.neutron.delete_router(router[\"id\"])", "def test_aws_service_api_network_subnets_get(self):\n pass", "def add_subnet(self, subnet_type, quantity=None, vlan_id=None, version=4,\r\n test_order=False):\r\n package = self.client['Product_Package']\r\n category = 'sov_sec_ip_addresses_priv'\r\n desc = ''\r\n if version == 4:\r\n if subnet_type == 'global':\r\n quantity = 0\r\n category = 'global_ipv4'\r\n elif subnet_type == 'public':\r\n category = 'sov_sec_ip_addresses_pub'\r\n else:\r\n category = 
'static_ipv6_addresses'\r\n if subnet_type == 'global':\r\n quantity = 0\r\n category = 'global_ipv6'\r\n desc = 'Global'\r\n elif subnet_type == 'public':\r\n desc = 'Portable'\r\n\r\n # In the API, every non-server item is contained within package ID 0.\r\n # This means that we need to get all of the items and loop through them\r\n # looking for the items we need based upon the category, quantity, and\r\n # item description.\r\n price_id = None\r\n quantity_str = str(quantity)\r\n for item in package.getItems(id=0, mask='itemCategory'):\r\n category_code = lookup(item, 'itemCategory', 'categoryCode')\r\n if all([category_code == category,\r\n item.get('capacity') == quantity_str,\r\n version == 4 or (version == 6 and\r\n desc in item['description'])]):\r\n price_id = item['prices'][0]['id']\r\n break\r\n\r\n if not price_id:\r\n raise TypeError('Invalid combination specified for ordering a'\r\n ' subnet.')\r\n\r\n order = {\r\n 'packageId': 0,\r\n 'prices': [{'id': price_id}],\r\n 'quantity': 1,\r\n # This is necessary in order for the XML-RPC endpoint to select the\r\n # correct order container\r\n 'complexType': 'SoftLayer_Container_Product_Order_Network_Subnet',\r\n }\r\n\r\n if subnet_type != 'global':\r\n order['endPointVlanId'] = vlan_id\r\n\r\n if test_order:\r\n return self.client['Product_Order'].verifyOrder(order)\r\n else:\r\n return self.client['Product_Order'].placeOrder(order)", "def run(self, router_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1, router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.update_router(router[\"id\"], **router_update_args)", "def pre_subnet_delete(self, resource_id):\n pass", "def test_replace_host_subnet(self):\n pass", "def fusion_api_allocate_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.allocate(body, uri, api, headers)", "def _ensure_net_and_subnets(self, port):\n\n # Gather the subnet IDs that we need for this port, and get the\n # NetModel if we already have it in the cache.\n needed_subnet_ids = set()\n net = None\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip.get('subnet_id')\n if subnet_id:\n needed_subnet_ids.add(subnet_id)\n if not net:\n net = self.agent.cache.get_network_by_subnet_id(subnet_id)\n LOG.debug(\"Needed subnet IDs: %s\", needed_subnet_ids)\n LOG.debug(\"Existing network model by subnet ID: %s\", net)\n\n # For each subnet that we need, get its data from SubnetWatcher and\n # hold for adding into the cache.\n new_subnets = {}\n for subnet_id in needed_subnet_ids:\n # Get data for this subnet from the SubnetWatchers.\n subnet = (self.subnet_watcher.get_subnet(subnet_id) or\n self.v1_subnet_watcher.get_subnet(subnet_id))\n if subnet is None:\n LOG.warning(\"No data for subnet %s\", subnet_id)\n raise SubnetIDNotFound()\n new_subnets[subnet_id] = subnet\n\n if not net:\n # We don't already have a NetModel, so look for a cached NetModel\n # with the right network ID. 
(In this case we must have new\n # subnets to add into the cache, and the cached NetModel must have\n # subnets other than the ones that we're adding in this iteration;\n # otherwise we would have already found it when searching by\n # subnet_id above.)\n assert new_subnets\n network_id = list(new_subnets.values())[0]['network_id']\n net = self.agent.cache.get_network_by_id(network_id)\n LOG.debug(\"Existing network model by network ID: %s\", net)\n\n if not net:\n # We still have no NetModel for the relevant network ID, so create\n # a new one. In this case we _must_ be adding new subnets.\n assert new_subnets\n net = empty_network(network_id)\n LOG.debug(\"New network %s\", net)\n elif new_subnets:\n # We have a NetModel that was already in the cache and are about to\n # modify it. Cache replacement only works if the new NetModel is a\n # distinct object from the existing one, so make a copy here.\n net = copy_network(net)\n LOG.debug(\"Copied network %s\", net)\n\n if new_subnets:\n # Add the new subnets into the NetModel.\n assert net\n net.subnets = [s for s in net.subnets\n if s.id not in new_subnets]\n net.subnets += list(new_subnets.values())\n\n # Add (or update) the NetModel in the cache.\n LOG.debug(\"Net: %s\", net)\n _fix_network_cache_port_lookup(self.agent, net.id)\n self.agent.cache.put(net)\n\n return net.id", "def subnets(self, subnets):\n\n self._subnets = subnets", "def allocate_subnet(self):\n if len(self.subnet_list) == 0:\n subnet = '192.168.1.0/24'\n self.subnet_list.append(subnet)\n return subnet\n else:\n subnet = self.subnet_list[::-1][0]\n ip = ipaddress.IPv4Network(subnet)[0]\n s = ipaddress.IPv4Address(ip) + 256\n return '{}{}'.format(s, '/24')", "def create(subnetModeDetails):\n\n # Remove id as it's created automatically\n if 'id' in subnetModeDetails:\n del subnetModeDetails['id']\n\n schema = SubnetModeSchema()\n new_subnetMode = schema.load(subnetModeDetails, session=db.session)\n db.session.add(new_subnetMode)\n db.session.commit()\n\n # Serialize and return the newly created deployment\n # in the response\n data = schema.dump(new_subnetMode)\n return data, 201", "def create_subnet(ec2, vpc, \n subnet_name,\n subnet_region, \n subnet_cidr_block,\n subnet_type=\"private\"):\n # create a public subnet within the VPC\n print(\"\\n===Creating a \"+subnet_type+\" subnet...\")\n subnet = ec2.create_subnet(\n AvailabilityZone=subnet_region,\n CidrBlock=subnet_cidr_block,\n VpcId=vpc.vpc_id,\n DryRun=False,\n TagSpecifications=[{\n \"ResourceType\":\"subnet\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": subnet_name},\n ]\n }])\n \n print(f\"===Subnet {subnet_name} is available!\")\n return subnet", "def test_port_create_with_segment_subnets(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'])\n res = self.deserialize(self.fmt, response)\n # Don't allocate IPs in this case because we didn't give binding info\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def list_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n 
resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnetList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def __init__(__self__, *,\n subnet_resource_id: Optional[pulumi.Input[str]] = None):\n if subnet_resource_id is not None:\n pulumi.set(__self__, \"subnet_resource_id\", subnet_resource_id)", "def __init__(self, network, subnetSize=24):\n self.network = ipaddress.ip_network(unicode(network), strict=False)\n if subnetSize < self.network.prefixlen:\n raise Exception(\"Invalid subnetSize {} for network {}\".format(\n subnetSize, network))\n\n subnets = self.network.subnets(new_prefix=subnetSize)\n numSubnets = 2 ** (subnetSize - self.network.prefixlen)\n\n super(NetworkPool, self).__init__(subnets, numSubnets)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.get_router(router[\"id\"])", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()", "def post_virtual_network_create(self, resource_dict):\n pass", "def delete_subnet_postcommit(self, mech_context):\n LOG.debug(\"delete_subnetwork_postcommit: called\")", "def test_change_subnet(self):\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n 
self.assertEqual(lease.ip_address, '10.11.12.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n ippool2 = NetworkIpPool.objects.create(\n network='10.10.11.0/24',\n kind=NetworkIpPoolKind.NETWORK_KIND_INTERNET,\n description='test',\n ip_start='10.10.11.2',\n ip_end='10.10.11.254',\n gateway='10.10.11.1',\n is_dynamic=True\n )\n self.ippool.groups.remove(self.group)\n ippool2.groups.add(self.group)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:7',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.3')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)", "def list_subnet(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing subnet.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"subnet List : %s \" % output)\n return output[\"subnets\"]", "def subnet(template, name, vpc, availability_zone='eu-west-1a', cidr='10.0.36.0/24', gateway=None, nat=None,\n map_public_ip=False, acl_table=None):\n s = Subnet(name, template=template)\n s.Tags = Tags(Name=aws_name(s.title))\n s.VpcId = Ref(vpc)\n s.CidrBlock = cidr\n s.MapPublicIpOnLaunch = map_public_ip\n\n if availability_zone:\n s.AvailabilityZone = Ref(availability_zone)\n\n if gateway and nat:\n raise(RuntimeError(\"Don't provide an internet gateway (public) and nat gateway (private) at the same time.\"))\n\n # add public route if an internet gateway is given\n if gateway:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.GatewayId = Ref(gateway)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add nat route if an nat gateway is given\n if nat:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.NatGatewayId = Ref(nat)\n # r.DependsOn = 
InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add acl table if one is provided. Defaults to vpc default acl if None is provided\n if acl_table:\n at = SubnetNetworkAclAssociation('{}SubnetAclTableAssociation'.format(name), template=template)\n at.SubnetId = Ref(s)\n at.NetworkAclId = Ref(acl_table)\n\n return s", "def pre_subnet_read(self, resource_id):\n pass", "def read_namespaced_host_subnet(self, name, **kwargs):\n\n all_params = ['name', 'pretty', 'export', 'exact']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'export' in params:\n query_params['export'] = params['export']\n if 'exact' in params:\n query_params['exact'] = params['exact']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)", "def delete_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_host_subnet`\")\n\n resource_path = 
'/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:\n return pulumi.get(self, \"subnet\")", "def filter_update_subnet_attributes(subnet, context):\n try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',\n 'allocation_pools', 'tenant_id'])", "def tag_instance_subnet(self, tags):\n self._request({\"instance-subnet-tags\": dict(tags)})", "def watch_namespaced_host_subnet(self, name, **kwargs):\n\n all_params = ['name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `watch_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/watch/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def 
on_subnet_del(self, response, subnet_id):\n LOG.info(\"Subnet %s deleted\", subnet_id)\n if subnet_id in self.subnets_by_id:\n del self.subnets_by_id[subnet_id]\n return", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def test_delete_host_subnet(self):\n pass", "def subnet(self) -> pulumi.Output[Optional['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"subnet\")", "def create_subnet(availability_zone, cidr, vpc_id):\n response = EC2.create_subnet(\n AvailabilityZone=availability_zone,\n CidrBlock=cidr,\n VpcId=vpc_id,\n )\n return response", "def __init__(__self__, *,\n ignore_missing_v_net_service_endpoint: Optional[pulumi.Input[bool]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None):\n if ignore_missing_v_net_service_endpoint is not None:\n pulumi.set(__self__, \"ignore_missing_v_net_service_endpoint\", ignore_missing_v_net_service_endpoint)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)", "def test_get_subnet(self):\n self.assertEqual(\n type(self.the_client.get_subnet(subnet_id)),\n baidubce.bce_response.BceResponse)", "def rule_40_extend_subnet_cidr(session):\n\n config, conn = session[\"config\"], session[\"conn\"]\n\n def append_cidr(config_side, conn_vpc):\n\n cidr = conn_vpc.get_all_subnets([\n config_side[\"res\"][\"subnet_id\"]\n ])[0].cidr_block\n\n for user_cidr in config_side[\"ipsec\"][\"subnets\"]:\n if cidr_overlaps(cidr, user_cidr):\n return\n\n config_side[\"ipsec\"][\"subnets\"].append(cidr)\n\n append_cidr(config[\"server\"], conn[\"server\"](\"vpc\"))\n append_cidr(config[\"client\"], conn[\"client\"](\"vpc\"))\n\n return True", "def subnetpool_create(request, name, prefixes, **kwargs):\n LOG.debug(\"subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, \"\n \"kwargs=%(kwargs)s\", {'name': name, 'prefixes': prefixes,\n 'kwargs': kwargs})\n body = {'subnetpool':\n {'name': name,\n 'prefixes': prefixes,\n }\n }\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnetpool'].update(kwargs)\n subnetpool = \\\n neutronclient(request).create_subnetpool(body=body).get('subnetpool')\n return SubnetPool(subnetpool)", "def gen_net_resources(self):\n\n print \"\\t* Adding net and subnet resources to compute template\"\n\n networks = self.neutronclient.list_networks()[\"networks\"]\n\n # filter all networks that match\n filtered_networks = [net for net in networks if (net[\"tenant_id\"] == self.tenant_id or\n (net[\"shared\"] == True) and net['router:external'] == False)]\n\n # obtain subnet information\n for network in filtered_networks:\n if network[\"shared\"] is not True:\n\n for subnet in network[\"subnets\"]:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n\n # save this information for router interfaces\n self.all_nets.append((subnet_info, \"%s\" % network[\"name\"], \"%s\" % subnet_info[\"name\"]))\n\n # generate private net\n data = {\"type\": \"OS::Neutron::Net\",\n \"properties\":\n {\"name\":\n {\"get_param\": \"%s_%s_name\" % (network[\"name\"], \"net\")}\n }\n }\n\n start_ = {\"get_param\": \"%s_%s_pool_start\" % (network[\"name\"], subnet_info[\"name\"])}\n\n data2 = {\"type\": \"OS::Neutron::Subnet\",\n \"properties\": {\n \"name\": subnet_info[\"name\"],\n \"network_id\": {\"get_resource\": \"%s\" % network[\"name\"]},\n \"cidr\": {\"get_param\": \"%s_%s_cidr\" % (network[\"name\"], 
subnet_info[\"name\"])},\n \"gateway_ip\": {\"get_param\": \"%s_%s_gateway\" % (network[\"name\"], subnet_info[\"name\"])},\n \"allocation_pools\": [\n {\"start\": start_, \"end\": {\"get_param\": \"%s_%s_pool_end\" % (network[\"name\"], subnet_info[\"name\"])}}\n ]\n }\n }\n self.compute_data[\"resources\"][\"%s\" % network[\"name\"]] = data\n self.compute_data[\"resources\"][\"%s\" % subnet_info[\"name\"]] = data2\n else:\n # add shared network to the full list of networks\n for subnet in network[\"subnets\"]:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n self.all_nets.append((subnet_info, \"%s\" % network[\"name\"], \"%s\" % subnet_info[\"name\"]))", "def test_one_subnet(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/16\", \n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=[\"10.0.1.0/24\"],\n )", "def create_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.create_network(network)\n except:\n pass", "def get_subnet(self, subnet_id):\n LOG.debug(\"Get subnet %s\", subnet_id)\n\n if subnet_id not in self.subnets_by_id:\n return None\n\n data = self.subnets_by_id[subnet_id]\n LOG.debug(\"Subnet data: %s\", data)\n\n # Convert to form expected by NetModel.\n ip_version = 6 if ':' in data['cidr'] else 4\n subnet = {'enable_dhcp': True,\n 'ip_version': ip_version,\n 'cidr': data['cidr'],\n 'dns_nameservers': data.get('dns_servers') or [],\n 'id': subnet_id,\n 'gateway_ip': data['gateway_ip'],\n 'host_routes': data.get('host_routes', []),\n 'network_id': data.get('network_id', NETWORK_ID)}\n if ip_version == 6:\n subnet['ipv6_address_mode'] = DHCPV6_STATEFUL\n subnet['ipv6_ra_mode'] = DHCPV6_STATEFUL\n\n return dhcp.DictModel(subnet)", "def createGRSAZ(gwtable, inputsubnets, Routetargets):\n ec2 = boto3.client(\"ec2\")\n elb = boto3.client('elb')\n\n #clean the inputsubnets\n vpcid = elb.describe_load_balancers(LoadBalancerNames=[elbname])['LoadBalancerDescriptions'][0]['VPCId']\n subnetsvpc = ec2.describe_subnets(Filters=[{'Name': \"vpc-id\", 'Values': [vpcid]}])\n notrealsubnets = set(inputsubnets)-set([s['SubnetId'] for s in subnetsvpc['Subnets']])\n if len(notrealsubnets) > 0:\n print('the following are not real subnets in your VPC: ', notrealsubnets)\n cleaninputsubnets = list(set(inputsubnets) - notrealsubnets)\n\n #find all the routing tables already associated with any healthy gws and their associated subnets \n rt2 = ec2.describe_route_tables(Filters=[{'Name': 'association.subnet-id', 'Values': cleaninputsubnets}])\n #disassociate subnets from RTs if used by gateway ...later\n\n M = []\n for r in rt2['RouteTables']:\n if set(Routetargets) <= set([rr['DestinationCidrBlock'] for rr in r['Routes'] if 'InstanceId' in rr.keys() and rr['InstanceId'] in [g[0] for g in gwtable if g[1] == 'InService']]):\n for s in [ass for ass in r['Associations'] if ass['SubnetId'] in cleaninputsubnets]:\n goodinstance = [rr['InstanceId'] for rr in r['Routes'] if 'InstanceId' in rr.keys() and rr['InstanceId'] in [g[0] for g in gwtable if g[1] == 'InService']].pop()\n M.append(tuple([goodinstance,\n r['RouteTableId'],\n s['SubnetId'],\n 1]))\n\n # add route tables that have the routes but no live GWs with index 2....we'll reuse 
these RTs and routes\n elif set(Routetargets) <= set([rr['DestinationCidrBlock'] for rr in r['Routes']]):\n for s in r['Associations']:\n M.append(tuple(['NoGW',\n r['RouteTableId'],\n s['SubnetId'],\n 2]))\n\n #add new RTs for any subnets that are not in the table. mark the GWs as NoGW and index at 3 so that we know that we need to add new routes\n subnets1 = ec2.describe_subnets(Filters=[{'Name': \"subnet-id\", 'Values': list(set([m[2] for m in M]) | set(cleaninputsubnets))}])\n subnets2 = {s['SubnetId']: s for s in subnets1['Subnets']}\n for sub in cleaninputsubnets:\n if not (sub in [m[2] for m in M]):\n if subnets2[sub]['VpcId'] == vpcid:\n rass = []\n for rt in rt2['RouteTables']:\n for ass in rt['Associations']:\n if ass['SubnetId'] == sub:\n rass.append(ass['RouteTableAssociationId'])\n if len(rass) > 0:\n ec2.disassociate_route_table(AssociationId=rass.pop())\n print('removed RT association from subnet ', sub)\n RTforS = ec2.create_route_table(VpcId=vpcid)['RouteTable']['RouteTableId']\n ec2.associate_route_table(SubnetId=sub, RouteTableId=RTforS)\n print('created route table ', RTforS, ' and associated it with subnet ', sub)\n M.append(tuple(['NoGW', RTforS, sub, 3]))\n else:\n print('Subnet ', sub, ' is in VPC ', subnets2[sub]['VpcId'], ' which is not in the same vpc as your gateways: (', vpcid, '). Ignoring!')\n \n # Convert to a list and add AZ info into table\n MM = [list(n) for n in set(M)]\n for r in MM:\n r.insert(3, subnets2[r[2]]['AvailabilityZone'])\n\n return MM", "def delete_subnet(self, subnet):\r\n return self.delete(self.subnet_path % (subnet))", "def deletecollection_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_list_subnets(self):\n print(self.the_client.list_subnets())", "def 
filter_create_network_attributes(network, context):\n try_del(network, ['status', 'subnets'])", "def test_list_host_subnet(self):\n pass" ]
[ "0.7963921", "0.7852703", "0.77121514", "0.74027985", "0.72448415", "0.7210939", "0.7181357", "0.70052475", "0.68840075", "0.6855804", "0.6780139", "0.6724793", "0.66803986", "0.6633644", "0.65841454", "0.6573212", "0.65678436", "0.6544619", "0.6485936", "0.6472504", "0.64677894", "0.6441883", "0.6441877", "0.6399792", "0.6382998", "0.6291744", "0.62745106", "0.6272387", "0.6250954", "0.6242609", "0.62367594", "0.61953217", "0.61832607", "0.61762345", "0.61748666", "0.61504585", "0.6132364", "0.60887074", "0.60560066", "0.6054926", "0.6029537", "0.5994672", "0.5963296", "0.5874713", "0.5828105", "0.5813948", "0.5811822", "0.5804178", "0.58038175", "0.57694644", "0.5752557", "0.57219154", "0.57163006", "0.5692651", "0.5682367", "0.566547", "0.56169915", "0.55979925", "0.55827636", "0.55707866", "0.5520497", "0.55022746", "0.54745346", "0.54741997", "0.5473846", "0.5471861", "0.54641974", "0.54612166", "0.5455909", "0.54364455", "0.54144037", "0.5402257", "0.53963274", "0.53860044", "0.5333037", "0.5321979", "0.53154737", "0.5305356", "0.53038836", "0.53004175", "0.5296181", "0.52853495", "0.5281601", "0.52659583", "0.5260227", "0.52577573", "0.52522457", "0.5248188", "0.52472514", "0.524518", "0.5234192", "0.52227753", "0.5222206", "0.52197695", "0.521583", "0.52134377", "0.52061796", "0.51993895", "0.51902616", "0.5180665" ]
0.68601817
9
Handler for subnet deletions.
def on_subnet_del(self, response, subnet_id): LOG.info("Subnet %s deleted", subnet_id) if subnet_id in self.subnets_by_id: del self.subnets_by_id[subnet_id] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def subnet_delete_end(self, payload):\n subnet_id = payload['subnet_id']\n network = self.cache.get_network_by_subnet_id(subnet_id)\n if network:\n self.refresh_dhcp_helper(network.id)", "def delete_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n try:\n self.rpc_handler.delete_subnet({str(context._subnet.get('id', '')): {}})\n except:\n pass", "def post_subnet_delete(self, resource_id, resource_dict):\n pass", "def delete_subnet_postcommit(self, mech_context):\n LOG.debug(\"delete_subnetwork_postcommit: called\")", "def pre_subnet_delete(self, resource_id):\n pass", "def test_delete_host_subnet(self):\n pass", "def test_delete_collection_host_subnet(self):\n pass", "def delete_subnet(self, subnet):\r\n return self.delete(self.subnet_path % (subnet))", "def delete_subnet_precommit(self, mech_context):\n LOG.debug(\"delete_subnetwork_precommit: called\")", "def test_delete_subnet(self):\n self.assertEqual(\n type(self.the_client.delete_subnet(subnet_id)),\n baidubce.bce_response.BceResponse)", "def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)", "def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)", "def deletecollection_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n 
callback=params.get('callback'))\n return response", "def fusion_api_delete_ipv4_subnet(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4subnet.delete(name, uri, api, headers)", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def delete_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n 
return response", "def delete_subnet(self, subnet_id):\n\n LOG_OBJ.debug(\"Deleting subnet %s\" % subnet_id)\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets/\" + \\\n subnet_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting subnet:%s\" %\n subnet_id)\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of subnet Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the subnet : %s \" % subnet_id)\n return True", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def subnet_update_end(self, payload):\n network_id = payload['subnet']['network_id']\n self.refresh_dhcp_helper(network_id)", "def delete_subnet(self, name_or_id):\n subnet = self.network.find_subnet(name_or_id, ignore_missing=True)\n if not subnet:\n self.log.debug(\"Subnet %s not found for deleting\", name_or_id)\n return False\n\n self.network.delete_subnet(subnet)\n\n return True", "def delete_network_segments(self, tenant_id, network_segments):", "def do_nic_delete(cc, args):\n cc.nic.delete(args.uuid)\n print(_(\"%s deleted\" % args.uuid))", "def post_virtual_network_delete(self, resource_id, resource_dict):\n pass", "def test_host_routes_create_two_subnets_then_delete_one(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips,\n cidrs)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n self.assertEqual([{'destination': cidrs[0],\n 'nexthop': gateway_ips[1]}],\n sub_res['subnet']['host_routes'])\n\n del_req = self.new_delete_request('subnets', subnet0['id'])\n del_req.get_response(self.api)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n\n self.assertEqual([], sub_res['subnet']['host_routes'])", "def deserialize_guild_ban_remove_event(\n self, shard: gateway_shard.GatewayShard, payload: data_binding.JSONObject\n ) -> guild_events.BanDeleteEvent:", "def delete_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.delete_network(network)\n except:\n pass", "def pre_virtual_network_delete(self, resource_id):\n pass", "def iptables_delete(nid, rid):\n if nid != -1 and (hl.getNode(\"ID\",nid) and hl.getNode(\"ID\",nid)[\"Address\"] != \"self\"):\n url = hl.getNode(\"ID\", nid)[\"Address\"] \n hl.nodePost(url+\"/deleterule/\",{\"ID\" : rid}) \n else:\n hl.removeIPRule(rid)\n \n return redirect(url_for('confirm', confirmed = \"IP Table Rule Deleted!\"))", "def filter_update_subnet_attributes(subnet, context):\n try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',\n 'allocation_pools', 'tenant_id'])", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n 
self.service.remove_all()", "def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")", "def delete(oid):\n # Does the subnetMode to delete exist?\n existing_subnetMode = SubnetMode.query.filter(SubnetMode.id == oid).one_or_none()\n\n # if found?\n if existing_subnetMode is not None:\n db.session.delete(existing_subnetMode)\n db.session.commit()\n\n return make_response(f\"SubnetMode {oid} successfully deleted\", 200)\n\n # Otherwise, nope, subnetMode to delete not found\n else:\n abort(404, f\"SubnetMode {oid} not found\")", "def update_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n subnet = self._get_subnet_info(context._subnet)\n if subnet is not None:\n try:\n self.rpc_handler.update_subnet(subnet)\n except:\n pass", "def on_account_removal(self, address: ChecksumEthAddress) -> None:\n ...", "def dhcp_agent_network_remove(self, dhcp_net_info):\n self.turn_on_dhcp_check()", "def _do_remove_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.remove_block(name)", "def delete_network_postcommit(self, mech_context):\n\n LOG.debug(\"delete_network_postcommit: called\")\n network = mech_context.current\n network_id = network['id']\n vlan_id = network['provider:segmentation_id']\n tenant_id = network['tenant_id']\n\n for switch_ip in self._switch:\n try:\n system = self.client[switch_ip].system.list()\n system[0].remove_segment(vlan_id)\n except seamicro_client_exception.ClientException as ex:\n LOG.exception(_LE(\"SeaMicr driver: failed to delete network\"\n \" with the following error: %(error)s\"),\n {'error': ex.message})\n raise Exception(\n _(\"Seamicro switch exception, delete_network_postcommit\"\n \" failed\"))\n\n LOG.info(_LI(\"delete network (postcommit): %(network_id)s\"\n \" with vlan = %(vlan_id)s\"\n \" for tenant %(tenant_id)s on switch %(switch_ip)s\"),\n {'network_id': network_id,\n 'vlan_id': vlan_id,\n 'tenant_id': tenant_id,\n 'switch_ip': switch_ip})", "def test_dhcp_bind_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|bind_id', dhcp_bind.delete,\n {'bind': {}},\n delete_args=['dhcpStaticBindingID'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'esg_id', 'bindingID': 'bind_id'}\n }\n )", "def delete(self, uuid):\n result = mongoUtils.get(\"func\", uuid)\n if result:\n if len(result[\"tenants\"]) > 0:\n return f\"Error: Function is used by slices {result['tenants']}\"\n mongoUtils.delete(\"func\", uuid)\n # Update the location removing the Function\n location = mongoUtils.find(\"location\", {\"id\": result[\"location\"].lower()})\n if location:\n location[\"functions\"].remove(result[\"id\"])\n mongoUtils.update(\"location\", location[\"_id\"], location)\n return f\"Deleted Network Function {uuid}\", 200\n else:\n # if uuid is not found, return error\n return f\"Error: No such Network 
Function: {uuid}\", 404", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def post_floating_ip_delete(self, resource_id, resource_dict):\n pass", "def delete(self, id):\n context = request.environ.get('context')\n dbapi.net_interfaces_delete(context, id)\n return None, 204, None", "def delete(self, request, domain_id):\n domain = get_object_or_404(models.IPBlocklist, id=domain_id)\n domain.delete()\n return redirect(\"settings-ip-blocks\")", "def unplug_vifs(self, instance, network_info):\n raise NotImplementedError()", "def test_bgp_neighbour_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|ip|remoteAS|protocolIp|forwardingIp',\n dlr_bgp_neighbour.delete,\n {},\n read_args=['routingBGP'],\n read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},\n read_response={\n 'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,\n 'status': 204\n },\n update_args=['routingBGP'],\n update_kwargs={\n 'request_body_dict': test_nsx_base.DLR_BGP_NEIGHBOUR_BEFORE,\n 'uri_parameters': {'edgeId': 'esg_id'}\n }\n )", "def post_instance_ip_delete(self, resource_id, resource_dict):\n pass", "def delete(env, identifier, listener):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n try:\n mgr.remove_lb_listener(uuid, listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def unlink(address):", "def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})", "def post_loadbalancer_member_delete(self, resource_id, resource_dict):\n pass", "def delete_network_postcommit(self, context):\n for _switch in self.switches:\n self._remove_from_switch(_switch, context)", "def test_delete_network(self):\n pass", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def cleanup(self, tenant, inside_vlan_arg, outside_vlan_arg,\n inside_ip, inside_mask,\n outside_ip, outside_mask,\n interface_in, interface_out):\n LOG.debug(\"asa_cleanup: %s %d %d %s %s %s %s\",\n tenant, inside_vlan_arg, outside_vlan_arg,\n inside_ip, inside_mask, outside_ip, outside_mask)\n inside_vlan = str(inside_vlan_arg)\n outside_vlan = str(outside_vlan_arg)\n context = tenant\n cmds = [\"conf t\", \"changeto system\"]\n cmds.append(\"no context \" + context + \" noconfirm\")\n inside_int = interface_in + '.' 
+ inside_vlan\n outside_int = interface_out + '.' + outside_vlan\n cmds.append(\"no interface \" + inside_int)\n cmds.append(\"no interface \" + outside_int)\n cmds.append(\"write memory\")\n cmds.append(\"del /noconfirm disk0:/\" + context + \".cfg\")\n\n if tenant in self.tenant_rule:\n for rule in self.tenant_rule[tenant].get('rule_lst'):\n del self.rule_tbl[rule]\n del self.tenant_rule[tenant]\n data = {\"commands\": cmds}\n return self.rest_send_cli(data)", "def process_IN_DELETE(self, event):", "def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True", "def delete_internal_interface(self, oid, subnet):\n data = {\"subnet_id\": subnet}\n path = '%s/routers/%s/remove_router_interface' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data),\n token=self.manager.identity.token)\n self.logger.debug('Delete an internal interface from openstack router %s: %s' % \n (oid, truncate(res)))\n return res[0]", "def pre_floating_ip_delete(self, resource_id):\n pass", "def test_networking_project_network_tag_delete(self):\n pass", "def _internal_network_removed(self, ri, port, ex_gw_port):\n itfc_deleted = False\n driver = self.driver_manager.get_driver(ri.id)\n vrf_name = driver._get_vrf_name(ri)\n network_name = ex_gw_port['hosting_info'].get('network_name')\n if self._router_ids_by_vrf_and_ext_net.get(\n vrf_name, {}).get(network_name) and (\n ri.router['id'] in\n self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):\n # If this is the last port for this neutron router,\n # then remove this router from the list\n if len(ri.internal_ports) == 1 and port in ri.internal_ports:\n self._router_ids_by_vrf_and_ext_net[\n vrf_name][network_name].remove(ri.router['id'])\n\n # Check if any other routers in this VRF have this network,\n # and if not, set the flag to remove the interface\n if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(\n network_name):\n LOG.debug(\"++ REMOVING NETWORK %s\" % network_name)\n itfc_deleted = True\n del self._router_ids_by_vrf_and_ext_net[\n vrf_name][network_name]\n if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):\n del self._router_ids_by_vrf_and_ext_net[vrf_name]\n\n driver.internal_network_removed(ri, port,\n itfc_deleted=itfc_deleted)\n if ri.snat_enabled and ex_gw_port:\n driver.disable_internal_network_NAT(ri, port, ex_gw_port,\n itfc_deleted=itfc_deleted)", "def del_returned_route_on_gw(self, context, router_id, subnet_id):\n LOG.debug('OVNL3RouterPlugin::')\n ovn_router_name = utils.ovn_gateway_name(router_id)\n subnet = self._plugin.get_subnet(context, subnet_id)\n route = {'destination': subnet['cidr'], 'nexthop': '169.254.128.2'}\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.delete_static_route(ovn_router_name,\n ip_prefix=route['destination'],\n nexthop=route['nexthop']))", "def del_reservation(self, src, dst):\n\n # PART 1, TASK 4.1 remove the 
reservation from the switch, controller and update links capacities.", "def handle_remove(self):\r\n self.del_common()", "def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass", "def test_list_host_subnet(self):\n pass", "def del_record(self, args):\n\n mac = MacAddress(args.mac)\n desc = self.dhcp_client_state[mac.as_redis_key()]\n print(\"Deleted mac %s with DHCP rec %s\" % (str(mac), desc))\n self.dhcp_client_state[mac.as_redis_key()] = None", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def post_subnet_create(self, resource_dict):\n pass", "def delete_loadbalancer(self, context, lb):\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n hostnames = self._get_hostname(lb)\n if deployment_model in [\"PER_TENANT\", \"PER_SUBNET\"]:\n vapv = self._get_vapv(hostnames)\n if not vapv.tip_group.list():\n self._destroy_vapv(hostnames, lb)\n elif deployment_model == \"PER_TENANT\":\n # Delete subnet ports if no longer required\n if self.openstack_connector.subnet_in_use(lb) is False:\n self._detach_subnet_port(vapv, hostnames, lb)\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.delete_ip_from_ports(\n lb.vip_address, port_ids\n )\n elif deployment_model == \"PER_LOADBALANCER\":\n self._destroy_vapv(hostnames, lb)", "def list_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnetList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def pre_instance_ip_delete(self, resource_id):\n pass", "def update_subnet_postcommit(self, mech_context):\n 
LOG.debug(\"update_subnet_postcommit: called\")", "def delete_handler(event, context):\n delete_endpoint_config(event)", "def post_subnet_read(self, resource_id, resource_dict):\n pass", "def delete_this_region(self):", "def post_subnet_update(self, resource_id, resource_dict):\n pass", "def delete_network_bulk(self, tenant_id, network_id_list, sync=False):", "def _do_remove_all_blocks(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.remove_all_blocks()", "def test_replace_host_subnet(self):\n pass", "def test_networking_project_network_delete(self):\n pass", "def post_floating_ip_pool_delete(self, resource_id, resource_dict):\n pass", "def pre_virtual_ip_delete(self, resource_id):\n pass", "def delete(self):\r\n return self.connection.delete_load_balancer(self.name)", "def del_net_res(self, obj, net_id):\n self._graph_executor.del_net_res(obj, net_id)", "def instance_deleted(sender, instance, **kwargs):\n try:\n if instance.is_registered and not instance.deregister(save=False):\n logger.warn(\"Could not unregister {0} on delete.\".format(\n sender\n ))\n except SNSException:\n # Avoid that invalid arn token cause error when deleting instance\n pass", "def unplug_vifs(self, instance, network_info):\n try:\n for viface in network_info:\n self.vif_driver.unplug(instance, viface)\n self.stop_firewall(instance, network_info)\n except Exception as ex:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE('Failed to remove container network'\n ' for %(instance)s: %(ex)s'),\n {'instance': instance.name, 'ex': ex},\n instance=instance)", "def _do_remove_slave(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n if bus_type == 'rtu':\r\n self.server._servers[0].remove_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n self.server._servers[1].remove_slave(slave_id)\r\n return \"\"", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def _cleanup_deleted_nics(self):\n try:\n nics = self.network.network_interfaces.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n return\n residual_ids = [i.name for i in nics if not i.virtual_machine]\n to_delete_ids = set(self.residual_nics) & set(residual_ids)\n self.residual_nics = list(set(self.residual_nics) | set(residual_ids))\n if not to_delete_ids:\n LOG.info(_LI('No residual nic in Azure'))\n return\n for i in to_delete_ids:\n try:\n self.network.network_interfaces.delete(\n CONF.azure.resource_group, i\n )\n except Exception as e:\n LOG.warning(_LW(\"Unable to delete network_interfaces \"\n \"%(nic)s in Azure because %(reason)s\"),\n dict(nic=i,\n reason=six.text_type(e)))\n else:\n self.residual_nics.remove(i)\n LOG.info(_LI('Delete residual Nic: %s in Azure'), i)\n else:\n LOG.info(_LI('Delete all residual Nics in Azure'))", "def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n 
self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.delete_subnet(subnet[\"id\"])", "def test_ipam_ip_addresses_delete(self):\n pass", "def _unplug_interface(self, context, tenant_id, net_id, port_id):\n LOG.debug(_(\"QuantumRestProxyV2: _unplug_interface() called\"))\n\n # delete from network ctrl. Remote error on delete is ignored\n try:\n resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)\n ret = self.servers.delete(resource)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote port: \"\n \"%s\"), e.message)", "def on_endpoint_delete(self, response_ignored, name):\n try:\n hostname, orchestrator, workload_id, endpoint_id = \\\n split_endpoint_name(name)\n except ValueError:\n # For some reason this endpoint's name does not have the expected\n # form. Ignore it.\n LOG.warning(\"Unexpected form for endpoint name: %s\", name)\n return\n\n # Remove endpoint ID from our cache. Note, it might not be\n # there because we haven't checked whether the endpoint just\n # deleted is a local one; hence 'discard' instead of 'remove'.\n self.local_endpoint_ids.discard(endpoint_id)\n\n # Find the corresponding port in the DHCP agent's cache.\n port = self.agent.cache.get_port_by_id(endpoint_id)\n if port:\n LOG.debug(\"deleted port: %s\", port)\n self.mtu_watcher.unwatch_port(endpoint_id, port.device_id)\n self.agent.cache.remove_port(port)\n self._update_dnsmasq(port.network_id)", "def remove_ban(self, vapor_id_or_ip):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = '{}removeBan {}'.format(self.console, identity)\n self.write_command(cmd)", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def delete_callback(self):\n pass", "def pre_network_ipam_delete(self, resource_id):\n pass", "def vpp_lb_add_del_vip(node, **kwargs):\n if node[u\"type\"] == NodeType.DUT:\n vip_addr = kwargs.pop(u\"vip_addr\", \"0.0.0.0\")\n protocol = kwargs.pop(u\"protocol\", 255)\n port = kwargs.pop(u\"port\", 0)\n encap = kwargs.pop(u\"encap\", 0)\n dscp = kwargs.pop(u\"dscp\", 0)\n srv_type = kwargs.pop(u\"srv_type\", 0)\n target_port = kwargs.pop(u\"target_port\", 0)\n node_port = kwargs.pop(u\"node_port\", 0)\n new_len = kwargs.pop(u\"new_len\", 1024)\n src_ip_sticky = kwargs.pop(u\"src_ip_sticky\", 0)\n is_del = kwargs.pop(u\"is_del\", 0)\n\n cmd = u\"lb_add_del_vip_v2\"\n err_msg = f\"Failed to add vip on host {node[u'host']}\"\n\n vip_addr = ip_address(vip_addr).packed\n args = dict(\n pfx={\n u\"len\": 128,\n u\"address\": {u\"un\": {u\"ip4\": vip_addr}, u\"af\": 0}\n },\n protocol=protocol,\n port=port,\n encap=htonl(encap),\n dscp=dscp,\n type=srv_type,\n target_port=target_port,\n node_port=node_port,\n new_flows_table_length=int(new_len),\n 
src_ip_sticky=src_ip_sticky,\n is_del=is_del,\n )\n\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)\n else:\n raise ValueError(\n f\"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'\"\n )" ]
[ "0.7702621", "0.75508094", "0.7533499", "0.7260703", "0.72326857", "0.70582956", "0.6918789", "0.6917834", "0.6906911", "0.66798496", "0.6626956", "0.6558707", "0.6398431", "0.63931185", "0.6186472", "0.60645396", "0.6064328", "0.5995911", "0.59661895", "0.59549713", "0.59370154", "0.5823155", "0.5600699", "0.557985", "0.55455333", "0.55418813", "0.5538818", "0.54998285", "0.54825056", "0.5480995", "0.54338396", "0.5426016", "0.5392319", "0.53802437", "0.53722554", "0.53495497", "0.5329188", "0.5324472", "0.5315158", "0.53096646", "0.5302259", "0.52929074", "0.52888346", "0.5288779", "0.52823406", "0.52785814", "0.5270349", "0.5269833", "0.5262394", "0.5261558", "0.5251524", "0.52509093", "0.5243965", "0.52421635", "0.5228505", "0.5207153", "0.51999146", "0.51785105", "0.51685125", "0.5162792", "0.51604766", "0.5157525", "0.515749", "0.5154061", "0.51518625", "0.51454586", "0.5134521", "0.51307565", "0.5126678", "0.5123411", "0.5109173", "0.5107385", "0.51064384", "0.5106167", "0.5103437", "0.50969905", "0.5090673", "0.5079021", "0.50764465", "0.506225", "0.50562173", "0.50385416", "0.5037609", "0.5034412", "0.50315374", "0.5031091", "0.5030293", "0.50294787", "0.50272816", "0.50217515", "0.5010158", "0.5001282", "0.49994195", "0.49968618", "0.49838898", "0.49718818", "0.4964668", "0.49636495", "0.49588266", "0.4957312" ]
0.73326564
3
Get data for the specified subnet.
def get_subnet(self, subnet_id): LOG.debug("Get subnet %s", subnet_id) if subnet_id not in self.subnets_by_id: return None data = self.subnets_by_id[subnet_id] LOG.debug("Subnet data: %s", data) # Convert to form expected by NetModel. ip_version = 6 if ':' in data['cidr'] else 4 subnet = {'enable_dhcp': True, 'ip_version': ip_version, 'cidr': data['cidr'], 'dns_nameservers': data.get('dns_servers') or [], 'id': subnet_id, 'gateway_ip': data['gateway_ip'], 'host_routes': data.get('host_routes', []), 'network_id': data.get('network_id', NETWORK_ID)} if ip_version == 6: subnet['ipv6_address_mode'] = DHCPV6_STATEFUL subnet['ipv6_ra_mode'] = DHCPV6_STATEFUL return dhcp.DictModel(subnet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_subnet(self, subnet, **_params):\r\n return self.get(self.subnet_path % (subnet), params=_params)", "def get_subnet_details(self, subnet_name=\"dummy_subnet\", subnet_id=None):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n if result is None:\n LOG_OBJ.error(\"No response from Server while getting subnets\")\n return result\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet details Failed with status %s \" %\n result.status)\n return result.status\n\n output = json.loads(result.data)\n\n for subnets in output['subnets']:\n if (subnet_id is not None and (subnets['id'] == subnet_id)) or\\\n subnets['name'].lower() == subnet_name.lower():\n LOG_OBJ.debug(\"Subnet Details: %s\" % subnets)\n return subnets\n\n LOG_OBJ.error(\"Subnet with name:%s or with id:%s is Not Found\" %\n (subnet_name, subnet_id))", "def get_subnet(self, subnet_id, **kwargs):\r\n if 'mask' not in kwargs:\r\n kwargs['mask'] = DEFAULT_SUBNET_MASK\r\n\r\n return self.subnet.getObject(id=subnet_id, **kwargs)", "def subnet(self) -> pulumi.Output[Optional['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"subnet\")", "def list_subnet(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing subnet.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"subnet List : %s \" % output)\n return output[\"subnets\"]", "def subnet(self) -> Optional['outputs.ApiEntityReferenceResponse']:\n return pulumi.get(self, \"subnet\")", "def test_get_subnet(self):\n self.assertEqual(\n type(self.the_client.get_subnet(subnet_id)),\n baidubce.bce_response.BceResponse)", "def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:\n return pulumi.get(self, \"subnet\")", "def get_subnet_by_id(self, id):\n return self.network.get_subnet(id)", "def _get_subnet_info(self, context):\n\n subnet = {}\n data = {}\n subnet_id = str(context.get('id', ''))\n data['subnet_id'] = subnet_id\n data['subnet_name'] = str(context.get('name', ''))\n data['tenant_id'] = str(context.get('tenant_id', ''))\n data['network_id'] = str(context.get('network_id', ''))\n data['ip_version'] = str(context.get('ip_version', ''))\n data['gateway_ip'] = str(context.get('gateway_ip', ''))\n ip_mask = str(context.get('cidr', ''))\n data['enable_dhcp'] = context.get('enable_dhcp', '')\n data['shared'] = context.get('shared', '')\n if subnet_id == '':\n LOG.error(_('Get creating subnet information failed'))\n return None\n data['network'], data['network_mask'] = ip_mask.split('/')\n\n context_str = json.dumps(data, sort_keys=True)\n data['md5sum'] = hashlib.md5(context_str).hexdigest()\n\n data['field_not_in_md5'] = ['md5sum']\n\n if subnet_id != '':\n subnet[subnet_id] = data\n return subnet", "def getDataAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def getDataAtAddress(program: ghidra.program.model.listing.Program, address: ghidra.program.model.address.Address) -> 
ghidra.program.model.listing.Data:\n ...", "def test_read_host_subnet(self):\n pass", "def subnet_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_subnet(**kwargs)", "def getDataContaining(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def read_namespaced_host_subnet(self, name, **kwargs):\n\n all_params = ['name', 'pretty', 'export', 'exact']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'export' in params:\n query_params['export'] = params['export']\n if 'exact' in params:\n query_params['exact'] = params['exact']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_subnet(self, network_name, subnet_name, cidr):\n _net_id = self.get_net_id(network_name)\n if not isinstance(_net_id, unicode):\n return\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _subnet_info = {\"subnet\":\n {\"ip_version\": 4,\n \"network_id\": _net_id,\n \"cidr\": cidr,\n \"name\": subnet_name}}\n\n _body = json.dumps(_subnet_info)\n\n LOG_OBJ.debug(\"Creating subnet in network %s of tenant %s.\"\n % (_net_id, self.project_info[\"project_id\"]))\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating subnet\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Creation of subnet Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Subnet details : %s \" % output['subnet'])\n return output['subnet']['id']", "def get_subnets(self):\n url = '%s/v2.0/subnets' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['subnets']\n else:\n LOG.error('Get subnets failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def test_aws_service_api_network_subnets_get(self):\n pass", "def 
get(self, oid=None, name=None):\n if oid is not None:\n path = '%s/subnets/%s' % (self.ver, oid)\n elif name is not None:\n path = '%s/subnets?display_name=%s' % (self.ver, name)\n else:\n raise OpenstackError('Specify at least subnet id or name')\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Get openstack subnets: %s' % truncate(res))\n if oid is not None:\n server = res[0]['subnet']\n elif name is not None:\n server = res[0]['subnets'][0]\n \n return server", "def delete_subnet(self, subnet):\r\n return self.delete(self.subnet_path % (subnet))", "def service_subnet(self) -> str:\n return pulumi.get(self, \"service_subnet\")", "def subnet_id(self):\n return self._subnet_id", "def subnet_id(self) -> str:\n return pulumi.get(self, \"subnet_id\")", "def post_subnet_read(self, resource_id, resource_dict):\n pass", "def get_subnet_output(expand: Optional[pulumi.Input[Optional[str]]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n subnet_name: Optional[pulumi.Input[str]] = None,\n virtual_network_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubnetResult]:\n ...", "def fusion_api_get_ipv4_subnet(self, uri=None, param='', api=None, headers=None):\n return self.ipv4subnet.get(uri=uri, api=api, headers=headers, param=param)", "async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None", "async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None", "def get_subnet(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_subnet(\n name_or_id=name_or_id, ignore_missing=True, **filters\n )", "def fusion_api_collect_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.collect(body, uri, api, headers)", "def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")", "def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")", "def subnet_id(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_id\")", "def get_subnet(availability_zone: Optional[str] = None,\n availability_zone_id: Optional[str] = None,\n cidr_block: Optional[str] = None,\n default_for_az: Optional[bool] = None,\n filters: Optional[Sequence[pulumi.InputType['GetSubnetFilterArgs']]] = None,\n id: Optional[str] = None,\n ipv6_cidr_block: Optional[str] = None,\n state: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n vpc_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetResult:\n __args__ = dict()\n __args__['availabilityZone'] = availability_zone\n __args__['availabilityZoneId'] = availability_zone_id\n __args__['cidrBlock'] = cidr_block\n __args__['defaultForAz'] = default_for_az\n __args__['filters'] = filters\n __args__['id'] = id\n __args__['ipv6CidrBlock'] = ipv6_cidr_block\n __args__['state'] = state\n __args__['tags'] = tags\n __args__['vpcId'] = vpc_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:ec2/getSubnet:getSubnet', __args__, opts=opts, typ=GetSubnetResult).value\n\n return AwaitableGetSubnetResult(\n arn=pulumi.get(__ret__, 'arn'),\n assign_ipv6_address_on_creation=pulumi.get(__ret__, 
'assign_ipv6_address_on_creation'),\n availability_zone=pulumi.get(__ret__, 'availability_zone'),\n availability_zone_id=pulumi.get(__ret__, 'availability_zone_id'),\n available_ip_address_count=pulumi.get(__ret__, 'available_ip_address_count'),\n cidr_block=pulumi.get(__ret__, 'cidr_block'),\n customer_owned_ipv4_pool=pulumi.get(__ret__, 'customer_owned_ipv4_pool'),\n default_for_az=pulumi.get(__ret__, 'default_for_az'),\n enable_dns64=pulumi.get(__ret__, 'enable_dns64'),\n enable_lni_at_device_index=pulumi.get(__ret__, 'enable_lni_at_device_index'),\n enable_resource_name_dns_a_record_on_launch=pulumi.get(__ret__, 'enable_resource_name_dns_a_record_on_launch'),\n enable_resource_name_dns_aaaa_record_on_launch=pulumi.get(__ret__, 'enable_resource_name_dns_aaaa_record_on_launch'),\n filters=pulumi.get(__ret__, 'filters'),\n id=pulumi.get(__ret__, 'id'),\n ipv6_cidr_block=pulumi.get(__ret__, 'ipv6_cidr_block'),\n ipv6_cidr_block_association_id=pulumi.get(__ret__, 'ipv6_cidr_block_association_id'),\n ipv6_native=pulumi.get(__ret__, 'ipv6_native'),\n map_customer_owned_ip_on_launch=pulumi.get(__ret__, 'map_customer_owned_ip_on_launch'),\n map_public_ip_on_launch=pulumi.get(__ret__, 'map_public_ip_on_launch'),\n outpost_arn=pulumi.get(__ret__, 'outpost_arn'),\n owner_id=pulumi.get(__ret__, 'owner_id'),\n private_dns_hostname_type_on_launch=pulumi.get(__ret__, 'private_dns_hostname_type_on_launch'),\n state=pulumi.get(__ret__, 'state'),\n tags=pulumi.get(__ret__, 'tags'),\n vpc_id=pulumi.get(__ret__, 'vpc_id'))", "def on_subnet_set(self, response, subnet_id):\n LOG.debug(\"Subnet %s created or updated\", subnet_id)\n subnet_data = etcdutils.safe_decode_json(response.value, 'subnet')\n\n if subnet_data is None:\n LOG.warning(\"Invalid subnet data %s\", response.value)\n return\n\n if not (isinstance(subnet_data, dict) and\n 'cidr' in subnet_data and\n 'gateway_ip' in subnet_data):\n LOG.warning(\"Invalid subnet data: %s\", subnet_data)\n return\n\n self.subnets_by_id[subnet_id] = subnet_data\n return", "def get_vpc_subnets(self, vpc_data, region, field=\"\"):\n if not vpc_data:\n return None\n # pylint: disable=line-too-long\n tmp_vpc_client = ibm.client(region=region)\n subnets_attached_to_routing_table = tmp_vpc_client.list_subnets(\n routing_table_id=vpc_data[\"default_routing_table\"][\"id\"]\n ).get_result()[\"subnets\"]\n if field:\n return [subnet[field] for subnet in subnets_attached_to_routing_table]\n else:\n return subnets_attached_to_routing_table", "def list(self, tenant=None, network=None, gateway_ip=None, cidr=None):\n path = '%s/subnets' % self.ver \n \n query = {}\n if tenant is not None:\n query['tenant_id'] = tenant\n if network is not None:\n query['network_id'] = network\n if gateway_ip is not None:\n query['gateway_ip '] = gateway_ip\n if cidr is not None:\n query['cidr '] = cidr \n path = '%s?%s' % (path, urlencode(query))\n \n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Get openstack subnets: %s' % truncate(res))\n return res[0]['subnets']", "def subnets(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subnets\")", "def delete_subnet(self, subnet_id):\n\n LOG_OBJ.debug(\"Deleting subnet %s\" % subnet_id)\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets/\" + \\\n subnet_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, 
_headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting subnet:%s\" %\n subnet_id)\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of subnet Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the subnet : %s \" % subnet_id)\n return True", "def get_subnet_output(availability_zone: Optional[pulumi.Input[Optional[str]]] = None,\n availability_zone_id: Optional[pulumi.Input[Optional[str]]] = None,\n cidr_block: Optional[pulumi.Input[Optional[str]]] = None,\n default_for_az: Optional[pulumi.Input[Optional[bool]]] = None,\n filters: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetSubnetFilterArgs']]]]] = None,\n id: Optional[pulumi.Input[Optional[str]]] = None,\n ipv6_cidr_block: Optional[pulumi.Input[Optional[str]]] = None,\n state: Optional[pulumi.Input[Optional[str]]] = None,\n tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,\n vpc_id: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubnetResult]:\n ...", "def subnetting(self):\n ip = netaddr.IPNetwork(addr=self.subnet)\n subnets = list(ip.subnet(prefixlen=24))\n list_subnets = [str(subnet) for subnet in subnets]\n return list_subnets", "def getData(self, taskId:int):\n return self.pool.getData(taskId)", "def private_subnet(template, name):\n return template.resources[name]", "def test_one_subnet(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/16\", \n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=[\"10.0.1.0/24\"],\n )", "def _get_subnet_by_cidrs(subnet_cidrs, tag_prefix,\n vpc_id=None, ec2_client=None, region_name=None):\n subnet_by_cidrs = OrderedDict()\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n for cidr_block in subnet_cidrs:\n resp = ec2_client.describe_subnets(Filters=[\n {'Name': 'vpc-id', 'Values': [vpc_id]},\n {'Name': 'cidr-block', 'Values': [cidr_block]}])\n if len(resp['Subnets']) > 1:\n raise RuntimeError(\n \"%s There are more than one subnet for CIDR block %s\" % (\n tag_prefix, cidr_block))\n if resp['Subnets']:\n subnet = resp['Subnets'][0]\n LOGGER.info(\n \"%s found subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet['SubnetId'], subnet['AvailabilityZone'],\n cidr_block)\n subnet_by_cidrs[cidr_block] = subnet\n else:\n subnet_by_cidrs[cidr_block] = None\n return subnet_by_cidrs", "def getData(self, slice=None):\n\t\traise NotImplementedError", "def cluster_subnet(self) -> str:\n return pulumi.get(self, \"cluster_subnet\")", "def sc_subnet(self):\n return self._sc_subnet", "def read_one(oid):\n\n subnetMode = SubnetMode.query.filter(SubnetMode.id == oid).one_or_none()\n\n if subnetMode is not None:\n # Serialize the data for the response\n subnetMode_schema = SubnetModeSchema()\n data = subnetMode_schema.dump(subnetMode)\n return data\n else:\n abort(404, f\"SubnetMode with id {oid} not found\")", "def get_data(self, addr):\n\t\tret_val = None\n\t\tif addr < 0:\n\t\t\tprint(\"FAIL - negative address\")\n\t\tif addr >= len(self.data):\n\t\t\ttry:\n\t\t\t\tret_val = self.regs[ addr ]\n\t\t\texcept:\n\t\t\t\tret_val = 0\n\t\telse:\n\t\t\tret_val = self.data[ addr ]\n\n\t\treturn ret_val", "def get_subnet(expand: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n subnet_name: Optional[str] = None,\n 
virtual_network_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetResult:\n __args__ = dict()\n __args__['expand'] = expand\n __args__['resourceGroupName'] = resource_group_name\n __args__['subnetName'] = subnet_name\n __args__['virtualNetworkName'] = virtual_network_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:network:getSubnet', __args__, opts=opts, typ=GetSubnetResult).value\n\n return AwaitableGetSubnetResult(\n address_prefix=pulumi.get(__ret__, 'address_prefix'),\n address_prefixes=pulumi.get(__ret__, 'address_prefixes'),\n application_gateway_ip_configurations=pulumi.get(__ret__, 'application_gateway_ip_configurations'),\n delegations=pulumi.get(__ret__, 'delegations'),\n etag=pulumi.get(__ret__, 'etag'),\n id=pulumi.get(__ret__, 'id'),\n ip_allocations=pulumi.get(__ret__, 'ip_allocations'),\n ip_configuration_profiles=pulumi.get(__ret__, 'ip_configuration_profiles'),\n ip_configurations=pulumi.get(__ret__, 'ip_configurations'),\n name=pulumi.get(__ret__, 'name'),\n nat_gateway=pulumi.get(__ret__, 'nat_gateway'),\n network_security_group=pulumi.get(__ret__, 'network_security_group'),\n private_endpoint_network_policies=pulumi.get(__ret__, 'private_endpoint_network_policies'),\n private_endpoints=pulumi.get(__ret__, 'private_endpoints'),\n private_link_service_network_policies=pulumi.get(__ret__, 'private_link_service_network_policies'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n purpose=pulumi.get(__ret__, 'purpose'),\n resource_navigation_links=pulumi.get(__ret__, 'resource_navigation_links'),\n route_table=pulumi.get(__ret__, 'route_table'),\n service_association_links=pulumi.get(__ret__, 'service_association_links'),\n service_endpoint_policies=pulumi.get(__ret__, 'service_endpoint_policies'),\n service_endpoints=pulumi.get(__ret__, 'service_endpoints'),\n type=pulumi.get(__ret__, 'type'))", "def subnet_id_lookup(session, subnet_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_subnets(Filters=[{\"Name\": \"tag:Name\", \"Values\": [subnet_domain]}])\n if len(response['Subnets']) == 0:\n return None\n else:\n return response['Subnets'][0]['SubnetId']", "def get_data(self, label: str) -> Any:\r\n return self._get_resource(label, self._data, \"data\")", "def _create_test_segment_with_subnet(self,\n network=None,\n cidr='2001:db8:0:0::/64',\n physnet='physnet'):\n network, segment = self._create_test_network_and_segment(network,\n physnet)\n subnet = self._create_test_subnet_with_segment(network, segment, cidr)\n return network, segment, subnet", "def getData(self):\n if len(self.data)==2+4:\n return (socket.AF_INET,socket.inet_ntop(socket.AF_INET,self.data[2:]))\n elif len(self.data)==2+16:\n return (socket.AF_INET6,socket.inet_ntop(socket.AF_INET6,self.data[2:]))\n else:\n raise InvalidAddressTypeException(self)", "def pull_data(self, sub_folder): \n\n data = self.data_interface.load_files_subdirect(sub_folder)\n\n return data", "def getData(self, label):\n\n data = self._getData(label)\n if data is not None:\n return data.copy()", "def subnets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"subnets\")", "def to_internal_value(self, data):\n 
floating_ip = None\n if 'url' in data:\n # use HyperlinkedRelatedModelSerializer (parent of NestedFloatingIPSerializer)\n # method to convert \"url\" to FloatingIP object\n floating_ip = super(NestedFloatingIPSerializer, self).to_internal_value(data)\n\n # use HyperlinkedModelSerializer (parent of HyperlinkedRelatedModelSerializer)\n # to convert \"subnet\" to SubNet object\n internal_value = super(core_serializers.HyperlinkedRelatedModelSerializer, self).to_internal_value(data)\n subnet = internal_value['internal_ip']['subnet']\n\n return floating_ip, subnet", "def subnets(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"subnets\")", "def on_subnet_del(self, response, subnet_id):\n LOG.info(\"Subnet %s deleted\", subnet_id)\n if subnet_id in self.subnets_by_id:\n del self.subnets_by_id[subnet_id]\n return", "def run(self, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.get_subnet(subnet[\"id\"])", "def read_all():\n\n # Create the list of subnetModes from our data\n subnetMode = SubnetMode.query.order_by(SubnetMode.key).all()\n app.logger.debug(pformat(subnetMode))\n # Serialize the data for the response\n subnetMode_schema = SubnetModeSchema(many=True)\n data = subnetMode_schema.dump(subnetMode)\n return data", "def get_data(self, variable):\n return self.data.get(variable)", "def pre_subnet_read(self, resource_id):\n pass", "def update_subnet(\n self,\n name_or_id,\n subnet_name=None,\n enable_dhcp=None,\n gateway_ip=None,\n disable_gateway_ip=None,\n allocation_pools=None,\n dns_nameservers=None,\n host_routes=None,\n ):\n subnet = {}\n if subnet_name:\n subnet['name'] = subnet_name\n if enable_dhcp is not None:\n subnet['enable_dhcp'] = enable_dhcp\n if gateway_ip:\n subnet['gateway_ip'] = gateway_ip\n if disable_gateway_ip:\n subnet['gateway_ip'] = None\n if allocation_pools:\n subnet['allocation_pools'] = allocation_pools\n if dns_nameservers:\n subnet['dns_nameservers'] = dns_nameservers\n if host_routes:\n subnet['host_routes'] = host_routes\n\n if not subnet:\n self.log.debug(\"No subnet data to update\")\n return\n\n if disable_gateway_ip and gateway_ip:\n raise exc.OpenStackCloudException(\n 'arg:disable_gateway_ip is not allowed with arg:gateway_ip'\n )\n\n curr_subnet = self.get_subnet(name_or_id)\n if not curr_subnet:\n raise exc.OpenStackCloudException(\n \"Subnet %s not found.\" % name_or_id\n )\n\n return self.network.update_subnet(curr_subnet, **subnet)", "def subnets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subnets\")", "def subnet_create_event(self, subnet_info):\n\n subnet = subnet_info.get('subnet')\n if subnet:\n self.create_subnet(subnet)\n else:\n # Check whether request is for subnets.\n subnets = subnet_info.get('subnets')\n if subnets:\n for subnet in subnets:\n self.create_subnet(subnet)", "def test_list_subnets(self):\n print(self.the_client.list_subnets())", "def read_block_data(self, addr, reg):\n raise NotImplementedError()", "async def route(self, subnet):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"route\", \"for\", str(subnet)])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], 
shorten_by=12):\n await self.bot.say(chat_formatting.box(page))", "async def __getDataFromBalance(self, account) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n data = {}\n\n URL_BALANCE = API_HOST + \"/api/resources/account/{account}/balance?count=-1\"\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL_BALANCE.format(account=account))\n if response.status == 200:\n data = (await response.json())[\"data\"]\n\n indice = [i for i, x in enumerate(data) if x[\"details\"] == \"DEBT\"][\n 0\n ]\n\n deb = data[indice][\"amount\"]\n\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"balance_data\": data}", "def run(self, subnet='8.8.8.8/32', **kwargs):\n results = []\n gateways = netifaces.gateways()\n gateway_ip, ifc_guid = gateways['default'][netifaces.AF_INET]\n ifc = [interface['name'] for interface in\n scapy.arch.windows.get_windows_if_list()\n if interface['guid'] == ifc_guid][0]\n responses, no_responses = srp(Ether(dst=\"ff:ff:ff:ff:ff:ff\") / ARP(pdst=gateway_ip), iface=ifc,\n timeout=self.TIMEOUT, verbose=0)\n if len(responses) != 1:\n return\n\n arp_request, arp_reply = responses[0]\n gateway_mac = arp_reply.hwsrc\n\n responses, no_responses = srp(Ether(dst=gateway_mac) / IP(dst=subnet) / ICMP(), iface=ifc,\n timeout=self.TIMEOUT, verbose=0)\n for request, reply in responses:\n if reply.payload.payload.fields['type'] == 0 and reply.payload.payload.fields['code'] == 0:\n results.append(WANIPScanResult(reply[IP].src))\n\n return ScanResult(self.NAME, results)", "def subnet_id(self, subnet_id):\n self._subnet_id = subnet_id", "def list_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnetList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_data(self, data_to_cpu=False, *args):\n raise NotImplementedError", "def 
data(self, key):\n with self._cv:\n _, val = self._get(key)\n return val.data", "def get_value(self, address):\n\n return self.data[address]", "def list_subnets(self, identifier=None, datacenter=None, version=0,\r\n subnet_type=None, **kwargs):\r\n if 'mask' not in kwargs:\r\n kwargs['mask'] = DEFAULT_SUBNET_MASK\r\n\r\n _filter = NestedDict(kwargs.get('filter') or {})\r\n\r\n if identifier:\r\n _filter['subnets']['networkIdentifier'] = query_filter(identifier)\r\n if datacenter:\r\n _filter['subnets']['datacenter']['name'] = \\\r\n query_filter(datacenter)\r\n if version:\r\n _filter['subnets']['version'] = query_filter(version)\r\n if subnet_type:\r\n _filter['subnets']['subnetType'] = query_filter(subnet_type)\r\n else:\r\n # This filters out global IPs from the subnet listing.\r\n _filter['subnets']['subnetType'] = {'operation': '!= GLOBAL_IP'}\r\n\r\n kwargs['filter'] = _filter.to_dict()\r\n\r\n return self.account.getSubnets(**kwargs)", "def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes", "def get_layer_data(self, layer_id, unit = None, flatten = False):\n data = self.net.blobs[layer_id].data\n return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])", "def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")", "def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")", "def get_ip_address(self, ip_address=None):\r\n svc = self.client['Network_Subnet_IpAddress']\r\n return svc.getByIpAddress(ip_address)", "def calcBroadcastBySubnet(subnet, mask):\n if not isValidMask(mask):\n return False\n\n try:\n subnet_num = ip2int(subnet)\n\n # calc host bit num\n host_bit = bin(ip2int(mask)).count('1')\n\n # replace 32 - host_bit numbers 0 to 1\n binstr = ''\n if host_bit < 32:\n binstr = bin(subnet_num)[host_bit - 32:]\n\n binstr = ''.join('1' for b in binstr)\n binstr = ''.join([bin(subnet_num)[:host_bit + 2], binstr])\n\n broadcast_num = int(binstr, 2)\n return int2ip(broadcast_num)\n except Exception:\n return False", "async def get_balance(sochain_url:str, network:str, address:str):\n try:\n balance = await sochain_api.get_balance(sochain_url, network, address)\n if balance == None:\n raise Exception(\"Invalid Address\")\n return balance\n except Exception as err:\n raise Exception(str(err))", "def virtual_network_subnet_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")", "def test_list_host_subnet(self):\n pass", "def getRegionData(self, polygon: Polygon, epsg: int, region: str):\n pipeline 
= self.runPipeline(polygon, epsg, region)\n arr = pipeline.arrays[0]\n return self.makeGeoDf(arr)", "def get_data(node):\n return node['data']", "def test_create_subnet(self):\n client_token = generate_client_token()\n subnet_name = 'test_subnet_name1' + client_token\n subnet_cidr = '192.168.0.64/26'\n self.assertEqual(\n type(self.the_client.create_subnet(subnet_name,\n 'cn-bj-a',\n subnet_cidr,\n vpc_id,\n client_token=client_token)),\n baidubce.bce_response.BceResponse)", "def get_data(self, key):\n if type(key) is str:\n idx = self.get_data_id(key)\n elif type(key) is int:\n idx = key\n else:\n raise TypeError('key must be a string or a integer.')\n n = self.data_count()\n if 0 > idx or idx > n - 1:\n raise ValueError('Bad value for key parameter')\n return(self.data[idx])", "def get_data(self, out_format: str='json'):\n if self.data:\n return self.data\n self.data_ready = self.check_available()\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = self.parse_json_data(data)\n return self.data", "def _get_router_by_ip_address(self, subnet_cidr):\n router_id = None\n\n subnets = self.neutron.list_subnets(cidr=subnet_cidr, tenant_id=self.project_id)\n try:\n subnet_id = subnets['subnets'][0]['id']\n except (IndexError, KeyError) as e:\n msg = \"No subnet found with cidr address %s!\" % subnet_cidr\n logger.error(' '.join([msg, \"ERROR:\", str(e)]))\n raise VIMAgentsException(ERROR, msg)\n\n ports = self.neutron.list_ports(device_owner=\"network:router_interface\", tenant_id=self.project_id)\n for port in ports['ports']:\n for port_ip in port['fixed_ips']:\n if port_ip['subnet_id'] == subnet_id:\n router_id = port['device_id']\n break\n if router_id:\n break\n\n if router_id is None:\n msg = \"No virtual router interface attached to subnet %s\" % subnet_cidr\n logger.error(msg)\n raise VIMAgentsException(ERROR, msg)\n\n router = self.neutron.show_router(router_id)\n\n return router['router']", "def get_data_thread(self, ip):\n get_data_thread = Thread(target=self.get_data, args=[ip])\n get_data_thread.start()", "def _run_query(self):\n self._search_query()\n payload_describe = {\n 'zone': self._zone,\n 'owner': self._owner,\n }\n if self._subnet_name:\n payload_describe.update({\"subnet_name\": self._subnet_name})\n _resp = SubnetService.describe_subnet(payload_describe)\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list", "def subnets(self) -> Sequence[str]:\n return pulumi.get(self, \"subnets\")" ]
[ "0.65944624", "0.6546326", "0.64507025", "0.61743736", "0.6011078", "0.6010956", "0.58433086", "0.58370644", "0.57784945", "0.57641536", "0.5734289", "0.57341164", "0.5653118", "0.56335646", "0.556625", "0.556153", "0.5557205", "0.54509723", "0.54425985", "0.5385783", "0.53671175", "0.5328859", "0.5318224", "0.52709305", "0.5257651", "0.52563286", "0.5235928", "0.5233104", "0.5233104", "0.5178869", "0.5161708", "0.5156851", "0.5156851", "0.51234215", "0.51120466", "0.50971276", "0.50845677", "0.5075697", "0.5067147", "0.5067141", "0.50652313", "0.50554246", "0.50417787", "0.5024407", "0.5023029", "0.50174236", "0.5011668", "0.50028294", "0.5000888", "0.49834004", "0.49822438", "0.4980626", "0.49288395", "0.49214357", "0.49180824", "0.49071983", "0.48883492", "0.48786795", "0.4875381", "0.48751822", "0.48751822", "0.4874264", "0.48592204", "0.48573217", "0.48569214", "0.485502", "0.4854342", "0.48540956", "0.4850154", "0.48016322", "0.47644785", "0.47404885", "0.47354144", "0.47263977", "0.47238114", "0.47196507", "0.4713071", "0.46983585", "0.46978283", "0.46962065", "0.4688625", "0.46772763", "0.46703312", "0.46672547", "0.46671072", "0.46671072", "0.4661982", "0.46619257", "0.4659974", "0.46596014", "0.46570218", "0.46377823", "0.4628124", "0.46227658", "0.46128875", "0.46081707", "0.4596451", "0.45866537", "0.45844632", "0.45782566" ]
0.6255019
3
Run the EtcdWatcher loop.
def run(self): self.etcd.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()", "def run(self):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n event_handler = PatternMatchingEventHandler(\n self.patterns,\n self.ignore_patterns,\n self.ignore_directories,\n self.case_sensitive\n )\n\n # event_handler.on_deleted = self.on_deleted\n # event_handler.on_modified = self.on_modified\n event_handler.on_moved = self.on_moved\n event_handler.on_created = self.on_created\n\n go_recursively = True\n\n observer = Observer()\n observer.schedule(event_handler, self.path, recursive=go_recursively)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def run_inner(self):\n for event in self.inotify.event_gen():\n self.process_inotify_event(event)", "def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()", "def run(self):\n while True:\n time.sleep(RTM_READ_DELAY)\n for event in self._slack_client.rtm_read():\n self.handle_event(event)", "def _main_loop(self):\n observer = Observer()\n observer.schedule(self.changes_event_handler, path=self.base_dir, recursive=False)\n observer.start()\n while True:\n if os.path.exists(self.todo_local_file):\n with open(self.todo_local_file, 'rb') as f:\n obj_list = pickle.load(f)\n\n today_todo_list = [i for i in obj_list if self.is_today_todo(i['time'])]\n self.solve_one_day_todo_events(todo_items_list=today_todo_list)\n else:\n time.sleep(60)\n pass", "def run(self):\n if not self.running:\n self.loop.run_forever()", "def event_loop(self):\n while self.ack is False:\n gevent.sleep(self.loop_interval)\n output_service = self.get_directory_service_proxy().get_service(\"mock-output-service\")\n output_service.put(\"test-worker-work-result\")\n self.ack = True", "def run(self): \n #\n\n \n # forever loop\n while True: \n \n for app in self.app_list:\n self.check(app) \n #print(\"check\")\n \n gevent.sleep(SLEEP_SECONDS)", "def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()", "def run_forever(self):\n reactor.run()", "def run_forever(self):\n reactor.run()", "def run(self):\n # for running indefinitely if 'watch' is passed\n if self._arguments.watch:\n while True:\n self.watch(self.main(), int(self._arguments.watch))\n else:\n self.main()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def run(self):\n self.connect()\n self.run_forever()", "def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return", "def run(self):\n self.logger.info(f'Running {self.__class__.__name__}')\n while True:\n last_check = time.time()\n self.collect_new_events()\n while time.time() - last_check < self._check_for_new_events_interval:\n self.logger.debug('Waiting for new events collection: new collection in {}s'.format(\n self._check_for_new_events_interval - (time.time() - last_check)))\n time.sleep(1)", "def run(self):\n 
self.poller = select.epoll()\n self.pollmask = select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR\n self.poller.register(self.server,self.pollmask)\n self.timeout = float(self.webconfig.parameters[\"timeout\"])\n lastSweep = time.time()\n\n while True:\n # poll sockets\n\n if (time.time() - lastSweep) > .5: #sweet through every half second\n self.socketCheck()\n lastSweep = time.time()\n try:\n fds = self.poller.poll(timeout=1.0)\n except:\n return\n fd = 0\n for (fd,event) in fds:\n # handle errors\n if event & (select.POLLHUP | select.POLLERR):\n self.handleError(fd)\n continue\n # handle the server socket\n if fd == self.server.fileno():\n self.handleServer()\n continue\n # handle client socket\n result = self.handleClient(fd)", "def run(self):\n self.cmdloop()", "def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def run(self):\n while True:\n try:\n if not self._read_new_entries(False):\n time.sleep(0.1)\n self._update_all_tasks()\n except KeyboardInterrupt:\n break", "def main(dir_to_watch):\n event_handler = AudioCreatedHandler()\n observer = Observer()\n observer.schedule(event_handler, dir_to_watch, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1) \n except KeyboardInterrupt:\n print \"Stopping...\"\n observer.stop()\n observer.join()", "def run(self):\n # get the active node before we start anything...\n self.active_node_ip_port = self.get_active_node()\n if self.active_node_ip_port is None:\n logger.critical(\"ERROR: Could not get active vault node from \"\n \"Consul. Exiting.\")\n raise SystemExit(3)\n logger.warning(\"Initial Vault active node: %s\",\n self.active_node_ip_port)\n site = Site(VaultRedirectorSite(self))\n # setup our HTTP(S) listener\n if self.tls_factory is not None:\n self.listentls(site)\n else:\n self.listentcp(site)\n # setup the update_active_node poll every POLL_INTERVAL seconds\n self.add_update_loop()\n logger.warning('Starting Twisted reactor (event loop)')\n self.run_reactor()", "def run(self):\n while self.running:\n self.handle_request()", "def run(self):\n try:\n self.eventloop.run_forever()\n except KeyboardInterrupt:\n self.logger.info(\"Interrupt received, shutting down.\")\n except Exception:\n self.logger.exception(\"Unhandled exception raised, shutting down.\")\n finally:\n self._shutdown()\n self.logger.debug(\"Closing event loop\")\n self.eventloop.close()\n if self._restarting:\n self.logger.info(f\"Restarting with command line: {sys.argv}\")\n os.execl(sys.executable, sys.executable, *sys.argv)", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()", "async def _main(self):\n while True:\n time.sleep(1)", "def run_forever(self):\n while True:\n self.listener.ready()\n yield self.listener.wait()\n self.listener.ok()", "def run_forever(self, *args, **kwargs):\n try:\n self.logger.debug('Begin account update')\n\n # get account-updater server ownership\n self.get_ownership_obj = threading.Thread(target = self.msg.get_my_ownership)\n self.get_ownership_obj.setDaemon(True)\n self.get_ownership_obj.start()\n\n self.walker_obj = Walker(self.walker_map, 
self.__param, self.logger)\n self.walker_obj.setDaemon(True)\n self.walker_obj.start()\n self.logger.info(\"Walker Started\")\n self.reader_obj = Reader(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.reader_obj.setDaemon(True)\n self.reader_obj.start() \n self.logger.info(\"Reader Started\")\n self.account_sweeper = AccountSweep(self.__param, self.logger)\n self.account_sweeper.setDaemon(True)\n self.account_sweeper.start()\n self.logger.info(\"Account Sweeper Started\") \n self.updater_obj = Updater(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.updater_obj.setDaemon(True)\n self.updater_obj.start() \n self.logger.info(\"Updater Started\") \n self.container_sweeper = ContainerSweeper(self.walker_map, \\\n self.reader_map, self.__param, self.logger)\n self.container_sweeper.setDaemon(True)\n self.container_sweeper.start()\n self.logger.info(\"Container Sweeper Started\") \n\n account_updater_server = ThreadedAccountUpdaterServer(\\\n (self.__get_node_ip(gethostname()), \\\n self.__account_updater_port), HttpListener)\n account_updater_server.serve_forever()\n except Exception as ex:\n self.logger.error(\"Exception occured: %s\" % ex)", "def run(self):\n while not self.stop_event.is_set():\n self.manage_cache_tasks()", "def _run(self):\n while(self._loop):\n pass", "def run(self):\n self.logger.info(\"start consuming api calls\")\n while not self.shutdown:\n self.rpc.listen()", "def run():\n\n while True:\n\n # get event, blah\n event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name", "def run(self):\r\n\r\n while self.running:\r\n while self.has_forks is False:\r\n self.think()\r\n self.get_forks()\r\n self.eat()\r\n self.put_forks()", "def run(self):\n self._start_servers()\n monitor = KodiMonitor(self.nx_common, self.nx_common.log)\n while not monitor.abortRequested():\n monitor.update_playback_progress()\n try:\n if self.library_update_scheduled() and self._is_idle():\n self.update_library()\n except RuntimeError as exc:\n self.nx_common.log(\n 'RuntimeError: {}'.format(exc), xbmc.LOGERROR)\n if monitor.waitForAbort(5):\n break\n self._shutdown()", "def main(ip, port):\n conffile = os.path.expanduser('~') + '/.onedirclient'\n conffile = os.path.join(conffile, 'client.json')\n conffile = os.path.abspath(conffile)\n jd = open(conffile)\n conf = json.load(jd)\n jd.close()\n conf['ip'] = ip\n conf['port'] = port\n ListenerContainer.login = conf\n ListenerContainer.client = OneDirFtpClient(\n ip,\n port,\n conf['username'],\n conf['nick'],\n conf['password'],\n conf['root_dir']\n )\n t = Thread(target=checker, name='checker', args=())\n t.start()\n ListenerContainer.is_syncing = conf['is_syncing']\n ListenerContainer.root_dir = conf['root_dir']\n ListenerContainer.nick = conf['nick']\n notifier = pyinotify.Notifier(ListenerContainer.watch_manager, EventHandler())\n ListenerContainer.add_watch(conf['root_dir'])\n ListenerContainer.last_sync = conf['last_sync']\n ListenerContainer.add_config(conffile)\n db = os.path.expanduser('~')\n db = db + '/.onedirclient/sync.db'\n ListenerContainer.sync_db = TableManager(db, 'local')\n if not conf['is_syncing']:\n ListenerContainer.sync_db.connect()\n while 
True:\n try:\n # notifier.process_events()\n # if notifier.check_events():\n # notifier.read_events()\n notifier.loop()\n except KeyboardInterrupt:\n if ListenerContainer.move_to_folder:\n try:\n ListenerContainer.client.delete_folder(ListenerContainer.move_to_folder)\n except error_perm:\n pass # Nothing to do\n if ListenerContainer.move_to_file:\n try:\n ListenerContainer.client.delete_file(ListenerContainer.move_to_file)\n except error_perm:\n pass # Nothing to do\n ListenerContainer.is_checking = False\n notifier.stop()\n update_last_sync()\n break\n except not KeyboardInterrupt as e:\n reset()", "def run_version(watcher: inotify.adapters.Inotify, config: Config) -> None:\n for directory in config[\"directories\"]:\n make_dir(directory)\n\n for watch_job in config[\"watches\"]:\n if watch_job[\"type\"] == \"simple\":\n watch_job_simple = cast(ConfigSimpleValue, watch_job)\n watch_directory_recursively(watcher, watch_job_simple[\"path\"])\n elif watch_job[\"type\"] == \"regex\":\n watch_job_regex = cast(ConfigRegexValue, watch_job)\n watch_directory_recursively(watcher, watch_job_regex[\"base_path\"])\n else:\n logging.warning(f\"Unknown watch job type: {watch_job['type']}\")\n\n for _, type_names, path, filename in watcher.event_gen(yield_nones=False):\n filepath = os.path.join(path, filename)\n\n # print(f\"event: {type_names}, {path}, {filename}\")\n if \"IN_CREATE\" in type_names and \"IN_ISDIR\" in type_names: # Directory was created\n watcher.add_watch(filepath)\n logging.warning(f\"Watching new directory {filepath}\")\n continue\n\n if \"IN_CLOSE_WRITE\" not in type_names: # Skip anything else as we're after events after a file has been written\n continue\n\n simple_conf, regex_conf = get_watch_job(filepath, config)\n\n if simple_conf: # Process simple files put in directory\n slug = simple_conf[\"slug\"]\n blob_name = f\"{slug}/{filename}\"\n\n upload_file(filepath, simple_conf[\"dsn\"], simple_conf[\"container\"], blob_name)\n\n elif regex_conf: # Check if filepath matches regex\n local_path = filepath.replace(regex_conf[\"base_path\"], \"\").lstrip(\"/\")\n match = re.match(regex_conf[\"regex\"], local_path)\n if not match:\n logging.warning(f\"No watches to cover file: {filename}\")\n continue\n\n match_data = match.groupdict()\n match_data[\"filename\"] = filename\n blob_name = regex_conf[\"dest_path\"].format(**match_data)\n upload_file(filepath, regex_conf[\"dsn\"], regex_conf[\"container\"], blob_name)\n\n else:\n logging.warning(f\"No watches to cover file: {filename}\")\n\n return None", "def run(self) -> None:\n\n while not self.stop_event.is_set():\n if self.my_queue:\n # if heartbeat received at '/heartbeat' route from the monitored peer,\n # sleep until next\n self.my_queue.clear()\n time.sleep(7)\n\n else:\n # else drop peer data from database and inform central server appending '0'\n # to my queue\n self.db_access.drop_peer(self.peer_id)\n self.my_queue.append(0)\n break", "def loop_forever(self):\n self.client.loop_forever()", "def run(self):\n while True:\n try:\n # probing the parent process.\n if self._target_pid is not None:\n os.kill(self._target_pid, 0)\n time.sleep(self._poll_interval)\n except OSError:\n self._on_stop_callback()\n break", "def run(self):\n ioloop.IOLoop.current().start()", "def run_forever(self):\n asyncio.run(self._loop_body())", "async def run(self):\n while True:\n await asyncio.sleep(0)\n # See if any sockets have anything\n try:\n socks, events = self.poller.poll(1000)\n for sock, event in zip(socks,events):\n if sock in 
self.subscriptions:\n states = sock.recv_json()\n await self.main_server.sync_states(states)\n\n # Nothing to report - Poller did not find any sockets with updates\n except ValueError:\n pass\n # Exiting\n except KeyboardInterrupt:\n break", "def _mainloop(self):\n while not self._shutdown:\n events = self._selector.select(timeout=0.01)\n for key, _ in events:\n key.data(key.fileobj)\n self.close()", "def run(self):\n if self.config.daemon():\n self.logger.info(\"Running forever, in a loop\")\n self.run_forever()\n else:\n self.logger.info(\"Running once, to completion\")\n self.run_once()", "def run(self):\n while True:\n try:\n sleep(influx_settings.write_freq)\n self._redis2influx()\n except InterruptExceptions as ex:\n raise ex", "def run_loop(self):\r\n server_log.info('Server now accepting client connections.')\r\n while not self.clients_done():\r\n asyncore.loop(timeout=config[\"server_timeout\"], count=config[\"server_loop_count\"])", "def run(self):\n self.listen(self.input_topics.filter_by(transmission='tcp'))\n\n logging.info('Getting into the listening loop')\n self.running = True\n while self.running:\n self.loop()", "def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise", "def run(self):\n self._setupLogger()\n self.setup()\n\n self.logger.info(self.moduleName + \" starting run loop.\")\n\n while True:\n self.loop()", "def run_daemon(self):\n logging.info(\"Starting in daemon mode.\")\n while True:\n logging.info(\"Starting a new iteration.\")\n self.request_messages()\n logging.info(\"Iteration finished.\")\n try:\n sleep(self.daemon_interval)\n except KeyboardInterrupt:\n logging.warning(\"Ctrl-C received, stopping daemon.\")\n break\n logging.info(\"Nothing more to be done, we will exit.\")\n exit(0)", "async def watchForFileSystemEvents(self):\n\n # Things that can throw this off:\n #\n # * Moving a watched directory out of the watch tree (will still\n # generate events even when outside of directory tree)\n #\n # * Doing two changes on a directory or something before the program\n # has a time to handle it (this will also throw off a lot of inotify\n # code, though)\n #\n # * Moving a watched directory within a watched directory will get the\n # wrong path. This needs to use the cookie system to link events\n # together and complete the move properly, which can still make some\n # events get the wrong path if you get file events during the move or\n # something silly like that, since MOVED_FROM and MOVED_TO aren't\n # guaranteed to be contiguous. 
That exercise is left up to the\n # reader.\n #\n # * Trying to watch a path that doesn't exist won't automatically\n # create it or anything of the sort.\n #\n # * Deleting and recreating or moving the watched directory won't do\n # anything special, but it probably should.\n #\n async for event in self.inotify:\n\n if not self.continueWatchingFS :\n return\n\n # If this is a creation event, add a watch for the new path (and its\n # subdirectories if any)\n #\n if Mask.CREATE in event.mask and event.path is not None :\n await self.watchAPath(event.path)\n\n if Mask.DELETE_SELF in event.mask and event.path is not None :\n await self.unWatchAPath(event.path, event.watch)\n\n # If there are some bits in the cpMask in the event.mask yield this\n # event\n #\n if event.mask & self.cpMask:\n yield event\n else:\n # Note that these events are needed for cleanup purposes.\n # We'll always get IGNORED events so the watch can be removed\n # from the inotify. We don't need to do anything with the\n # events, but they do need to be generated for cleanup.\n # We don't need to pass IGNORED events up, because the end-user\n # doesn't have the inotify instance anyway, and IGNORED is just\n # used for management purposes.\n #\n self.logger.debug(f'UNYIELDED EVENT: {event}')", "def run_forever(self):\n self._loop.run_until_complete(self._loop_body())", "def run(self):\n\n self.dbg_state = \"running\"\n\n while self.active:\n try:\n sel_in, sel_out, sel_err = \\\n select.select(self.sockets(), [], self.sockets(), 1)\n except:\n print( sys.exc_info())\n self.logger.error(\"Select error, disconnecting\")\n self.disconnect()\n\n for s in sel_err:\n self.logger.error(\"Got socket error on: \" + str(s) + \", disconnecting\")\n self.disconnect()\n\n for s in sel_in:\n if self._socket_ready_handle(s) == -1:\n self.disconnect()\n\n # End of main loop\n self.dbg_state = \"closing\"\n self.logger.info(\"Exiting controller thread\")\n self.shutdown()", "async def _run(self) -> None:\n\n while True:\n # The \"Exiting event loop\" checks are a bit ugly. They're in place\n # so that the event loop exits on its own at predefined positions\n # instead of randomly getting thrown a CancelledError.\n #\n # Now that I think about it, the whole function looks kinda ugly.\n # Maybe one day (yeah, right), I'll clean this up. 
I want to get it\n # working first though.\n\n if self._state != self._RUNNING:\n logger.debug(\"Exiting event loop\")\n return\n\n if self._ws is not None:\n try:\n logger.debug(\"Receiving ws packets\")\n async for packet in self._ws:\n logger.debug(f\"Received packet {packet}\")\n packet_data = json.loads(packet)\n self._process_packet(packet_data)\n except websockets.ConnectionClosed:\n logger.debug(\"Stopped receiving ws packets\")\n else:\n logger.debug(\"No ws connection found\")\n\n if self._state != self._RUNNING:\n logger.debug(\"Exiting event loop\")\n return\n\n logger.debug(\"Attempting to reconnect\")\n while not await self._reconnect():\n logger.debug(\"Reconnect attempt not successful\")\n\n if self._state != self._RUNNING:\n logger.debug(\"Exiting event loop\")\n return\n\n logger.debug(f\"Sleeping for {self.RECONNECT_DELAY}s and retrying\")\n await asyncio.sleep(self.RECONNECT_DELAY)", "def run(self):\n while True:\n if self.timeout - time() <= 0:\n if self.state in [State.CANDIDATE, State.FOLLOWER]:\n self.start_election()\n\n elif self.state is State.LEADER:\n self.send_appends()\n self.commit_entries()\n\n else:\n self.unknown_state()\n\n self.handle_message()", "def _event_loop(self, folders, **kwargs):\n\n observer = kwargs['observer']\n\n for folder in folders:\n observer.schedule(folder.stream)\n\n observer.start()", "def _run(self):\n\n while self._thread_alive_event.is_set():\n reported_events = self._poll.poll(self.POLL_TIMEOUT)\n\n for fd_event_pair in reported_events:\n fd, event = fd_event_pair\n\n if event & select.POLLIN or event & select.POLLPRI:\n self._recv(fd)\n\n elif event & select.POLLERR:\n self.logger.error(\"Error condition of some sort\")\n self._thread_alive_event.clear()\n break\n\n elif event & select.POLLNVAL:\n self.logger.error(\"Invalid request: descriptor not open\")\n self._thread_alive_event.clear()\n break", "def run(self):\n try:\n while self._running:\n time.sleep(1)\n finally:\n self._exit()", "def run(self):\n print('starting up on {} port {}'.format(*self.listener_address))\n self.selector.register(self.listener, selectors.EVENT_READ)\n\n # Serialize our listener's host and port\n serializedAdd = fxp_bytes_subscriber.serialize_address(\n self.listener_address[0], self.listener_address[1])\n\n # Contact with Publisher\n self.listener.sendto(serializedAdd, self.gcd_address)\n\n while True:\n events = self.selector.select(CHECK_INTERVAL)\n for key, mask in events:\n data = self.receive_message()\n self.removeOldQuote()\n self.createGraph(data)\n self.arbitrage()\n self.checkTimeout()", "def run_forever(self):\n self.factory.manager.run_forever()", "def run(self):\n while not self.terminate_event.is_set():\n while self.count > 0 and self.start_event.is_set() and self.interval > 0:\n if self.tick_log:\n if (self.count * self.sleep_chunk - int(self.count * self.sleep_chunk)) == 0.0:\n self.log.debug(\"{name} countdown: {tick} ({interval}s @ step {step:.2f}s)\"\n .format(name=self.name, tick=self.count,\n interval=self.interval, step=self.sleep_chunk))\n if self.reset_event.wait(self.sleep_chunk):\n self.reset_event.clear()\n self.count = self.interval / self.sleep_chunk\n self.count -= 1\n if self.count <= 0:\n self._callback()\n self.count = self.interval / self.sleep_chunk", "def _run(self):\n #print(\"try to connect run\")\n while True:\n self._connect()\n while not self.connected and self.auto_retry is not None:\n gevent.sleep(self.auto_retry)\n self._connect()\n if self.connected:\n self.run()\n if self.auto_retry is None:\n 
break", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def loop(self):\n pass", "async def async_run_forever(self):\n raise NotImplementedError", "def Listen(self):\n while True:\n time.sleep(1)", "def run(self):\n while self.container.process(): pass", "def run(self):\n operation_manager = self._core.get_operation_manager()\n while True:\n while operation_manager.process_next():\n pass\n sleep(2)", "def run(self):\n if self.pollable:\n self.poll()\n if not self.EventsFactory.is_alive():\n self.EventsFactory.start()\n while True:\n if not self.EventsFactory.is_alive():\n self.logger.error(f'{self} events factory has died..')\n raise SubThreadException(self.EventsFactory)\n update_start_time = time.time()\n self.handle_events()\n wait_for(lambda: time.time() - update_start_time > self.handle_events_every and not self._busy_mutext.locked(),\n logger=self.logger, message='Waiting for work timeout to finish.')", "async def run(self):\n # Create dictionaries to hold configured sources and models\n await self.setup()\n await self.start()\n # Load\n if self.mc_config is not None:\n # Restore atomic after config is set, allow setting for now\n atomic = self.mc_atomic\n self.mc_atomic = False\n await self.register_directory(self.mc_config)\n self.mc_atomic = atomic\n # Write out port to file\n if self.portfile is not None:\n pathlib.Path(self.portfile).write_text(str(self.port))\n try:\n # If we are testing then RUN_YIELD will be an asyncio.Event\n if self.RUN_YIELD_START is not False:\n await self.RUN_YIELD_START.put(self)\n await self.RUN_YIELD_FINISH.wait()\n else: # pragma: no cov\n # Wait for ctrl-c\n while True:\n await asyncio.sleep(60)\n finally:\n await self.app.cleanup()\n await self.site.stop()", "def run(self):\n\n self.steer()\n self.drive()", "def loop(self):\n while not self.should_exit:\n self._run_once()\n\n self.on_exit()", "def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)", "def start_watcher():\n while True:\n request_date = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n pull_request_from_remote(remote_files=\"*%s*\" % request_date)\n new_requests = check_for_new_request(request_date=request_date)\n if not new_requests:\n time.sleep(5)\n continue\n\n # noinspection PyTypeChecker\n for r in new_requests:\n print(\"Processing %s\" % r)\n try:\n ret = process_new_request(r, request_date=request_date,\n add2db=True)\n print(ret)\n except:\n os.system('cp -r %s /home/sedm/growth_marshal/archived/failed/'\n % r)\n os.system('cp -r %s /home/sedm/growth_marshal/archived/%s/' %\n (r, request_date))\n\n print(\"Waiting %ss before checking for new request\" % 5)\n time.sleep(5)", "def _main():\n parser = _create_parser()\n args = parser.parse_args()\n\n if args.interval is None:\n args.interval = 10\n\n if args.what_if is None:\n args.what_if = False\n\n loop = asyncio.get_event_loop()\n\n params = {\n \"connection_string\": args.connection_string,\n \"name\": args.name,\n \"interval\": args.interval,\n \"what_if\": args.what_if\n }\n\n loop.run_until_complete(_run(params))", "def run(self):\n \n # Loop through all checkers to do an initial state check\n for checker in self.checkers:\n checker.update_last_state()\n\n # Send initial heartbeat\n self._send_heartbeat()\n \n # Main loop\n while True: \n html = \"\"\n for checker in self.checkers:\n if checker.just_changed_state():\n log.warn(\"Checker {} has changed state.\"\n .format(checker.name))\n html += \"<li>\" + checker.html() + 
\"</li>\\n\"\n \n if isinstance(checker, Process) and checker.state() == FAIL:\n log.warn(\"Process {} is not running.\"\n .format(checker.name))\n html += (\"<li>Attempting to restart \" + \n escape(checker.name) + \"...</li>\\n\")\n try:\n checker.restart()\n except MaxRetriesError, e:\n self.shutdown_reason = str(e)\n return\n time.sleep(5)\n html += (\"<li>State after restart: \" + \n checker.html() + \"</li>\\n\")\n\n if html:\n html = \"<h2>STATE CHANGED:</h2>\\n<ul>\\n\" + html + \"</ul>\\n\" \n html += self.html()\n html += run_commands(self.state_change_cmds)\n self.send_email_with_time(html=html,\n subject=\"Babysitter detected\"\n \" state change.\")\n\n if self._need_to_send_heartbeat():\n self._send_heartbeat()\n\n # Check if a new data subdir has been created\n if self.base_data_dir and self.sub_data_dir:\n if self._find_last_numeric_subdir() != self.sub_data_dir:\n self._send_heartbeat(\"<p>New subdir found so about to restart \"\n \"babysitter. Below are the last stats \"\n \"for the old data subdirectory.</p>\\n\")\n raise NewDataDirError()\n \n time.sleep(UPDATE_PERIOD)", "def run(self):\n \n # Wrap the outer loop in a try block so we can do an orderly shutdown\n # should an exception occur:\n try:\n # Send out a STARTUP event:\n self.dispatchEvent(weewx.Event(weewx.STARTUP))\n \n syslog.syslog(syslog.LOG_INFO, \"engine: Starting main packet loop.\")\n\n last_gc = int(time.time())\n\n # This is the outer loop. \n while True:\n\n # See if garbage collection is scheduled:\n if int(time.time()) - last_gc > self.gc_interval:\n ngc = gc.collect()\n syslog.syslog(syslog.LOG_INFO, \"engine: garbage collected %d objects\" % ngc)\n last_gc = int(time.time())\n\n # First, let any interested services know the packet LOOP is\n # about to start\n self.dispatchEvent(weewx.Event(weewx.PRE_LOOP))\n \n # Get ready to enter the main packet loop. An exception of type\n # BreakLoop will get thrown when a service wants to break the\n # loop and interact with the console.\n try:\n \n # And this is the main packet LOOP. It will continuously\n # generate LOOP packets until some service breaks it by\n # throwing an exception (usually when an archive period\n # has passed).\n for packet in self.console.genLoopPackets():\n \n # Package the packet as an event, then dispatch it.\n self.dispatchEvent(weewx.Event(weewx.NEW_LOOP_PACKET, packet=packet))\n\n # Allow services to break the loop by throwing\n # an exception:\n self.dispatchEvent(weewx.Event(weewx.CHECK_LOOP, packet=packet))\n\n syslog.syslog(syslog.LOG_CRIT, \"engine: Internal error. Packet loop has exited.\")\n \n except BreakLoop:\n \n # Send out an event saying the packet LOOP is done:\n self.dispatchEvent(weewx.Event(weewx.POST_LOOP))\n\n finally:\n # The main loop has exited. Shut the engine down.\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Main loop exiting. 
Shutting engine down.\")\n self.shutDown()", "def run(updater: Updater):\n logger = getLogger()\n logger.info(\"Starting polling\")\n updater.start_polling()", "def _run_cycle(self):\n pass", "async def start_watching_roots(self):\n db.clear_visits(self.db_conn)\n for root in self.config.roots:\n await self.watch_tree(root)\n\n for path in db.get_unvisited_files(self.db_conn):\n print(path)\n await self.process_change(path, None)", "def run(self):\n\t\tfor item in self.pubSub.listen():\n\t\t\tself.processItem(item)", "def run_forever(self):\n while True:\n self.run_once()\n\n self.logger.info(f\"Sleeping for {self.config.sleep()} seconds\")\n time.sleep(self.config.sleep())", "def run(self):\n print('checking for expired cache items...')\n\n while True:\n # Do something\n self.check()\n time.sleep(self.interval)", "def run(self):\n os.chdir(ServerFolder)\n while True:\n request = self.client_socket.recv(1024).decode().strip()\n if not request:\n print(\"Disconnecting from client {}:{}\".format(\n self.client_ip, self.client_port))\n self.client_socket.shutdown(socket.SHUT_RDWR)\n self.client_socket.close()\n break\n request = request.split(\",\")\n\n if request[0] == \"LS\":\n self.ls()\n elif request[0] == \"PWD\":\n self.pwd()\n elif request[0] == \"CD\":\n self.cd(request[1])\n elif request[0] == \"MKDIR\":\n self.mkdir(request[1])\n elif request[0] == \"RMDIR\":\n self.rmdir(request[1])\n elif request[0] == \"RM\":\n self.rm(request[1])\n\n elif request[0] == \"rget\" and len(request[1:]) == 1:\n self.send_file(*request[1:])\n\n elif request[0] == \"rput\" and len(request[1:]) == 2:\n self.receive_file(*request[1:])", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def run(self):\n time_to_quit = False\n while True:\n time_to_quit = self.run_to_yield_or_quit()\n if time_to_quit:\n print(self, 'quitting')\n break\n else:\n time.sleep(self.polling_interval)", "def run(self):\n while True:\n task = self.queue.get()\n if task is None:\n break\n fslfile = os.path.join(task.directory, task.fsl_file)\n # TODO: there must be a better way to set the workdir\n workdir = '/'.join(task.directory.split('/')[-3:])\n logger.info('Docker task %s %s workdir %s',\n task.id, task.fsl_file, workdir)\n fslcmds = ['save_model(close)',\n \"chdir('{}')\".format(workdir)]\n \n with open(fslfile) as f:\n fslcmds += f.readlines()\n r = [json.loads(s)\n for s in self.container.send_fsl(\n '\\n'.join(fslcmds), publish_receive)[:2]]\n try:\n if r[0]['status'] == 'ok':\n task.status = 'C'\n else:\n task.status = 'X'\n except:\n task.status = 'X'\n logger.info(\"Finished %s\", r)\n if self.stoponend:\n self.container.quit()\n # lets hope that docker will always restart this container\n self.queue.task_done()", "def cli():\n while True:\n try:\n # Get the whole information on each edge.\n l_edge = list()\n s_rsc = '{}/edge'.format(etcdc.prefix)\n \n try:\n r = etcdc.read(s_rsc, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for child in r.children:\n l_app = list()\n d = ast.literal_eval(child.value)\n # get hosts\n print(PROJECT_ROOT + '/' + d['endpoint'])\n l_hosts = kube_list_node(PROJECT_ROOT + '/' + d['endpoint'])\n d['hosts'] = len(l_hosts)\n d_nodes = dict() # {'name': 'ip', ...}\n for item in l_hosts:\n d_nodes[item.metadata.name] = item.status.addresses[0].address\n # log.debug(d_nodes)\n # get # of tenants and apps\n l_tenants = get_tenant(d['name'])\n d['tenants'] = len(l_tenants)\n d['apps'] = 0\n for e in l_tenants:\n if 
'app' in e:\n d['apps'] += len(e['app'])\n \n d['cpu'] = 0\n d['memory'] = 0\n i_total_cores = 0\n i_total_memory = 0\n i_total_storage = 0\n for h in l_hosts:\n i_total_cores += int(h.status.capacity['cpu'])\n i_total_memory += int(h.status.capacity['memory'].\n replace('Ki', ''))\n d['tot_cpu'] = i_total_cores\n d['tot_mem'] = int(i_total_memory / (1024*1024))\n \n # Get loadavg and free mem\n if d['name'] == 'edge1':\n ssh_server = 'harden.iorchard.co.kr'\n elif d['name'] == 'edge2':\n ssh_server = 'durant.iorchard.co.kr'\n RSC = 'ssh -p42544 {} get_rsc.sh'.format(ssh_server)\n (b_res, s_out) = cmd(RSC, 3, False)\n l = s_out.split(\"\\n\")\n d['used_cpu'] = (float(l[0]) + float(l[1]) + float(l[2]))\n avail_mem = (int(l[3]) + int(l[4]) + int(l[5])) / (1024*1024)\n d['used_mem'] = d['tot_mem'] - avail_mem\n d['cpu'] = int(d['used_cpu'] / d['tot_cpu'] * 100)\n d['memory'] = int(d['used_mem'] / d['tot_mem'] * 100)\n # ceph storage\n CEPH = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph exec -it \" \\\n + \"$(kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph get po \" \\\n + \"-l app=rook-ceph-tools \" \\\n + \"-o jsonpath='{.items[0].metadata.name}') -- \" \\\n + \"ceph df --format json\"\n (b_res, s_out) = cmd(CEPH, 3, False)\n print(s_out)\n d['status'] = 'Healthy' if b_res else 'Unhealthy'\n d_stor = ast.literal_eval(s_out)\n d['tot_stor'] = int(d_stor['stats']['total_bytes'] / pow(1024, 3))\n d['used_stor'] = int(d_stor['stats']['total_used_bytes'] / pow(1024, 3))\n d['storage'] = int(d['used_stor'] / d['tot_stor'] * 100)\n # Update etcd status\n try:\n s = '{}/edge/{}'.format(etcdc.prefix,\n d['name'])\n # log.debug(d)\n etcdc.write(s, d, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n # Update app status\n s_app = '{}/app'.format(etcdc.prefix)\n try:\n r_app = etcdc.read(s_app, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for app_child in r_app.children:\n if app_child.value is not None:\n d_app = dict()\n app = ast.literal_eval(app_child.value)\n if app['edge'] == d['name']:\n d_app['name'] = app['name']\n d_app['username'] = GUAC_USER\n d_app['password'] = GUAC_PASS\n # Get catalog info.\n s_cat = '{}/catalog/{}'.format(etcdc.prefix,\n app['catalog'])\n try:\n r_cat = etcdc.read(s_cat)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n cat = ast.literal_eval(r_cat.value)\n app['cat_type'] = cat['type']\n app['cat_name'] = cat['name']\n app['cat_logo'] = cat['logo']\n # Get app status\n if app['cat_type'] == 'vm':\n # first, look at DataVolume status of app.\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get dv ' \\\n + app['name'] \\\n + \" -o jsonpath='{range .status}{.phase},{.progress}{end}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n l_out = s_out.split(',')\n if l_out[0] == 'Succeeded':\n # Get vm status of app\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT \\\n + '/' \\\n + d['endpoint'] + ' get vm ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.ready}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res and s_out == 'true':\n # update app status 'running'.\n app.update({'status': 'running'})\n \n if app['edge'] == d['name']:\n # Get where app is running.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get vmi ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.nodeName}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['hostname'] = d_nodes[s_out]\n # Get 
nodeport for app.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get svc ' \\\n + app['name'] \\\n + \" -o jsonpath='{.spec.ports[0].nodePort}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n else:\n # update app status 'stopped'\n app.update({'status': 'stopped'})\n elif l_out[0] == 'ImportInProgress':\n # update app status 'building' and \n app.update({'status': 'building ({})'.format(l_out[1])})\n elif app['cat_type'] == 'container':\n app.update({'status': 'running'})\n \n try:\n s = '{}/app/{}'.format(etcdc.prefix,\n app['name'])\n # log.debug(app)\n etcdc.write(s, app, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n if 'port' in d_app:\n l_app.append(d_app)\n # render guac-config.j2 and copy it to guac broker server\n log.debug(l_app)\n template = env.get_template('broker.j2')\n s_out = template.render(l_app=l_app)\n s_tmp = '/tmp/{}.broker'.format(d['name'])\n try:\n with open(s_tmp, 'w') as f:\n f.write(s_out)\n except Exception as e:\n log.error(e)\n else:\n CMD = \"scp \" \\\n + \"-P42544 {} {}\".format(s_tmp, d['broker_ip']) \\\n + \":/etc/guacamole/noauth-config.xml\"\n log.debug(CMD)\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n \n l_edge.append(d)\n \n # log.debug(l_edge)\n log.debug(l_app)\n \n time.sleep(1)\n except:\n log.error('unknown error')", "def start(self):\n if self.isAlive == False:\n try:\n time.sleep(1)\n os.remove(os.path.join(self.inbox, 'stop_service.txt'))\n except:\n pass\n try:\n time.sleep(1)\n os.remove(os.path.join(self.inbox, 'ReadDirectoryChangesW.txt'))\n except:\n pass\n return\n \n serviceconfig.logger.debug('*** \"%s\": Starting the worker thread' % self.inbox)\n self.queue = Queue()\n t = Thread(target=self.worker)\n t.start()\n \n \"\"\"\n If files were dropped during the recovering process,\n we need to handle those files\n \"\"\"\n timer = Timer(1, self.triggerChangeEvent, kwargs={})\n timer.start()\n \n while self.isAlive:\n self.queue.put(win32file.ReadDirectoryChangesW (\n self.hDir,\n 1024,\n True,\n win32con.FILE_NOTIFY_CHANGE_FILE_NAME |\n win32con.FILE_NOTIFY_CHANGE_DIR_NAME |\n win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |\n win32con.FILE_NOTIFY_CHANGE_SIZE |\n win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |\n win32con.FILE_NOTIFY_CHANGE_SECURITY,\n None,\n None\n ))\n self.queue.join()\n timer.join()\n \n \"\"\"\n Delete the stop_service.txt file generated by stopping the service\n \"\"\"\n try:\n os.remove(os.path.join(self.inbox, 'stop_service.txt'))\n except:\n pass", "def watch(self):\n wm = pyinotify.WatchManager()\n self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)\n wm.add_watch(self.directory, pyinotify.ALL_EVENTS)\n try:\n self.notifier.loop()\n except (KeyboardInterrupt, AttributeError):\n print_notification(\"Stopping\")\n finally:\n self.notifier.stop()\n self.terminate_processes()", "def run(self):\n self._logger.info(\"Locator started main loop\")\n self._running = True\n while self._running:\n self._update_locations()\n time.sleep(self.interval)", "def main():\n parser = create_parser()\n input_args = parser.parse_args()\n if not input_args:\n parser.print_usage()\n sys.exit(1)\n logger_initiate()\n start_time = datetime.datetime.now()\n logger_banner('Started', start_time)\n\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n logger.info('Searching directory=\"{0}\" ext=\"{1}\" interval=\"{2}\" text=\"{3}\" '.format(input_args.directory, 
input_args.ext, input_args.poll, input_args.magictext))\n while not exit_flag:\n try:\n find_files(input_args.directory,\n input_args.ext, input_args.magictext)\n except OSError as e:\n logger.error('Directory does not exist: {}'.format(e))\n except Exception as e:\n logger.error('Unknown/unhandled error: {}'.format(e))\n time.sleep(input_args.poll)\n\n total_time = datetime.datetime.now() - start_time\n logger_banner('Ended', total_time, start=False)", "def run(self):\n self.logger.debug(\"Resource manager main thread started\")\n\n while not self._stop_flag:\n try:\n self._handle_requests()\n self._accept_requests()\n\n except Exception as ex:\n self.logger.exception(\"Resource manager failed. \"\n \"Reason: %s\", ex)\n\n self.logger.debug(\"Resource manager thread is down\")", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def poller(self):\n\n def watcher(watched_event):\n if watched_event.type and watched_event.path:\n msg = \"child changed, try to get master again. type %s, state %s, path %s.\" % (\n watched_event.type, watched_event.state, watched_event.path)\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", msg))\n self.workers = self.get_workers()\n logger.debug(\"poller call register start\")\n self.register_service()\n self.register_leadership()\n logger.debug(\"poller call register end\")\n\n try:\n children = self.zk.get_children(self.SERVICE_PATH, watcher)\n except:\n logger.error(traceback.format_exc())\n return\n logger.debug(\"current worker services are %s\" % children)" ]
[ "0.60920537", "0.60423386", "0.6004358", "0.59458005", "0.5935371", "0.5914737", "0.5896926", "0.58915985", "0.58893985", "0.5884968", "0.58668745", "0.5844063", "0.5844063", "0.58165354", "0.58041525", "0.58041525", "0.5799248", "0.57745546", "0.57614356", "0.5740617", "0.57405484", "0.57402986", "0.57360905", "0.5727484", "0.5718508", "0.5704776", "0.5692617", "0.5688287", "0.5687442", "0.5672505", "0.56697375", "0.56668895", "0.5666699", "0.5662442", "0.5653489", "0.5628617", "0.56220645", "0.56164676", "0.5605313", "0.55992633", "0.5586054", "0.5580198", "0.5572569", "0.55651265", "0.55564743", "0.5556074", "0.553353", "0.5515738", "0.55154485", "0.55138534", "0.550087", "0.5491553", "0.54870594", "0.5457861", "0.5455489", "0.5448777", "0.54449743", "0.54394644", "0.5432333", "0.54252386", "0.5422802", "0.54153985", "0.5411738", "0.5411731", "0.54116976", "0.54089", "0.5408869", "0.5407402", "0.53903466", "0.53840804", "0.53831017", "0.53774565", "0.53666306", "0.5359836", "0.53537405", "0.53506", "0.5347614", "0.5346474", "0.5346092", "0.533931", "0.5336037", "0.5327702", "0.53271246", "0.532636", "0.5316566", "0.53145176", "0.5300003", "0.5298409", "0.5287488", "0.5279", "0.5271327", "0.5250253", "0.5246852", "0.52319616", "0.5220001", "0.520945", "0.52046275", "0.5204424", "0.5202504", "0.5200084" ]
0.6893604
0
starting_position (x,y) tuple representing current_position
def init(starting_position, steering_noise, distance_noise, sonar_noise, measurement_noise, speed, turning_speed, gps_delay, execution_cpu_time_limit): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_start_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc", "def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')", "def get_pre_start_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n pre_start_coordinate = (self.__location[0] - 1,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n pre_start_coordinate = (self.__location[0],\r\n self.__location[1] - 1)\r\n return pre_start_coordinate", "def get_starting_coordinate(self):\n if type(self._starting_coordinate) != tuple:\n self._logger.write(\"Error! starting_coordinate should be a tupple\")\n elif self._starting_coordinate == None:\n #this is dead code\n self._logger.write(\"Error! starting_coordinate does not contain a value\")\n else:\n try:\n return self._starting_coordinate\n except Exception as e:\n self._logger.write(\"Error! could not get starting_coordinate: %s\" % e)", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def get_current_position(self) -> Tuple[int, int]:\n return self.__row_position, self.__col_position", "def startingState(self):\n # Returns starting position and 4 false because no corners are visited yet\n return (self.startingPosition, (False, False, False, False))", "def next_pos(self):\n rotate = self.action - 1\n self.rotation = math.fmod(\n self.rotation + rotate * self.rotSpeed,360)\n self.xdir = math.cos( math.radians(self.rotation))\n self.ydir = math.sin( math.radians(self.rotation))\n self.oldx = self.x\n self.oldy = self.y\n x = self.x + self.xdir * self.speed\n y = self.y + self.ydir * self.speed\n #print(x,y)\n #if(x>21 or x < 0 or y>21 or y < 0):\n #print(\"dead\")\n return (x,y)\n # self.path.append((int(self.x),int(self.y)))", "def starting_position(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"starting_position\")", "def getPosition(self):\n return self.target, min(self.points), max(self.points)", "def last_pos(self) -> tuple[int, int]:\n if not self.actions:\n return (self.start_x, self.start_y)\n else:\n box = self.get_hitbox_at(self.time_consumed)\n return box.pos_x, box.pos_y", "def firstMove(self):\n return (10, 10)", "def coordinates_from(self, start):\r\n x, y, z = start\r\n return (\r\n x + self.x,\r\n y + self.y,\r\n z + self.z\r\n )", "def starting_position(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"starting_position\")", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def start(self) -> global___Pos:", "def get_position(self, position):", "def getStartState(self):\n \"\"\" A state space can be the start coordinates and a list to hold visited corners\"\"\"\n return (self.startingPosition, [])\n # util.raiseNotDefined()", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)", "def get_next_position(self):", "def get_pos(self):\n return (self.x, self.y)", "def grab_current_point(self):\n self.open_gripper(80)\n time.sleep(2.5)\n self.execute_action((0, 0, -10), self.GRAB_ORIENTATION)\n self.open_gripper(-30)\n time.sleep(2.5)\n self.execute_action((0, 0, 10), self.GRAB_ORIENTATION)\n 
time.sleep(2.5)\n self.initial_position = np.array(self.get_current_cartesian_position().position)\n print self.initial_position", "def get_starting_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[0].y\n return delta_y, delta_x", "def getPosition(self):\n return self.x", "def position(self):\n return self.x, self.y", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def move_start_node(self, x, y):", "def get_start_cell(self):\n return (self.st_row, self.st_col)", "def next_point(self, start_pos, goal_pos):\r\n\t\tself.shift = 0\r\n\t\tself.start_pos = start_pos\r\n\t\tself.goal_pos = goal_pos", "def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)", "def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def get_previous_position(self) -> Tuple[int, int]:\n return self.__previous_row_position, self.__previous_col_position", "def get_position(self) -> Tuple[int]:\n return self.position.copy()", "def initialCoordinates():\r\n return (-250,-250)", "def start(self, x, y):\n self.last_x = x\n if self.min_x is not None and x is not None:\n self.last_x = max(self.last_x, self.min_x)\n if self.max_x is not None and x is not None:\n self.last_x = min(self.last_x, self.max_x)\n self.last_y = y", "def store_current_position_as_previous(self):\n pos = self.get_current_position()\n self.previous_xloc = pos[0]\n self.previous_yloc = pos[1]\n self.previous_zloc = pos[2]\n return pos", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def get_pos(self) -> tuple:\n return self.pos", "def get_new_position(cls, position_x, position_y, direction):\n new_position_x = cls.calculate_position_x(position_x, direction)\n new_position_y = cls.calculate_position_y(position_y, direction)\n return new_position_x, new_position_y", "def get_location(self):\r\n return self.__x, self.__y", "def start_location(self) -> Point2:\n return self._game_info.player_start_location", "def set_start_coords(self, x:int, y:int) -> None:\r\n self.start_x = x\r\n self.start_y = y", "def get_base_pos_on_screen(self,position):\n\n return self.seq_xstart+float(position-1)*self.base_scale.get(),self.seq_row", "def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError", "def get_start_point_transform(self, start_x_in_cam_2=0, start_y_in_cam_2=0):\n corresponding_start_x_in_cam_1 = self.coord_transformer['B2F']['x'].predict([[start_x_in_cam_2]])[0][0]\n corresponding_start_y_in_cam_1 = 
self.coord_transformer['B2F']['y'].predict([[start_y_in_cam_2]])[0][0]\n # print(corresponding_start_x_in_cam_1,corresponding_start_y_in_cam_1)\n self.starting_point = [corresponding_start_x_in_cam_1, corresponding_start_y_in_cam_1]\n return self.starting_point", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def get_start_point(self):\n return self.first_point", "def move_to(numbers, p_current, relative = False):\n if len(numbers) != 2:\n return None\n\n p_start = Point(numbers[0], numbers[1]) #first point\n if relative:\n p_start += p_current\n return p_start", "def position(self):\n return self._x, self._y", "def coord (i, j):\r\n return j, i", "def initial_position(self):\n\n if isinstance(self._initial_position, (list, tuple)):\n return self._initial_position\n if isinstance(self._initial_position, PositionAreaSampler):\n return self._initial_position.sample()\n\n return self._initial_position", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def GetPosition(self):\n ...", "def start(self) -> pos.Pos:\n return self.__start", "def getStartVertex(self):", "def getStartState(self):\n #return (self.position, self.food.copy())\n return self.position", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def position(self):\n return self.source.position + self.position_relative", "def get_min_position(self):\n raise NotImplementedError()", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def event_starting_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n raw_starting_x = positions_list[0].get(\"x\")\n raw_starting_y = positions_list[0].get(\"y\")\n\n starting_x = (raw_starting_x/100)*104\n starting_y = (raw_starting_y/100)*68\n\n # Finally, validate and return the result.\n to_return = [starting_x, starting_y]\n\n return to_return", "def nextPositionOffset(self):\n if self.dir == \"N\":\n return (0, -1)\n elif self.dir == \"S\":\n return (0, 1)\n elif self.dir == \"E\":\n return (1, 0)\n elif self.dir == \"W\":\n return (-1, 0)\n else:\n raise TypeError(\"invalid direction '%s'\" % self.dir)", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def backtrack_to_start(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n next_move = lis[-4:-2]\r\n\r\n return next_move", "def start_point(self) -> Vec3:\n v = list(self.vertices([self.dxf.start_angle]))\n return v[0]", "def move_origin(self, x, y):\n return Position(self.x - x, self.y - y)", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, 
self.position_y * SPRITE_SIZE)\n return position", "def get_new_position(row_delta, column_delta):\n new_row = start_row - row_delta\n new_column = start_column + column_delta\n return new_row, new_column", "def origin(self):\n return (self._x_origin, self._y_origin)", "def get_pick_position(self):\n x0 = int(self.GetPickX1())\n x1 = int(self.GetPickX2())\n y0 = int(self.GetPickY1())\n y1 = int(self.GetPickY2())\n return x0, y0, x1, y1", "def getPos(self):\n return self.__current_pos", "def min_position(self):\n raise NotImplementedError", "def get_position(self): # maybe encoded in filepath at some point\n result = (self.iter * self.row_step)% self.row_size, self.iter // (self.row_size * self.row_step)* self.col_step\n self.iter += 1\n return result", "def position(self):\r\n pass", "def get_pos(self) -> tuple:\n return self.rect.center", "def getNewLocation(self, currentLocation, directionalMovement):\n x = currentLocation[0] + directionalMovement[0]\n y = currentLocation[1] + directionalMovement[1]\n return (x, y)", "def start(self, x, y):\n self.last_x = x\n self.last_y = y\n self.aperture_id = None", "def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)", "def get_pos(self):\r\n return self.pos", "def position(self):\n return self._position", "def origin(self):\r\n\r\n return self.ox, self.oy, self.oz", "def position(self):\n return (self.__position)", "def _get_pos(self):\n return self._pos", "def get_start_plus_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n start_plus_coordinate = (self.__location[0] + 1,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n start_plus_coordinate = (self.__location[0],\r\n self.__location[1] + 1)\r\n return start_plus_coordinate", "def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx" ]
[ "0.7147887", "0.7056669", "0.69363606", "0.6935041", "0.69009805", "0.69009805", "0.69009805", "0.6888912", "0.6874065", "0.68649364", "0.6857859", "0.67927134", "0.67808557", "0.6730022", "0.67153525", "0.67080766", "0.6668199", "0.66649705", "0.66494197", "0.6641967", "0.66367376", "0.663565", "0.6633456", "0.66099876", "0.65679437", "0.6547187", "0.65338844", "0.6518865", "0.6518865", "0.65166104", "0.6515363", "0.65050745", "0.64811796", "0.64786774", "0.6470572", "0.6445557", "0.6431633", "0.64171094", "0.6400986", "0.6399733", "0.6393386", "0.63913757", "0.63614774", "0.6338203", "0.6338065", "0.63359916", "0.6334207", "0.63291806", "0.6327436", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.6326284", "0.63258165", "0.6324089", "0.6313078", "0.6305599", "0.6291986", "0.6284298", "0.62784624", "0.6274491", "0.6272009", "0.62710893", "0.6270739", "0.62600493", "0.62594026", "0.6248937", "0.6247428", "0.6245478", "0.62412155", "0.62361366", "0.6233441", "0.62323236", "0.622736", "0.62265354", "0.6218678", "0.62040895", "0.6198206", "0.6178935", "0.6163388", "0.6159394", "0.6159247", "0.61573744", "0.6144502", "0.61417353", "0.6137581", "0.6133733", "0.61170644", "0.60977554", "0.6096897", "0.60940707", "0.6091636", "0.60904026", "0.6086123" ]
0.0
-1
React to sensory data
def on_sense_sonar(self, dist): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_data():\n pass", "def data_changed(self):\n return", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):", "def update(self):", "def update(self):", "def manage_info():", "def updateWidget(self):\n pass", "def update_has_data(self):\n self.main()", "def update(self):\r\n pass", "def store(self):\n\n pass", "def slot_history_changed(self, _sender, _data):\r\n self.change_type = TYPE_HISTORY\r\n self.do_paint()\r\n self.change_type = None", "def update_state(self):\n if self._coordinator.data:\n # get consumption value\n value_list = self._coordinator.data['values']\n values = [v['value'] for v in value_list]\n self._state = f\"{sum(values):.2f}\"", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update_state(self, context):\n pass", "def update_list_view(self):\n self.model.dataChanged.emit(self.model.index(0, 1),\n self.model.index(len(self.model.data_list), 1))\n #self.pBar.setValue(localization.localizationProgress() * 100)", "def show_data():", "def update_original_data(self):\n pass", "def store_state(self):\n if self.is_main:\n super(AbstractFlowComponent, self).store_state()", "def update_count(self):\n pass # Do nothing", "def _update(self):\n pass", "def onUpdated(self):", "def _idx_changed(self, idx):\n self.refresh_memory()", "def save_state(self):\n pass", "def view(self):", "def dashboard():", "def update_count(self):\n pass", "def compt_afficher(self):\n self.distrib.distrib_state = Menu(self.distrib)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def __handle_view_item(self, gamestate_component):", "def handle_panel_update(self, section_dict):", "def render(self):", "def empty_clicked(self, widget):\n\n\t\tself.liststore.clear()\n\t\tself.update_keyword_usage()\n\t\tself.update_notebook()", "def data(self):", "def refresh(self):\n pass", "def refresh(self):\n pass", "def _update_editor(self):\n root = self.model.data_list\n root.append(RowModel(name='', value=''))\n del root[-1]", "def save_data(self):\n pass", "def refreshView(self):\n chldn = self.tDisp.get_children()\n for chld in chldn:\n self.tDisp.delete(chld)\n if len(self.conflict.infeasibles) > 0:\n self.conflict.recalculateFeasibleStates()\n for infeas in self.conflict.infeasibles:\n key = infeas.name\n self.tDisp.insert('', 'end', key, text=key)\n self.tDisp.set(key, 'state', key)\n self.tDisp.set(key, 'stDes', str(2**(key.count('-'))))\n self.tDisp.set(key, 'stRem', str(infeas.statesRemoved))", "def _dirty (self):\n pass", "def handle_item_consumption(self):\n self.tooltip_focus = None\n self.active_item_index = None\n self.refresh_inventory()\n self.refresh_equipment()", "def store(self, name, value):\n # ...but only when the context has been entered (and locks acquired etc.)\n if not self.ready:\n raise RuntimeError(\"SnapshotView is a context manager. 
Never use it directly!\")\n # Do not ask for permission - overwrite the old entry if necessary\n self.data[name] = value", "def update_journal(self):\n self.kittens_rescued += 1", "def on_edit(self, dataobj):", "def __init__(self):\n self.data = {}\n self.refresh()", "def refresh_screen(self):", "def serialize_dirty(self):\n pass", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def update(self):\n\n pass", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def update_controller(self):", "def saveData(self):\n pass", "def staged(self):\n return", "def update( ):\r\n pass", "def update(self):\n self.write_state(bytes([]))", "def on_collection_updated(self, obj, state, old_name):\n icon_files = {\n \"gerber\": self.app.resource_location + \"/flatcam_icon16.png\",\n \"excellon\": self.app.resource_location + \"/drill16.png\",\n \"cncjob\": self.app.resource_location + \"/cnc16.png\",\n \"geometry\": self.app.resource_location + \"/geometry16.png\",\n \"script\": self.app.resource_location + \"/script_new16.png\",\n \"document\": self.app.resource_location + \"/notes16_1.png\"\n }\n\n if state == 'append':\n for act in self.app.ui.menuobjects.actions():\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n gerber_list = []\n exc_list = []\n cncjob_list = []\n geo_list = []\n script_list = []\n doc_list = []\n\n for name in self.get_names():\n obj_named = self.get_by_name(name)\n if obj_named.kind == 'gerber':\n gerber_list.append(name)\n elif obj_named.kind == 'excellon':\n exc_list.append(name)\n elif obj_named.kind == 'cncjob':\n cncjob_list.append(name)\n elif obj_named.kind == 'geometry':\n geo_list.append(name)\n elif obj_named.kind == 'script':\n script_list.append(name)\n elif obj_named.kind == 'document':\n doc_list.append(name)\n\n def add_act(o_name):\n obj_for_icon = self.get_by_name(o_name)\n menu_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n menu_action.setCheckable(True)\n menu_action.setText(o_name)\n menu_action.setIcon(QtGui.QIcon(icon_files[obj_for_icon.kind]))\n menu_action.triggered.connect(\n lambda: self.set_active(o_name) if menu_action.isChecked() is True else\n self.set_inactive(o_name))\n self.app.ui.menuobjects.addAction(menu_action)\n\n for name in gerber_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in exc_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in cncjob_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in geo_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in script_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in doc_list:\n add_act(name)\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))\n\n elif state == 'delete':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == 
obj.options['name']:\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'rename':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == old_name:\n add_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n add_action.setText(obj.options['name'])\n add_action.setIcon(QtGui.QIcon(icon_files[obj.kind]))\n add_action.triggered.connect(\n lambda: self.set_active(obj.options['name']) if add_action.isChecked() is True else\n self.set_inactive(obj.options['name']))\n\n self.app.ui.menuobjects.insertAction(act, add_action)\n\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'delete_all':\n for act in self.app.ui.menuobjects.actions():\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))", "def __display(self) -> None:\n ligne = 0\n for key, value in self.values.items():\n self.my_data(self.master, key, value, ligne, 0, 1, 1, 2, 2)\n ligne += 2", "def collect_data(self,sensation,action,reward,next_sensation):\n pass", "def update_info(self):\n self.execution_status_widget.update()\n self.execution_info_widget.update()\n self.cluster_widget.update() # update the cluster info even if it is not being displayed\n self.details.original_widget.update()", "def __setstate__(self, state):\n super().__setstate__(state)\n\n self.annotations = SortedList(self.annotations)\n self.links = SortedList(self.links)\n self.groups = SortedList(self.groups)\n self.generics = SortedList(self.generics)\n\n self.index = DataIndex()\n self.index.update_basic_index(list(self.annotations))\n self.index.update_basic_index(list(self.links))\n self.index.update_basic_index(list(self.groups))\n self.index.update_basic_index(list(self.generics))\n\n for a in self.annotations:\n a.set_pack(self)\n\n for a in self.links:\n a.set_pack(self)\n\n for a in self.groups:\n a.set_pack(self)\n\n for a in self.generics:\n a.set_pack(self)", "def show_database(self):\n self.db_window.update()\n self.db_window.deiconify()\n self.selected_glycan = None", "def state(self):\n pass", "def level_data(self):\n self.level(self.data)", "def sync_widgets(self):\n self.data_changed.emit(self.value)", "def flush(self):\n super().flush()\n self.dists = {}", "def __call__(self):\n self.brain._update_fscale(self.factor)\n for key in self.brain.keys:\n if self.widgets[key] is not None:\n self.widgets[key].set_value(self.brain._data[key])", "def on_draw(self):\n self.clear()\n self.manager.draw()", "def actuator_environment(self):\n self.INPUT_DATA_SIZE = 23\n self.pushButton_reload.setEnabled(0)\n self.listWidget_link.setEnabled(0)\n self.listWidget_link.clear()", "def updateControl(self, event):\r\n print(\"updating...\")\r\n product_dict = [\r\n {\"title\":\"Core Python Programming\", \"author\":\"Wesley Chun\",\r\n \"isbn\":\"0132269937\", \"mfg\":\"Prentice Hall\"},\r\n {\"title\":\"Python 
Programming for the Absolute Beginner\",\r\n \"author\":\"Michael Dawson\", \"isbn\":\"1598631128\",\r\n \"mfg\":\"Course Technology\"},\r\n {\"title\":\"Learning Python\", \"author\":\"Mark Lutz\",\r\n \"isbn\":\"0596513984\", \"mfg\":\"O'Reilly\"}\r\n ]\r\n data = self.products + product_dict\r\n self.dataOlv.SetObjects(data)", "def save_shelf(self, shelf_name, data):\r\n shelf_path = os.path.join(self.full_dir, shelf_name)\r\n with shelve.open(shelf_path, 'c') as shelf:\r\n shelf['data'] = data", "def on_refresh(self):\n pass", "def update():", "def update():", "def update_counter(self, counter, entity):", "def erase(self):\n\tself.state={}\n\tself.display(update_board=0)", "def dummy():\n\t\t\tself.edit = True", "def siloview(self, silo):\n \n # Only authorized users can view state information.\n # Should this be restricted to admins and managers only, or shoud users too be able to see this information?\n # Going with restricting this information to admins and managers \n if not ag.granary.issilo(silo):\n abort(404)\n\n ident = request.environ.get('repoze.who.identity')\n if not ident:\n abort(401, \"Not Authorised\")\n silos = ag.authz(ident)\n if silo not in silos:\n abort(403, \"Forbidden\")\n silos_admin = ag.authz(ident, permission='administrator')\n silos_manager = ag.authz(ident, permission='manager')\n #if not ident.get('role') in [\"admin\", \"manager\"]:\n if not (silo in silos_admin or silo in silos_manager):\n abort(403, \"Forbidden. You should be an administrator or manager to view this information\")\n\n options = request.GET\n start = 0\n if 'start' in options and options['start']:\n try:\n start = int(options['start'])\n except:\n start = 0\n rows = 1000\n if 'rows' in options and options['rows']:\n try:\n rows = int(options['rows'])\n except:\n rows = 1000\n\n rdfsilo = ag.granary.get_rdf_silo(silo)\n state_info = ag.granary.describe_silo(silo)\n state_info['silo'] = silo\n state_info['uri_base'] = ''\n if rdfsilo.state and rdfsilo.state['uri_base']:\n state_info['uri_base'] = rdfsilo.state['uri_base']\n state_info['number of data packages'] = get_datasets_count(silo)\n state_info['params'] = {'start':start, 'rows':rows}\n items = {}\n #for item in rdfsilo.list_items():\n for item in get_datasets(silo, start=start, rows=rows):\n items[item] = {}\n try:\n items[item]['embargo_info'] = is_embargoed(rdfsilo, item)\n except:\n pass\n state_info['datasets'] = items\n\n # conneg return\n # Always return application/json\n response.content_type = 'application/json; charset=\"UTF-8\"'\n response.status_int = 200\n response.status = \"200 OK\"\n return simplejson.dumps(state_info)", "def show(self):", "def update(self):\n raise NotImplementedError", "def info_cache(self):\n self.info.info()\n self.dataset.info()\n self.category.info()", "def update(self) -> None:\n ...", "def hook_displaymovediagnostics(self,statsDict):\n ui.plotmovediagnostics(statsDict)", "def refresh(self):\n ida_strlist.build_strlist()\n self.size = ida_strlist.get_strlist_qty()", "def dashboard(self):\r\n return {}", "def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor", "def _save_data(self):\n super()._save_data()\n if self.data:\n self.state['inserted_elements'] = len(SeaLevelRiseMeasure.objects.bulk_create(self.data))\n self.logger.info('Successfully saved %d elements.' % self.state['inserted_elements'])\n else:\n self.logger.info('No elements were saved because no elements were available.')\n self.data = None" ]
[ "0.5340235", "0.5297468", "0.5195286", "0.51405054", "0.51405054", "0.51405054", "0.50970787", "0.497346", "0.497207", "0.49673796", "0.49305034", "0.48768204", "0.48723578", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.48582897", "0.48493633", "0.48464382", "0.48293942", "0.47801942", "0.47800055", "0.47781074", "0.4773682", "0.4759466", "0.4746492", "0.47401872", "0.4733624", "0.47297472", "0.47281575", "0.47272882", "0.47178885", "0.47159243", "0.47118822", "0.4709183", "0.47050005", "0.46999052", "0.46999052", "0.46989226", "0.46987352", "0.46944842", "0.4691398", "0.46895432", "0.4686693", "0.46783212", "0.4676414", "0.4671187", "0.46655494", "0.46619755", "0.46592504", "0.46476865", "0.46424392", "0.46423286", "0.46402004", "0.46184713", "0.46179286", "0.46130913", "0.46097252", "0.4605658", "0.45988506", "0.4598012", "0.45857573", "0.45851767", "0.45848072", "0.45836633", "0.45763367", "0.4576316", "0.45661357", "0.45600298", "0.4545866", "0.45452142", "0.45422128", "0.45409593", "0.45382905", "0.45348862", "0.45348862", "0.45345965", "0.45342913", "0.45316646", "0.45152304", "0.45134577", "0.45061305", "0.45035282", "0.4503415", "0.45028177", "0.4502631", "0.449915", "0.449779", "0.44957313" ]
0.0
-1
React to sensory data
def on_sense_field(self, field_type, field_parameter): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_data():\n pass", "def data_changed(self):\n return", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):", "def update(self):", "def update(self):", "def manage_info():", "def updateWidget(self):\n pass", "def update_has_data(self):\n self.main()", "def update(self):\r\n pass", "def store(self):\n\n pass", "def slot_history_changed(self, _sender, _data):\r\n self.change_type = TYPE_HISTORY\r\n self.do_paint()\r\n self.change_type = None", "def update_state(self):\n if self._coordinator.data:\n # get consumption value\n value_list = self._coordinator.data['values']\n values = [v['value'] for v in value_list]\n self._state = f\"{sum(values):.2f}\"", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update_state(self, context):\n pass", "def update_list_view(self):\n self.model.dataChanged.emit(self.model.index(0, 1),\n self.model.index(len(self.model.data_list), 1))\n #self.pBar.setValue(localization.localizationProgress() * 100)", "def show_data():", "def update_original_data(self):\n pass", "def store_state(self):\n if self.is_main:\n super(AbstractFlowComponent, self).store_state()", "def update_count(self):\n pass # Do nothing", "def _update(self):\n pass", "def onUpdated(self):", "def _idx_changed(self, idx):\n self.refresh_memory()", "def save_state(self):\n pass", "def view(self):", "def dashboard():", "def update_count(self):\n pass", "def compt_afficher(self):\n self.distrib.distrib_state = Menu(self.distrib)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def __handle_view_item(self, gamestate_component):", "def handle_panel_update(self, section_dict):", "def render(self):", "def empty_clicked(self, widget):\n\n\t\tself.liststore.clear()\n\t\tself.update_keyword_usage()\n\t\tself.update_notebook()", "def data(self):", "def refresh(self):\n pass", "def refresh(self):\n pass", "def _update_editor(self):\n root = self.model.data_list\n root.append(RowModel(name='', value=''))\n del root[-1]", "def save_data(self):\n pass", "def refreshView(self):\n chldn = self.tDisp.get_children()\n for chld in chldn:\n self.tDisp.delete(chld)\n if len(self.conflict.infeasibles) > 0:\n self.conflict.recalculateFeasibleStates()\n for infeas in self.conflict.infeasibles:\n key = infeas.name\n self.tDisp.insert('', 'end', key, text=key)\n self.tDisp.set(key, 'state', key)\n self.tDisp.set(key, 'stDes', str(2**(key.count('-'))))\n self.tDisp.set(key, 'stRem', str(infeas.statesRemoved))", "def _dirty (self):\n pass", "def handle_item_consumption(self):\n self.tooltip_focus = None\n self.active_item_index = None\n self.refresh_inventory()\n self.refresh_equipment()", "def store(self, name, value):\n # ...but only when the context has been entered (and locks acquired etc.)\n if not self.ready:\n raise RuntimeError(\"SnapshotView is a context manager. 
Never use it directly!\")\n # Do not ask for permission - overwrite the old entry if necessary\n self.data[name] = value", "def update_journal(self):\n self.kittens_rescued += 1", "def on_edit(self, dataobj):", "def __init__(self):\n self.data = {}\n self.refresh()", "def refresh_screen(self):", "def serialize_dirty(self):\n pass", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def update(self):\n\n pass", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def update_controller(self):", "def saveData(self):\n pass", "def staged(self):\n return", "def update( ):\r\n pass", "def update(self):\n self.write_state(bytes([]))", "def on_collection_updated(self, obj, state, old_name):\n icon_files = {\n \"gerber\": self.app.resource_location + \"/flatcam_icon16.png\",\n \"excellon\": self.app.resource_location + \"/drill16.png\",\n \"cncjob\": self.app.resource_location + \"/cnc16.png\",\n \"geometry\": self.app.resource_location + \"/geometry16.png\",\n \"script\": self.app.resource_location + \"/script_new16.png\",\n \"document\": self.app.resource_location + \"/notes16_1.png\"\n }\n\n if state == 'append':\n for act in self.app.ui.menuobjects.actions():\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n gerber_list = []\n exc_list = []\n cncjob_list = []\n geo_list = []\n script_list = []\n doc_list = []\n\n for name in self.get_names():\n obj_named = self.get_by_name(name)\n if obj_named.kind == 'gerber':\n gerber_list.append(name)\n elif obj_named.kind == 'excellon':\n exc_list.append(name)\n elif obj_named.kind == 'cncjob':\n cncjob_list.append(name)\n elif obj_named.kind == 'geometry':\n geo_list.append(name)\n elif obj_named.kind == 'script':\n script_list.append(name)\n elif obj_named.kind == 'document':\n doc_list.append(name)\n\n def add_act(o_name):\n obj_for_icon = self.get_by_name(o_name)\n menu_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n menu_action.setCheckable(True)\n menu_action.setText(o_name)\n menu_action.setIcon(QtGui.QIcon(icon_files[obj_for_icon.kind]))\n menu_action.triggered.connect(\n lambda: self.set_active(o_name) if menu_action.isChecked() is True else\n self.set_inactive(o_name))\n self.app.ui.menuobjects.addAction(menu_action)\n\n for name in gerber_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in exc_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in cncjob_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in geo_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in script_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in doc_list:\n add_act(name)\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))\n\n elif state == 'delete':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == 
obj.options['name']:\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'rename':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == old_name:\n add_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n add_action.setText(obj.options['name'])\n add_action.setIcon(QtGui.QIcon(icon_files[obj.kind]))\n add_action.triggered.connect(\n lambda: self.set_active(obj.options['name']) if add_action.isChecked() is True else\n self.set_inactive(obj.options['name']))\n\n self.app.ui.menuobjects.insertAction(act, add_action)\n\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'delete_all':\n for act in self.app.ui.menuobjects.actions():\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))", "def __display(self) -> None:\n ligne = 0\n for key, value in self.values.items():\n self.my_data(self.master, key, value, ligne, 0, 1, 1, 2, 2)\n ligne += 2", "def collect_data(self,sensation,action,reward,next_sensation):\n pass", "def update_info(self):\n self.execution_status_widget.update()\n self.execution_info_widget.update()\n self.cluster_widget.update() # update the cluster info even if it is not being displayed\n self.details.original_widget.update()", "def __setstate__(self, state):\n super().__setstate__(state)\n\n self.annotations = SortedList(self.annotations)\n self.links = SortedList(self.links)\n self.groups = SortedList(self.groups)\n self.generics = SortedList(self.generics)\n\n self.index = DataIndex()\n self.index.update_basic_index(list(self.annotations))\n self.index.update_basic_index(list(self.links))\n self.index.update_basic_index(list(self.groups))\n self.index.update_basic_index(list(self.generics))\n\n for a in self.annotations:\n a.set_pack(self)\n\n for a in self.links:\n a.set_pack(self)\n\n for a in self.groups:\n a.set_pack(self)\n\n for a in self.generics:\n a.set_pack(self)", "def show_database(self):\n self.db_window.update()\n self.db_window.deiconify()\n self.selected_glycan = None", "def state(self):\n pass", "def level_data(self):\n self.level(self.data)", "def sync_widgets(self):\n self.data_changed.emit(self.value)", "def flush(self):\n super().flush()\n self.dists = {}", "def __call__(self):\n self.brain._update_fscale(self.factor)\n for key in self.brain.keys:\n if self.widgets[key] is not None:\n self.widgets[key].set_value(self.brain._data[key])", "def on_draw(self):\n self.clear()\n self.manager.draw()", "def actuator_environment(self):\n self.INPUT_DATA_SIZE = 23\n self.pushButton_reload.setEnabled(0)\n self.listWidget_link.setEnabled(0)\n self.listWidget_link.clear()", "def updateControl(self, event):\r\n print(\"updating...\")\r\n product_dict = [\r\n {\"title\":\"Core Python Programming\", \"author\":\"Wesley Chun\",\r\n \"isbn\":\"0132269937\", \"mfg\":\"Prentice Hall\"},\r\n {\"title\":\"Python 
Programming for the Absolute Beginner\",\r\n \"author\":\"Michael Dawson\", \"isbn\":\"1598631128\",\r\n \"mfg\":\"Course Technology\"},\r\n {\"title\":\"Learning Python\", \"author\":\"Mark Lutz\",\r\n \"isbn\":\"0596513984\", \"mfg\":\"O'Reilly\"}\r\n ]\r\n data = self.products + product_dict\r\n self.dataOlv.SetObjects(data)", "def save_shelf(self, shelf_name, data):\r\n shelf_path = os.path.join(self.full_dir, shelf_name)\r\n with shelve.open(shelf_path, 'c') as shelf:\r\n shelf['data'] = data", "def on_refresh(self):\n pass", "def update():", "def update():", "def update_counter(self, counter, entity):", "def erase(self):\n\tself.state={}\n\tself.display(update_board=0)", "def dummy():\n\t\t\tself.edit = True", "def siloview(self, silo):\n \n # Only authorized users can view state information.\n # Should this be restricted to admins and managers only, or shoud users too be able to see this information?\n # Going with restricting this information to admins and managers \n if not ag.granary.issilo(silo):\n abort(404)\n\n ident = request.environ.get('repoze.who.identity')\n if not ident:\n abort(401, \"Not Authorised\")\n silos = ag.authz(ident)\n if silo not in silos:\n abort(403, \"Forbidden\")\n silos_admin = ag.authz(ident, permission='administrator')\n silos_manager = ag.authz(ident, permission='manager')\n #if not ident.get('role') in [\"admin\", \"manager\"]:\n if not (silo in silos_admin or silo in silos_manager):\n abort(403, \"Forbidden. You should be an administrator or manager to view this information\")\n\n options = request.GET\n start = 0\n if 'start' in options and options['start']:\n try:\n start = int(options['start'])\n except:\n start = 0\n rows = 1000\n if 'rows' in options and options['rows']:\n try:\n rows = int(options['rows'])\n except:\n rows = 1000\n\n rdfsilo = ag.granary.get_rdf_silo(silo)\n state_info = ag.granary.describe_silo(silo)\n state_info['silo'] = silo\n state_info['uri_base'] = ''\n if rdfsilo.state and rdfsilo.state['uri_base']:\n state_info['uri_base'] = rdfsilo.state['uri_base']\n state_info['number of data packages'] = get_datasets_count(silo)\n state_info['params'] = {'start':start, 'rows':rows}\n items = {}\n #for item in rdfsilo.list_items():\n for item in get_datasets(silo, start=start, rows=rows):\n items[item] = {}\n try:\n items[item]['embargo_info'] = is_embargoed(rdfsilo, item)\n except:\n pass\n state_info['datasets'] = items\n\n # conneg return\n # Always return application/json\n response.content_type = 'application/json; charset=\"UTF-8\"'\n response.status_int = 200\n response.status = \"200 OK\"\n return simplejson.dumps(state_info)", "def show(self):", "def update(self):\n raise NotImplementedError", "def info_cache(self):\n self.info.info()\n self.dataset.info()\n self.category.info()", "def update(self) -> None:\n ...", "def hook_displaymovediagnostics(self,statsDict):\n ui.plotmovediagnostics(statsDict)", "def refresh(self):\n ida_strlist.build_strlist()\n self.size = ida_strlist.get_strlist_qty()", "def dashboard(self):\r\n return {}", "def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor", "def _save_data(self):\n super()._save_data()\n if self.data:\n self.state['inserted_elements'] = len(SeaLevelRiseMeasure.objects.bulk_create(self.data))\n self.logger.info('Successfully saved %d elements.' % self.state['inserted_elements'])\n else:\n self.logger.info('No elements were saved because no elements were available.')\n self.data = None" ]
[ "0.5340235", "0.5297468", "0.5195286", "0.51405054", "0.51405054", "0.51405054", "0.50970787", "0.497346", "0.497207", "0.49673796", "0.49305034", "0.48768204", "0.48723578", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.48582897", "0.48493633", "0.48464382", "0.48293942", "0.47801942", "0.47800055", "0.47781074", "0.4773682", "0.4759466", "0.4746492", "0.47401872", "0.4733624", "0.47297472", "0.47281575", "0.47272882", "0.47178885", "0.47159243", "0.47118822", "0.4709183", "0.47050005", "0.46999052", "0.46999052", "0.46989226", "0.46987352", "0.46944842", "0.4691398", "0.46895432", "0.4686693", "0.46783212", "0.4676414", "0.4671187", "0.46655494", "0.46619755", "0.46592504", "0.46476865", "0.46424392", "0.46423286", "0.46402004", "0.46184713", "0.46179286", "0.46130913", "0.46097252", "0.4605658", "0.45988506", "0.4598012", "0.45857573", "0.45851767", "0.45848072", "0.45836633", "0.45763367", "0.4576316", "0.45661357", "0.45600298", "0.4545866", "0.45452142", "0.45422128", "0.45409593", "0.45382905", "0.45348862", "0.45348862", "0.45345965", "0.45342913", "0.45316646", "0.45152304", "0.45134577", "0.45061305", "0.45035282", "0.4503415", "0.45028177", "0.4502631", "0.449915", "0.449779", "0.44957313" ]
0.0
-1
React to sensory data
def on_sense_gps(self, x, y): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_data():\n pass", "def data_changed(self):\n return", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):", "def update(self):", "def update(self):", "def manage_info():", "def updateWidget(self):\n pass", "def update_has_data(self):\n self.main()", "def update(self):\r\n pass", "def store(self):\n\n pass", "def slot_history_changed(self, _sender, _data):\r\n self.change_type = TYPE_HISTORY\r\n self.do_paint()\r\n self.change_type = None", "def update_state(self):\n if self._coordinator.data:\n # get consumption value\n value_list = self._coordinator.data['values']\n values = [v['value'] for v in value_list]\n self._state = f\"{sum(values):.2f}\"", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update_state(self, context):\n pass", "def update_list_view(self):\n self.model.dataChanged.emit(self.model.index(0, 1),\n self.model.index(len(self.model.data_list), 1))\n #self.pBar.setValue(localization.localizationProgress() * 100)", "def show_data():", "def update_original_data(self):\n pass", "def store_state(self):\n if self.is_main:\n super(AbstractFlowComponent, self).store_state()", "def update_count(self):\n pass # Do nothing", "def _update(self):\n pass", "def onUpdated(self):", "def _idx_changed(self, idx):\n self.refresh_memory()", "def save_state(self):\n pass", "def view(self):", "def dashboard():", "def update_count(self):\n pass", "def compt_afficher(self):\n self.distrib.distrib_state = Menu(self.distrib)", "def update(self):\n if self.size() < 2: return\n idx = random.randint(0, 100) % 3\n if idx < 2:\n slot = self.slots[idx]\n if slot.get_state() == Slot.CLEAN:\n slot.set_state(Slot.DIRTY)\n # self.slots[idx] = slot", "def __handle_view_item(self, gamestate_component):", "def handle_panel_update(self, section_dict):", "def render(self):", "def empty_clicked(self, widget):\n\n\t\tself.liststore.clear()\n\t\tself.update_keyword_usage()\n\t\tself.update_notebook()", "def data(self):", "def refresh(self):\n pass", "def refresh(self):\n pass", "def _update_editor(self):\n root = self.model.data_list\n root.append(RowModel(name='', value=''))\n del root[-1]", "def save_data(self):\n pass", "def refreshView(self):\n chldn = self.tDisp.get_children()\n for chld in chldn:\n self.tDisp.delete(chld)\n if len(self.conflict.infeasibles) > 0:\n self.conflict.recalculateFeasibleStates()\n for infeas in self.conflict.infeasibles:\n key = infeas.name\n self.tDisp.insert('', 'end', key, text=key)\n self.tDisp.set(key, 'state', key)\n self.tDisp.set(key, 'stDes', str(2**(key.count('-'))))\n self.tDisp.set(key, 'stRem', str(infeas.statesRemoved))", "def _dirty (self):\n pass", "def handle_item_consumption(self):\n self.tooltip_focus = None\n self.active_item_index = None\n self.refresh_inventory()\n self.refresh_equipment()", "def store(self, name, value):\n # ...but only when the context has been entered (and locks acquired etc.)\n if not self.ready:\n raise RuntimeError(\"SnapshotView is a context manager. 
Never use it directly!\")\n # Do not ask for permission - overwrite the old entry if necessary\n self.data[name] = value", "def update_journal(self):\n self.kittens_rescued += 1", "def on_edit(self, dataobj):", "def __init__(self):\n self.data = {}\n self.refresh()", "def refresh_screen(self):", "def serialize_dirty(self):\n pass", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def update(self):\n\n pass", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def update_controller(self):", "def saveData(self):\n pass", "def staged(self):\n return", "def update( ):\r\n pass", "def update(self):\n self.write_state(bytes([]))", "def on_collection_updated(self, obj, state, old_name):\n icon_files = {\n \"gerber\": self.app.resource_location + \"/flatcam_icon16.png\",\n \"excellon\": self.app.resource_location + \"/drill16.png\",\n \"cncjob\": self.app.resource_location + \"/cnc16.png\",\n \"geometry\": self.app.resource_location + \"/geometry16.png\",\n \"script\": self.app.resource_location + \"/script_new16.png\",\n \"document\": self.app.resource_location + \"/notes16_1.png\"\n }\n\n if state == 'append':\n for act in self.app.ui.menuobjects.actions():\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n gerber_list = []\n exc_list = []\n cncjob_list = []\n geo_list = []\n script_list = []\n doc_list = []\n\n for name in self.get_names():\n obj_named = self.get_by_name(name)\n if obj_named.kind == 'gerber':\n gerber_list.append(name)\n elif obj_named.kind == 'excellon':\n exc_list.append(name)\n elif obj_named.kind == 'cncjob':\n cncjob_list.append(name)\n elif obj_named.kind == 'geometry':\n geo_list.append(name)\n elif obj_named.kind == 'script':\n script_list.append(name)\n elif obj_named.kind == 'document':\n doc_list.append(name)\n\n def add_act(o_name):\n obj_for_icon = self.get_by_name(o_name)\n menu_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n menu_action.setCheckable(True)\n menu_action.setText(o_name)\n menu_action.setIcon(QtGui.QIcon(icon_files[obj_for_icon.kind]))\n menu_action.triggered.connect(\n lambda: self.set_active(o_name) if menu_action.isChecked() is True else\n self.set_inactive(o_name))\n self.app.ui.menuobjects.addAction(menu_action)\n\n for name in gerber_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in exc_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in cncjob_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in geo_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in script_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in doc_list:\n add_act(name)\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))\n\n elif state == 'delete':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == 
obj.options['name']:\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'rename':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == old_name:\n add_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n add_action.setText(obj.options['name'])\n add_action.setIcon(QtGui.QIcon(icon_files[obj.kind]))\n add_action.triggered.connect(\n lambda: self.set_active(obj.options['name']) if add_action.isChecked() is True else\n self.set_inactive(obj.options['name']))\n\n self.app.ui.menuobjects.insertAction(act, add_action)\n\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'delete_all':\n for act in self.app.ui.menuobjects.actions():\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))", "def __display(self) -> None:\n ligne = 0\n for key, value in self.values.items():\n self.my_data(self.master, key, value, ligne, 0, 1, 1, 2, 2)\n ligne += 2", "def collect_data(self,sensation,action,reward,next_sensation):\n pass", "def update_info(self):\n self.execution_status_widget.update()\n self.execution_info_widget.update()\n self.cluster_widget.update() # update the cluster info even if it is not being displayed\n self.details.original_widget.update()", "def __setstate__(self, state):\n super().__setstate__(state)\n\n self.annotations = SortedList(self.annotations)\n self.links = SortedList(self.links)\n self.groups = SortedList(self.groups)\n self.generics = SortedList(self.generics)\n\n self.index = DataIndex()\n self.index.update_basic_index(list(self.annotations))\n self.index.update_basic_index(list(self.links))\n self.index.update_basic_index(list(self.groups))\n self.index.update_basic_index(list(self.generics))\n\n for a in self.annotations:\n a.set_pack(self)\n\n for a in self.links:\n a.set_pack(self)\n\n for a in self.groups:\n a.set_pack(self)\n\n for a in self.generics:\n a.set_pack(self)", "def show_database(self):\n self.db_window.update()\n self.db_window.deiconify()\n self.selected_glycan = None", "def state(self):\n pass", "def level_data(self):\n self.level(self.data)", "def sync_widgets(self):\n self.data_changed.emit(self.value)", "def flush(self):\n super().flush()\n self.dists = {}", "def __call__(self):\n self.brain._update_fscale(self.factor)\n for key in self.brain.keys:\n if self.widgets[key] is not None:\n self.widgets[key].set_value(self.brain._data[key])", "def on_draw(self):\n self.clear()\n self.manager.draw()", "def actuator_environment(self):\n self.INPUT_DATA_SIZE = 23\n self.pushButton_reload.setEnabled(0)\n self.listWidget_link.setEnabled(0)\n self.listWidget_link.clear()", "def updateControl(self, event):\r\n print(\"updating...\")\r\n product_dict = [\r\n {\"title\":\"Core Python Programming\", \"author\":\"Wesley Chun\",\r\n \"isbn\":\"0132269937\", \"mfg\":\"Prentice Hall\"},\r\n {\"title\":\"Python 
Programming for the Absolute Beginner\",\r\n \"author\":\"Michael Dawson\", \"isbn\":\"1598631128\",\r\n \"mfg\":\"Course Technology\"},\r\n {\"title\":\"Learning Python\", \"author\":\"Mark Lutz\",\r\n \"isbn\":\"0596513984\", \"mfg\":\"O'Reilly\"}\r\n ]\r\n data = self.products + product_dict\r\n self.dataOlv.SetObjects(data)", "def save_shelf(self, shelf_name, data):\r\n shelf_path = os.path.join(self.full_dir, shelf_name)\r\n with shelve.open(shelf_path, 'c') as shelf:\r\n shelf['data'] = data", "def on_refresh(self):\n pass", "def update():", "def update():", "def update_counter(self, counter, entity):", "def erase(self):\n\tself.state={}\n\tself.display(update_board=0)", "def dummy():\n\t\t\tself.edit = True", "def siloview(self, silo):\n \n # Only authorized users can view state information.\n # Should this be restricted to admins and managers only, or shoud users too be able to see this information?\n # Going with restricting this information to admins and managers \n if not ag.granary.issilo(silo):\n abort(404)\n\n ident = request.environ.get('repoze.who.identity')\n if not ident:\n abort(401, \"Not Authorised\")\n silos = ag.authz(ident)\n if silo not in silos:\n abort(403, \"Forbidden\")\n silos_admin = ag.authz(ident, permission='administrator')\n silos_manager = ag.authz(ident, permission='manager')\n #if not ident.get('role') in [\"admin\", \"manager\"]:\n if not (silo in silos_admin or silo in silos_manager):\n abort(403, \"Forbidden. You should be an administrator or manager to view this information\")\n\n options = request.GET\n start = 0\n if 'start' in options and options['start']:\n try:\n start = int(options['start'])\n except:\n start = 0\n rows = 1000\n if 'rows' in options and options['rows']:\n try:\n rows = int(options['rows'])\n except:\n rows = 1000\n\n rdfsilo = ag.granary.get_rdf_silo(silo)\n state_info = ag.granary.describe_silo(silo)\n state_info['silo'] = silo\n state_info['uri_base'] = ''\n if rdfsilo.state and rdfsilo.state['uri_base']:\n state_info['uri_base'] = rdfsilo.state['uri_base']\n state_info['number of data packages'] = get_datasets_count(silo)\n state_info['params'] = {'start':start, 'rows':rows}\n items = {}\n #for item in rdfsilo.list_items():\n for item in get_datasets(silo, start=start, rows=rows):\n items[item] = {}\n try:\n items[item]['embargo_info'] = is_embargoed(rdfsilo, item)\n except:\n pass\n state_info['datasets'] = items\n\n # conneg return\n # Always return application/json\n response.content_type = 'application/json; charset=\"UTF-8\"'\n response.status_int = 200\n response.status = \"200 OK\"\n return simplejson.dumps(state_info)", "def show(self):", "def update(self):\n raise NotImplementedError", "def info_cache(self):\n self.info.info()\n self.dataset.info()\n self.category.info()", "def update(self) -> None:\n ...", "def hook_displaymovediagnostics(self,statsDict):\n ui.plotmovediagnostics(statsDict)", "def refresh(self):\n ida_strlist.build_strlist()\n self.size = ida_strlist.get_strlist_qty()", "def dashboard(self):\r\n return {}", "def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor", "def _save_data(self):\n super()._save_data()\n if self.data:\n self.state['inserted_elements'] = len(SeaLevelRiseMeasure.objects.bulk_create(self.data))\n self.logger.info('Successfully saved %d elements.' % self.state['inserted_elements'])\n else:\n self.logger.info('No elements were saved because no elements were available.')\n self.data = None" ]
[ "0.5340235", "0.5297468", "0.5195286", "0.51405054", "0.51405054", "0.51405054", "0.50970787", "0.497346", "0.497207", "0.49673796", "0.49305034", "0.48768204", "0.48723578", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.485973", "0.48582897", "0.48493633", "0.48464382", "0.48293942", "0.47801942", "0.47800055", "0.47781074", "0.4773682", "0.4759466", "0.4746492", "0.47401872", "0.4733624", "0.47297472", "0.47281575", "0.47272882", "0.47178885", "0.47159243", "0.47118822", "0.4709183", "0.47050005", "0.46999052", "0.46999052", "0.46989226", "0.46987352", "0.46944842", "0.4691398", "0.46895432", "0.4686693", "0.46783212", "0.4676414", "0.4671187", "0.46655494", "0.46619755", "0.46592504", "0.46476865", "0.46424392", "0.46423286", "0.46402004", "0.46184713", "0.46179286", "0.46130913", "0.46097252", "0.4605658", "0.45988506", "0.4598012", "0.45857573", "0.45851767", "0.45848072", "0.45836633", "0.45763367", "0.4576316", "0.45661357", "0.45600298", "0.4545866", "0.45452142", "0.45422128", "0.45409593", "0.45382905", "0.45348862", "0.45348862", "0.45345965", "0.45342913", "0.45316646", "0.45152304", "0.45134577", "0.45061305", "0.45035282", "0.4503415", "0.45028177", "0.4502631", "0.449915", "0.449779", "0.44957313" ]
0.0
-1
Compiles robot from given file and returns class object
def compile_robot(file_name, module_name = "contestant_module"):
    global counter_module
    module_name += str(counter_module)
    counter_module += 1
    mod = importCode(file_name, module_name)
    compiled_class = None
    for symbol in dir(mod):
        if hasattr(getattr(mod, symbol), "act") and getattr(mod, symbol).__name__ != "RobotController":
            compiled_class = getattr(mod, symbol)
            print compiled_class
            globals()[compiled_class.__name__] = compiled_class
    if compiled_class is None:
        raise KrakrobotException("Not found class with act() function named different than RobotController in provided .py")
    return compiled_class, mod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepareRobot(self):\n f = StringIO.StringIO()\n f.write(self.zipfile)\n\n zip = zipfile.ZipFile(f)\n \n #modules of the form {\"robot\" : \"from sr import *...\", ...}\n modules = dict([(os.path.splitext(z.filename)[0], zip.open(z.filename).read())\n for z in zip. infolist() \\\n if os.path.splitext(z.filename)[1] == \".py\"])\n\n class Loader:\n \"\"\"\n An object capable of bringing the python in the contents string\n to life. This works as part of the import hooks structure.\n \"\"\"\n def __init__(self, fullname, contents):\n self.fullname = fullname\n self.contents = contents\n\n def load_module(self, fullname):\n if fullname in sys.modules:\n return sys.modules[fullname]\n\n mod = sys.modules.setdefault(fullname, imp.new_module(fullname))\n mod.__file__ = \"<memory/%s>\" % fullname\n mod.__loader__ = self\n\n code = compile(self.contents, mod.__file__, \"exec\")\n\n exec code in mod.__dict__\n return mod\n\n class Finder:\n \"\"\"\n An object to provide loaders for modules present as strings in the\n modules dict.\n \"\"\"\n def __init__(self, modules):\n self.modules = modules\n\n def find_module(self, fullname, path=None):\n if (fullname in self.modules) and (path == None):\n return Loader(fullname, self.modules[fullname])\n\n return None\n\n #Register the finder with the system\n sys.meta_path.append(Finder(modules))", "def createInstanceSource(pcol, path, nr_robots, smallest_robot_id):\n\n # prevent alphabet related bugs by including e and f objects in alphabet\n if (\"e\" not in pcol.A):\n pcol.A.append(\"e\")\n if (\"f\" not in pcol.A):\n pcol.A.append(\"f\")\n\n with open(path + \".c\", \"w\") as fout:\n fout.write(\"\"\"#include \"%s.h\"\n\n#ifdef NEEDING_WILDCARD_EXPANSION\n #include \"wild_expand.h\"\n#endif\n\n#ifdef PCOL_SIM\"\"\" % path.split(\"/\")[-1]) #only filename\n\n fout.write(\"\"\"\\n char* objectNames[] = {[NO_OBJECT] = \"no_object\", \"\"\")\n for obj in pcol.A:\n fout.write(\"\"\"[OBJECT_ID_%s] = \"%s\", \"\"\" % (obj.upper(), obj))\n\n fout.write(\"\"\"};\n char* agentNames[] = {\"\"\")\n for ag_name in pcol.B:\n fout.write(\"\"\"[AGENT_%s] = \"%s\", \"\"\" % (ag_name.upper(), ag_name))\n fout.write(\"\"\"};\n#endif\n\n//the smallest kilo_uid from the swarm\nconst uint16_t smallest_robot_uid = %d;\n//the number of robots that make up the swarm\nconst uint16_t nr_swarm_robots = %d;\n\nvoid lulu_init(Pcolony_t *pcol) {\"\"\" % (smallest_robot_id, nr_robots) )\n\n # call initPcolony()\n fout.write(\"\"\"\\n //init Pcolony with alphabet size = %d, nr of agents = %d, capacity = %d\n initPcolony(pcol, %d, %d, %d);\"\"\" % (len(pcol.A), len(pcol.B), pcol.n, len(pcol.A), len(pcol.B), pcol.n))\n fout.write(\"\"\"\\n //Pcolony.alphabet = %s\"\"\" % pcol.A)\n\n # init environment\n fout.write(\"\"\"\\n\\n //init environment\"\"\")\n counter = 0;\n for obj, nr in pcol.env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->env.items[%d].nr = %d;\\n\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.global_env.items():\n #replace %id and * with $id and 
$ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init INPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.in_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.in_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init INPUT global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init OUTPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.out_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.out_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init OUTPUT global pswarm environment\"\"\")\n\n for ag_name in pcol.B:\n fout.write(\"\"\"\\n\\n //init agent %s\"\"\" % ag_name)\n #fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), len(pcol.agents[ag_name].programs)))\n fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), getNrOfProgramsAfterExpansion(pcol.agents[ag_name], nr_robots- 1)))\n\n fout.write(\"\"\"\\n //init obj multiset\"\"\")\n counter = 0;\n for obj, nr in pcol.agents[ag_name].obj.items():\n #replace %id and * with $id and $ respectively\n\n for i in range(nr):\n fout.write(\"\"\"\\n pcol->agents[AGENT_%s].obj.items[%d] = OBJECT_ID_%s;\"\"\" % (ag_name.upper(), counter, obj.upper()))\n counter += 1\n\n fout.write(\"\"\"\\n\\n //init programs\"\"\")\n for prg_nr, prg in enumerate(pcol.agents[ag_name].programs):\n fout.write(\"\"\"\\n\\n initProgram(&pcol->agents[AGENT_%s].programs[%d], %d);\"\"\" % (ag_name.upper(), prg_nr, getNrOfRulesWithoutRepetitions(prg)))\n fout.write(\"\"\"\\n //init program %d: < %s >\"\"\" % (prg_nr, prg.print()))\n\n rule_index = 0\n for rule_nr, rule in enumerate(prg):\n # skip rules that contain identical operands and thus have no effect\n if (rule.lhs == rule.rhs and rule.lhs == 'e' and rule.main_type != sim.RuleType.conditional):\n continue\n\n fout.write(\"\"\"\\n //init rule %d: %s\"\"\" % (rule_nr, rule.print(toString=True)) )\n if (rule.main_type != sim.RuleType.conditional):\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_%s, OBJECT_ID_%s, OBJECT_ID_%s, NO_OBJECT, NO_OBJECT);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.lhs.upper(), rule.rhs.upper()))\n else:\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], 
RULE_TYPE_CONDITIONAL_%s_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.alt_type.name.upper(), rule.lhs.upper(), rule.rhs.upper(), rule.alt_lhs.upper(), rule.alt_rhs.upper()))\n\n #increase rule_index\n rule_index += 1\n fout.write(\"\"\"\\n //end init program %d\n pcol->agents[AGENT_%s].init_program_nr++;\"\"\" % (prg_nr, ag_name.upper()))\n fout.write(\"\"\"\\n //end init programs\"\"\")\n\n fout.write(\"\"\"\\n //end init agent %s\"\"\" % ag_name)\n\n fout.write(\"\"\"\\n}\"\"\")\n fout.write(\"\"\"\\n\\nvoid lulu_destroy(Pcolony_t *pcol) {\n //destroys all of the subcomponents\n destroyPcolony(pcol);\n}\"\"\")\n fout.write(\"\"\"\\n\n#ifdef NEEDING_WILDCARD_EXPANSION\nuint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id) {\n //used for a cleaner iteration through the P colony\n //instead of using agents[i] all of the time, we use just agent\n Agent_t *agent;\n\"\"\")\n\n fout.write(\"\"\"\\n uint8_t obj_with_id[] = {\"\"\")\n obj_with_id_size = 0\n for obj in pcol.A:\n if (\"_W_ID\" in obj):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n obj_with_id_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_id_size = %d;\"\"\" % (obj_with_id_size))\n\n fout.write(\"\"\"\\n uint8_t obj_with_any[] = {\"\"\")\n obj_with_any_size = 0\n is_obj_with_any_followed_by_id = []\n for i, obj in enumerate(pcol.A):\n if (obj.endswith(\"_W_ALL\")):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n # if we are at least 2 objects before the end of the list\n if (i < len(pcol.A) - 1):\n # check if this _$ wildcarded object is followed by a _$id object\n if (\"_W_ID\" in pcol.A[i+1]):\n is_obj_with_any_followed_by_id.append(1)\n else:\n is_obj_with_any_followed_by_id.append(0)\n else:\n # this (_$) object is the last one in the list\n is_obj_with_any_followed_by_id.append(0)\n obj_with_any_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_any_size = %d;\n uint8_t is_obj_with_any_followed_by_id[] = {%s};\"\"\" % (obj_with_any_size,\n str(is_obj_with_any_followed_by_id).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n fout.write(\"\"\"\\n\\n uint16_t my_symbolic_id = my_id - smallest_robot_uid;\n\n //replace W_ID wildcarded objects with the object corresponding to the symbolic id\n // e.g.: B_W_ID -> B_0 for my_symbolic_id = 0\n replacePcolonyWildID(pcol, obj_with_id, obj_with_id_size, my_symbolic_id);\n\n //expand each obj_with_any[] element into nr_swarm_robots objects except my_symbolic id.\n // e.g.: B_W_ALL -> B_0, B_2 for nr_swarm_robots = 3 and my_symbolic_id = 1\n expandPcolonyWildAny(pcol, obj_with_any, is_obj_with_any_followed_by_id, obj_with_any_size, my_symbolic_id, nr_swarm_robots);\n\n return my_symbolic_id;\n}\n#endif\"\"\")", "def _compile(self, source: str, filename: str) -> CodeType:\n return compile(source, filename, \"exec\") # type: ignore", "def __init__(self, file):\n self.file = file # maybe should be a folder?\n self.file = self.file.rsplit('.')[0] + \".asm\"\n self.name = None\n self.func_name = None\n self.if_count = 0\n self.call_count = 0", "def compile_contract(file: str, class_call: str) -> str:\n print(f\"Compiling {file}.py ....\")\n exit_code = os.system(\n f\"~/smartpy-cli/SmartPy.sh compile contract/contracts/{file}.py \\\"{class_call}\\\" contract/build\")\n if exit_code != 0:\n raise Exception(f\"Failed to compile Contract : {file}.py\")", "def Build(self, out_file):\n raise NotImplementedError", "def __init__(self, code=\"\", lang=\"\", input=\"\", id=0):\n self.code = code\n 
self.lang = lang\n self.input = input\n self.id = str(id)\n self.output = \"\"\n self.status = \"\"\n self.create_file()\n if(self.lang == \"PYTHON\"):\n self.compile_python()\n elif(self.lang == \"C\"):\n self.compile_c()\n elif(self.lang == \"CPP\"):\n self.compile_cpp()\n elif(self.lang == \"JAVA\"): # For Java File \n self.compile_java()\n elif(self.lang==\"JS\"):\n self.compile_js()\n self.delete_file()", "def default_robot(): #py:default_robot\n class Robot(UsedRobot):\n def __init__(self):\n self.body = RUR._default_robot_body_()\n return Robot()", "def NewRobot(self,module,s,d,x,y,xT,yT,rgb): #Add robot\n\t\tsys.path.insert(0, './studentRobots')\n\t\tmname = module[:-3]\n\t\t\n\t\tspec = importlib.util.find_spec(mname)\n\t\tif spec is None:\n\t\t\tprint(\"can't find the module\")\n\t\telse:\n\t\t\t# the actual import ...\n\t\t\tmodule = importlib.util.module_from_spec(spec)\n\t\t\tspec.loader.exec_module(module)\n\t\tprint(mname,module)\n\t\trbot = module.s1Robot(s,d,x,y,xT,yT,rgb)\n\t\tself.__robotList.append(rbot)", "def compile(cls, node, filename):\n compiler = cls(filename)\n compiler.visit(node)\n code_ops = compiler.code_ops\n code = Code(\n code_ops, [], ['identifiers', 'toolkit'], False, False, True, \n node.name, filename, node.lineno, node.doc,\n )\n return code", "def compile_class(self):\n\t\t\n\t\txml = '<class>\\n' + self.tokenizer.keyword() + self.tokenizer.identifier() + self.tokenizer.symbol()\n\n\t\tself.outfile.write(xml)", "def main():\n parser = argparse.ArgumentParser(description='REA Robot')\n parser.add_argument('--c', metavar='FILE', type=str, required=False, help='File with commands to execute. One command per line')\n args = parser.parse_args()\n\n # Get list of commands to execute\n commands = load_command_list(args.c)\n if len(commands) == 0:\n commands = read_commands_from_console()\n\n logger.debug('List of commands to execute: {}'.format(commands))\n\n # Run the Robot\n robot = Robot()\n cmd_parser = CommandsParser(commands)\n while True:\n cmd_and_args = cmd_parser.get_next_command()\n if cmd_and_args:\n cmd_and_args[0].run(robot, cmd_and_args[1])\n else:\n break", "def compile(path_to_src, path_to_dest, connections, tunables, file_type=None):\n\n # if not provided a file type, infer from file extension\n if file_type == None:\n file_type = path_to_src.split(\".\")[-1]\n\n assert file_type in tokenizers\n tokenizer = tokenizers[file_type]\n\n graph = build_graph(connections)\n\n with open(path_to_src, 'r') as file:\n src = file.read()\n\n tokens = tokenizer.tokenize(src)\n\n tokens = first_pass(tokens, graph)\n\n #tokens = second_pass(tokens, gates)\n\n #tokens = third_pass(tokens, gates)\n\n compiled = tokenizer.detokenize(tokens)\n\n with open(path_to_dest, 'w') as file:\n file.write(compiled)", "def compile_class(self):\r\n self.tokenizer.advance() # ignore 'class' keyword\r\n self.class_name = self.tokenizer.identifier()\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n while self.tokenizer.curtok < len(self.tokenizer.tokens) - 1:\r\n dec = self.tokenizer.key_word()\r\n if dec == \"field\" or dec == \"static\":\r\n self.compile_var_dec()\r\n else:\r\n self.compile_subroutine()\r\n self.tokenizer.advance()", "def compile(expression: str) -> Compiled:\r\n e = Compiled(expression)\r\n e.tokenize()\r\n return e", "def __init__(self, xml_name, recompile_cpp=False, rendering=True):\n if recompile_cpp:\n self._update_wrapper()\n\n if sys.platform.startswith('darwin'):\n cdll_path = 
os.path.join(os.path.dirname(__file__), \"libsimenv.dylib\")\n elif sys.platform.startswith('linux'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.so\")\n elif sys.platform.startswith('win32'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.dll\")\n else:\n raise EnvironmentError(\"Unknown operating system found.\")\n\n model_path = os.path.join(pathlib.Path(__file__).parent, \"mujoco_model/\", xml_name).encode('utf-8')\n self.rendering = rendering\n\n # C++ control engine.\n self.wrapper = ctypes.CDLL(cdll_path)\n self.instance = self.wrapper.get_instance(ctypes.c_char_p(model_path), ctypes.c_bool(rendering))\n\n # Indices of the object bodies.\n self.obstacle_body_index = self.get_body_index(\"obstacle\")\n self.agent_body_index = self.get_body_index(\"agent\")\n\n # Indices of the joints.\n self.obstacle_jnt_index = self.get_jnt_index(\"slider:obstacle\")\n self.agent_jnt_x_index = self.get_jnt_index(\"slider:agent-obstacle_x\")\n self.agent_jnt_y_index = self.get_jnt_index(\"slider:agent-y\")\n\n # Initial positions from the configuration.\n self.obstacle_pos = self.get_body_ini_pos(self.obstacle_body_index)\n self.agent_pos = self.get_body_ini_pos(self.agent_body_index)", "def PrepareCompile(file):\n global oilcc_I,oilcc_o,oilcc_S,oilcc_target\n fp = open(file,'r')\n # some flags\n item = ''; #one item is minimum object such as TASK,ALARM ...\n barcenum = 0;\n flag = False; #has \" { \" encountered or not\n start = False #has match an obj start or not\n for line in fp.readlines():\n #firstly, filter out the comment on this line\n el = DropComment(line);\n if(start == False):\n #{\n item = ''; \n barcenum = 0;\n flag = False;\n if(IsIt('osekObj',el)):\n start = True;\n item += el;\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n if((flag == True) and (barcenum == 0)): #in one line\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n else: # special process for include\n inc = GetIt('include',el)\n if(inc != None): #include file\n flag_inc = False\n for I in oilcc_I:\n finc = I + '/' + inc[0]\n if(os.path.exists(finc)):\n print 'INFO:parse include file <%s> in the path <%s>'%(inc[0],I)\n PrepareCompile(finc);\n flag_inc = True;\n if(flag_inc == False):\n print 'ERROR:cann\\'t find out the file %s!'%(inc[0])\n sys.exit(-1)\n #}\n else:\n #{\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n item += el;\n if((flag == True) and (barcenum == 0)):\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n #}\n fp.close()", "def __init__(self):\n BuildSystemBase.__init__(self, \"makefile\")", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(sys.argv[1]) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tn_samples = int(temp[0])\n\t\tn_features = int(temp[1])\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tif len(value)<28:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tdata[count] = np.asarray(value[:28], dtype=np.float)\n\t\t\t\ttarget[count] = np.asarray(value[28], dtype=np.int)\t\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t#print \"data 
is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\n\twith open(join(module_path, 'descr', 'crawl.rst')) as rst_file:\n\t\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=fdescr,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def compile(c_file: File) -> File:\n os.system(\"gcc -c {c_file}\".format(c_file=c_file.path))\n return File(c_file.path.replace('.c', '.o'))", "def makeTestProcessor(test_processor_path):\r\n\r\n className = splitext(basename(test_processor_path))[0]\r\n\r\n with open(test_processor_path, 'w') as f:\r\n f.write(\"\"\"\\\r\n'''\r\nTest processor class - should be deleted upon completion of test\r\n'''\r\n\r\n'''___Built-In Modules___'''\r\nimport sys\r\nfrom os.path import dirname\r\n\r\n'''___Third-Party Modules___'''\r\n\r\n'''___NPL Modules___'''\r\ndataProcessing_directory = dirname(dirname(__file__))\r\nsys.path.append(dataProcessing_directory)\r\nfrom AbstractProcessor import AbstractProcessor\r\n\r\nclass %s(AbstractProcessor):\r\n processor_directory = dirname(__file__)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\"\"\" % (className))\r\n\r\n return 0", "def _get_codeobj(pyfile):\n from imp import PY_COMPILED, PY_SOURCE\n\n result, fileobj, fullpath = _check_if_pyc(pyfile)\n\n # WARNING:\n # fp.read() can blowup if the module is extremely large file.\n # Lookout for overflow errors.\n try:\n data = fileobj.read()\n finally:\n fileobj.close()\n\n # This is a .pyc file. Treat accordingly.\n if result is PY_COMPILED:\n # .pyc format is as follows:\n # 0 - 4 bytes: Magic number, which changes with each create of .pyc file.\n # First 2 bytes change with each marshal of .pyc file. 
Last 2 bytes is \"\\r\\n\".\n # 4 - 8 bytes: Datetime value, when the .py was last changed.\n # 8 - EOF: Marshalled code object data.\n # So to get code object, just read the 8th byte onwards till EOF, and\n # UN-marshal it.\n import marshal\n code_obj = marshal.loads(data[8:])\n\n elif result is PY_SOURCE:\n # This is a .py file.\n code_obj = compile(data, fullpath, 'exec')\n\n else:\n # Unsupported extension\n raise Exception(\"Input file is unknown format: {0}\".format(fullpath))\n\n # Return code object\n return code_obj", "def get_codecoolers_from_file(cls, file_name):\n constructors = cls.load_data_from_file(file_name)\n\n for constructor in constructors:\n name, surname, login, password, email = constructor\n\n cls(name, surname, login, password, email)", "def compile(self, workdir):\n with open(workdir) as f:\n ast = self.parser.generate_ast(f.read())\n f.close()\n\n return None", "def compile(self,\n compile_dir: pathlib.Path) -> str:\n\n self.compile_dir = pathlib.Path(compile_dir).absolute()\n\n self.modules = subprocess.run('module list', shell=True, stderr=subprocess.PIPE).stderr\n\n # check compile directory.\n if not self.compile_dir.is_dir():\n warnings.warn(str(self.compile_dir.absolute()) + ' directory does not exist, creating')\n self.compile_dir.mkdir(parents=True)\n\n # Remove run directory if it exists in the source_dir\n source_compile_dir = self.source_dir.joinpath('Run')\n if source_compile_dir.is_dir():\n shutil.rmtree(str(source_compile_dir.absolute()))\n\n # Get directory for setEnvar\n compile_options_file = self.source_dir.joinpath('compile_options.sh')\n\n # Write setEnvar file\n with compile_options_file.open(mode='w') as file:\n for option, value in self.compile_options.items():\n file.write(\"export {}={}\\n\".format(option, value))\n\n # Compile\n # Create compile command for machine spec\n compile_cmd = '/bin/bash -c \"'\n if self.pre_compile_cmd is not None:\n compile_cmd += self.pre_compile_cmd + '; '\n compile_cmd += './configure ' + self.compiler + '; '\n compile_cmd += './compile_offline_NoahMP.sh '\n compile_cmd += str(compile_options_file.absolute())\n compile_cmd += '\"'\n compile_cmd = shlex.split(compile_cmd)\n\n self.compile_log = subprocess.run(\n compile_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=str(self.source_dir.absolute())\n )\n\n # Add in unique ID file to match this object to prevent assosciating\n # this directory with another object\n self.object_id = str(uuid.uuid4())\n\n with self.compile_dir.joinpath('.uid').open(mode='w') as f:\n f.write(self.object_id)\n\n if self.compile_log.returncode == 0:\n # Open permissions on compiled files\n subprocess.run(['chmod', '-R', '755', str(self.source_dir.joinpath('Run'))])\n\n # Wrf hydro always puts files in source directory under a new directory called 'Run'\n # Copy files to the specified simulation directory if its not the same as the\n # source code directory\n if len(self.table_files) == 0:\n self.table_files = list(self.source_dir.joinpath('Run').glob('*.TBL'))\n\n shutil.copyfile(str(self.source_dir.joinpath('Run').joinpath('wrf_hydro.exe')),\n str(self.compile_dir.joinpath('wrf_hydro.exe')))\n\n # Remove old files\n # shutil.rmtree(str(self.source_dir.joinpath('Run')))\n\n # Open permissions on copied compiled files\n subprocess.run(['chmod', '-R', '755', str(self.compile_dir)])\n\n # Get file lists as attributes\n # Get list of table file paths\n\n # Get wrf_hydro.exe file path\n self.wrf_hydro_exe = self.compile_dir.joinpath('wrf_hydro.exe')\n\n # Save the 
object out to the compile directory\n with self.compile_dir.joinpath('WrfHydroModel.pkl').open(mode='wb') as f:\n pickle.dump(self, f, 2)\n\n print('Model successfully compiled into ' + str(self.compile_dir.absolute()))\n else:\n # Save the object out to the compile directory\n with self.compile_dir.joinpath('WrfHydroModel.pkl').open(mode='wb') as f:\n pickle.dump(self, f, 2)\n raise ValueError('Model did not successfully compile.' +\n self.compile_log.stderr.decode('utf-8'))", "def createClassFile( p ):\n create_modules( p[\"package\"] )\n name = p[\"protocol\"][\"name\"]\n name.lower()\n path = os.path.join( *p[\"package\"].split( \".\" ) )\n with open( \"./%s/%s.py\" % ( path, name ), \"w\" ) as f:\n for i in p[\"imports\"]:\n createClassFile( i )\n\n c = Klass( package=p[\"package\"], includes=p[\"imports\"], **p[\"protocol\"] )\t\n\n f.write( c.generate() )", "def compile(self):\n\n\t\twhile(self.tokenizer.has_more_tokens()):\n\n\t\t\tif self.tokenizer.get_token() == 'class':\n\t\t\t\tself.compile_class()\n\t\t\telif self.tokenizer.get_token() in ['field','static']:\n\t\t\t\tself.compile_class_var_dec()\n\t\t\telif self.tokenizer.get_token() in ['function', 'method', 'constructor']:\n\t\t\t\tself.compile_subroutine()\n\n\t\tself.outfile.write('<symbol> } </symbol>\\n' + '</class>')\n\t\tself.outfile.close()", "def from_stan_file(\n cls,\n stan_file: str,\n model_data: Optional[str] = None,\n *,\n stanc_args: List[str] = [],\n make_args: List[str] = [],\n seed: int = 1234,\n capture_stan_prints: bool = True,\n ):\n result = compile_model(stan_file, stanc_args=stanc_args, make_args=make_args)\n return cls(\n str(result), model_data, seed=seed, capture_stan_prints=capture_stan_prints\n )", "def compile(self):\n raise NotImplementedError()", "def FromFile(cls, filename):\n test_run = cls()\n test_run.FromDejaGnuOutput(filename)\n test_run.CleanUpTestResults()\n return test_run", "def make_prog(prog_path: str, c_files: List[File]) -> File:\n o_files = [\n compile(c_file)\n for c_file in c_files\n ]\n prog_file = link(prog_path, o_files)\n return prog_file", "def compile(cls, node, filename):\n compiler = cls(filename)\n compiler.visit(node)\n return compiler.stack.pop()", "def run_compiler(text_or_file: str, compiler_suite: str, fail_when: ErrorCode = ERROR) -> Any:\n preprocessor, parser, ast, compiler = load_compiler_suite(compiler_suite)\n return compileDSL(text_or_file, preprocessor(), parser(), ast(), compiler(), fail_when)", "def compile(self, document):\n file = self._create_file(document)\n return file", "def compile(self, args, classpath, sources, classes_output_dir, analysis_file):\r\n raise NotImplementedError()", "def LoadProject(filename, toolchains):\n\n print 'Processing %s...' 
% filename\n # Default src directory is the directory the description was found in\n desc = open(filename, 'r').read()\n desc = eval(desc, {}, {})\n\n # Verify the format of this file\n if not ValidateFormat(desc, DSC_FORMAT):\n ErrorExit('Failed to validate: ' + filename)\n\n # Check if we are actually interested in this example\n match = False\n for toolchain in toolchains:\n if toolchain in desc['TOOLS']:\n match = True\n break\n if not match:\n return None\n\n desc['FILENAME'] = filename\n return desc", "def __init__(self, path_to_the_file):", "def __init__(self, path_to_the_file):", "def compile_file(self, path: str, incl_search_paths: Optional[List[str]]=None, defines: Optional[Dict[str,str]]=None) -> FileInfo:\n\n if incl_search_paths is None:\n incl_search_paths = []\n\n input_stream, included_files = preprocessor.preprocess_file(self.env, path, incl_search_paths, defines)\n\n # Run Antlr parser on input\n parsed_tree = sa_systemrdl.parse(\n input_stream,\n \"root\",\n messages.RdlSaErrorListener(self.msg)\n )\n\n if self.msg.had_error:\n self.msg.fatal(\"Parse aborted due to previous errors\")\n\n # Traverse parse tree with RootVisitor\n self.visitor.visit(parsed_tree)\n\n # Reset default property assignments from namespace.\n # They should not be shared between files since that would be confusing.\n self.namespace.default_property_ns_stack = [{}]\n\n if self.msg.had_error:\n self.msg.fatal(\"Compile aborted due to previous errors\")\n\n return FileInfo(input_stream.strdata, included_files)", "def build_from_source(obj):\n if (obj.method == 'robot'):\n print(\"TODO: build obo and owl\")\n elif (obj.method == 'jenkins-archive'):\n print(\"TODO: download and unzip\")\n elif (obj.method == 'github-archive'):\n print(\"TODO: download and unzip\")\n elif (obj.method == 'svn-co'):\n print(\"TODO: run svn\")\n else:\n print(\"UNKNOWN METHOD:\"+obj.method)", "def cl_program_from_file(context, filename):\n return cl.Program(context, open(os.path.join(CL_PATH, filename)).read())", "def import_classifier(name):\n classinput=open(name,'rb')\n main_class=load(classinput)\n classinput.close()\n return main_class", "def __init__(self, root_dir, relpath, must_exist=True):\r\n\r\n path = os.path.abspath(os.path.join(root_dir, relpath))\r\n buildfile = os.path.join(path, BuildFile._CANONICAL_NAME) if os.path.isdir(path) else path\r\n\r\n if os.path.isdir(buildfile):\r\n raise IOError(\"%s is a directory\" % buildfile)\r\n\r\n if must_exist:\r\n if not os.path.exists(buildfile):\r\n raise IOError(\"BUILD file does not exist at: %s\" % buildfile)\r\n\r\n if not BuildFile._is_buildfile_name(os.path.basename(buildfile)):\r\n raise IOError(\"%s is not a BUILD file\" % buildfile)\r\n\r\n if not os.path.exists(buildfile):\r\n raise IOError(\"BUILD file does not exist at: %s\" % buildfile)\r\n\r\n self.root_dir = os.path.realpath(root_dir)\r\n self.full_path = os.path.realpath(buildfile)\r\n\r\n self.name = os.path.basename(self.full_path)\r\n self.parent_path = os.path.dirname(self.full_path)\r\n\r\n self._bytecode_path = os.path.join(self.parent_path, '.%s.%s.pyc' % (\r\n self.name, PythonIdentity.get()))\r\n\r\n self.relpath = os.path.relpath(self.full_path, self.root_dir)\r\n self.canonical_relpath = os.path.join(os.path.dirname(self.relpath), BuildFile._CANONICAL_NAME)", "def runRobot():", "def __init__(self, filename):\r\n self._results = SpecParser(filename).parse()", "def get_robots(url):\n rp = robotparser.RobotFileParser()\n rp.set_url(urljoin(url, '/robots.txt'))\n html_ = urlopen(urljoin(url, 
'/robots.txt')).read().decode('utf-8',errors = 'ignore').split('\\n')\n rp.parse(html_)#rp.read()解析出错UnicodeDecodeError: 'utf-8' codec can't decode byte\n return rp", "def __init__(self, input_file):\n file_content = None\n self.input_file_name = input_file\n\n # opening the input jack file\n print(\"file is \"+input_file)\n file = open(input_file, \"r\")\n if file.mode == \"r\":\n # extracting the entire text\n self.file_content = file.read()\n\n # removing jack documentation\n self.file_content = re.sub(r\"(/{2}.*?\\n)\", \"\",file_content)\n self.file_content = re.sub(r\"/\\*{1,2}(.|\\n)*?\\*/\", \"\", file_content, re.DOTALL)\n print(file_content)\n file.close()\n\n # creating a list and filtering the list from empty strings\n if self.file_content is not None:\n\n # setting other variables\n self.token_type = None\n self.token = None\n self.inside_string = False\n self.word = \"\"\n self.key_words = {\"class\", \"constructor\", \"function\", \"method\",\n \"field\", \"static\", \"var\", \"int\", \"char\", \"true\",\n \"boolean\", \"void\", \"false\", \"null\", \"this\",\n \"let\", \"do\", \"if\", \"else\", \"while\", \"return\"}\n\n self.symbols = {\"{\", \"}\", \"(\", \")\", \"[\", \"]\", \".\", \",\", \";\", \"+\",\n \"-\", \"*\", \"/\", \"&\", \"|\", \"<\", \">\", \"=\", \"~\"}\n self.double_symbols = {\"<=\", \">=\", \"!=\"}#todo", "def parseFile(self,filename):\n\n name = '[0-9a-zA-Z_]+'\n string = '\\\\\"(.+)\\\\\"'\n\n testclass = None\n functionName = None\n\n fin = open(filename, 'r')\n for line in fin:\n # testclass starts\n res = re.match('class ('+name+')', line)\n if res != None:\n testclass = res.group(1)\n\n # end of testclass \n if re.match('};', line) != None:\n testclass = None\n\n # function start\n res = re.match('\\\\s+void ('+name+')\\\\(\\\\)', line)\n if res != None:\n functionName = res.group(1)\n\n elif re.match('\\\\s+}', line) != None:\n functionName = None\n\n if functionName == None:\n continue\n\n # check\n res = re.match('\\s+check.*\\('+string, line)\n if res != None:\n code = res.group(1)\n\n # code..\n res = re.match('\\\\s+'+string, line)\n if res != None:\n code = code + res.group(1)\n\n # assert\n res = re.match('\\\\s+ASSERT_EQUALS\\\\(\\\\\"([^\"]*)\\\\\",', line)\n if res != None and len(code) > 10:\n node = { 'testclass':testclass,\n 'functionName':functionName,\n 'code':code,\n 'expected':res.group(1) }\n self.nodes.append(node)\n code = ''\n\n # close test file\n fin.close()", "def visit(self, file_object):\n\n module = ast.parse(file_object.read())\n # TODO remove prefixes such as C:\\Users\\...\n module_name = file_object.name.rstrip(\".py\").replace(\"/\", \".\")\n\n exec(\"import %s\"%module_name)\n self._module = eval(module_name)\n\n self.found = {\"vars\":[], \"classes\":[], \"funcs\":[]}\n self.found_classes = set()\n\n self._explorer(self, [module_name]).visit(module)", "def __init__(\n self,\n source_dir: str,\n model_config: str,\n hydro_namelist_config_file: str=None,\n hrldas_namelist_config_file: str=None,\n compile_options_config_file: str=None,\n compiler: str = 'gfort',\n pre_compile_cmd: str = None,\n compile_options: dict = None\n ):\n\n # Instantiate all attributes and methods\n # Attributes set by init args\n self.source_dir = pathlib.Path(source_dir)\n \"\"\"pathlib.Path: pathlib.Path object for source code directory.\"\"\"\n\n self.model_config = model_config.lower()\n \"\"\"str: Specified configuration for which the model is to be used, e.g. 
'nwm_ana'\"\"\"\n\n self.compiler = compiler\n \"\"\"str: The compiler chosen at compile time.\"\"\"\n\n self.pre_compile_cmd = pre_compile_cmd\n \"\"\"str: Command string to be executed prior to model compilation, e.g. to load modules\"\"\"\n\n self.compile_options = dict()\n \"\"\"dict: Compile-time options. Defaults are loaded from json file stored with source\n code.\"\"\"\n\n # Set nameilst config file defaults while allowing None to be passed.\n self.hydro_namelist_config_file = hydro_namelist_config_file\n \"\"\"Namelist: Hydro namelist file specified for model config\"\"\"\n self.hrldas_namelist_config_file = hrldas_namelist_config_file\n \"\"\"Namelist: HRLDAS namelist file specified for model config.\"\"\"\n self.compile_options_config_file = compile_options_config_file\n \"\"\"Namelist: Compile options file specified for model config.\"\"\"\n\n default_hydro_namelist_config_file = 'hydro_namelists.json'\n default_hrldas_namelist_config_file = 'hrldas_namelists.json'\n default_compile_options_config_file = 'compile_options.json'\n\n if self.hydro_namelist_config_file is None:\n self.hydro_namelist_config_file = default_hydro_namelist_config_file\n if self.hrldas_namelist_config_file is None:\n self.hrldas_namelist_config_file = default_hrldas_namelist_config_file\n if self.compile_options_config_file is None:\n self.compile_options_config_file = default_compile_options_config_file\n\n # Load master namelists\n self.hydro_namelists = JSONNamelist(\n str(self.source_dir.joinpath(self.hydro_namelist_config_file))\n )\n \"\"\"Namelist: Hydro namelist for specified model config\"\"\"\n self.hydro_namelists = self.hydro_namelists.get_config(self.model_config)\n\n self.hrldas_namelists = JSONNamelist(\n str(self.source_dir.joinpath(self.hrldas_namelist_config_file))\n )\n \"\"\"Namelist: HRLDAS namelist for specified model config\"\"\"\n self.hrldas_namelists = self.hrldas_namelists.get_config(self.model_config)\n\n # Attributes set by other methods\n self.compile_dir = None\n \"\"\"pathlib.Path: pathlib.Path object pointing to the compile directory.\"\"\"\n\n self.git_hash = self._get_githash()\n \"\"\"str: The git revision hash if seld.source_dir is a git repository\"\"\"\n\n self.version = None\n \"\"\"str: Source code version from .version file stored with the source code.\"\"\"\n\n self.compile_dir = None\n \"\"\"pathlib.Path: pathlib.Path object pointing to the compile directory.\"\"\"\n\n self.configure_log = None\n \"\"\"CompletedProcess: The subprocess object generated at configure.\"\"\"\n\n self.compile_log = None\n \"\"\"CompletedProcess: The subprocess object generated at compile.\"\"\"\n\n self.object_id = None\n \"\"\"str: A unique id to join object to compile directory.\"\"\"\n\n self.table_files = list()\n \"\"\"list: pathlib.Paths to *.TBL files generated at compile-time.\"\"\"\n\n self.wrf_hydro_exe = None\n \"\"\"pathlib.Path: pathlib.Path to wrf_hydro.exe file generated at compile-time.\"\"\"\n\n # Set attributes\n # Get code version\n with self.source_dir.joinpath('.version').open() as f:\n self.version = f.read()\n\n # Load compile options\n self.compile_options = JSONNamelist(\n str(self.source_dir.joinpath(self.compile_options_config_file))\n )\n \"\"\"Namelist: Hydro namelist for specified model config\"\"\"\n self.compile_options = self.compile_options.get_config(self.model_config)\n\n # \"compile_options\" is the argument to __init__\n if compile_options is not None:\n self.compile_options.update(compile_options)\n\n # Add compiler and compile options as 
attributes and update if needed\n self.compiler = compiler", "def run_cpp_scanner(srcfile, tokfile):\n path = resourcedir\n shutil.copyfile(srcfile, os.path.join(path, 'srcfile'))\n #open(os.path.join(path, 'srcfile'), 'w').write(open(srcfile).read())\n \n if sys.platform[:3] == 'win':\n print 'need win exe'\n elif sys.platform[:5] == 'linux':\n exec_name = 'cpp_scanner_ubuntu'\n cmd = 'cd %s; ./%s; cd -' % (path, exec_name)\n os.popen(cmd)\n \n shutil.copyfile(os.path.join(path, 'tokfile'), tokfile) \n #open(tokfile, 'w').write( open(os.path.join(path, 'tokfile')).read() )", "def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc", "def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)", "def test_class_definition_with_base(self):\n self.script(\"# script.py\\n\"\n \"class C(object):\\n\"\n \" 'cdoc'\\n\"\n \" pass\\n\")\n self.compile()\n\n script = self.find_code_component(name=\"script.py\")\n class_def = self.find_code_component(name=\"C\")\n var_object = self.find_code_component(name=\"object\")\n\n self.assertEqual(class_def.type, \"class_def\")\n self.assertEqual(class_def.mode, \"w\")\n self.assertEqual(class_def.first_char_line, 2)\n self.assertEqual(class_def.first_char_column, 0)\n self.assertEqual(class_def.last_char_line, 4)\n self.assertEqual(class_def.last_char_column, 8)\n self.assertEqual(class_def.container_id, script.id)\n\n self.assertEqual(var_object.type, \"name\")\n self.assertEqual(var_object.mode, \"r\")\n self.assertEqual(var_object.first_char_line, 2)\n self.assertEqual(var_object.first_char_column, 8)\n self.assertEqual(var_object.last_char_line, 2)\n self.assertEqual(var_object.last_char_column, 14)\n self.assertEqual(var_object.container_id, class_def.id)\n\n class_def_block = self.metascript.code_blocks_store[class_def.id]\n self.assertEqual(class_def_block.code,\n \"class 
C(object):\\n\"\n \" 'cdoc'\\n\"\n \" pass\")\n self.assertEqual(class_def_block.docstring, \"cdoc\")\n self.assertTrue(bool(class_def_block.code_hash))", "def _genspider(self, module, name, domain, template_name, template_file):\n tvars = {\n 'project_name': settings.get('BOT_NAME'),\n 'ProjectName': string_camelcase(settings.get('BOT_NAME')),\n 'module': module,\n 'name': name,\n 'domain': domain,\n 'classname': '%sSpider' % ''.join([s.capitalize() \\\n for s in module.split('_')])\n }\n\n spiders_module = __import__(settings['NEWSPIDER_MODULE'], {}, {}, [''])\n spiders_dir = abspath(dirname(spiders_module.__file__))\n spider_file = \"%s.py\" % join(spiders_dir, module)\n\n shutil.copyfile(template_file, spider_file)\n render_templatefile(spider_file, **tvars)\n print \"Created spider %r using template %r in module:\" % (name, \\\n template_name)\n print \" %s.%s\" % (spiders_module.__name__, module)", "def build_robot(cls, mujoco_simulation: SType, physical: bool) -> Robot:\n pass", "def __init__(self, robot):\n self._robot = robot", "def __init__(self, robot):\n self._robot = robot", "def __init__(self, robot):\n self._robot = robot", "def compile(self):\n return None # pragma: no cover", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def compile(path: str) -> bytes:\n if not path.endswith('.py'):\n raise InvalidPathException(path)\n\n return Compiler().compile(path)", "def get_code(self, fullname):\n source_path = self.get_filename(fullname)\n source_bytes = self.get_data(source_path)\n return compile(source_bytes, source_path, 'exec',\n dont_inherit=True)", "def compile(self, dirpath, params=None, compiler='ast_native', verbose=False):\n compiler_handle = ctypes.c_void_p()\n _check_call(_LIB.TreeliteCompilerCreate(c_str(compiler),\n ctypes.byref(compiler_handle)))\n _params = dict(params) if isinstance(params, list) else params\n self._set_compiler_param(compiler_handle, _params or {})\n _check_call(_LIB.TreeliteCompilerGenerateCode(\n compiler_handle,\n self.handle,\n ctypes.c_int(1 if verbose else 0),\n c_str(dirpath)))\n _check_call(_LIB.TreeliteCompilerFree(compiler_handle))", "def _compile(self, filename, source):\n \n if source and source[-1] != '\\n':\n source = source + '\\n'\n code = __builtin__.compile(source, filename.cStr(), 'exec')\n\n # try to cache the compiled code\n pycFilename = Filename(filename)\n pycFilename.setExtension(pycExtension)\n try:\n f = open(pycFilename, 'wb')\n except IOError:\n pass\n else:\n f.write('\\0\\0\\0\\0')\n f.write(struct.pack('<I', self.timestamp))\n f.write(marshal.dumps(code))\n f.flush()\n f.seek(0, 0)\n f.write(imp.get_magic())\n f.close()\n\n return code", "def genCode(self, fileName, allowedTypes, genGraph = 1, isRootNode = 0, \r\n metaModelName = None, export = 0, newTypes = None, \r\n nodesToGenList = [], openModelStringList=[], attrGenFix=False):\r\n file = open(fileName, \"w+t\" )\r\n\r\n dir, fil = os.path.split(fileName)\r\n funcName = string.split (fil, \".\")\t\t\t\t\t# compose class name\r\n\r\n if export == 0:\r\n file.write('\"\"\"\\n')\r\n file.write(\"__\"+ fil +\"_____________________________________________________\\n\")\r\n file.write(\"\\n\") \r\n file.write(\"Automatically generated AToM3 Model File (Do not modify directly)\\n\")\r\n file.write(\"Author: \"+USER_NAME+\"\\n\")\r\n file.write(\"Modified: \"+time.asctime()+\"\\n\") \r\n file.write(\"__\"+ len(fil)*\"_\" 
+\"_____________________________________________________\\n\")\r\n file.write('\"\"\"\\n')\r\n #file.write('from graph_ASG_ERmetaMetaModel import *\\n')\t\t# just for the case!\r\n file.write('from stickylink import *\\n')\t\t\t\t# necessary if we describe some graphLinks...\r\n file.write('from widthXfillXdecoration import *\\n')\t\t\t# necessary if we describe some graphLinks...\r\n\r\n # import the subclass ...\r\n if( self.getClass() not in self.nodeTypes ):\r\n file.write('from '+self.getClass()+' import *\\n')\r\n \r\n # import all the node types...\r\n for nodetype in self.nodeTypes:\r\n if( self.listNodes[nodetype] != [] ): \r\n file.write('from '+nodetype+' import *\\n') \r\n \r\n # Import all the graphical appearences of the node types... that\r\n # are actually used! \r\n # Added by Denis Dube, last modified on Sept. 9, 2004\r\n if( genGraph ): \r\n # STEP 1: Find all graphObjects used in the model\r\n graph_objectDict = dict()\r\n for nodetype in self.listNodes.keys():\r\n for node in self.listNodes[nodetype]:\r\n if( node.graphClass_ ):\r\n graph_objectDict[ node.graphObject_.getGraphClassName() ]=1\r\n # STEP 2: Create the import statements for each graphObject\r\n for graphObject in graph_objectDict.keys():\r\n file.write('from '+graphObject+' import *\\n')\r\n # NOTE: I think the next two statements are caution overkill...\r\n #file.write('try: from '+graphObject+' import *\\n')\r\n #file.write('except: print \"WARNING: unable to load the graphical appearence file: '+graphObject+'.py\" \\n')\r\n \r\n # import the basic types...\r\n for typ in allowedTypes.keys():\r\n typeInstance, params = allowedTypes[typ]\r\n typeName = typeInstance.__name__\r\n file.write('from '+typeName+' import *\\n')\r\n \r\n # Generate the ASG constructor\r\n if( attrGenFix ):\r\n self.__genASGconstructor( file, funcName ) \r\n else:\r\n # Old way\r\n file.write('\\ndef '+funcName[0]+'(self, rootNode):\\n')\r\n \r\n # Generate code for the ASGroot attributes\r\n if( isRootNode ): \r\n # Should attrGenFix be always true? 
More testing required\r\n #todo: attrGenFix == True always?\r\n if( attrGenFix ): self.__genAttributesROOT( file )\r\n else: self.genAttributesCode(file, genGraph, \"rootNode\")\r\n\r\n self.writeGraph2File(file, genGraph, isRootNode, None, \" \", 1, funcName[0], nodesToGenList=nodesToGenList)\r\n\r\n # generate code for the sub-models\r\n counter = 0\r\n if( not nodesToGenList ):\r\n for nodetype in self.nodeTypes:\r\n for node in self.listNodes[nodetype]: \r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n else: \r\n for node in nodesToGenList:\r\n newFile = funcName[0]+str(counter)\r\n res = node.genCode(os.path.join(dir, newFile+'.py'), allowedTypes, genGraph, 0)\r\n counter = counter + 1\r\n \r\n\r\n if isRootNode:\r\n hierarchical = self.isHierarchical()\r\n if export == 0:\r\n if hierarchical:\r\n file.write('def main'+funcName[0]+'(self, ASGroot):\\n')\r\n # file.write(' self.ASGroot = '+self.getClass()+'(self)\\n')\r\n file.write(' self.'+funcName[0]+'(self, ASGroot)\\n\\n')\r\n file.write(' self.'+funcName[0]+'_connections(self, ASGroot)\\n\\n')\r\n file.write('newfunction = main'+funcName[0]+'\\n\\n')\r\n else:\r\n file.write('newfunction = '+funcName[0]+'\\n\\n')\r\n if newTypes and len(newTypes)>0: # generate a list of newly added types\r\n file.write('loadedTypes = [')\r\n counter = 0\r\n for nt in newTypes:\r\n if counter > 0: file.write(',')\r\n file.write(str(nt))\r\n counter = counter + 1\r\n file.write(']\\n')\r\n \r\n self.genLoadedMMName( file )\r\n if( attrGenFix ): file.write( '\\natom3version = \\'0.3\\'\\n' )\r\n file.close()\r\n return funcName[0] \t\t\t\t# this indicates that we've done something\r", "def CompileClass(self):\n\n ## Go to first token\n self.Tokenizer.advance()\n\n ## Expecting class keyword\n self._eat('class')\n self._write_opener('class')\n self._write_entry('keyword','class')\n\n ## Now handle the identifier\n\n if not self.Tokenizer.currentTokenType == \"IDENTIFIER\":\n raise ValueError(\"ERROR_UNEXPECTED_TOKEN: \" + self.Tokenizer.currentTokenType + \" \" + self.Tokenizer.currentToken )\n else:\n self._write_entry(self.Tokenizer.currentTokenType.lower(), self.Tokenizer.currentToken)\n\n self.Tokenizer.advance()\n\n ## Now opening curly bracket\n self._eat('{')\n self._write_entry('symbol','{')\n\n #self.Tokenizer.advance()\n\n\n # Now expecting 0 or more classVarDec\n\n # self.Tokenizer.advance()\n #\n # if self.Tokenizer.currentTokenType == \"KEYWORD\" and self.Tokenizer.currentToken in [\"static\", \"field\"]:\n # self._write_closer('class')\n # self.outputFile.close()\n\n\n ## Finally the closing brace\n try:\n self._eat('}')\n self._write_entry('symbol', '}')\n self._write_closer('class')\n except:\n print(\"waah\")\n\n self.outputFile.close()", "def load_assemble_file(task_file):\n return imp.load_source('assemblefile', task_file)", "def gen_csource(protocol):\n\tdef format_default(reg):\n\t\t\"\"\"Given a reg, return its default value formatted as a string for inclusion in\n\t\t a C source file.\"\"\"\n\t\tif reg.size == \"accum\":\n\t\t\treturn str(float(reg.default)) + \"k\"\n\t\telse:\n\t\t\treturn str(int(reg.default)) + \"L\"\n\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <avr/interrupt.h>\n#include <util/atomic.h>\n#include 
\"protocol.h\"\n#include \"spi.h\"\n\n\"\"\"\n\ts += \"volatile struct comm_data_t Data = {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t.\" + r.name + \" = \" + format_default(r) + \", /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\ts += \"\\n\"\n\t\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void){ /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"\"\"\\t%s v;\n\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tv = Data.%s;\n\t}\n\treturn v;\n}\n\"\"\"%(r.size, r.name)\n\t\ts += \"void set_%s(%s v){ /* %s */\\n\"%(r.name, r.size, r.desc)\n\t\ts += \"\"\"\\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tData.%s = v;\n\t}\n}\n\n\"\"\"%(r.name)\n\ts += \"\"\"ISR(SPI0_STC_vect){\n\tuint8_t reg_num = SPDR0;\n\tswitch(reg_num){\n\"\"\"\n\t\n\tfor r in protocol:\n\t\tif r.write:\n\t\t\ts += \"\\t\\tcase % 2d: /* Write %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_rx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\t\tif r.read:\n\t\t\ts += \"\\t\\tcase 0x80 + % 2d: /* Read %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_tx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\ts += \"\"\"\t}\n\n\t/* Clear SPIF flag */\n\treg_num = SPSR0;\n\treg_num = SPDR0;\n}\n\"\"\"\t\n\treturn s", "def compile_run(\n path,\n host,\n params={}\n ):\n\n compiled_path = MyCLI.compile(path)\n MyCLI.run(compiled_path, host, params)", "def build(which):\n return subprocess.Popen([p['paths']['w2l'],'obj',os.path.abspath(which)]).wait()", "def compile_target(target_obj, search_paths, compile_path, ref_controller, globals_cached=None, **kwargs):\n start = time.time()\n compile_objs = target_obj[\"compile\"]\n ext_vars = target_obj[\"vars\"]\n target_name = ext_vars[\"target\"]\n\n if globals_cached:\n cached.from_dict(globals_cached)\n\n use_go_jsonnet = kwargs.get(\"use_go_jsonnet\", False)\n if use_go_jsonnet:\n logger.debug(\"Using go-jsonnet over jsonnet\")\n\n for comp_obj in compile_objs:\n input_type = comp_obj[\"input_type\"]\n output_path = comp_obj[\"output_path\"]\n input_params = comp_obj.setdefault(\"input_params\", {})\n\n if input_type == \"jinja2\":\n input_compiler = Jinja2(compile_path, search_paths, ref_controller, comp_obj)\n elif input_type == \"jsonnet\":\n input_compiler = Jsonnet(compile_path, search_paths, ref_controller, use_go=use_go_jsonnet)\n elif input_type == \"kadet\":\n input_compiler = Kadet(compile_path, search_paths, ref_controller, input_params=input_params)\n elif input_type == \"helm\":\n input_compiler = Helm(compile_path, search_paths, ref_controller, comp_obj)\n elif input_type == \"copy\":\n ignore_missing = comp_obj.get(\"ignore_missing\", False)\n input_compiler = Copy(compile_path, search_paths, ref_controller, ignore_missing)\n elif input_type == \"remove\":\n input_compiler = Remove(compile_path, search_paths, ref_controller)\n elif input_type == \"external\":\n input_compiler = External(compile_path, search_paths, ref_controller)\n if \"args\" in comp_obj:\n input_compiler.set_args(comp_obj[\"args\"])\n if \"env_vars\" in comp_obj:\n input_compiler.set_env_vars(comp_obj[\"env_vars\"])\n else:\n err_msg = 'Invalid input_type: \"{}\". 
Supported input_types: jsonnet, jinja2, kadet, helm, copy, remove, external'\n raise CompileError(err_msg.format(input_type))\n\n input_compiler.make_compile_dirs(target_name, output_path, **kwargs)\n input_compiler.compile_obj(comp_obj, ext_vars, **kwargs)\n\n logger.info(\"Compiled %s (%.2fs)\", target_obj[\"target_full_path\"], time.time() - start)", "def compile(self):\n self.model = compile_model(self.model_filename, include_paths = self.include_paths)\n self.simulation = compile_model(self.sim_filename, include_paths = self.include_paths)", "def _get_backbone_model_from_file(filepath, in_chans, num_classes):\n sys.path.append('{}'.format(dirname(filepath)))\n class_name = basename(filepath).split('.')[0]\n exec('from {} import {}'.format(*[class_name]*2))\n return eval('{}(in_chans={}, num_classes={})'.format(class_name, in_chans, num_classes))", "def setup_java_class(content_to_add):\n template = \"\"\"\npublic class Lambda {\n\n public static void main(String args[]) {\n %s\n }\n}\n \"\"\"\n return template % content_to_add", "def build(self, gyp_file, target=None, **kw):\n raise NotImplementedError", "def makefile(self, *args, **kwargs):\n return self._file", "def test_execute_with_single_file_builds(self):\n review, review_file = self.run_tool_execute(\n checkout_dir=self.checkout_dir,\n filename='Hello.java',\n file_contents=(\n b'class Hello {\\n'\n b' int test() {\\n'\n b' String s = null;\\n'\n b' return s.length();\\n'\n b' }\\n'\n b'}\\n'\n ),\n tool_settings={\n 'build_type': 'javac',\n })\n\n self.assertEqual(review.comments, [\n {\n 'filediff_id': review_file.id,\n 'first_line': 4,\n 'issue_opened': True,\n 'num_lines': 1,\n 'rich_text': True,\n 'text': (\n 'object `s` last assigned on line 3 could be null and '\n 'is dereferenced at line 4.\\n'\n '\\n'\n 'Severity: ERROR\\n'\n 'Error code: Null Dereference'\n ),\n },\n ])\n self.assertEqual(review.general_comments, [])\n\n self.assertSpyCalledWith(\n execute,\n [\n self.tool_exe_path,\n 'run',\n '--no-progress-bar',\n '--',\n 'javac',\n 'Hello.java',\n ],\n ignore_errors=True,\n with_errors=True)", "def test_class_definition(self):\n self.script(\"# script.py\\n\"\n \"class C():\\n\"\n \" 'cdoc'\\n\"\n \" pass\\n\")\n self.compile()\n\n script = self.find_code_component(name=\"script.py\")\n class_def = self.find_code_component(name=\"C\")\n\n self.assertEqual(class_def.type, \"class_def\")\n self.assertEqual(class_def.mode, \"w\")\n self.assertEqual(class_def.first_char_line, 2)\n self.assertEqual(class_def.first_char_column, 0)\n self.assertEqual(class_def.last_char_line, 4)\n self.assertEqual(class_def.last_char_column, 8)\n self.assertEqual(class_def.container_id, script.id)\n\n class_def_block = self.metascript.code_blocks_store[class_def.id]\n self.assertEqual(class_def_block.code,\n \"class C():\\n\"\n \" 'cdoc'\\n\"\n \" pass\")\n self.assertEqual(class_def_block.docstring, \"cdoc\")\n self.assertTrue(bool(class_def_block.code_hash))", "def do_something(compiler):\n\n # Might need multiple files here. 
would be nice.\n files = AS[\"args\"][\"Files\"]\n\n if len(files):\n for f in files:\n logger.info(\"Loading: %s\" % f)\n compiler.compile_file(f)\n\n # Run the repl.\n if AS[\"args\"][\"repl\"]:\n repl(compiler, prompt=get_in(AS, [\"args\", \"prompt\"]))", "def GetClassConstructor(filename):\n ode_name = GetOdeSystemName(filename)\n\n class_constr_str = (ode_name + \"::~\" + ode_name + \"()\\n\"\n \"{\\n\" \n \"}\\n\\n\")\n\n return class_constr_str", "def __init__(\n self,\n source_dir: str,\n model_config: str\n ):\n # Instantiate all attributes and methods\n self.source_dir = None\n \"\"\"pathlib.Path: pathlib.Path object for source code directory.\"\"\"\n self.model_config = None\n \"\"\"str: String indicating model configuration for compile options, must be one of 'NWM', \n 'Gridded', or 'Reach'.\"\"\"\n self.hydro_namelists = dict()\n \"\"\"dict: Master dictionary of all hydro.namelists stored with the source code.\"\"\"\n self.hrldas_namelists = dict()\n \"\"\"dict: Master dictionary of all namelist.hrldas stored with the source code.\"\"\"\n self.compile_options = dict()\n \"\"\"dict: Compile-time options. Defaults are loaded from json file stored with source \n code.\"\"\"\n self.git_hash = None\n self.version = None\n \"\"\"str: Source code version from .version file stored with the source code.\"\"\"\n self.compile_dir = None\n \"\"\"pathlib.Path: pathlib.Path object pointing to the compile directory.\"\"\"\n self.compile_dir = None\n \"\"\"pathlib.Path: pathlib.Path object pointing to the compile directory.\"\"\"\n self.compiler = None\n \"\"\"str: The compiler chosen at compile time.\"\"\"\n self.configure_log = None\n \"\"\"CompletedProcess: The subprocess object generated at configure.\"\"\"\n self.compile_log = None\n \"\"\"CompletedProcess: The subprocess object generated at compile.\"\"\"\n self.object_id = None\n \"\"\"str: A unique id to join object to compile directory.\"\"\"\n self.table_files = list()\n \"\"\"list: pathlib.Paths to *.TBL files generated at compile-time.\"\"\"\n self.wrf_hydro_exe = None\n \"\"\"pathlib.Path: pathlib.Path to wrf_hydro.exe file generated at compile-time.\"\"\"\n\n # Set attributes\n ## Setup directory paths\n self.source_dir = pathlib.Path(source_dir).absolute()\n\n ## Load master namelists\n self.hydro_namelists = \\\n json.load(open(self.source_dir.joinpath('hydro_namelists.json')))\n\n self.hrldas_namelists = \\\n json.load(open(self.source_dir.joinpath('hrldas_namelists.json')))\n\n ## Get code version\n with open(self.source_dir.joinpath('.version')) as f:\n self.version = f.read()\n\n ## Load compile options\n self.model_config = model_config\n compile_options = json.load(open(self.source_dir.joinpath('compile_options.json')))\n self.compile_options = compile_options[self.version][self.model_config]", "def createInstanceHeader(pcol, path, originalFilename, nr_robots):\n\n needsWildcardExpansion = False\n\n with open(path, \"w\") as fout:\n fout.write(\"\"\"// vim:filetype=c\n/**\n * @file lulu_instance.h\n * @brief Lulu P colony simulator internal structure corresponding to the P colony defined in '%s'.\n * In this header we define the structure of the Pcolony that will power the simulated robot\n * This file was generated automatically by lulu_c.py on %s\n * @author Andrei G. 
Florea\n * @author Catalin Buiu\n * @date 2016-02-29\n */\n#ifndef LULU_INSTANCE_H\n#define LULU_INSTANCE_H\n\n#include \"lulu.h\" \"\"\" % (originalFilename, time.strftime(\"%d %h %Y at %H:%M\")))\n\n fout.write(\"\\nenum objects {\")\n # extend wildcard objects to _0, _1, ... _n where n = nr_robots\n for a in pcol.A[:]:\n # both $ and $id wildcards need extended objects\n if (\"_W_ALL\" in a or \"_W_ID\" in a):\n needsWildcardExpansion = True\n logging.debug(\"Extending %s wildcarded object\" % a)\n # construct extended object list\n extension = [a.replace(\"W_ID\", \"%d\" % i).replace(\"W_ALL\", \"%d\" % i) for i in range(nr_robots)]\n # if this extension has not been previously added\n if (not set(extension).issubset(set(pcol.A))):\n #add the extetendet object list to the alphabet\n pcol.A.extend(extension)\n\n # sort objects naturally\n pcol.A = natsort.natsorted(pcol.A, key=lambda x: x.replace('_W_ID', '/').replace('_W_ALL', '.'))\n for i, obj in enumerate(pcol.A):\n if (obj in ['e', 'f']):\n continue; # they are already defined in lulu.h\n if (i == 0):\n # NO_OBJECT = 0, OBJECT_ID_E = 1, OBJECT_ID_F = 2\n fout.write(\"\\n OBJECT_ID_%s = 3,\" % obj.upper());\n else:\n fout.write(\"\\n OBJECT_ID_%s,\" % obj.upper());\n\n fout.write(\"\\n};\")\n\n fout.write(\"\\n\\nenum agents {\")\n for i, agent_name in enumerate(pcol.B):\n fout.write(\"\\n AGENT_%s,\" % agent_name.upper());\n\n fout.write(\"\\n};\")\n\n if (needsWildcardExpansion):\n fout.write(\"\"\"\\n#define NEEDING_WILDCARD_EXPANSION //this ensures that the wildcard expansion code is included\"\"\")\n\n if (\"motion\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_MOTION //this ensures that the code associated with the MOTION agent is included in Lulu_kilobot\")\n if (\"led_rgb\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_LED_RGB //this ensures that the code associated with the LED_RGB agent is included in Lulu_kilobot\")\n if (\"msg_distance\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_MSG_DISTANCE //this ensures that the code associated with the MSG_DISTANCE agent is included in Lulu_kilobot\")\n if (\"timer\" in pcol.B):\n fout.write(\"\\n#define USING_AGENT_TIMER //this ensures that the code associated with the TIMER agent is included in Lulu_kilobot\")\n\n fout.write(\"\\n\")\n if (\"d_all\" in pcol.A):\n fout.write(\"\"\"\\n#define USING_OBJECT_D_ALL //this ensures that the code associated with processing D_ALL objects is included in Lulu_kilobot\"\"\")\n if (\"d_next\" in pcol.A):\n fout.write(\"\"\"\\n#define USING_OBJECT_D_NEXT //this ensures that the code associated with processing D_NEXT objects is included in Lulu_kilobot\"\"\")\n\n # check if using {IN,OUT}_EXTEROCEPTIVE rules (<I=> or <=O>)\n using_in_out_exteroceptive_rules = False\n for agent in pcol.agents.values():\n for program in agent.programs:\n for rule in program:\n if (rule.type == sim.RuleType.in_exteroceptive or rule.type == sim.RuleType.out_exteroceptive or\n rule.alt_type == sim.RuleType.in_exteroceptive or rule.alt_type == sim.RuleType.out_exteroceptive):\n using_in_out_exteroceptive_rules = True\n break;\n if (using_in_out_exteroceptive_rules):\n fout.write(\"\"\"\\n#define USING_IN_OUT_EXTEROCEPTIVE_RULES //this ensures that the code associated with processing IN_EXTEROCEPTIVE (<I=>) or OUT_EXTEROCEPTIVE (<=O>) rules is included in Lulu_kilobot\"\"\")\n\n fout.write(\"\"\"\\n\\n//if building Pcolony simulator for PC\n#ifdef PCOL_SIM\n //define array of names for objects and agents for debug\n extern char* objectNames[];\n extern 
char* agentNames[];\n#endif\n\n/**\n * @brief The smallest kilo_uid from the swarm (is set in instance.c by lulu_c.py)\n */\nextern const uint16_t smallest_robot_uid;\n\n/**\n * @brief The number of robots that make up the swarm (is set in instance.c by lulu_c.py)\n */\nextern const uint16_t nr_swarm_robots;\"\"\");\n\n fout.write(\"\"\"\\n\\n/**\n * @brief Initialises the pcol object and all of it's components\n *\n * @param pcol The P colony that will be initialized\n */\nvoid lulu_init(Pcolony_t *pcol);\n\n/**\n * @brief Destroys the pcol objects and all of it's components\n *\n * @param pcol The P colony that will be destroyed\n */\nvoid lulu_destroy(Pcolony_t *pcol);\n\n#ifdef NEEDING_WILDCARD_EXPANSION\n /**\n * @brief Expands and replaces wildcarded objects with the appropriate objects\n * Objects that end with _W_ID are replaced with _i where i is the the id of the robot, provided with my_id parameter\n *\n * @param pcol The pcolony where the replacements will take place\n * @param my_id The kilo_uid of the robot\n * @return The symbolic id that corresponds to this robot (my_id - smallest_robot_uid)\n */\n uint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id);\n#endif\n#endif\"\"\")", "def __init__(self,\n source_path='./*.py',\n template_path='./docs/templates/*_template.md',\n output_path='./docs/documentation.md',\n ignore=['extra']\n ):\n\n template_files = glob.glob(template_path)\n # filename = t.split('/')[-1]\n self.sources = {os.path.basename(s).split('.')[0]: os.path.normpath(s) for s in glob.glob(source_path) if not any(i in s for i in ignore)}\n self.templates = {os.path.basename(t).split('_')[0]: os.path.normpath(t) for t in template_files}\n self.output_path = output_path\n\n self.template_content = {}\n for k, v in self.templates.items():\n path = v\n with open(path, 'r') as template_file:\n self.template_content[k] = template_file.read()\n\n self.text = ''\n self.classes = []\n self.headers = ['Params', 'Returns', 'Attributes']\n self.hierarchy = [\n 'class',\n 'method',\n 'parameter',\n 'pinfo',\n 'extra'\n ]\n self.tab_length = 6", "def GetClassDefinition(filename, model):\n ode_name = GetOdeSystemName(filename)\n\n ##### EDITED ###################### - in this case, the rules are also variables (as totals of other variables)\n odes_dict = translator.GetOdesDictionary(model)\n rules_dict = translator.GetRulesDictionary(model)\n num_species = len(odes_dict) + len(rules_dict)\n\n class_defn_str = (translator.GetBlockCommentDefinition(0, \"SBML ODE System\", True) + \n ode_name + \"::\" + ode_name + \" (std::vector<double> stateVariables)\\n\" +\n translator.AddTabs(1) + \": AbstractOdeSystem(\" + str(num_species) + \")\\n\" \n \"{\\n\" + \n translator.AddTabs(1) + \"mpSystemInfo.reset(new CellwiseOdeSystemInformation<\" + ode_name + \">);\\n\"\n \"\\n\" + \n translator.AddTabs(1) + \"Init();\\n\\n\" + \n translator.GetDefaultInitialConditionString(model) + \n \"\\n\" + \n translator.AddTabs(1) + \"if (stateVariables != std::vector<double>())\" +\n translator.AddTabs(1) + \"{\\n\" + \n translator.AddTabs(2) + \"SetStateVariables(stateVariables);\\n\" + \n translator.AddTabs(1) + \"}\\n\" + \n \"}\\n\\n\")\n\n return class_defn_str", "def test_to_code(self) -> None:\n class_def = ClassDef(\n name=\"Classy\",\n bases=tuple(),\n decorator_list=[],\n body=[],\n keywords=tuple(),\n identifier_name=None,\n expr=None,\n )\n\n with patch(\"doctrans.source_transformer.version_info\", (3, 9, 0)):\n import doctrans.source_transformer\n\n self.assertEqual(\n 
doctrans.source_transformer.to_code(class_def).rstrip(\"\\n\"),\n \"class Classy:\",\n ) if PY_GTE_3_9 else self.assertRaises(\n AttributeError, lambda: doctrans.source_transformer.to_code(class_def)\n )\n\n with patch(\"doctrans.source_transformer.version_info\", (3, 8, 0)):\n import doctrans.source_transformer\n\n self.assertEqual(\n doctrans.source_transformer.to_code(class_def).rstrip(\"\\n\"),\n \"class Classy:\",\n )", "def dynamic_import_from(source_file: str, class_name: str) -> Any:\n module = importlib.import_module(source_file)\n return getattr(module, class_name)", "def __init__(self, fileName, fpga):\n self.fileHandle = open(fileName + '.tcl', 'a+')\n self.fpga = fpga", "def compile(md_file, execute):\n lines = open(md_file, 'r').readlines()\n blocks = extract_blocks(lines)\n nb = compile_nb(blocks, execute=execute)\n fname = os.path.basename(md_file)\n title = os.path.splitext(fname)[0]\n with open('{}.ipynb'.format(title), 'w') as f:\n write(nb, f)", "def load(source_file):\n return loads(source_file.read())", "def __init__(self, inpt, outpt):\r\n self.tokenizer = JackTokenizer(inpt)\r\n self.symbol_table = SymbolTable()\r\n self.code_writer = VMWriter(outpt)\r\n self.class_name = None\r\n self.label_index = 0\r\n self.compile_class()", "def build(\n base_filename, src, bsp_c3, crt0_asm, march, opt_level, mmap,\n lang='c3', bin_format=None, elf_format=None, code_image='code'):\n list_filename = base_filename + '.html'\n\n with HtmlReportGenerator(open(list_filename, 'w')) as reporter:\n o1 = asm(crt0_asm, march)\n if lang == 'c3':\n srcs = [\n relpath('..', 'librt', 'io.c3'),\n bsp_c3,\n io.StringIO(src)]\n o2 = c3c(\n srcs, [], march, opt_level=opt_level,\n reporter=reporter, debug=True)\n objs = [o1, o2]\n elif lang == 'bf':\n o3 = bfcompile(src, march, reporter=reporter)\n o2 = c3c(\n [bsp_c3], [], march, reporter=reporter)\n objs = [o1, o2, o3]\n elif lang == 'c':\n o2 = c3c(\n [bsp_c3], [], march, reporter=reporter)\n coptions = COptions()\n include_path1 = relpath('..', 'librt', 'libc')\n coptions.add_include_path(include_path1)\n with open(relpath('..', 'librt', 'libc', 'lib.c'), 'r') as f:\n o3 = cc(\n f, march, coptions=coptions,\n reporter=reporter)\n o4 = cc(\n io.StringIO(src), march, coptions=coptions,\n reporter=reporter)\n objs = [o1, o2, o3, o4]\n else:\n raise NotImplementedError('language not implemented')\n obj = link(\n objs, layout=mmap,\n use_runtime=True, reporter=reporter, debug=True)\n\n # Save object:\n obj_file = base_filename + '.oj'\n with open(obj_file, 'w') as f:\n obj.save(f)\n\n if elf_format:\n elf_filename = base_filename + '.' + elf_format\n objcopy(obj, code_image, elf_format, elf_filename)\n\n # Export code image to some format:\n if bin_format:\n sample_filename = base_filename + '.' 
+ bin_format\n objcopy(obj, code_image, bin_format, sample_filename)\n\n return obj", "def load(path):\n agent = ppo.PPOTrainer(config=config, env=\"compiler_gym\")\n agent.restore(path)\n return agent", "def __init__(self, module: ast3.Module, filename: str) -> None:\n self.doc = None\n self.body = module.body\n self.path = filename\n self.name = self.path.split(\"/\")[-1]\n self.functions = [\n Function(x, self) for x in self.body if isinstance(x, ast3.FunctionDef)\n ]\n self.classes = [\n Class(x, self) for x in self.body if isinstance(x, ast3.ClassDef)\n ]\n if isinstance(self.body[0], ast3.Expr):\n # this is most likely a doc string\n self.doc = Doc(self.body[0], Doc.Type.MODULE)", "def setup_class(cls):\n own_dir = os.path.dirname(os.path.abspath(__file__))\n\n hocr_file = \"output.tesseract.hocr\"\n hocr_path = os.path.join(own_dir, \"data\", hocr_file)\n with open(hocr_path) as f:\n hocr_data = f.read()\n\n expected_file = hocr_file.rsplit(\".\", 1)[0] + \".expected.json\"\n expected_path = os.path.join(own_dir, \"data\", expected_file)\n with open(expected_path, encoding=\"utf-8\") as f:\n expected_data = f.read()\n\n cls.document = parser.HOCRParser(hocr_path, is_path=True)\n cls.soup = BeautifulSoup(hocr_data, \"html.parser\")\n cls.expected = json.loads(expected_data)", "def fromFile(cls, filepath):\r\n with open(filepath) as f:\r\n return cls('\\n'.join(f.readlines()))", "def example(self, request):\n testdir = request.getfixturevalue(\"testdir\")\n import sys\n print(testdir, file=sys.stderr)\n p = testdir.makepyfile(\"\")\n p.write(\"class AClass:\\n pass\\n \\n\\n# too many spaces\")\n return p", "def compile_target_file(target_file, search_path, compile_path, **kwargs):\n target_obj = load_target(target_file)\n target_name = target_obj[\"vars\"][\"target\"]\n compile_obj = target_obj[\"compile\"]\n ext_vars = target_obj[\"vars\"]\n\n for obj in compile_obj:\n if obj[\"type\"] == \"jsonnet\":\n compile_file_sp = os.path.join(search_path, obj[\"path\"])\n if os.path.exists(compile_file_sp):\n _compile_path = os.path.join(compile_path, target_name, obj[\"name\"])\n os.makedirs(_compile_path)\n logger.debug(\"Compiling %s\", compile_file_sp)\n compile_jsonnet(compile_file_sp, _compile_path, search_path,\n ext_vars, output=obj[\"output\"], **kwargs)\n else:\n raise IOError(\"Path not found in search_path: %s\" % obj[\"path\"])\n\n if obj[\"type\"] == \"jinja2\":\n compile_path_sp = os.path.join(search_path, obj[\"path\"])\n if os.path.exists(compile_path_sp):\n _compile_path = os.path.join(compile_path, target_name, obj[\"name\"])\n os.makedirs(_compile_path)\n # copy ext_vars to dedicated jinja2 context so we can update it\n ctx = ext_vars.copy()\n ctx[\"inventory\"] = inventory(search_path, target_name)\n ctx[\"inventory_global\"] = inventory(search_path, None)\n compile_jinja2(compile_path_sp, ctx, _compile_path, **kwargs)\n else:\n raise IOError(\"Path not found in search_path: %s\" % obj[\"path\"])\n logger.info(\"Compiled %s\", target_file)", "def parsed(self):\n if not self._parsed:\n\n self._parsed = compile(self.content, self.path, \"exec\")\n\n return self._parsed" ]
[ "0.5830989", "0.55374384", "0.55307627", "0.54860145", "0.54444677", "0.5422501", "0.5409071", "0.5406872", "0.5393358", "0.5390054", "0.5387643", "0.52916986", "0.52400696", "0.5232065", "0.522042", "0.5217083", "0.52124226", "0.52053887", "0.5201384", "0.5178366", "0.5175187", "0.51626575", "0.5162059", "0.5149698", "0.514757", "0.5133447", "0.51303416", "0.51298493", "0.51174104", "0.5113116", "0.5111886", "0.5111336", "0.51051164", "0.5103783", "0.5081586", "0.50810176", "0.5075807", "0.5075807", "0.50748616", "0.5074398", "0.5072561", "0.50714225", "0.50683445", "0.50535494", "0.50528884", "0.5036741", "0.501785", "0.5012006", "0.50103647", "0.49915397", "0.49714178", "0.49649385", "0.4964692", "0.49565664", "0.4954772", "0.49506733", "0.49307105", "0.49307105", "0.49307105", "0.49247503", "0.4924145", "0.4924145", "0.4922897", "0.49206826", "0.49186018", "0.49184716", "0.49156603", "0.49137294", "0.49051186", "0.4894828", "0.4889128", "0.48821673", "0.4876993", "0.4876529", "0.48757648", "0.4874401", "0.4871175", "0.486868", "0.48655272", "0.48492336", "0.48414376", "0.48408544", "0.4840722", "0.4838189", "0.4833936", "0.48249477", "0.48230216", "0.48182788", "0.48136157", "0.4780238", "0.47598907", "0.475879", "0.47539207", "0.4746731", "0.4743871", "0.47430074", "0.47423133", "0.47314566", "0.47288638", "0.47211522" ]
0.7441334
0
Returns the associated driver with some custom settings.
def _get_selenium_browser(navigator, fLOG=noLOG):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", ImportWarning)
        from selenium import webdriver
        from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
    fLOG("[webshot] navigator=", navigator)
    if navigator == "firefox":
        firefox_capabilities = DesiredCapabilities.FIREFOX.copy()
        firefox_capabilities['marionette'] = True
        firefox_capabilities['binary'] = r"C:\Program Files (x86)\Mozilla Firefox\firefox.exe"
        browser = webdriver.Firefox(capabilities=firefox_capabilities)
    elif navigator == "chrome":
        if sys.platform.startswith("win"):
            chromed = where_in_path("chromedriver.exe")
            if chromed is None:
                install_chromedriver(fLOG=fLOG)
                chromed = where_in_path("chromedriver.exe")
                if chromed is None:
                    raise FileNotFoundError(
                        "unable to install 'chromedriver.exe'")
            else:
                fLOG("[_get_selenium_browser] found chromedriver:", chromed)
        else:
            chromed = 'chromedriver'
        start_navi = True
        if start_navi:
            fLOG("[_get_selenium_browser] start", navigator)
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument('--headless')
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--verbose')
            browser = webdriver.Chrome(executable_path=chromed,
                                       chrome_options=chrome_options)
        else:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", ImportWarning)
                import selenium.webdriver.chrome.service as wservice
            fLOG("[_get_selenium_browser] create service")
            service = wservice.Service(chromed)
            fLOG("[_get_selenium_browser] start service")
            service.start()
            fLOG("[_get_selenium_browser] declare remote")
            capabilities = {'chrome.binary': chromed}
            browser = webdriver.Remote(service.service_url, capabilities)
    elif navigator == "ie":
        browser = webdriver.Ie()
    elif navigator == "opera":
        if sys.platform.startswith("win"):
            chromed = where_in_path("operadriver.exe")
            if chromed is None:
                install_operadriver(fLOG=fLOG)
                chromed = where_in_path("operadriver.exe")
                if chromed is None:
                    raise FileNotFoundError(
                        "unable to install operadriver.exe")
            else:
                fLOG("[_get_selenium_browser] found chromedriver:", chromed)
        else:
            chromed = 'operadriver'
        browser = webdriver.Opera(chromed)  # pylint: disable=E1101
    elif navigator == "edge":
        browser = webdriver.Edge()
    else:
        raise RuntimeError(
            f"unable to interpret the navigator '{navigator}'")
    fLOG("[_get_selenium_browser] navigator is started")
    return browser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_driver(drv):\n return GenericDriver.get_driver(drv)", "def get_driver(self):\n\t\treturn self.driver", "def get_driver(self):\n return self.driver", "def driver(self):\n \n return self.__driver", "def _get_driver(self, driver_name):\n driver = lb_const.SERVICE_TYPE + driver_name\n return self.drivers[driver]", "def get_driver(self, **kwargs) -> Driver:\n from squirrel.framework.plugins.plugin_manager import squirrel_plugin_manager\n\n plugins: list[list[type[Driver]]] = squirrel_plugin_manager.hook.squirrel_drivers()\n for plugin in plugins:\n for driver_cls in plugin:\n if driver_cls.name == self.driver_name:\n # Problem: If users provide \"storage_options\" in the `kwargs` and the `self.driver_kwargs`\n # already defines \"storage_options\", then vanilla dict merging\n # (i.e., {**self.driver_kwargs, **kwargs}) will overwrite the \"storage_options\" in\n # `self.driver_kwargs` entirely. This is undesired, since important information like\n # bucket configurations (e.g., \"requester_pays\") may be stored in the `self.driver_kwargs`\n # \"storage_options\", which users don't want to provide again using `kwargs`.\n # Solution: The below mechanism merges the \"storage_options\" in `kwargs` with the existing\n # \"storage_options\" in `self.driver_kwargs` (while the newly passed \"storage_options\"\n # in `kwargs` take precendence).\n kwargs[\"storage_options\"] = {\n **self.driver_kwargs.get(\"storage_options\", {}),\n **kwargs.get(\"storage_options\", {}),\n }\n return driver_cls(catalog=self._catalog, **{**self.driver_kwargs, **kwargs})\n\n raise ValueError(f\"driver {self.driver_name} not found\")", "def _getDriver(self):\n if not hasattr(self, '_driver'):\n with self._getDatasetLock:\n if not self.dataset or not self.dataset.GetDriver():\n self._driver = None\n else:\n self._driver = self.dataset.GetDriver().ShortName\n return self._driver", "def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']", "def get_driver(driver_name):\n try:\n o = drivers[driver_name]\n if type(o) == str:\n exec 'd = %s()' % o\n else:\n d = o()\n return d\n except KeyError:\n raise ValueError('Unknown driver name: \"{0}\"'.format(driver_name))", "def driver(self):\r\n ext = self.extensions[0]\r\n return ext.obj if ext.obj else ext.plugin", "def driver(self):\n\n if not self._driver_cache:\n self._driver_cache = self._driver(self)\n\n return self._driver_cache", "def driver(self) -> GraphDatabase.driver:\n raise NotImplementedError\n # if not self._driver:\n # self._driver = GraphDatabase.driver(\n # self.url,\n # auth=(self.username, self.password),\n # )\n #\n # return self._driver", "def driver(self) -> 'outputs.CSIVXFlexOSSpecDriver':\n return pulumi.get(self, \"driver\")", "def get_driver(secret_key=config.DEFAULT_SECRET_KEY, userid=config.DEFAULT_USERID,\n provider=config.DEFAULT_PROVIDER):\n\n if hasattr(config, 'get_driver'):\n logger.debug('get_driver %s' % config.get_driver)\n return config.get_driver()\n else:\n logger.debug('get_driver {0}@{1}'.format(userid, provider))\n return libcloud.compute.providers.get_driver(\n config.PROVIDERS[provider])(userid, secret_key)", "def _get_driver():\n return etcd_driver.get_driver()", "def _getDriver( client ):\n\ttry:\n\t\tdatabaseSchema = client.schema.findParent( requiredType = dbschema.DatabaseSchema )\n\texcept NameError:\n\t\tpass\n\telse:\n\t\tif (\n\t\t\tdatabaseSchema and\n\t\t\thasattr( databaseSchema, 'driver') and\n\t\t\tdatabaseSchema.driver\n\t\t):\n\t\t\treturn databaseSchema.driver", "def 
_get_driver_from_dsn(self, dsn):\n\n return dsn.split(':')[0]", "def _get_driver(\n cls, platform: str, variant: Optional[str]\n ) -> Tuple[Union[Type[NetworkDriver], Type[GenericDriver]], Dict[str, Any]]:\n additional_kwargs: Dict[str, Any] = {}\n final_driver: Union[Type[GenericDriver], Type[NetworkDriver]]\n\n if platform in cls.CORE_PLATFORM_MAP:\n final_driver = cls.CORE_PLATFORM_MAP[platform]\n msg = f\"Driver '{final_driver}' selected from scrapli core drivers\"\n else:\n final_driver, additional_kwargs = cls._get_community_driver(\n community_platform_name=platform, variant=variant\n )\n msg = (\n f\"Driver '{final_driver}' selected from scrapli community platforms, with the \"\n f\"following platform arguments: '{additional_kwargs}'\"\n )\n\n logger.info(msg)\n return final_driver, additional_kwargs", "def driver(self):\n from dallinger.config import get_config\n\n config = get_config()\n if not config.ready:\n config.load()\n driver_url = config.get(\"webdriver_url\", None)\n driver_type = config.get(\"webdriver_type\")\n driver = None\n\n if driver_url:\n capabilities = CAPABILITY_MAP.get(driver_type.lower())\n if capabilities is None:\n raise ValueError(\n \"Unsupported remote webdriver_type: {}\".format(driver_type)\n )\n driver = webdriver.Remote(\n desired_capabilities=capabilities, command_executor=driver_url\n )\n else:\n driver_class = DRIVER_MAP.get(driver_type.lower())\n if driver_class is not None:\n kwargs = {}\n if driver_type.lower() == \"chrome_headless\":\n from selenium.webdriver.chrome.options import Options\n\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n kwargs = {\"options\": chrome_options}\n driver = driver_class(**kwargs)\n\n if driver is None:\n raise ValueError(\"Unsupported webdriver_type: {}\".format(driver_type))\n\n driver.set_window_size(1024, 768)\n logger.info(\"Created {} webdriver.\".format(driver_type))\n return driver", "def driver(self) -> 'outputs.CSIPowerMaxSpecDriver':\n return pulumi.get(self, \"driver\")", "def driver(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"driver\")", "def driver(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"driver\")", "def get_driver(self, shard_id):\n\n try:\n return self._drivers[shard_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[shard_id] = self._init_driver(shard_id)\n\n return self._drivers[shard_id]", "def choose_driver(is_remote, t_browser):\n if is_remote:\n return remote_driver(t_browser)\n return custom_driver(t_browser)", "def get_driver(self, pool_id, pool_conf=None):\n\n try:\n return self._drivers[pool_id]\n except KeyError:\n # NOTE(cpp-cabrera): cache storage driver connection\n self._drivers[pool_id] = self._init_driver(pool_id, pool_conf)\n\n return self._drivers[pool_id]", "def __setup_driver(driver_type: str) -> webdriver:\n if driver_type == \"chrome\":\n return __setup_chrome()\n if driver_type == \"edge\":\n return __setup_edge()\n if driver_type == \"safari\":\n return __setup_safari()\n if driver_type == \"firefox\":\n return __setup_firefox()", "def find_driver_class(self, scheme_or_url: str) -> Optional[Type[Driver]]:\n index = scheme_or_url.find(\":\")\n if index > 0:\n scheme = scheme_or_url[0:index]\n else:\n scheme = scheme_or_url\n\n return self.drivers.get(scheme.lower())", "def get_driver(driver_name):\n if driver_name == 'sqlite3':\n import sqlite3 as db_driver\n elif driver_name == 'cx_Oracle':\n import cx_Oracle as db_driver\n elif driver_name == 'pyodbc':\n import 
pyodbc as db_driver\n elif driver_name == 'pypyodbc':\n import pypyodbc as db_driver\n elif driver_name == 'psycopg2':\n import psycopg2 as db_driver\n elif driver_name == 'PyMySql':\n import PyMySql as db_driver\n elif driver_name == 'pymssql':\n import pymssql as db_driver\n else:\n # TODO: pick a better exception type and message\n raise ImportError\n return db_driver", "def get_driver_with_options():\n #options = get_driver_options()\n #return webdriver.Chrome(options=options)\n return webdriver.Chrome()", "def driver_from_file(input_file):\n file_ext = os.path.splitext(input_file)[1].split(\".\")[1]\n try:\n driver = _file_ext_to_driver()[file_ext]\n except KeyError:\n raise errors.MapcheteDriverError(\n \"no driver could be found for file extension %s\" % file_ext)\n if len(driver) == 1:\n return driver[0]\n else:\n raise errors.MapcheteDriverError(\n \"error determining read driver from file %s\" % input_file)", "def get_driver():\n # user_agent = user_agent_rotator.get_random_user_agent()\n try:\n if len(sys.argv) > 1:\n if Driver._driver is None:\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--profile-directory=Default')\n chrome_options.add_argument(\"--incognito\")\n chrome_options.add_argument(\"--disable-plugins-discovery\")\n chrome_options.add_argument(\"--start-maximized\")\n chrome_options.add_argument(f'--user-agent={USER_AGENT}')\n Driver._driver = webdriver.Chrome(executable_path=sys.argv[1], chrome_options=chrome_options)\n logging.info(f\"Driver loaded succesfully from {sys.argv[1]}\")\n return Driver._driver\n else:\n logging.error(f\"Driver cannot be found. User entered: {sys.argv[1]}\")\n raise Exception(\"Please provide a path to the chromedriver\")\n\n except Exception as e:\n logging.error(f\"Driver cannot be found. User entered: {sys.argv[1]}\")\n raise FileNotFoundError(\"Could not execute webdriver. 
Make sure you provided the correct path to the \"\n \"chromedriver\", e)", "def driver(self) -> 'outputs.CSIUnitySpecDriver':\n return pulumi.get(self, \"driver\")", "def driver_name(self):\n return self._driver_name", "def get_boot_driver(self):\n return self._boot_driver", "def make(self, key):\n if key in self.drivers:\n self.driver = self.app.resolve(self.drivers[key])\n return self.driver\n\n raise DriverNotFound(\"Could not find the driver {}\".format(key))", "def _get_driver(\n platform: str, variant: Optional[str], _async: bool = False\n) -> Tuple[Union[Type[NetworkDriver], Type[GenericDriver]], Dict[str, Any]]:\n additional_kwargs: Dict[str, Any] = {}\n\n if platform in SYNC_CORE_PLATFORM_MAP:\n if _async is False:\n final_driver = SYNC_CORE_PLATFORM_MAP[platform]\n else:\n final_driver = ASYNC_CORE_PLATFORM_MAP[platform]\n msg = f\"Driver `{final_driver}` selected from scrapli core drivers\"\n else:\n final_driver, additional_kwargs = _get_community_driver(\n community_platform_name=platform, variant=variant, _async=_async\n )\n msg = (\n f\"Driver `{final_driver}` selected from scrapli community platforms, with the following\"\n f\" platform arguments: `{additional_kwargs}`\"\n )\n\n LOG.info(msg)\n return final_driver, additional_kwargs", "def _load_driver(backend, **kargs):\n bk_module = importlib.import_module('backend', __package__)\n driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')\n return driver_cls(**kargs)", "def _get_driver(\n cls, platform: str, variant: Optional[str]\n ) -> Tuple[Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]], Dict[str, Any]]:\n additional_kwargs: Dict[str, Any] = {}\n final_driver: Union[Type[AsyncGenericDriver], Type[AsyncNetworkDriver]]\n\n if platform in cls.CORE_PLATFORM_MAP:\n final_driver = cls.CORE_PLATFORM_MAP[platform]\n msg = f\"Driver '{final_driver}' selected from scrapli core drivers\"\n else:\n final_driver, additional_kwargs = cls._get_community_driver(\n community_platform_name=platform, variant=variant\n )\n msg = (\n f\"Driver '{final_driver}' selected from scrapli community platforms, with the \"\n f\"following platform arguments: '{additional_kwargs}'\"\n )\n\n logger.info(msg)\n return final_driver, additional_kwargs", "def _create_driver(self, config):\n raise NotImplementedError(\"Must override WebAccess::_create_driver.\")", "def _init_driver(self, shard_id):\n shard = self._shards_ctrl.get(shard_id, detailed=True)\n conf = utils.dynamic_conf(shard['uri'], shard['options'])\n return utils.load_storage_driver(conf, self._cache)", "def get_libcloud_driver():\n cls = get_driver(Provider.EC2)\n driver = cls(ACCESS_KEY, SECRET_KEY, region=REGION)\n return driver", "def _instantiate_driver(self) -> webdriver:\n\n if self.driver is None: return Browser.run_chromedriver()\n\n return self.driver", "def DRIVER():\n return \"podman\"", "def get_driver(url='neo4j', neo4j_auth='neo4j/neo4j'):\n from neo4j import GraphDatabase\n\n auth_parts = neo4j_auth.split('/')\n if len(auth_parts) == 2:\n driver = GraphDatabase.driver('bolt://' + url + ':7687',\n auth=(auth_parts[0], auth_parts[1]))\n else:\n driver = GraphDatabase.driver('bolt://' + url + ':7687')\n\n return driver", "def get_instance(driver_info: DriverInfo) -> webdriver:\n if driver_info.get_driver_type() == \"chrome\":\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n return webdriver.Chrome(\n executable_path=os.path.join(\n driver_info.get_driver_path(),\n \"chromedriver\"\n ),\n chrome_options=options\n )", "def 
GetWPADriver(self):\n return str(self.wifi.wpa_driver)", "def aisappium_get_driver_instance(self):\n return self._current_application()", "def access_db(self):\n try:\n driver = GraphDatabase.driver(self.url, auth=(self.username, self.password))\n except Exception:\n raise ConnectionError\n return driver", "def _load_driver_module(self):\n driver = get_dbapi_module(self.driver_module)\n exceptions.register(driver.DatabaseError)\n return driver", "def driver_from_extension(file_extension: str) -> str:\n file_extension = file_extension.lstrip(\".\")\n all_drivers_extensions = {}\n for v in drivers:\n driver = v.load()\n try:\n driver_extensions = driver.METADATA.get(\"file_extensions\", []).copy()\n all_drivers_extensions[driver.METADATA[\"driver_name\"]] = driver_extensions\n if driver_extensions and file_extension in driver_extensions:\n return driver.METADATA[\"driver_name\"]\n except AttributeError: # pragma: no cover\n pass\n else:\n raise ValueError(\n f\"driver name for file extension {file_extension} could not be found: {all_drivers_extensions}\"\n )", "def driver(self) -> 'outputs.CSIIsilonSpecDriver':\n return pulumi.get(self, \"driver\")", "def disk_driver(self, disk_id):\n try:\n driver = self.disk(disk_id).find(\"DRIVER\").text\n except AttributeError:\n return None", "def getDrivers(self):\n return [self.driver]", "def get_backend():\n return __SETTINGS__._BACKEND", "def get_driver(self, driver_id):\n \n # TODO add df_feature information\n return Driver(self.trajectories, self.grouped_df.get_group(driver_id), None)", "def configure_driver(self, config):\n raise NotImplementedError", "def _get_driver_class(\n platform_details: Dict[str, Any], variant: Optional[str], _async: bool = False\n) -> Union[\n Type[AsyncNetworkDriver], Type[AsyncGenericDriver], Type[NetworkDriver], Type[GenericDriver]\n]:\n if variant and platform_details[\"variants\"][variant].get(\"driver_type\"):\n variant_final_driver: Union[\n Type[AsyncNetworkDriver],\n Type[AsyncGenericDriver],\n Type[NetworkDriver],\n Type[GenericDriver],\n ]\n variant_driver_data = platform_details[\"variants\"][variant].pop(\"driver_type\")\n if _async is False:\n variant_final_driver = variant_driver_data[\"sync\"]\n else:\n variant_final_driver = variant_driver_data[\"async\"]\n return variant_final_driver\n\n if isinstance(platform_details[\"driver_type\"], str):\n driver_type = platform_details[\"driver_type\"]\n if _async is False:\n standard_final_driver = SYNC_DRIVER_MAP.get(driver_type, None)\n else:\n standard_final_driver = ASYNC_DRIVER_MAP.get(driver_type, None)\n if standard_final_driver:\n return standard_final_driver\n\n custom_base_final_driver: Union[\n Type[AsyncNetworkDriver],\n Type[AsyncGenericDriver],\n Type[NetworkDriver],\n Type[GenericDriver],\n ]\n if _async is False:\n custom_base_final_driver = platform_details[\"driver_type\"][\"sync\"]\n else:\n custom_base_final_driver = platform_details[\"driver_type\"][\"async\"]\n return custom_base_final_driver", "def get_storage_backend(self):\n return self.client.info()['Driver']", "def driver(self):\n driver = c_int()\n ckresult(_dll.FMOD_System_GetDriver(self._ptr, byref(driver)))\n return driver.value", "def get_driver(_os, _browser):\n if _os == \"mac\":\n if _browser == \"chrome\":\n return webdriver.Chrome(executable_path=\"./Binary/mac/chromedriver\")\n elif _browser == \"firefox\":\n return webdriver.Firefox(executable_path=\"./Binary/mac/geckodriver\")\n elif _browser == \"opera\":\n return 
webdriver.Opera(executable_path=\"./Binary/mac/operadriver\")\n elif _os == \"windows\":\n if _browser == \"chrome\":\n return webdriver.Chrome(executable_path=\"./Binary/windows/chromedriver.exe\")\n elif _browser == \"firefox\":\n return webdriver.Firefox(executable_path=\"./Binary/windows/geckodriver.exe\")\n if _browser == \"opera\":\n return webdriver.Opera(executable_path=\"./Binary/windows/operadriver.exe\")\n elif _os == \"linux\":\n if _browser == \"chrome\":\n return webdriver.Chrome(executable_path=\"/usr/lib/chromium-browser/chromedriver\")\n elif _browser == \"firefox\":\n return webdriver.Firefox(executable_path=\"./Binary/linux/geckodriver\")\n if _browser == \"opera\":\n return webdriver.Opera(executable_path=\"./Binary/linux/operadriver\")", "def set_driver(self, driver):\n self.driver = driver", "def defaultDriver(self):\n return Enums.SQLite3", "def get_instance():\n \"\"\"Add more judgement for selecting more database backend\"\"\"\n return IMPL", "def get_name(cls):\n return DRIVER_NAME", "def get_backend():\n return Connection()", "def get_driver_info(self, aaidee):\n name = create_string_buffer(256)\n guid = GUID()\n system_rate = c_int()\n speaker_mode = c_int()\n channels = c_int()\n ckresult(\n _dll.FMOD_System_GetDriverInfo(\n self._ptr,\n aaidee,\n name,\n 256,\n byref(guid),\n byref(system_rate),\n byref(speaker_mode),\n byref(channels),\n )\n )\n return so(\n name=name.value,\n guid=guid,\n system_rate=system_rate.value,\n speaker_mode=speaker_mode.value,\n speaker_mode_channels=channels.value,\n )", "def _browser(self):\n RemoteConnection.set_timeout(CONNECTION_TIMEOUT)\n\n profile = webdriver.FirefoxProfile()\n preferences = self.config.get('preferences', {})\n for key, value in preferences.items():\n profile.set_preference(key, value)\n\n driver = webdriver.Firefox(profile)\n # Wait for UI events to complete before failing to find an element.\n driver.implicitly_wait(IMPLICIT_TIMEOUT)\n\n return driver", "def prompt_for_driver_settings(driver):\n settings = dict()\n try:\n __import__(driver)\n driver_module = sys.modules[driver]\n loader_function = getattr(driver_module, 'confeditor_loader')\n editor = loader_function()\n settings[driver_module.DRIVER_NAME] = editor.prompt_for_settings()\n except AttributeError:\n pass\n return settings", "def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)", "def get_connection():\n\n return MongoClientManager().client.__getattr__(MONGODB_SETTINGS['db'])", "def add_driver(self, driver):\n drv_cond = SQLBinaryExpr(SQLFuncExpr(self.db_func_map[DB_FUNC_NAME_LOWER],\n COL_NAME_DRIVERS_NAME),\n OP_EQ, SQLLiteral(driver[COL_NAME_DRIVERS_NAME].lower()))\n entries = self.select_generic_data(table_list=[TABLE_NAME_DRIVERS], where=drv_cond)\n if len(entries) <= 0:\n drvid = self._get_next_id(TABLE_NAME_DRIVERS, COL_NAME_DRIVERS_DRIVERID)\n driver[COL_NAME_DRIVERS_DRIVERID] = drvid\n self.add_generic_data(driver, TABLE_NAME_DRIVERS)\n return drvid\n else:\n if self.error_tolerance < ERROR_TOLERANCE_LOW:\n raise AdasDBError(\"Driver '%s' exists already in the catalog.\" % driver[COL_NAME_DRIVERS_NAME])\n else:\n warn(\"Driver '\" + entries[COL_NAME_DRIVERS_NAME] + \"' already exists in the catalog.\")\n if len(entries) == 1:\n return entries[0][COL_NAME_DRIVERS_DRIVERID]\n elif len(entries) > 1:\n tmp = \"Driver'%s' \" % (driver[COL_NAME_DRIVERS_NAME])\n tmp += \"cannot be resolved because it is ambiguous. 
(%s)\" % entries\n raise AdasDBError(tmp)", "def get_configured_provider():\n return config.is_provider_configured(\n __opts__, _get_active_provider_name() or __virtualname__, (\"url\",)\n )", "def new_driver(name=\"chrome\"):\n if not name in DRIVERS:\n raise Exception(\"No driver support for '%s'\" % name)\n return DRIVERS[name]()", "def get_driver(sport=\"football/nfl\", sleep=1.5):\n\n driver = base_driver()\n driver.get(ROOT_URL + \"sports/\" + sport)\n\n if sleep:\n time.sleep(sleep)\n\n return driver", "def _get_backend(args):\n if args.backend == 'gatttool':\n backend = GatttoolBackend\n elif args.backend == 'bluepy':\n backend = BluepyBackend\n elif args.backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(args.backend))\n return backend", "def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict) -> \"DriverPlatformInterface\":\n raise NotImplementedError", "def get_driver(browser):\n\n # Browser name aliases\n chrome = ('chrome', 'google', 'google chrome', 'googlechrome', 'google-chrome', 'google_chrome')\n firefox = ('firefox', 'ff', 'mozilla', 'gecko', 'geckodriver', 'fire fox', 'fire_fox', 'fire-fox')\n opera = ('opera', 'opera gx', 'operagx', 'opera_gx', 'opera-gx')\n explorer = ('explorer', 'ie', 'internet explorer', 'internet-explorer', 'internet_explorer')\n edge = ('edge', 'microsoft edge', 'microsoft_edge', 'microsoft-edge')\n\n # Download browser binaries according to settings.json\n if browser.lower() in chrome:\n return webdriver.Chrome(ChromeDriverManager().install())\n\n elif browser.lower() in firefox:\n return webdriver.Firefox(executable_path=GeckoDriverManager().install())\n\n elif browser.lower() in opera:\n return webdriver.Opera(OperaDriverManager().install())\n\n elif browser.lower() in explorer:\n return webdriver.Ie(IEDriverManager().install())\n\n elif browser.lower() in edge:\n return webdriver.Edge(executable_path=EdgeChromiumDriverManager().install())\n\n else:\n raise RuntimeError('Browser not found {}'.format(browser.lower()))", "def __getattr__(self, item):\n if item != 'driver':\n return getattr(self.driver, item)\n raise KeyError(item)", "def get_nic_driver(pci_id):\n driverlist = dict(zip(NICS.values(), DRIVERS.keys()))\n try:\n driver = DRIVERS[driverlist[pci_id]]\n except Exception as e:\n driver = None\n return driver", "def _get_driver_class(\n cls, platform_details: Dict[str, Any], variant: Optional[str]\n ) -> Union[Type[NetworkDriver], Type[GenericDriver]]:\n final_driver: Union[\n Type[NetworkDriver],\n Type[GenericDriver],\n ]\n\n if variant and platform_details[\"variants\"][variant].get(\"driver_type\"):\n variant_driver_data = platform_details[\"variants\"][variant].pop(\"driver_type\")\n final_driver = variant_driver_data[\"sync\"]\n return final_driver\n\n if isinstance(platform_details[\"driver_type\"], str):\n driver_type = platform_details[\"driver_type\"]\n standard_final_driver = cls.DRIVER_MAP.get(driver_type, None)\n if standard_final_driver:\n return standard_final_driver\n\n final_driver = platform_details[\"driver_type\"][\"sync\"]\n return final_driver", "def get_configured_provider():\n return config.is_provider_configured(\n __opts__,\n _get_active_provider_name() or __virtualname__,\n (\"username\", \"password\", \"datacenter_id\"),\n )", "def driver_id(self):\n return self._driver_id", "def _get_infrastructure_engine():\n\n LOG.debug(\"Infrastructure engine {engine} is loading\".format(\n engine=CONF.infrastructure_engine))\n\n return 
_load_driver('sahara.infrastructure.engine',\n CONF.infrastructure_engine)", "def get_connection():\n return {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('database'),\n 'USER': os.environ.get('user'),\n 'PASSWORD': os.environ.get('password'),\n 'HOST': os.environ.get('host'),\n 'PORT': '5432',\n }\n }", "def wd(self):\n return self.driver", "def get_driver(type='chrome', executable_path=None):\n if type == 'chrome':\n driver = get_chrome_driver(options_list=OPTIONS_LIST, executable_path=executable_path)\n elif type == 'firefox':\n driver = get_firefox_driver(options_list=OPTIONS_LIST, executable_path=executable_path)\n else:\n raise (\"Type must be either 'chrome' or 'firefox'.\")\n\n driver.set_window_size(1920, 1080)\n\n return driver", "def test_get_driver_test_class(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n self.assertIsInstance(locator.get_driver(), TestDriver,\n 'get_driver did not return a test_driver when it was expected to.')\n self.assertNotIsInstance(locator.get_driver(), SimDriver,\n 'get_driver returned a sim_driver when it was expect to return a test_driver')", "def test_get_driver_test_class_usable(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n \n driver = locator.get_driver()\n try:\n driver.get_wind_dir()\n driver.get_sail()\n except Exception:\n self.fail('Could not call get_wind_dir and get_sail on driver from get_driver')", "def get_query_from_driver(driver, wrapper=None):\n if wrapper:\n return driver.query.from_object(\n wrapper,\n ).first().query\n\n return driver.query", "def register_driver(self, driver, alias):\n return self._drivers.register(driver, alias)", "def driver(self):\n # Lazy load\n if self._driver is None:\n logging.debug(\"Initializing driver.\")\n options = selenium.webdriver.FirefoxOptions()\n options.add_argument('--headless')\n self._driver = selenium.webdriver.Firefox(\n options=options,\n service_log_path=\"{}/geckodriver.log\".format(LOG_DIR))\n logging.debug(\"Finished initializing driver.\")\n return self._driver", "def _get_webdriver(self):\n driver = webdriver.Chrome(self.CHROME_DRIVER_PATH, options=self.CHROME_DRIVER_OPTIONS)\n driver.implicitly_wait(1)\n driver.set_window_size(1024, 768)\n\n return driver", "def get_backend_setting(cls, name, default=None):\n backend_settings = get_backend_settings(cls.BACKEND)\n if default is not None:\n return backend_settings.get(name, default)\n else:\n try:\n return backend_settings[name]\n except KeyError:\n raise ImproperlyConfigured(\"getpaid '%s' requires backend '%s' setting\" % (cls.BACKEND, name))", "def get_adapter(self):\n\t\timportlib.import_module('app.adapters.{0}'.format(self.builder.name))\n\n\t\tclasses = inspect.getmembers(\n\t\t\tsys.modules['app.adapters.{0}'.format(self.builder.name)],\n\t\t\tinspect.isclass\n\t\t)\n\n\t\tadapter = next(\n\t\t\tcls_ for cls_ in classes \\\n\t\t\tif hasattr(cls_[1], 'tech') \\\n\t\t\t and cls_[1].tech == self.builder.__class__.tech \\\n\t\t\t and hasattr(cls_[1], 'ctx') \\\n\t\t\t and cls_[1].ctx == self.builder.__class__.ctx\n\t\t)[1]\n\n\t\treturn adapter(self.builder())", "def selenium(self):\n return self.builtin.get_library_instance(\"SeleniumLibrary\")", "def get_browser(self, settings=None):\n browser = Browser(self.get_wsgi_application())\n if settings is not None:\n settings(browser)\n self._browsers.append(browser)\n return browser", "def test_get_driver_sim_class(self):\n global locator, config_paths\n 
locator.load_config(config_paths[1])\n self.assertNotIsInstance(locator.get_driver(), TestDriver,\n 'get_driver returned a test_driver when it was expect to return a sim_driver')\n self.assertIsInstance(locator.get_driver(), SimDriver,\n 'get_driver did not return a sim_driver when it was expected to.')", "def get_driver_id(self, driver_name):\n cond = SQLBinaryExpr(SQLFuncExpr(self.db_func_map[DB_FUNC_NAME_LOWER],\n COL_NAME_DRIVERS_NAME),\n OP_EQ, SQLLiteral(driver_name.lower()))\n entries = self.select_generic_data(select_list=[COL_NAME_DRIVERS_DRIVERID],\n table_list=[TABLE_NAME_DRIVERS],\n where=cond)\n if len(entries) == 1:\n return entries[0][COL_NAME_DRIVERS_DRIVERID]\n elif len(entries) > 1:\n raise AdasDBError(\"Driver '%s' cannot be resolved because it is ambiguous. (%s)\" % (driver_name, entries))\n\n raise AdasDBError(\"No resolution of '%s'. (%s)\" % (driver_name, entries))", "def get_driver_filename(self, os_name=None):\n raise NotImplementedError", "def get_strategy_from_settings():\n path = settings.MITHRIL_STRATEGY\n mod = __import__('.'.join(path.split('.')[:-1]))\n components = path.split('.')\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod", "def driver_load(self, name):\r\n return AbstractServiceManager.service_load(self, name)" ]
[ "0.7748907", "0.7700252", "0.75033057", "0.73619497", "0.6932479", "0.6926911", "0.68707806", "0.6858391", "0.6836347", "0.6701114", "0.65786314", "0.65523374", "0.6457696", "0.6406544", "0.63908494", "0.6353427", "0.6292279", "0.62488717", "0.62426007", "0.6222315", "0.62124884", "0.6180808", "0.61748934", "0.6162856", "0.6160326", "0.6151294", "0.61403847", "0.61067957", "0.60482603", "0.604432", "0.60424125", "0.6040021", "0.60336584", "0.59941036", "0.59719217", "0.59461105", "0.59316224", "0.59279424", "0.59233886", "0.5910027", "0.58830583", "0.5869732", "0.58566266", "0.5831107", "0.5824544", "0.5803442", "0.5800137", "0.57747406", "0.57692945", "0.5768845", "0.5761616", "0.57560843", "0.57362443", "0.57205784", "0.57138157", "0.57091916", "0.57012266", "0.56934696", "0.5664712", "0.5663406", "0.56546414", "0.56345034", "0.5634319", "0.56340134", "0.56073827", "0.5591638", "0.5591062", "0.55402267", "0.5537562", "0.5520339", "0.5515144", "0.5506015", "0.55011284", "0.54943675", "0.5493086", "0.54905754", "0.5483362", "0.5470107", "0.54684645", "0.54568374", "0.54367477", "0.5434055", "0.54325765", "0.5410487", "0.53878146", "0.5385191", "0.53849554", "0.5373898", "0.5366303", "0.5345134", "0.5339392", "0.5338796", "0.5338403", "0.5335629", "0.5324714", "0.53198075", "0.53170013", "0.53112864", "0.52885973", "0.5283412", "0.5275199" ]
0.0
-1
>>> isinstance(lab7_q1(), str)
True
def lab7_q1():
    return """
    YOUR EXPLANATION HERE
    """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def is_str(x):\n return isinstance(x, str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, basestring)", "def is_string(obj):\n return isinstance(obj, basestring)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(obj):\n return isinstance(obj, str)", "def _is_str(item):\n return isinstance(item, str)", "def is_string(document):\r\n return isinstance(document, str)", "def is_str(value):\n return isinstance(value, str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def is_string(value):\n return isinstance(value, (str, bytes))", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(value):\n return isinstance(value, string_types)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def isString(s):\r\n if not isinstance(s, 
util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_str(self, r, rep):\n assert str(r) == rep", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def stringable(self):\n return True", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str(self, string, application):\n assert string == str(application)", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def is_valid(self, qstr):\r\n pass", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def value_type_of_str(str1, type1):\n #FOUND A BETTER FUNCTION, MultiDict\n if issubclass(type1, Collection):\n try:\n # obj = eval(str1)\n obj = ast.literal_eval(str1)\n if isinstance(obj, Collection):\n return obj\n except:\n return type1(str1)\n else:\n try:\n return type1(str1)\n except:\n return type1()", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def 
is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def check_series(s: pd.Series) -> bool:\n\n error_string = (\n \"The input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroSeries) for more information.\"\n )\n\n if not isinstance(s.iloc[0], str) or s.index.nlevels != 1:\n raise TypeError(error_string)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_string():", "def check_statement(self, statement):\n return isinstance(statement, str)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def _str_validator(arg):\n if arg is None or arg is '' or type(arg) != str:\n raise ValueError('Incorrect value: input should be a string')", "def 
does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def test_training_content(self):\n self.assertIsInstance(self.one_off_training.content, str)\n self.assertEqual(self.one_off_training.content, \"1h d'endurance\")", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def is_action_str(string: str) -> bool:", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def string_p(value):\n if type(value) is not str:\n raise Invalid(\"invalid value type {value}\".format(value=value))" ]
[ "0.69384867", "0.6921196", "0.68661743", "0.6843844", "0.68393165", "0.6797242", "0.6774741", "0.67398", "0.6725461", "0.6714078", "0.6644135", "0.6597622", "0.6534236", "0.6527023", "0.6520744", "0.65187854", "0.65055484", "0.6498066", "0.64673746", "0.6463188", "0.64524394", "0.6443579", "0.6414285", "0.63970673", "0.6364012", "0.63613147", "0.63569623", "0.6353186", "0.63526535", "0.6331008", "0.62987405", "0.62885463", "0.62773335", "0.6266793", "0.6226672", "0.622112", "0.6203269", "0.61950046", "0.6182019", "0.61704683", "0.61555237", "0.6138092", "0.61068", "0.6077514", "0.6065137", "0.6053797", "0.60189605", "0.6018361", "0.59859663", "0.59822285", "0.59779507", "0.59598124", "0.59516084", "0.5928724", "0.5921045", "0.5917034", "0.59141415", "0.588793", "0.5863675", "0.5848382", "0.5846776", "0.58281356", "0.5817336", "0.5815031", "0.5814573", "0.5814351", "0.5805", "0.579188", "0.5783944", "0.5750365", "0.5739757", "0.5731726", "0.57272404", "0.57100654", "0.57048506", "0.5704286", "0.5695412", "0.56787455", "0.56784797", "0.567775", "0.567228", "0.5663766", "0.56489795", "0.56436855", "0.5636182", "0.56240696", "0.5623567", "0.5623237", "0.5618856", "0.5618568", "0.5616895", "0.56117886", "0.56069165", "0.56006086", "0.55995077", "0.5596734", "0.5590234", "0.5585116", "0.5582061", "0.55814797", "0.5566014" ]
0.0
-1
>>> isinstance(lab7_q2(), str)
True
def lab7_q2():
    return """
    YOUR EXPLANATION HERE
    """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_str(x):\n return isinstance(x, str)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def is_string(value):\n return isinstance(value, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(obj):\n return isinstance(obj, str)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def _is_str(item):\n return isinstance(item, str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_str(value):\n return isinstance(value, str)", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def is_string(document):\r\n return isinstance(document, str)", "def is_string(value):\n return isinstance(value, (str, bytes))", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def is_string(value):\n return isinstance(value, string_types)", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n 
my_type.find('unicode') < 0", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_str(self, r, rep):\n assert str(r) == rep", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def stringable(self):\n return True", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def is_valid(self, qstr):\r\n pass", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_str(self, string, application):\n assert string == str(application)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def value_type_of_str(str1, type1):\n #FOUND A BETTER FUNCTION, MultiDict\n if issubclass(type1, Collection):\n try:\n # obj = eval(str1)\n obj = ast.literal_eval(str1)\n if isinstance(obj, Collection):\n return obj\n except:\n return type1(str1)\n else:\n try:\n return type1(str1)\n except:\n return type1()", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), 
str(self.wuose))", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_string():", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_X_approximate_distribution_is_str(self):\n\n # Check valid case of \"count\" which is not included in valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = [\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def check_type( string_key ) : \r\n\r\n if 
type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def check_statement(self, statement):\n return isinstance(statement, str)", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def test_hood_str(self):\n hood = Hood({\"warning\": False, \"closed\": True})\n\n string = str(hood)\n assert isinstance(string, str)\n assert string == \"{'warning': False, 'closed': True}\"" ]
[ "0.68337935", "0.67917323", "0.67564535", "0.6700116", "0.6695061", "0.6639795", "0.66282505", "0.659525", "0.6584475", "0.6577788", "0.65511394", "0.64796364", "0.6450336", "0.64198357", "0.63853145", "0.63774383", "0.6376907", "0.63764954", "0.63604254", "0.6353656", "0.6352152", "0.6349954", "0.6344598", "0.63382787", "0.6317277", "0.63081133", "0.6265168", "0.6262456", "0.6246638", "0.62438077", "0.6215744", "0.6212183", "0.6183518", "0.61268973", "0.61084783", "0.6105643", "0.61020577", "0.6087771", "0.6073096", "0.6060183", "0.604345", "0.60430926", "0.60423386", "0.6041285", "0.6035493", "0.6034089", "0.59994876", "0.5962439", "0.5957662", "0.5954475", "0.5943554", "0.5924958", "0.58960515", "0.58859706", "0.5843589", "0.58434546", "0.58369637", "0.583344", "0.58201045", "0.58150697", "0.5776841", "0.577293", "0.5768276", "0.5751513", "0.5743866", "0.5738674", "0.5737333", "0.571344", "0.57054937", "0.570243", "0.56807715", "0.56670445", "0.56604946", "0.5656666", "0.5655514", "0.56548315", "0.56432545", "0.5642863", "0.5640944", "0.56386364", "0.56327575", "0.5601785", "0.55943346", "0.5576254", "0.5573858", "0.55681753", "0.55635834", "0.55596113", "0.5557332", "0.5551559", "0.5543342", "0.5541243", "0.55401504", "0.55353343", "0.5535096", "0.55311465", "0.55235916", "0.55219823", "0.55123866", "0.55114657", "0.5510411" ]
0.0
-1
>>> isinstance(lab7_q3(), str)
True
def lab7_q3():
    return """
    YOUR EXPLANATION HERE
    """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def is_str(x):\n return isinstance(x, str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(obj):\n return isinstance(obj, basestring)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def is_string(value):\n return isinstance(value, basestring)", "def _is_str(item):\n return isinstance(item, str)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(obj):\n return isinstance(obj, str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def is_string(document):\r\n return isinstance(document, str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_str(value):\n return isinstance(value, str)", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 
)'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def is_string(value):\n return isinstance(value, string_types)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. Graule\")", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_valid(self, qstr):\r\n pass", "def test_str(self, r, rep):\n assert str(r) == rep", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def stringable(self):\n return True", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def test_str(self, string, 
application):\n assert string == str(application)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def check_statement(self, statement):\n return isinstance(statement, str)", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def value_type_of_str(str1, 
type1):\n #FOUND A BETTER FUNCTION, MultiDict\n if issubclass(type1, Collection):\n try:\n # obj = eval(str1)\n obj = ast.literal_eval(str1)\n if isinstance(obj, Collection):\n return obj\n except:\n return type1(str1)\n else:\n try:\n return type1(str1)\n except:\n return type1()", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def check_proc_type(image_proc_type):\n\n if isinstance(image_proc_type, str):\n return True\n else:\n logging.warning('Image processing type is not a string')\n print('Please choose only one processing technique.')\n raise ValueError('Please choose only one processing technique.')", "def is_action_str(string: str) -> bool:", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def isstringlike(item):\n ret = 1\n try:\n float(item)\n ret = 0\n except ValueError:\n pass\n return ret", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def test_name3(self):\n new = self.value()\n self.assertEqual(type(new.name), str)", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"" ]
[ "0.6728935", "0.6689395", "0.6678108", "0.6677781", "0.66326827", "0.6617523", "0.660961", "0.6559037", "0.6538614", "0.6513645", "0.65115064", "0.6501745", "0.64532465", "0.63854235", "0.63740855", "0.63691497", "0.6352172", "0.6336235", "0.6310395", "0.63009924", "0.6293086", "0.6275217", "0.62670386", "0.6266259", "0.6256279", "0.6245958", "0.6215468", "0.6211528", "0.6207749", "0.62029314", "0.61844504", "0.6169733", "0.61582625", "0.6154571", "0.611913", "0.61138177", "0.6105698", "0.60688865", "0.604377", "0.60398227", "0.5994003", "0.59836566", "0.59369123", "0.592272", "0.59219515", "0.5884979", "0.5882277", "0.58790547", "0.58700347", "0.5849685", "0.5832641", "0.5830591", "0.5812385", "0.5803675", "0.57714546", "0.5769227", "0.57533526", "0.57438457", "0.57364243", "0.5730007", "0.5719257", "0.5716591", "0.56983286", "0.5697645", "0.5690764", "0.56845003", "0.5684232", "0.56793547", "0.567169", "0.5658346", "0.5658259", "0.5653338", "0.56350815", "0.56226", "0.5607362", "0.56066746", "0.5581262", "0.5571833", "0.5563804", "0.55605793", "0.5557532", "0.5557094", "0.5546725", "0.5536782", "0.5536599", "0.55315036", "0.55183643", "0.5517919", "0.5516206", "0.5503789", "0.5502888", "0.54990923", "0.5498974", "0.5495859", "0.5487039", "0.54781556", "0.5477996", "0.5476783", "0.5475476", "0.5474442", "0.5471056" ]
0.0
-1
>>> isinstance(lab7_q4(), str)
True
def lab7_q4():
    return """
    YOUR EXPLANATION HERE
    """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, basestring)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def is_string(obj):\n return isinstance(obj, str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_str(value):\n return isinstance(value, str)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def _is_str(item):\n return isinstance(item, str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(document):\r\n return isinstance(document, str)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def is_string(value):\n return isinstance(value, string_types)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_string(self,address):\n t=type(address) == str\n assert 
t, \"not a string\"", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def test_str(self, r, rep):\n assert str(r) == rep", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. Graule\")", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def stringable(self):\n return True", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. 
Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def is_valid(self, qstr):\r\n pass", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self, string, application):\n assert string == str(application)", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def is_sequence_of_str(items):\n return all(isinstance(item, 
basestring) for item in items)", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_str4(self):\n with self.assertRaises(TypeError):\n r4 = Rectangle(1, 1, 1, 1, 1, 1, 1, 1)", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def check_statement(self, statement):\n return isinstance(statement, str)", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def test_string():", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def is_str_or_coll(value):\n return bool(is_str(value)) or bool(is_tuple_or_list(value))", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def string_p(value):\n if type(value) is not str:\n raise Invalid(\"invalid value type {value}\".format(value=value))", "def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True", "def test_str(self):\r\n x = self.FWP({'x': 3})\r\n lines = ['FWP parameters:', 't:True', 'Application:None',\r\n 'Algorithm:None', 'Citation:None', 'x:3']\r\n self.assertEqual(str(x), '\\n'.join(lines))", "def 
test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string" ]
[ "0.6849578", "0.6827361", "0.68070114", "0.6767357", "0.6741126", "0.6712229", "0.6685362", "0.6681972", "0.66778773", "0.6619105", "0.6560577", "0.6549617", "0.64978635", "0.64887595", "0.64553833", "0.6445665", "0.64326364", "0.6430888", "0.64252806", "0.64158726", "0.6405644", "0.63907605", "0.6374683", "0.63628757", "0.6350983", "0.63365126", "0.6318022", "0.6312128", "0.6302784", "0.6298406", "0.62576604", "0.6231427", "0.6216048", "0.6181953", "0.61765814", "0.61752266", "0.61746764", "0.6172221", "0.6151165", "0.6124843", "0.611869", "0.6092743", "0.60889363", "0.608543", "0.60339624", "0.6021266", "0.6014103", "0.6008419", "0.60040265", "0.5993178", "0.59913677", "0.5928166", "0.5924508", "0.59072065", "0.58950543", "0.58721066", "0.5865422", "0.58279604", "0.58258504", "0.58251077", "0.58187926", "0.581811", "0.5813704", "0.5813124", "0.58049107", "0.5794509", "0.57699245", "0.5755029", "0.574851", "0.57441217", "0.5732877", "0.57286626", "0.57284915", "0.5711327", "0.569229", "0.56816226", "0.56791985", "0.5678433", "0.5676578", "0.5675187", "0.56729054", "0.5659077", "0.5643158", "0.5623174", "0.5614481", "0.5612526", "0.5612115", "0.5607514", "0.5599786", "0.5596832", "0.5592217", "0.55800545", "0.55762535", "0.5573503", "0.55580014", "0.55517745", "0.55497426", "0.5541767", "0.5539469", "0.5527482", "0.55263305" ]
0.0
-1
>>> isinstance(lab8_q2(), str) True
def lab8_q2(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def is_string(obj):\n return isinstance(obj, basestring)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str(x):\n return isinstance(x, str)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_string(obj):\n return isinstance(obj, str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(value):\n return isinstance(value, string_types)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_string(document):\r\n return isinstance(document, str)", "def is_str(value):\n return isinstance(value, str)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert 
\"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def _is_str(item):\n return isinstance(item, str)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def test_str(self, r, rep):\n assert str(r) == rep", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. 
Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def is_my_type(type_str):\n raise NotImplementedError()", "def stringable(self):\n return True", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def is_valid(self, qstr):\r\n pass", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def test_label_seconde(self):\n self.assertIsInstance(self.address.label_second, str)\n self.assertEqual(self.address.label_second, \"\")", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_str(self):\n q0 = sym.Symbol(\"q0\")\n q2 = sym.Symbol(\"q2\")\n f = (q0 + q2) / np.sqrt(2)\n\n rrt = RegRefTransform(f)\n\n assert rrt.__str__() == \"0.707106781186547*q0 + 0.707106781186547*q2\"", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def testStringInput(self):\n nb.rescale_length(\"2.0\")\n self.assertEqual(2.0, nb.rscale)", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def value_type_of_str(str1, type1):\n #FOUND A BETTER FUNCTION, MultiDict\n if issubclass(type1, Collection):\n try:\n # obj = eval(str1)\n obj = ast.literal_eval(str1)\n if isinstance(obj, Collection):\n return obj\n except:\n return type1(str1)\n else:\n try:\n return type1(str1)\n except:\n return type1()", "def test_str(self, string, application):\n assert string == str(application)", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_X_approximate_distribution_is_str(self):\n\n # Check valid case of \"count\" which is not included in 
valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = [\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_unexpectedType(self):\n self.assertRaises(TypeError, nativeString, 1)", "def is_valid_label(label: Union[str, List[str]]) -> bool:\n if isinstance(label, list):\n for item in label:\n if not isinstance(item, str):\n return False\n return True\n return isinstance(label, str)", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def test_string():" ]
[ "0.69167274", "0.6739174", "0.67315763", "0.6592448", "0.6537371", "0.65342003", "0.64775366", "0.64470917", "0.6442934", "0.6439365", "0.64354664", "0.6411122", "0.63913774", "0.63811743", "0.6379064", "0.6348017", "0.6300297", "0.6297146", "0.6295145", "0.6274249", "0.62652045", "0.62576663", "0.62566346", "0.6245129", "0.6211513", "0.61876637", "0.6173198", "0.6160844", "0.6159182", "0.61534035", "0.61415815", "0.6102877", "0.60967225", "0.6085535", "0.60814065", "0.60617006", "0.6053945", "0.6040353", "0.60390985", "0.6022298", "0.601897", "0.60110426", "0.60096484", "0.60022867", "0.59704053", "0.5969721", "0.5929993", "0.5896661", "0.5882377", "0.58561766", "0.58490527", "0.5848842", "0.5806207", "0.58007413", "0.5797206", "0.57942766", "0.579084", "0.5788963", "0.5784084", "0.57491624", "0.5748637", "0.57362384", "0.5736139", "0.57336277", "0.57221365", "0.5697235", "0.5695593", "0.56669354", "0.56667924", "0.56508595", "0.56411505", "0.56315154", "0.5618677", "0.5608521", "0.56038046", "0.5593778", "0.5588942", "0.5570212", "0.55675673", "0.5561114", "0.5543736", "0.5541332", "0.55339277", "0.5527524", "0.5527509", "0.5521373", "0.5520044", "0.5518805", "0.5512037", "0.5508507", "0.54775554", "0.5477407", "0.5476032", "0.54738116", "0.547039", "0.54534113", "0.54469943", "0.5445199", "0.544298", "0.543333", "0.5432046" ]
0.0
-1
>>> isinstance(lab8_q3(), str) True
def lab8_q3(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_str(x):\n return isinstance(x, str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(value):\n return isinstance(value, basestring)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(obj):\n return isinstance(obj, str)", "def is_string(value):\n return isinstance(value, (str, bytes))", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def check_solution(self, solution):\n return isinstance(solution, str)", "def _is_str(item):\n return isinstance(item, str)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def is_string(document):\r\n return isinstance(document, str)", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(value):\n return isinstance(value, string_types)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_str(value):\n return isinstance(value, str)", "def test_string(self,address):\n t=type(address) == 
str\n assert t, \"not a string\"", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def is_valid(self, qstr):\r\n pass", "def test_str(self, r, rep):\n assert str(r) == rep", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def stringable(self):\n return True", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str(self, string, application):\n assert string == str(application)", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist 
only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def test_bytestr(self):\n dset = self.f.create_dataset('x', (1,), dtype=h5py.string_dtype(encoding='ascii'))\n dset[0] = b\"Hello there!\"\n self.assertEqual(type(dset[0]), bytes)", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def isFloat(string):\n return (True)", "def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_computer_string(self):\n # Computer to use for the network\n comp = \"1q-qvm\"\n\n # Get a network with the computer\n qnn = self.get_test_network(comp)\n\n # Checks\n self.assertEqual(type(qnn.computer), QuantumComputer)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def 
check_proc_type(image_proc_type):\n\n if isinstance(image_proc_type, str):\n return True\n else:\n logging.warning('Image processing type is not a string')\n print('Please choose only one processing technique.')\n raise ValueError('Please choose only one processing technique.')", "def check_series(s: pd.Series) -> bool:\n\n error_string = (\n \"The input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroSeries) for more information.\"\n )\n\n if not isinstance(s.iloc[0], str) or s.index.nlevels != 1:\n raise TypeError(error_string)" ]
[ "0.69046277", "0.66528326", "0.6531326", "0.6531229", "0.6520508", "0.6515218", "0.6508465", "0.6506103", "0.64962703", "0.64860326", "0.647134", "0.6461159", "0.64014983", "0.636057", "0.6318982", "0.63128656", "0.6281687", "0.6279039", "0.626913", "0.6251587", "0.6250445", "0.62187624", "0.6208345", "0.62012726", "0.6183523", "0.61636186", "0.61615324", "0.61571276", "0.6141381", "0.6122345", "0.6122203", "0.6111475", "0.6109311", "0.60996985", "0.6099489", "0.60673845", "0.6042279", "0.6028084", "0.6021783", "0.5994942", "0.5960177", "0.59558725", "0.5937446", "0.59178317", "0.5888427", "0.58670205", "0.5852721", "0.5806558", "0.578937", "0.57858866", "0.57516116", "0.57283485", "0.57235837", "0.57191074", "0.57138175", "0.57121086", "0.57075346", "0.5705017", "0.5701595", "0.56937116", "0.5690483", "0.5680751", "0.56721485", "0.566406", "0.56557274", "0.56460387", "0.56414205", "0.5640649", "0.56388026", "0.5630444", "0.5606669", "0.560401", "0.5598789", "0.55846155", "0.5583991", "0.5582754", "0.5574058", "0.55714214", "0.5551474", "0.5531245", "0.5523456", "0.54958445", "0.5494562", "0.5484324", "0.5482227", "0.547016", "0.54674786", "0.54670405", "0.54664946", "0.5460828", "0.545771", "0.5453327", "0.5452266", "0.5439579", "0.5430493", "0.5418464", "0.5418217", "0.5410887", "0.54066354", "0.54044074", "0.5403962" ]
0.0
-1
>>> isinstance(lab8_q4(), str) True
def lab8_q4(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_string(obj):\n return isinstance(obj, basestring)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str(x):\n return isinstance(x, str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(obj):\n return isinstance(obj, str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def is_string(value):\n return isinstance(value, string_types)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def is_str(value):\n return isinstance(value, str)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def is_string(document):\r\n return isinstance(document, str)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def not_a_string(obj):\n my_type = str(type(obj))\n if 
is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def _is_str(item):\n return isinstance(item, str)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def test_str(self, r, rep):\n assert str(r) == rep", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. 
Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def stringable(self):\n return True", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def is_my_type(type_str):\n raise NotImplementedError()", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def is_valid(self, qstr):\r\n pass", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def test_str4(self):\n with self.assertRaises(TypeError):\n r4 = Rectangle(1, 1, 1, 1, 1, 1, 1, 1)", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str(self, string, application):\n assert string == str(application)", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_bytestr(self):\n dset = self.f.create_dataset('x', (1,), dtype=h5py.string_dtype(encoding='ascii'))\n dset[0] = b\"Hello there!\"\n self.assertEqual(type(dset[0]), bytes)", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_graphid_str():\n id1 = _ir.GraphId(\"g1\")\n assert id1.str() == \"g1\"\n id2 = _ir.GraphId(\"foobar\")\n assert id2.str() == 
\"foobar\"", "def test_vlstring_lit(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt)\n self.assertIsInstance(htype, h5t.TypeOpaqueID)\n self.assertEqual(htype, h5t.PYTHON_OBJECT)", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_str(self):\n # Continuous ROMs\n model = roi._core.InferredContinuousROM(\"A\")\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = Ax(t)\"\n model.modelform = \"cA\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = c + Ax(t)\"\n model.modelform = \"HB\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = H(x(t) ⊗ x(t)) + Bu(t)\"\n model.modelform = \"G\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = G(x(t) ⊗ x(t) ⊗ x(t))\"\n model.modelform = \"cH\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = c + H(x(t) ⊗ x(t))\"\n\n # Discrete ROMs\n model = roi._core.IntrusiveDiscreteROM(\"A\")\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = Ax_{j}\"\n model.modelform = \"cB\"\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = c + Bu_{j}\"\n model.modelform = \"H\"\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = H(x_{j} ⊗ x_{j})\"", "def test_X_approximate_distribution_is_str(self):\n\n # Check valid case of \"count\" which is not included in valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = [\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str(self):\n # Continuous ROMs\n model = roi._core.InterpolatedInferredContinuousROM(\"A\")\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = Ax(t)\"\n model.c_ = lambda t: t\n model.A_ = lambda t: t\n model.modelform = \"cA\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = c(µ) + A(µ)x(t)\"\n model.Hc_ = None\n model.Gc_ = lambda t: t\n model.B_ = None\n model.modelform = \"HB\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = H(x(t) ⊗ x(t)) + Bu(t)\"\n model.modelform = \"G\"\n assert str(model) == \\\n \"Reduced-order model structure: dx / dt = G(µ)(x(t) ⊗ x(t) ⊗ x(t))\"\n\n # Discrete ROMs\n model = roi._core.AffineIntrusiveDiscreteROM(\"cH\")\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = c + H(x_{j} ⊗ x_{j})\"\n model.c_ = lambda t: t\n model.Hc_ = None\n assert str(model) == \\\n \"Reduced-order model structure: x_{j+1} = c(µ) + H(x_{j} ⊗ x_{j})\"", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = 
BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_string():", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string" ]
[ "0.69331145", "0.67646885", "0.669969", "0.66482395", "0.6627652", "0.66142136", "0.6606139", "0.6588437", "0.6559902", "0.65317225", "0.6450003", "0.6448838", "0.6428933", "0.64125264", "0.64066905", "0.63528246", "0.6321325", "0.6315828", "0.63132316", "0.631172", "0.63043916", "0.6299842", "0.6296413", "0.62874496", "0.6282237", "0.6269029", "0.6254019", "0.6235926", "0.62267023", "0.6198726", "0.61945695", "0.618103", "0.6174491", "0.61532867", "0.61320966", "0.6101464", "0.6099212", "0.6092293", "0.60762304", "0.6069043", "0.6030528", "0.59896886", "0.5987907", "0.59847736", "0.5983128", "0.59796095", "0.59744006", "0.5960313", "0.59573185", "0.59295636", "0.5925511", "0.58935577", "0.58822775", "0.586795", "0.5851809", "0.58434635", "0.584003", "0.58373016", "0.58369184", "0.58287644", "0.5824311", "0.58053106", "0.57962096", "0.57764316", "0.57712805", "0.57677996", "0.5758834", "0.5758148", "0.5740548", "0.5739705", "0.5727156", "0.57141066", "0.5688215", "0.56828475", "0.5668645", "0.5632779", "0.5630731", "0.56049263", "0.5601001", "0.5595145", "0.5590691", "0.5587116", "0.55743176", "0.55710846", "0.555765", "0.55473584", "0.5545388", "0.55387276", "0.5531052", "0.55194885", "0.55098933", "0.5493107", "0.5492187", "0.5491084", "0.54906887", "0.5488591", "0.54879355", "0.5484195", "0.5481326", "0.5476884", "0.5471215" ]
0.0
-1
>>> isinstance(lab8_q5(), str) True
def lab8_q5(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_string(value):\n return isinstance(value, basestring)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_string(obj):\n return isinstance(obj, basestring)", "def is_str(x):\n return isinstance(x, str)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, string_types)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(obj):\n return isinstance(obj, str)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def is_str(value):\n return isinstance(value, str)", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def 
test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def is_string(document):\r\n return isinstance(document, str)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def _is_str(item):\n return isinstance(item, str)", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. 
Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self, r, rep):\n assert str(r) == rep", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def stringable(self):\n return True", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str(self, string, application):\n assert string == str(application)", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_bytestr(self):\n dset = self.f.create_dataset('x', (1,), dtype=h5py.string_dtype(encoding='ascii'))\n dset[0] = b\"Hello there!\"\n self.assertEqual(type(dset[0]), bytes)", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test_vlstring_lit(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt)\n self.assertIsInstance(htype, h5t.TypeOpaqueID)\n self.assertEqual(htype, h5t.PYTHON_OBJECT)", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def is_valid(self, qstr):\r\n pass", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_string():", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n 
first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def test_computer_string(self):\n # Computer to use for the network\n comp = \"1q-qvm\"\n\n # Get a network with the computer\n qnn = self.get_test_network(comp)\n\n # Checks\n self.assertEqual(type(qnn.computer), QuantumComputer)", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def check_series(s: pd.Series) -> bool:\n\n error_string = (\n \"The input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroSeries) for more information.\"\n )\n\n if not isinstance(s.iloc[0], str) or s.index.nlevels != 1:\n raise TypeError(error_string)", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)" ]
[ "0.69726", "0.6854784", "0.6776574", "0.67209005", "0.66947055", "0.66858095", "0.6676679", "0.6650898", "0.66201556", "0.66064316", "0.6565904", "0.65590566", "0.6524552", "0.64751035", "0.6468422", "0.6456738", "0.64451706", "0.6415069", "0.6407343", "0.639586", "0.63950634", "0.6383032", "0.637916", "0.6368139", "0.6298485", "0.62887704", "0.62849355", "0.62774163", "0.6270384", "0.62675786", "0.62672305", "0.6255988", "0.6251189", "0.6239149", "0.623268", "0.62157476", "0.6214156", "0.61955774", "0.61931676", "0.6192111", "0.618772", "0.6156484", "0.61391306", "0.61252195", "0.6089721", "0.6071539", "0.60190153", "0.6014804", "0.59727335", "0.59215117", "0.59194475", "0.5886669", "0.5883996", "0.58831394", "0.5877242", "0.5849825", "0.58485687", "0.58343977", "0.58298606", "0.5817812", "0.58111125", "0.5792244", "0.5776127", "0.57710564", "0.5762701", "0.57451737", "0.57421", "0.5739363", "0.57282794", "0.5712972", "0.5704495", "0.5696658", "0.5673809", "0.56618476", "0.56523156", "0.56473464", "0.5639851", "0.5636639", "0.5632723", "0.5628641", "0.5628571", "0.5619805", "0.56163263", "0.56128675", "0.55975866", "0.55975765", "0.5596722", "0.5584884", "0.55751187", "0.55565935", "0.5554104", "0.5548613", "0.55262035", "0.55202985", "0.5514416", "0.55126125", "0.5511348", "0.5509861", "0.55063725", "0.5504504", "0.5494294" ]
0.0
-1
>>> isinstance(lab9_q2(), str)
True
def lab9_q2(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def is_str(x):\n return isinstance(x, str)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, basestring)", "def is_string(obj):\n return isinstance(obj, str)", "def check_solution(self, solution):\n return isinstance(solution, str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def is_str(value):\n return isinstance(value, str)", "def _is_str(item):\n return isinstance(item, str)", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def is_string(document):\r\n return isinstance(document, str)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def is_string(value):\n return isinstance(value, (str, bytes))", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_string(value):\n return isinstance(value, string_types)", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), 
'[Square] (12) 6/2 - 4')", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def test_str(self, r, rep):\n assert str(r) == rep", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def is_my_type(type_str):\n raise NotImplementedError()", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def stringable(self):\n return True", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def is_valid(self, qstr):\r\n pass", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def value_type_of_str(str1, type1):\n #FOUND A BETTER FUNCTION, MultiDict\n if issubclass(type1, Collection):\n try:\n # obj = eval(str1)\n obj = ast.literal_eval(str1)\n if isinstance(obj, Collection):\n return obj\n except:\n return type1(str1)\n else:\n try:\n return type1(str1)\n except:\n return type1()", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_str(self, string, application):\n assert string == str(application)", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n 
self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def _str_validator(arg):\n if arg is None or arg is '' or type(arg) != str:\n raise ValueError('Incorrect value: input should be a string')", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def check_statement(self, statement):\n return isinstance(statement, str)", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def test_string():", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string 
= '13'\n assert string_a == expect_string", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)" ]
[ "0.69377685", "0.680619", "0.67842895", "0.67540807", "0.67217183", "0.6663479", "0.6607859", "0.65969336", "0.6555628", "0.6487519", "0.64803255", "0.6448528", "0.64470345", "0.6439503", "0.6410575", "0.6409048", "0.640582", "0.6395083", "0.63912946", "0.63880926", "0.6378769", "0.63416517", "0.6318737", "0.6315305", "0.63124216", "0.63088936", "0.6277891", "0.62526166", "0.62406164", "0.62314594", "0.62237436", "0.619573", "0.619155", "0.6144755", "0.6137708", "0.6126793", "0.6108195", "0.6099947", "0.6093594", "0.60867476", "0.60702956", "0.6066459", "0.60615224", "0.6060987", "0.60444295", "0.60399634", "0.60340154", "0.59934574", "0.59783274", "0.5953721", "0.5921602", "0.58943194", "0.58874625", "0.5869544", "0.586796", "0.58618367", "0.5853139", "0.5841299", "0.5831566", "0.5829585", "0.5786857", "0.5777177", "0.577229", "0.5763289", "0.57590795", "0.5741271", "0.57350606", "0.57276", "0.5726921", "0.5681698", "0.56719655", "0.56462747", "0.5645215", "0.56402284", "0.56364954", "0.56290966", "0.56278545", "0.5627603", "0.56272686", "0.56181526", "0.56006265", "0.559503", "0.5590467", "0.558873", "0.55876595", "0.5584793", "0.55798787", "0.55781615", "0.5562491", "0.5555984", "0.55544543", "0.5552051", "0.55404264", "0.55349106", "0.55291885", "0.5528396", "0.55189216", "0.55180526", "0.55055916", "0.54967505", "0.54913247" ]
0.0
-1
>>> isinstance(lab9_q3(), str)
True
def lab9_q3(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_str(x):\n return isinstance(x, str)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def _is_str(item):\n return isinstance(item, str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(value):\n return isinstance(value, basestring)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(obj):\n return isinstance(obj, str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def check_solution(self, solution):\n return isinstance(solution, str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def is_str(value):\n return isinstance(value, str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_string(document):\r\n return isinstance(document, str)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def is_string(value):\n return isinstance(value, (str, bytes))", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n 
(item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(value):\n return isinstance(value, string_types)", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. Graule\")", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def is_valid(self, qstr):\r\n pass", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_str(self, r, rep):\n assert str(r) == rep", "def is_my_type(type_str):\n raise NotImplementedError()", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def stringable(self):\n return True", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), 
u'test')", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_str(self, string, application):\n assert string == str(application)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def check_statement(self, statement):\n return isinstance(statement, str)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n 
self.assertEqual(str(self.uose), str(self.wuose))", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_name3(self):\n new = self.value()\n self.assertEqual(type(new.name), str)", "def isstringlike(item):\n ret = 1\n try:\n float(item)\n ret = 0\n except ValueError:\n pass\n return ret", "def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)" ]
[ "0.67149335", "0.66803926", "0.66799915", "0.66795844", "0.6666466", "0.6657703", "0.662522", "0.6536551", "0.64925563", "0.6467223", "0.64577836", "0.64136744", "0.64020425", "0.6394686", "0.63700676", "0.63617307", "0.63562024", "0.6313202", "0.62957275", "0.6292955", "0.6282623", "0.6266023", "0.62419885", "0.62377214", "0.6211834", "0.62058556", "0.61772364", "0.61745554", "0.6172704", "0.6171448", "0.61712646", "0.6169488", "0.61563426", "0.61428446", "0.6130367", "0.6126729", "0.6102548", "0.60659164", "0.6063203", "0.60554266", "0.605156", "0.59899724", "0.59235525", "0.59090203", "0.5900157", "0.58976775", "0.58541137", "0.585028", "0.58426887", "0.5841543", "0.58218575", "0.5799523", "0.5796606", "0.5789222", "0.5748842", "0.57425725", "0.5741484", "0.57285666", "0.5711052", "0.56950617", "0.56849325", "0.56779534", "0.5672168", "0.5660435", "0.5659071", "0.56540865", "0.5646578", "0.5635208", "0.56325907", "0.56181306", "0.56146604", "0.5612443", "0.56123143", "0.56095093", "0.56054455", "0.5586782", "0.55755115", "0.5571332", "0.5571099", "0.5560819", "0.5559453", "0.55576813", "0.55447453", "0.55396545", "0.5528844", "0.5518738", "0.5517373", "0.550837", "0.55040693", "0.5504023", "0.54967636", "0.54636645", "0.5460176", "0.54581046", "0.54524153", "0.54473054", "0.54464406", "0.5445606", "0.5445116", "0.5444956", "0.54445857" ]
0.0
-1
>>> isinstance(lab9_q4(), str)
True
def lab9_q4(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, basestring)", "def is_string(obj):\n return isinstance(obj, str)", "def is_str(value):\n return isinstance(value, str)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def check_solution(self, solution):\n return isinstance(solution, str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def _is_str(item):\n return isinstance(item, str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(document):\r\n return isinstance(document, str)", "def is_string(value):\n return isinstance(value, (str, bytes))", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def is_string(value):\n return isinstance(value, string_types)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n 
return isinstance(s, basestring)\r\n return False", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_str(self, r, rep):\n assert str(r) == rep", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def stringable(self):\n return True", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def is_valid(self, qstr):\r\n pass", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "async def 
test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_str(self, string, application):\n assert string == str(application)", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_str4(self):\n with self.assertRaises(TypeError):\n r4 = Rectangle(1, 1, 1, 1, 1, 1, 1, 1)", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def check_statement(self, statement):\n return isinstance(statement, str)", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)", "def test_name(self):\n self.assertTrue(type(x.name) == str)", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def 
test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def test_string():" ]
[ "0.6942101", "0.6876074", "0.67846864", "0.67608213", "0.6748942", "0.6748524", "0.6707322", "0.66833335", "0.6645385", "0.65739566", "0.6512699", "0.64795643", "0.6473345", "0.64647806", "0.64464295", "0.64428717", "0.64399874", "0.6432312", "0.64164597", "0.63935167", "0.6393086", "0.6385117", "0.6366803", "0.63640577", "0.6335321", "0.6333616", "0.6311632", "0.630805", "0.63045734", "0.63025504", "0.62816584", "0.6253579", "0.6191963", "0.6182476", "0.6178683", "0.6175436", "0.6166576", "0.61511534", "0.61382526", "0.6131829", "0.61240524", "0.6105313", "0.6093565", "0.6081401", "0.6073317", "0.6034119", "0.6022652", "0.59930855", "0.5984026", "0.5972785", "0.5951475", "0.59301716", "0.5921898", "0.5915701", "0.58994675", "0.58916223", "0.5882159", "0.58569944", "0.58350563", "0.5833487", "0.5832781", "0.581637", "0.5805207", "0.5779816", "0.5777427", "0.57633066", "0.5760612", "0.5750679", "0.5747107", "0.5736695", "0.5735951", "0.5721382", "0.57210666", "0.57181466", "0.5706955", "0.570191", "0.57016635", "0.5678622", "0.56694853", "0.56675714", "0.5666367", "0.56569016", "0.5652198", "0.56377053", "0.5624036", "0.5618053", "0.56134427", "0.56050265", "0.5593211", "0.5583307", "0.5579383", "0.55527145", "0.55521804", "0.5551113", "0.5546153", "0.55346787", "0.55304533", "0.55276495", "0.55273455", "0.5515876", "0.55123633" ]
0.0
-1
>>> isinstance(lab9_q5(), str)
True
def lab9_q5(): return """ YOUR EXPLANATION HERE """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str(x):\n return isinstance(x, str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_str(value):\n return isinstance(value, str)", "def is_string(obj):\n return isinstance(obj, str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, string_types)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def _is_str(item):\n return isinstance(item, str)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(document):\r\n return isinstance(document, str)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def check_solution(self, solution):\n return isinstance(solution, str)", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def _is_string(self, 
obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_str(self, r, rep):\n assert str(r) == rep", "def stringable(self):\n return True", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def test_str(self, string, application):\n assert string == str(application)", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def is_valid(self, qstr):\r\n pass", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n 
self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def check_statement(self, statement):\n return isinstance(statement, str)", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_string():", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)", "def _is_encodable_simple(sv):\n return sv 
not in (None, str(), int())" ]
[ "0.6877922", "0.6834238", "0.68263847", "0.6825494", "0.68185055", "0.6800018", "0.6793659", "0.66846204", "0.66589445", "0.6589347", "0.657317", "0.6551655", "0.65453273", "0.6524285", "0.6505871", "0.6494237", "0.64909095", "0.64687836", "0.64158064", "0.6410573", "0.6397513", "0.63719976", "0.636623", "0.6357994", "0.6354848", "0.6348176", "0.63450825", "0.63383013", "0.63307023", "0.6319121", "0.6308429", "0.62939405", "0.62707114", "0.6241678", "0.6241462", "0.6237527", "0.6224175", "0.621058", "0.6209309", "0.6206134", "0.61965585", "0.61727893", "0.61512256", "0.6122408", "0.6084492", "0.60469824", "0.60423917", "0.6033841", "0.6033564", "0.6023943", "0.6010542", "0.59838146", "0.5906969", "0.5893875", "0.58928335", "0.5883546", "0.58825064", "0.5870558", "0.5856585", "0.58484745", "0.58403057", "0.5828066", "0.5805187", "0.5768886", "0.5765224", "0.5763426", "0.5755072", "0.5729192", "0.5726469", "0.57155526", "0.5712669", "0.5710405", "0.57097995", "0.57017434", "0.57007146", "0.5697161", "0.56966275", "0.56947947", "0.5694658", "0.5677635", "0.56652725", "0.5663006", "0.56573594", "0.5655477", "0.5654544", "0.56369853", "0.56354666", "0.56220144", "0.5619942", "0.5612889", "0.5606399", "0.56011087", "0.56002223", "0.5596177", "0.5590934", "0.5589888", "0.55878824", "0.55852723", "0.55756384", "0.55679286", "0.55655515" ]
0.0
-1
>>> isinstance(lab10_q2(), str) True
def lab10_q1(): return """ Since there has to be at least two, len(self.fruits) >=2 and at least one cup, self.cups (number == True) Decrease cups by 1 because we use one to create mixed juice. first pop gives the 0 index fruit, second pop gives the next 0 index which is originally at 1 index. len(item) to show number of letters then -1 because of hyphen. """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_str(x):\n return isinstance(x, str)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def is_string(value):\n return isinstance(value, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def is_string(obj):\n return isinstance(obj, str)", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def check_solution(self, solution):\n return isinstance(solution, str)", "def is_str(value):\n return isinstance(value, str)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def _is_str(item):\n return isinstance(item, str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def is_string(document):\r\n return isinstance(document, str)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(value):\n return isinstance(value, (str, bytes))", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(value):\n return isinstance(value, string_types)", "def test_str(self, r, rep):\n assert str(r) == rep", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def issparse(qob):\n return 
isinstance(qob, sp.spmatrix)", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def is_my_type(type_str):\n raise NotImplementedError()", "def stringable(self):\n return True", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_valid(self, qstr):\r\n pass", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n 
self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_str(self, string, application):\n assert string == str(application)", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def test_K_str(self):\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), \"[Rectangle] (12) 2/1 - 4/6\")\n\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), \"[Rectangle] (1) 1/0 - 5/5\")", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_random_programming_quote(self):\n quote = Quote().print_programming_quote()\n self.assertTrue(type(quote) == str)", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def test_string():", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def test_X_approximate_distribution_is_str(self):\n\n # Check valid case of \"count\" which is not included in valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = 
[\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def value_type_of_str(str1, type1):\n #FOUND A BETTER FUNCTION, MultiDict\n if issubclass(type1, Collection):\n try:\n # obj = eval(str1)\n obj = ast.literal_eval(str1)\n if isinstance(obj, Collection):\n return obj\n except:\n return type1(str1)\n else:\n try:\n return type1(str1)\n except:\n return type1()", "def check_r_type(r):\n if type(r) is str:\n raise TypeError('Get Error message.')", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())" ]
[ "0.70104426", "0.6694115", "0.66540843", "0.6631845", "0.65846175", "0.6546801", "0.6527376", "0.6501472", "0.6483732", "0.6467406", "0.64658016", "0.64406013", "0.64081883", "0.6402971", "0.6375239", "0.63414836", "0.6304244", "0.62933546", "0.62872994", "0.6280994", "0.62789834", "0.6259358", "0.62485385", "0.6246819", "0.6235869", "0.62175083", "0.61921865", "0.6186148", "0.6162182", "0.61596453", "0.61141074", "0.61032003", "0.6101763", "0.61001945", "0.6075113", "0.60745525", "0.60655224", "0.60612863", "0.60440636", "0.6043444", "0.60422903", "0.6037869", "0.60372865", "0.6025946", "0.602139", "0.60171425", "0.5982923", "0.59644324", "0.59496844", "0.59410375", "0.59311235", "0.5875609", "0.5872721", "0.5870502", "0.58510303", "0.5830357", "0.5823361", "0.5818003", "0.58174515", "0.5802583", "0.5801132", "0.57964104", "0.5790019", "0.57768327", "0.5767355", "0.57623684", "0.5754325", "0.57468283", "0.57455415", "0.5743413", "0.57382673", "0.572689", "0.5715788", "0.56898403", "0.567495", "0.5672585", "0.5666557", "0.5665925", "0.56302464", "0.56216335", "0.55910885", "0.5580515", "0.5576054", "0.5572291", "0.55700415", "0.55472004", "0.5538149", "0.5534275", "0.5530309", "0.55208504", "0.5519285", "0.5519032", "0.55165094", "0.5515356", "0.5504813", "0.55045116", "0.55017865", "0.54990613", "0.54949707", "0.5491545", "0.54854816" ]
0.0
-1
>>> isinstance(lab10_q3(), str) True
def lab10_q2(): return """ Make a 'total' to add up the total revenue as while loop plays. item = qvm.dispense, makes it so item is the juice. While item: because if there is a juice this while loop will continue. If there aren't two fruits, there wont be a juice so while loop stops. total += qvm.collect_money(item). (this should be += not = right?) to add the revenue from that 'item' revalue item to the next juice. return total """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def is_string(value):\n return isinstance(value, basestring)", "def _is_str(item):\n return isinstance(item, str)", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(obj):\n return isinstance(obj, str)", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def check_solution(self, solution):\n return isinstance(solution, str)", "def is_str(value):\n return isinstance(value, str)", "def is_string(document):\r\n return isinstance(document, str)", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def is_string(value):\n return isinstance(value, (str, bytes))", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or 
isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def is_string(value):\n return isinstance(value, string_types)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test_str(self, r, rep):\n assert str(r) == rep", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def is_valid(self, qstr):\r\n pass", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def stringable(self):\n return True", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_str(self, string, application):\n assert string == str(application)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_compare_to_string(self):\n 
r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def check_statement(self, statement):\n return isinstance(statement, str)", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def test_is_hand_string_correct(self):\n 
self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_random_programming_quote(self):\n quote = Quote().print_programming_quote()\n self.assertTrue(type(quote) == str)", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def test_name3(self):\n new = self.value()\n self.assertEqual(type(new.name), str)", "def test_computer_string(self):\n # Computer to use for the network\n comp = \"1q-qvm\"\n\n # Get a network with the computer\n qnn = self.get_test_network(comp)\n\n # Checks\n self.assertEqual(type(qnn.computer), QuantumComputer)", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def test_name(self):\n self.assertTrue(type(x.name) == str)" ]
[ "0.6767156", "0.662008", "0.65915257", "0.65897584", "0.6570212", "0.6540568", "0.6512019", "0.6466866", "0.6387287", "0.6369226", "0.634048", "0.6340179", "0.6338254", "0.633478", "0.6307848", "0.6269958", "0.6269275", "0.6240808", "0.6229523", "0.62286043", "0.62249273", "0.62194455", "0.62020504", "0.61821586", "0.6153329", "0.6148121", "0.61335737", "0.6122168", "0.6102936", "0.60865355", "0.6084942", "0.60679907", "0.60637856", "0.6052069", "0.60492474", "0.6049187", "0.6045467", "0.6038228", "0.60355186", "0.6033634", "0.6023668", "0.598508", "0.5974316", "0.59718025", "0.5927041", "0.5925883", "0.590352", "0.58674383", "0.5841899", "0.58236194", "0.5788886", "0.5786031", "0.5762349", "0.57533216", "0.573421", "0.572819", "0.57232666", "0.5720383", "0.5713556", "0.57067513", "0.5700244", "0.5687953", "0.56717116", "0.565715", "0.565216", "0.5649498", "0.564461", "0.5643724", "0.56414104", "0.563331", "0.562592", "0.56255627", "0.5620423", "0.5601635", "0.55986965", "0.55942255", "0.5576689", "0.55708486", "0.5550938", "0.5548421", "0.5542471", "0.5529778", "0.5528771", "0.5528624", "0.55262333", "0.55237466", "0.55175424", "0.5515109", "0.55109525", "0.55058295", "0.5496301", "0.54961586", "0.549394", "0.5493721", "0.54932606", "0.5487032", "0.5483757", "0.54821163", "0.5480545", "0.5457732", "0.54549855" ]
0.0
-1
>>> isinstance(lab10_q4(), str) True
def lab10_q3(): return """ Use list comprehension max(lst_of_qvm, key=lambda qvm : total_revenue(qvm)) This makes each element of the list go through the key which gives total_revenue for each one. Then just get the max in that list """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_str(x):\n return isinstance(x, str)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(obj):\n return isinstance(obj, basestring)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def is_string(value):\n return isinstance(value, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(obj):\n return isinstance(obj, str)", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def is_str(value):\n return isinstance(value, str)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def check_solution(self, solution):\n return isinstance(solution, str)", "def _is_str(item):\n return isinstance(item, str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(document):\r\n return isinstance(document, str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, (str, bytes))", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def is_string(value):\n return isinstance(value, string_types)", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n 
is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def test_str(self, r, rep):\n assert str(r) == rep", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. Graule\")", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def stringable(self):\n return True", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_string(self):\n htype = h5t.py_create('S10')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def test_str(self):\r\n 
statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def is_valid(self, qstr):\r\n pass", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self, string, application):\n assert string == str(application)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str4(self):\n with self.assertRaises(TypeError):\n r4 = Rectangle(1, 1, 1, 1, 1, 1, 1, 1)", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n 
self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_K_str(self):\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), \"[Rectangle] (12) 2/1 - 4/6\")\n\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), \"[Rectangle] (1) 1/0 - 5/5\")", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_name(self):\n self.assertTrue(type(x.name) == str)", "def test_random_programming_quote(self):\n quote = Quote().print_programming_quote()\n self.assertTrue(type(quote) == str)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def check_statement(self, statement):\n return isinstance(statement, str)", "def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)", "def test_string():", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def _is_encodable_simple(sv):\n return sv not in (None, str(), int())", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)" ]
[ "0.7007634", "0.6753712", "0.6680644", "0.6641429", "0.66254205", "0.66071725", "0.6594754", "0.65537024", "0.6541653", "0.6496038", "0.6468674", "0.6443504", "0.6440572", "0.6406669", "0.6398117", "0.6318649", "0.63153744", "0.6313979", "0.63125205", "0.6301631", "0.6300007", "0.62920177", "0.6290773", "0.6286347", "0.62455815", "0.62354743", "0.6229251", "0.62289184", "0.62151897", "0.62013865", "0.6199701", "0.6186818", "0.6149062", "0.61369073", "0.61357903", "0.61297506", "0.6080551", "0.6077569", "0.60651034", "0.6055003", "0.60538405", "0.60506356", "0.60346884", "0.60093415", "0.59971035", "0.598713", "0.597522", "0.5950169", "0.5928735", "0.5921458", "0.59157735", "0.58968073", "0.58945996", "0.5891567", "0.58848625", "0.58797175", "0.5877188", "0.58755374", "0.5874094", "0.5869925", "0.58493656", "0.5845363", "0.5840347", "0.5840008", "0.5834216", "0.58223665", "0.5819956", "0.57522106", "0.5740335", "0.5729923", "0.5714313", "0.57125026", "0.5711467", "0.5709861", "0.56977355", "0.56939", "0.5693831", "0.56786114", "0.567035", "0.5656646", "0.564546", "0.5632595", "0.5625999", "0.56146234", "0.56051606", "0.55851734", "0.5569352", "0.5568757", "0.55635995", "0.5562388", "0.55506235", "0.55503434", "0.5544436", "0.55440456", "0.55388796", "0.5532398", "0.5520508", "0.5518942", "0.5516842", "0.5507924", "0.5499651" ]
0.0
-1
>>> isinstance(lab10_q5(), str)
True
def lab10_q4(): return """
    Use 'for i in range(len(seq)//2):' so that i indexes the first half of seq.
    For each i, check that seq[i] equals the element at the mirrored position,
    seq[-i-1] (equivalently seq[len(seq)-1-i]); if the two differ, return False.
    If the loop finishes without finding a mismatch, every pair matched, so
    return True.
    """
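A minimal runnable sketch of the loop this answer describes, assuming the lab question asks whether a sequence reads the same in both directions (a mirror/palindrome-style check); the function name mirrors is illustrative only and is not taken from the lab:

def mirrors(seq):
    # Compare the first half of seq against the mirrored second half.
    for i in range(len(seq) // 2):
        # seq[-i - 1] is the same element as seq[len(seq) - 1 - i].
        if seq[i] != seq[-i - 1]:
            return False
    # The loop finished without a mismatch, so every pair agrees.
    return True

>>> mirrors([1, 2, 3, 2, 1])
True
>>> mirrors('abca')
False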
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_str(x):\n return isinstance(x, str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(obj):\n return isinstance(obj, str)", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_str(value):\n return isinstance(value, str)", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(value):\n return isinstance(value, string_types)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(document):\r\n return isinstance(document, str)", "def _is_str(item):\n return isinstance(item, str)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def test_string(self):\n htype = h5t.py_create('S10')\n 
self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def check_solution(self, solution):\n return isinstance(solution, str)", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_str(self, r, rep):\n assert str(r) == rep", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def stringable(self):\n return True", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self, string, application):\n assert string == str(application)", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 
2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_K_str(self):\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), \"[Rectangle] (12) 2/1 - 4/6\")\n\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), \"[Rectangle] (1) 1/0 - 5/5\")", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def is_valid(self, qstr):\r\n pass", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_string():", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def check_statement(self, statement):\n return isinstance(statement, str)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def is_action_str(string: str) -> bool:", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n 
AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_computer_string(self):\n # Computer to use for the network\n comp = \"1q-qvm\"\n\n # Get a network with the computer\n qnn = self.get_test_network(comp)\n\n # Checks\n self.assertEqual(type(qnn.computer), QuantumComputer)", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string" ]
[ "0.69396424", "0.6750825", "0.67340475", "0.6716429", "0.6714517", "0.66726655", "0.665966", "0.66213614", "0.6544444", "0.64893436", "0.64751285", "0.64728814", "0.64611757", "0.6450565", "0.6434197", "0.6419371", "0.6414928", "0.638308", "0.6381245", "0.6368124", "0.6322138", "0.6312567", "0.6292983", "0.6271539", "0.62687546", "0.62655073", "0.6261044", "0.6252003", "0.624323", "0.6241017", "0.6237343", "0.623536", "0.6206511", "0.617976", "0.61767673", "0.61728776", "0.61696684", "0.61687195", "0.616169", "0.61500466", "0.6137914", "0.6108411", "0.6099802", "0.6080076", "0.60794014", "0.60674417", "0.6065302", "0.60556436", "0.60532516", "0.6020806", "0.5969043", "0.5947046", "0.5906997", "0.58946514", "0.58786213", "0.5875994", "0.5872118", "0.58658177", "0.58604616", "0.58589816", "0.5847253", "0.58260643", "0.58062613", "0.58053106", "0.5796051", "0.5787008", "0.57829875", "0.57623637", "0.57608986", "0.5728306", "0.5714183", "0.5683523", "0.5681079", "0.56782496", "0.56736964", "0.5673529", "0.5663031", "0.5662918", "0.5658129", "0.56486815", "0.5643716", "0.56409466", "0.5639037", "0.56359065", "0.56325954", "0.5625592", "0.561705", "0.5606504", "0.5602089", "0.55990833", "0.55930835", "0.5581659", "0.5581574", "0.5579207", "0.5579032", "0.55729824", "0.5566125", "0.5550791", "0.55416685", "0.55412173", "0.55392194" ]
0.0
-1
>>> isinstance(lab10_q5(), str)
True
def lab10_q5(): return """
    Use 'assert type(c) is int' to make sure c is an integer.
    Then write a helper function that takes the same parameters.
    The first base case is when the count c is 0, which returns Link.empty;
    the second is when lst is Link.empty, which also returns Link.empty.
    If neither base case applies, recurse by linking the current first element
    to the helper called on the rest of the list: Link(lst.first, helper(lst.rest, c))
    """
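A hedged sketch of the recursion this answer outlines. Link here is a minimal stand-in for the lab's linked-list class, the name first_c is illustrative, and the c - 1 in the recursive call is an assumption: the prose above never says the count shrinks, but the c == 0 base case only terminates the recursion if it does.

class Link:
    # Minimal stand-in for the lab's linked-list class.
    empty = ()

    def __init__(self, first, rest=empty):
        self.first = first
        self.rest = rest

def first_c(lst, c):
    assert type(c) is int
    def helper(lst, c):
        if c == 0:             # took as many elements as requested
            return Link.empty
        if lst is Link.empty:  # ran out of elements first
            return Link.empty
        # Keep the current element and recurse on the rest of the list,
        # assuming the remaining count drops by one at each step.
        return Link(lst.first, helper(lst.rest, c - 1))
    return helper(lst, c)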
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_str(x):\n return isinstance(x, str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(obj):\n return isinstance(obj, str)", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_str(value):\n return isinstance(value, str)", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(value):\n return isinstance(value, string_types)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(document):\r\n return isinstance(document, str)", "def _is_str(item):\n return isinstance(item, str)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def test_string(self):\n htype = h5t.py_create('S10')\n 
self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def check_solution(self, solution):\n return isinstance(solution, str)", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_str(self, r, rep):\n assert str(r) == rep", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def stringable(self):\n return True", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self, string, application):\n assert string == str(application)", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 
2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_K_str(self):\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), \"[Rectangle] (12) 2/1 - 4/6\")\n\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), \"[Rectangle] (1) 1/0 - 5/5\")", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def is_valid(self, qstr):\r\n pass", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_string():", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def check_statement(self, statement):\n return isinstance(statement, str)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def is_action_str(string: str) -> bool:", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n 
AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_computer_string(self):\n # Computer to use for the network\n comp = \"1q-qvm\"\n\n # Get a network with the computer\n qnn = self.get_test_network(comp)\n\n # Checks\n self.assertEqual(type(qnn.computer), QuantumComputer)", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string" ]
[ "0.69393235", "0.67513615", "0.6735242", "0.67170364", "0.67146415", "0.6673512", "0.6659964", "0.6621408", "0.6545047", "0.6489594", "0.64740264", "0.64732605", "0.64615583", "0.6448504", "0.64349174", "0.6421704", "0.64157605", "0.63823795", "0.6382036", "0.6368499", "0.6321071", "0.6313761", "0.6291852", "0.6271569", "0.6268804", "0.6267225", "0.62614685", "0.6252008", "0.6242975", "0.6241558", "0.6238892", "0.62366533", "0.6206673", "0.6179889", "0.6176907", "0.61733073", "0.61697763", "0.6169568", "0.61612535", "0.6149985", "0.61378545", "0.61091757", "0.6101296", "0.6079802", "0.6079702", "0.60676944", "0.60643065", "0.60563236", "0.6053264", "0.60218835", "0.59682316", "0.59471756", "0.5907225", "0.5892694", "0.588037", "0.5874825", "0.58725333", "0.5866566", "0.5860099", "0.5858301", "0.5846368", "0.58244693", "0.58064395", "0.580461", "0.5798476", "0.5787747", "0.57814145", "0.57624435", "0.57618505", "0.57302254", "0.5713295", "0.56836843", "0.5679657", "0.5678511", "0.56724983", "0.56724846", "0.56642467", "0.56626314", "0.56572026", "0.5647612", "0.5644753", "0.56425864", "0.56404024", "0.56356925", "0.5631153", "0.5625176", "0.56178653", "0.56064487", "0.56033474", "0.55996835", "0.5593114", "0.55822796", "0.5581637", "0.5580511", "0.55783045", "0.5574546", "0.55657446", "0.5553186", "0.5541458", "0.55413723", "0.55375147" ]
0.0
-1
>>> isinstance(lab10_q5(), str)
True
def lab10_q6(): return """
    Use try: return dictionary[key] so that when the key exists its value is
    returned directly. Add an except KeyError branch that prints
    "Avoid Exception", stores 'no value' as the value for that key so the next
    lookup succeeds, and returns 'no value'.
    """
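A small sketch of the try/except pattern described above, assuming the question passes in a dictionary and a key; safe_lookup and the parameter names are illustrative, not taken from the lab:

def safe_lookup(dictionary, key):
    try:
        # Key present: hand back its value directly.
        return dictionary[key]
    except KeyError:
        print("Avoid Exception")
        # Remember a placeholder so the next lookup of this key succeeds.
        dictionary[key] = 'no value'
        return 'no value'

>>> d = {'a': 1}
>>> safe_lookup(d, 'b')
Avoid Exception
'no value'
>>> safe_lookup(d, 'b')
'no value'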
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def is_string(value):\n return isinstance(value, basestring)", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_str(x):\n return isinstance(x, str)", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_string(obj):\n return isinstance(obj, basestring)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(obj):\n return isinstance(obj, str)", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_str(value):\n return isinstance(value, str)", "def test_str():\n # Test for string special method with scalar Rnode objects\n x = Rnode(1.0)\n try:\n assert str(x) == 'Reverse-mode Rnode Object ( Values: 1.0 )'\n except AssertionError as e:\n print(e)\n raise AssertionError", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def is_string(value):\n return isinstance(value, string_types)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def test_str(self):\n self.assertEqual(str(self.bs), str(self.wbs))\n self.assertEqual(str(self.be), str(self.be))\n # str(us) fails in Python 2\n self.assertEqual(str, type(str(self.wus)))\n # str(ue) fails in Python 2\n self.assertEqual(str, type(str(self.wue)))", "def canAdapt(self, stru):\n return isinstance(stru, crystal)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def is_string(document):\r\n return isinstance(document, str)", "def _is_str(item):\n return isinstance(item, str)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def test_string(self):\n htype = h5t.py_create('S10')\n 
self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def test_string(self):\n htype = h5t.py_create('S10', logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 10)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def test_string(self):\n htype = h5t.py_create('S1')\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.get_size(), 1)", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"", "def check_solution(self, solution):\n return isinstance(solution, str)", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def test_str(self):\n arm = self.ar[2009][11]\n self.assertEqual(str(arm), '<AwstatsMonth 2009-11>')", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def test_speciality_str_representation(speciality):\n assert str(speciality) == \"Electrical\"", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def test___str__(self):\n self.assertEqual(\n str(self.mineral),\n 'mineralxy')", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def test_str(self, r, rep):\n assert str(r) == rep", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def test_str(self):\n self.assertEqual(str(self.john), \"J. Doe\")\n self.assertEqual(str(self.solar), \"N. 
Graule\")", "def test_repr(self):\n qg = ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def stringable(self):\n return True", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_str(self, string, application):\n assert string == str(application)", "def test_vlstring_log(self):\n dt = h5t.special_dtype(vlen=str)\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeStringID)\n self.assertEqual(htype.is_variable_str(), True)\n self.assertEqual(htype.get_cset(), h5t.CSET_ASCII)\n self.assertEqual(htype.get_strpad(), h5t.STR_NULLTERM)", "def test_stringfield_return_text_type():\n stringify = fields.StringField().adapt\n assert isinstance(stringify(\"Hello World\"), text_type)", "def testStringRepresentationOnInstantiation(self):\r\n self.assertEqual(str(self.tv), 'table')", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_car_model_str_representation(car_model):\n assert str(car_model) == \"Golf\"", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_my_type(type_str):\n raise NotImplementedError()", "def test_str():\n c = Circle(4) \n assert c.__str__() == 'Circle with radius: 4.000000'", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def test_compare_to_string(self):\n r = self.RNA(\"UCC\")\n self.assertEqual(r, \"UCC\")", "def testStringInput(self):\r\n from pydsl.Check import BNFChecker\r\n from pydsl.contrib.bnfgrammar import productionset0\r\n grammardef = productionset0\r\n checker = BNFChecker(grammardef)\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check(\"SR\"))\r\n self.assertTrue(checker.check((\"S\",\"R\")))\r\n self.assertFalse(checker.check(\"SL\"))\r\n self.assertFalse(checker.check((\"S\",\"L\")))\r\n self.assertFalse(checker.check(\"\"))", "def test_str(self):\n tour = G(Tour, display_name='test1')\n self.assertEqual('test1', str(tour))", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_str(self, concept_embedding: ConceptEmbedding):\n # printing should not rise error\n str(concept_embedding)", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n self.assertEqual(str(self.uose), str(self.wuose))", "def test_str(self):\n step = G(Step, display_name='test1')\n self.assertEqual('test1', str(step))", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 
2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def test_str(self):\n ary = self.ar[2009]\n self.assertEqual(str(ary), '<AwstatsYear 2009: 11, 12>')", "def test_K_str(self):\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), \"[Rectangle] (12) 2/1 - 4/6\")\n\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), \"[Rectangle] (1) 1/0 - 5/5\")", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def is_valid(self, qstr):\r\n pass", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def test_string():", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), \"2/4\")", "def test_str_(self):\n str(self.standardcode)\n repr(self.standardcode)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def test_str():\n post = models.Post(title=\"Test Post\")\n\n assert str(post) == post.title", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def check_statement(self, statement):\n return isinstance(statement, str)", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def test_str(self):\n faction = self.faction\n\n self.assertEqual(str(faction), self.faction_raw['name'])", "def test_native_str(self):\n if PY2:\n import __builtin__\n builtin_str = __builtin__.str\n else:\n import builtins\n builtin_str = builtins.str\n\n inputs = [b'blah', u'blah', 'blah']\n for s in inputs:\n self.assertEqual(native_str(s), builtin_str(s))\n self.assertTrue(isinstance(native_str(s), builtin_str))", "def is_tuple_consists_of_strings(an_array):\n return isinstance(an_array, tuple) and is_array_type(an_array, str)", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def is_action_str(string: str) -> bool:", "async def test_str(\n submission_fixture: Submission,\n) -> None:\n AssertThat(str(submission_fixture)).IsInstanceOf(str)\n 
AssertThat(str(submission_fixture)).IsEqualTo(submission_fixture.url)", "def test_computer_string(self):\n # Computer to use for the network\n comp = \"1q-qvm\"\n\n # Get a network with the computer\n qnn = self.get_test_network(comp)\n\n # Checks\n self.assertEqual(type(qnn.computer), QuantumComputer)", "def test_node_str():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n string_a = str(a)\n expect_string = '13'\n assert string_a == expect_string" ]
[ "0.69393235", "0.67513615", "0.6735242", "0.67170364", "0.67146415", "0.6673512", "0.6659964", "0.6621408", "0.6545047", "0.6489594", "0.64740264", "0.64732605", "0.64615583", "0.6448504", "0.64349174", "0.6421704", "0.64157605", "0.63823795", "0.6382036", "0.6368499", "0.6321071", "0.6313761", "0.6291852", "0.6271569", "0.6268804", "0.6267225", "0.62614685", "0.6252008", "0.6242975", "0.6241558", "0.6238892", "0.62366533", "0.6206673", "0.6179889", "0.6176907", "0.61733073", "0.61697763", "0.6169568", "0.61612535", "0.6149985", "0.61378545", "0.61091757", "0.6101296", "0.6079802", "0.6079702", "0.60676944", "0.60643065", "0.60563236", "0.6053264", "0.60218835", "0.59682316", "0.59471756", "0.5907225", "0.5892694", "0.588037", "0.5874825", "0.58725333", "0.5866566", "0.5860099", "0.5858301", "0.5846368", "0.58244693", "0.58064395", "0.580461", "0.5798476", "0.5787747", "0.57814145", "0.57624435", "0.57618505", "0.57302254", "0.5713295", "0.56836843", "0.5679657", "0.5678511", "0.56724983", "0.56724846", "0.56642467", "0.56626314", "0.56572026", "0.5647612", "0.5644753", "0.56425864", "0.56404024", "0.56356925", "0.5631153", "0.5625176", "0.56178653", "0.56064487", "0.56033474", "0.55996835", "0.5593114", "0.55822796", "0.5581637", "0.5580511", "0.55783045", "0.5574546", "0.55657446", "0.5553186", "0.5541458", "0.55413723", "0.55375147" ]
0.0
-1
Function to read the file
def read_file(file="input4.txt"):
    res = []
    with open(file, encoding='utf-8') as f:
        # Split every line of the file into whitespace-separated tokens.
        for i in f.readlines():
            res.append(i.split())
    # Replace each line's second token with the part before its ':'.
    for i in res:
        t = i.pop(1).split(':')
        i.insert(1, t[0])
    return res
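For illustration only, since the real contents of input4.txt are not shown in this record: the per-line transformation keeps only the part of the second token that precedes the colon. The sample line below is made up; the code only requires that the second token contain a ':'.

>>> line = 'Alice 12:30 ok'.split()
>>> t = line.pop(1).split(':')
>>> line.insert(1, t[0])
>>> line
['Alice', '12', 'ok']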
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(path):", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def read_from_file(self, filename: str) -> None:", "def read_file(path_to_file):\n 8", "def read(self, filename):\n raise NotImplementedError", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def readFromFile(filename):\n raise NotImplementedError", "def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def myReadFile( path):\n f = open(path,'r')\n result = f.readlines()\n f.close\n return result", "def read(cls):\n x_i=\"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.read()\n return file", "def read_file(self, path):\n with open(path) as f:\n return self.read_file_obj(f)", "def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()", "def read(self, filename): # real signature unknown; restored from __doc__\n pass", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def read_file(self, file):\n fd = open(file)\n data = fd.read()\n fd.close()\n return data", "def read1(cls):\n x_i = \"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.readlines()\n return file", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def __read_file(self, filename):\n with open(filename) as f:\n content = f.readlines()\n \n return content", "def ReadFile(path, mode='r'):\n with open(path, mode) as f:\n return f.read()", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def _read_input_file(self):\n pass", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def read(self, path: str) -> str:\n raise NotImplementedError", "def read_file(file):\n f = open(file, 'r')\n print(f.read())", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def read():\n # TODO", "def _read_file(self, filePath):\n with open(filePath) as f:\n fileContent = f.read()\n f.close()\n return fileContent.strip()", "def read_file(self, file_path): \n logging.info('Lendo arquivo de {0}'.format(file_path))\n file_with_tags = open(file_path, \"r\", encoding='utf-8')\n return file_with_tags.readlines()", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def read_file(file_path):\n try:\n input_file = open(file_path)\n text_content = input_file.read()\n input_file.close()\n return text_content\n except IOError:\n print (\"Can not read from file\")", "def _ReadFile(filepath):\n with open(filepath) as f:\n return f.read()", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def open_and_read_file(file_path):\n\n # your code goes here\n return open(file_path).read()", "def read_file(path):\n assert_is_string(path)\n f = open(path, \"r\")\n data = f.read()\n 
f.close()\n return data", "def ReadFile(self, filename):\n file = open(filename, 'rb')\n result = \"\"\n try:\n result = file.read()\n finally:\n file.close()\n return result", "def _read(fname):\n fpath = os.path.dirname(__file__)\n fpath = os.path.join(fpath, fname)\n with open(fpath, 'r') as file_:\n return file_.read()", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def read(path):\n with open(path) as f:\n return f.read()", "def open_and_read_file(file_path):\n contents = open(file_path).read()\n # your code goes here\n\n return contents", "def read_file(self,filename):\n\n if (config.mode_format == \"simple\"): return self.read_file_simple(filename)\n if (config.mode_format == \"agsm\"): return self.read_file_agsm(filename)\n sys.exit(\"ERROR: unrecognised format \\\"\"+config.mode_format+\"\\\".\\n\" \\\n +\" Please choose another value for mode_format in AIMS_configure.py\")", "def read_file(filename):\n with open(filename) as fp:\n return fp.read()", "def ReadFile(self, filename):\r\n file = open(filename, 'rb')\r\n result = \"\"\r\n try:\r\n result = file.read()\r\n finally:\r\n file.close()\r\n return result", "def read(fn):\n with open(os.path.join(os.path.dirname(__file__), fn), encoding='utf-8') as f:\n return f.read()", "def read_file(file_path):\n with open(file_path) as file_h:\n return file_h.readlines()", "def readfile(filename, mode='r'):\n if mode != 'r' and mode != 'rb':\n print(f\"ERROR: incorrect mode : expected 'r' or 'rb' given {mode}\\n\")\n else:\n with open(Path(os.path.expanduser(filename)), mode)as f:\n content = f.read()\n f.close()\n return content", "def handle_file(filename,operation = 'r'):\n with open(filename,operation) as f:\n data = f.readlines()\n return data", "def read(self, args):\n assert self.exists(args=args)\n file_path = self.path(args)\n file_str = autofile.read_file(file_path)\n file_dat = self.reader_(file_str)\n assert self.checker_(file_dat)\n return file_dat", "def read(file_name):\n with io.open(os.path.join(os.path.dirname(__file__), file_name),\n encoding='utf-8') as f:\n return f.read()", "def SimpleRead(fn):\n content = \"\"\n try:\n content = open(fn).read()\n except :\n print(\"Failed to read file: %s\\n\"%(fn))\n print sys.exc_info()[1]\n\n return content", "def read_file(fname):\n with open(fname, 'r') as fopen:\n fdata = fopen.read()\n return fdata", "def open_and_read_file():\n file_path = sys.argv[1]\n #print file_path\n file_data = open(file_path, 'r')\n gettysburg = file_data.read()\n\n return gettysburg", "def read(self, file_path):\n # get the absolute path\n self._set_extension(file_path)\n\n logger.debug(\"reading %s\", self._file_path)\n\n if self._file_ext == 'csv':\n self._read_csv()\n\n if self._file_ext == 'json':\n self._read_json()\n\n if self._file_ext == 'txt':\n self._read_txt()\n\n return self._content", "def read_file(path):\n # Mystery arguments:\n strictness = False\n # Read the string:\n return _iterate_bibtexsource(_bibtex.open_file(path, strictness))", "def read_file(filename):\n f = open(filename)\n contents = f.read()\n f.close()\n return contents", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def open_and_read_file(file_path):\n\n # your code goes here\n file_ = open(file_path).read()\n\n return file_", "def read(self, filename):\n\t\treturn codecs.open(filename, 'r', 'utf8').read()", "def read(filename):\n with 
open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()", "def read_from_file(self, file_name:str)->str:\n ret_value = \"\"\n try: \n with open(file_name, 'r') as f: \n try: \n ret_value = f.readlines()\n except Exception as e: \n print(\"Unable to read data from file (%s) - %s\" % (file_name, e))\n ret_value = False \n except Exception as e: \n print(\"Failed to open file (%s) - %s\" % (file_name, e))\n ret_value = False \n return ret_value", "def read(filename):\n\n path = os.path.join(os.path.dirname(__file__), filename)\n\n with open(path) as f:\n return f.read()", "def read_file(file_name):\n return open(os.path.join(os.path.dirname(os.path.dirname(__file__)), file_name)).read()", "def ReadFileContents(file_name): \n all_file_contents = open(file_name, 'r').readlines()\n return all_file_contents", "def read_file(self, *args):\n with open(os.path.join(self.temp_path, *args)) as fp:\n return fp.read()", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n ret = self.read_file_object(file_obj, file_format=file_format)\n file_obj.close()\n return ret", "def readfile(file):\n with open(file, 'r') as f:\n data = f.read().splitlines()\n return data", "def read_file(name):\n with open(name, 'r') as my_file:\n return my_file.read().encode('utf-8')", "def read_file(path): #TODO implementme, handling paths more intelligently\n f = open(path, \"r\")\n string = f.read()\n f.close()\n return string", "def open_and_read_file(file_path):\n\n # your code goes here\n\n f = open(file_path, \"r\")\n text = f.read()\n\n return text", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'SubBasins':\n self.read_subbasins(f)\n elif self.cleantag(line) == 'HRUs':\n self.read_HRUs(f)\n # Next line\n line = f.nexttag()", "def read_file(self, file: Path) -> str:\n with open(file) as f:\n return f.read()", "def read_file(filename):\n return open(filename).read()", "def read(self):\n\n with open(self.file, \"r\", encoding=\"utf-8\") as file:\n funilrys = file.read()\n\n return funilrys", "def read_file(file):\n f = open(file, \"r\", encoding=\"utf8\")\n return f.read()", "def read(path):\n with open(path) as f:\n contents = f.read()\n return contents", "def read_file():\n # Create a file object called login_details, and give option to read file\n login_details = open(\"login_details.txt\",\"r\")\n # Create a list containing each line of login_details. List is called contents\n contents = login_details.readlines()\n login_details.close()\n return contents", "def read(self):\n\t\treturn self.input_file.read(1)", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') as f:\n return f.read()", "def read_from_file(path):\n with io.open(path, 'rb') as ios:\n return read(ios)", "def read_file():\r\n # https://blog.csdn.net/lzgs_4/article/details/50371030\r\n\r\n path = input(\"Please input the path of the dataset (e.g. 
...\\cranfieldDocs) : \")\r\n # path = r\"C:\\Users\\15451\\PycharmProjects\\Nan\\dataset\\cranfieldDocs\" # the path of all the files\r\n\r\n files = os.listdir(path) # obtain all the file names in the file folder\r\n file_content = {}\r\n for file in files: # file is the file name\r\n f = open(path + \"/\" + file)\r\n iter_f = iter(f)\r\n str = \"\"\r\n for line in iter_f:\r\n line = line.strip()\r\n line = line.lower()\r\n str = str + \" \" + line\r\n str = remove_field_name(str)\r\n str = split_to_obtain_token(str)\r\n file_content[file] = str # str is the contect of the file choosen\r\n return file_content", "def read_file(self, file_name: str)-> str:\n if not os.path.exists(file_name):\n raise IOError(\"The File {} doesn't exists!\".format(file_name))\n\n with open(file_name) as file:\n return file.read().strip()", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def file_read(path: str) -> str:\n if os.path.isfile(path):\n while True:\n try:\n with open(path, \"r\") as fptr:\n return fptr.read()\n except PermissionError:\n pass\n return \"\"", "def ReadFile(f_path):\n data = ''\n\n if f_path:\n try:\n fh = open(f_path, 'r')\n try:\n data = fh.read()\n finally:\n fh.close()\n except IOError:\n return ''\n\n return data", "def read_file(file_name):\r\n\r\n if file_name.find('.md') == -1:\r\n file_name += '.md'\r\n\r\n with open(file_name, 'r', encoding='utf-8') as file:\r\n file_data = file.read()\r\n\r\n return file_data", "def read_file(file_path, mode='r', encoding=\"utf-8\"):\n with codecs.open(file_path, mode, encoding=encoding) as fp:\n return fp.read().strip()", "def open_and_read_file(file_path):\n\n # your code goes here\n file_name = (open(file_path)).read()\n return file_name", "def read_file(filepath: str) -> str:\n with open(filepath, \"r\") as filep:\n return filep.read()", "def read_file(file_path):\n file_contents = None\n with open(file_path) as f_desc:\n file_contents = f_desc.read()\n if not file_contents:\n raise CLIError('Could not read {}'.format(file_path))\n return file_contents", "def read_file(dir):\n file = open(dir, 'rb')\n return file.read()", "def read_file(self,file_name):\r\n data = np.genfromtxt(file_name)\r\n return data;", "def read_file(filename):\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content", "def read(self, fname):\n return self.read_using_fguide(fname, self.fguide)", "def read(self, fileobj):\n raise NotImplementedError", "def fs_read(file_path):\n try:\n with open(str(file_path), 'r') as f:\n return f.read()\n except UnicodeDecodeError:\n with open(str(file_path), 'r', encoding='latin-1') as f:\n return f.read()\n except IOError as e:\n raise e", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()" ]
[ "0.8188726", "0.8140984", "0.8140984", "0.7953263", "0.78393626", "0.7722133", "0.7643032", "0.761069", "0.75103563", "0.7457068", "0.735805", "0.73315775", "0.7328963", "0.7327442", "0.73023856", "0.72954583", "0.72721654", "0.72382605", "0.7223386", "0.7222797", "0.7218205", "0.71844155", "0.71817863", "0.7175644", "0.7171825", "0.715196", "0.7146649", "0.7143679", "0.71420556", "0.7125202", "0.7110071", "0.70982677", "0.7096971", "0.7082671", "0.7082316", "0.70715654", "0.70696735", "0.7059023", "0.7047458", "0.7027316", "0.70223767", "0.70173234", "0.7012837", "0.70103395", "0.70030636", "0.6999707", "0.69971526", "0.69741213", "0.6965792", "0.6949693", "0.6947159", "0.6946495", "0.69428235", "0.69407946", "0.6933591", "0.693185", "0.6926294", "0.69204664", "0.6910052", "0.690865", "0.6894035", "0.6889641", "0.68884605", "0.68825495", "0.6877094", "0.686867", "0.68631834", "0.68548125", "0.6848442", "0.6828982", "0.68244857", "0.68227935", "0.6821865", "0.68215805", "0.68205076", "0.68185943", "0.681551", "0.6808017", "0.6807129", "0.6799451", "0.6792116", "0.6788471", "0.67853355", "0.6785014", "0.6782227", "0.67690897", "0.6763164", "0.6763164", "0.67597073", "0.67443955", "0.6742793", "0.6738327", "0.67346627", "0.67298794", "0.672352", "0.67213583", "0.67130125", "0.67054594", "0.66997397", "0.6698929", "0.6698929" ]
0.0
-1
Function to create a dict
def create_dict(info):
    """
    dict = {ip: {counter:*}, {weekdays: []}, {hours: []}}
    """
    dict_info = dict()
    for i in info:
        ip = i[0]
        hours = i[1]
        weekdays = i[2]
        if ip not in dict_info:
            dict_info[ip] = {}
            dict_info[ip]['counter'] = 0
            dict_info[ip]['hours'] = []
            dict_info[ip]['weekdays'] = []
        dict_info[ip]['counter'] += 1
        dict_info[ip]['hours'].append(hours)
        dict_info[ip]['weekdays'].append(weekdays)
    return dict_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_dict(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n return _DictMaker(struct_class=cls, positional_args=args, keyword_args=kwargs).make_dict()", "def create_dict(*args):\n output = {}\n idx = 0\n while idx < len(args):\n output[args[idx + 1]] = args[idx]\n idx += 2\n\n return output", "def asdict():\n pass", "def dict() -> Dict:\n pass", "def make_dict(unused_s, unused_l, toks):\n result = {}\n key_value_pairs = chunks(toks, 2)\n for key_value_pair in key_value_pairs:\n result[key_value_pair[0]] = key_value_pair[1]\n return result", "def dict() -> Dict[str, Pin]:", "def make_dict(cls, fields, fields_kwargs):\n return utils.make_dict(fields, fields_kwargs)", "def to_dict(self) -> dict:", "def createdictionary(bpm, extremes, duration, numbeats, time_beats):\n dict = {}\n dict[\"mean_hr_bpm\"] = bpm\n dict[\"voltage_extremes\"] = extremes\n dict[\"duration\"] = duration\n dict[\"num_beats\"] = numbeats\n dict[\"beats\"] = time_beats\n return dict", "def get_dict_of_str2(self):\n pass", "def createDictionary(self):\n\t\tdictionary: dict = {}\n\t\tdictionary.update({'deckname': self.mDeckName})\n\t\tdictionary.update({'filename': self.autoFilename})\n\t\tdictionary.update({'creatorname': str(self.mCreatorname)})\n\t\tdictionary.update({'maxAttrPoints': str(self.mMaxAttributePoints)})\n\t\tminionListDict: dict = {}\n\t\tfor minion in self.mMinionSet:\n\t\t\tminionDict: dict = {}\n\t\t\tminionDict.update({'minionName': str(minion.mMinionName)})\n\t\t\tminionDict.update({'attack': str(minion.mAttackPoints)})\n\t\t\tminionDict.update({'hp': str(minion.mHealthPoints)})\n\t\t\tskillList: list = minion.mSkills\n\t\t\tskillNames: list = []\n\t\t\tfor skill in skillList:\n\t\t\t\tskillNames.append(skill.mSkillName)\n\t\t\tminionDict.update({'skills': skillNames})\n\t\t\tminionListDict.update({minion.mMinionName: minionDict})\n\t\tdictionary.update({'minions': minionListDict})\n\t\tdictionary.update({'id' : hash(str(dictionary))}) # TODO LPO: let DB handle that\n\t\tself.mDeckDict = dictionary\n\t\treturn dictionary", "def make_dict(keys, values):\n\n return dict(zip(keys, values))", "def give_me_a_dictionary():\n my_dict={'India':'Delhi','UK':'London','Germany':'Berlin'}\n return my_dict\n pass", "def file_to_dictionary():\n\n return;", "def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict", "def make_dict(\n nn,\n q_id,\n polarity,\n context_cond,\n cat,\n subcat,\n answer_info,\n bias_targets,\n version,\n notes,\n context,\n question,\n ans_list,\n ans_place,\n):\n this_dict = {\n \"example_id\": nn,\n \"question_index\": q_id,\n \"question_polarity\": polarity,\n \"context_condition\": context_cond,\n \"category\": cat,\n \"answer_info\": answer_info,\n \"additional_metadata\": {\n \"subcategory\": subcat,\n \"stereotyped_groups\": bias_targets,\n \"version\": version,\n \"source\": notes,\n },\n \"context\": context.strip(),\n \"question\": question.strip(),\n \"ans0\": ans_list[0],\n \"ans1\": ans_list[1],\n \"ans2\": ans_list[2],\n \"label\": ans_place,\n }\n return this_dict", "def makeDict(headers, array, default=None):\n result, defdict = __makeDict(headers, array, default)\n return result", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def _create_dictionaries(self, chars):\n dictionary = dict()\n for char in chars:\n dictionary[char] = 
len(dictionary)\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return dictionary, reverse_dictionary", "def createDict( self ):\n d = {}\n devTup = ( 'endcap', 'comp', 'shutter','397intensity' )\n for dev in devTup:\n d[dev] = {'devChannels':{}}\n endcap = ( ( 1, 1 ), ( 2, 0 ) )\n comp = ( ( 1, 4 ), ( 2, 2 ), ( 'common', 3 ) )\n shutter = ( ( 1, 5 ), ( 2, 6 ), ( 3, 7 ) )\n intensity397 = (('397intensity',8),)\n chanTup = ( endcap, comp, shutter ,intensity397 )\n for dev, value in zip( devTup, chanTup ):\n for chanPair in value:\n d[dev]['devChannels'][chanPair[0]] = {'value':None, 'channel':chanPair[1]}\n ecRange = ( 0.0, 40.0 )\n compRange = ( -40.0, 40.0 )\n shutterRange = ( 0.0, 5.0 )\n intensity397Range = (0.0,2500.0)\n rangeTup = ( ecRange, compRange, shutterRange, intensity397Range )\n for dev, value in zip( devTup, rangeTup ): d[dev]['range'] = value\n self.dcDict = d", "def create_dict(list_database):\n return_dict = dict()\n for key, value in list_database:\n if key != None:\n return_dict[key] = value\n return return_dict", "def _to_dict(self) -> dict:\n pass", "def build_dict(arg):\n # helper function to the Evaluator.to_property_di_graph() method that\n # packages the dictionaries returned by the \"associate_\" family of\n # functions and then supplies the master dict (one_dict) to the Vertex\n # obj as **kwargs\n one_dict = {}\n for ar in arg:\n one_dict.update(ar)\n return one_dict", "def new_dict(key, value, n_keys=0):\n # With JIT disabled, ignore all arguments and return a Python dict.\n return dict()", "def test_create_mimic_dict_1(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertIsInstance(\n result, dict,\n \"The return value of create_mimic_dict() should be a dict.\"\n )", "def creer_dictionnaire_vide():\n dico = {}\n return dico", "def create_dictionaries(chars):\n return dict((c, i) for i, c in enumerate(chars)), dict((i, c) for i, c in enumerate(chars))", "def Dictionary_create(nMarkers, markerSize):\n pass", "def generate_dict(length):\r\n primeDict = {}\r\n index = 2\r\n \r\n while (index < length):\r\n primeDict[index]=True\r\n index = index+1\r\n \r\n return primeDict", "def convertToDict(self): \n out = dict()\n out[\"Value\"] = self.value \n out[\"Odds\"] = self.odds \n out[\"Path\"] = self.path\n out[\"Curated\"] = self.curated \n out[\"Edit Distance\"] = self.edit_distance \n out[\"Edit Distance Stem\"] = self.edit_distance_stem\n out[\"Source ID\"] = self.source_id\n out[\"Match\"] = self.match \n out[\"Offset Start\"] = self.offset_start \n out[\"Offset End\"] = self.offset_end\n return out", "def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def as_dict(self):\n def append(d, key, value, is_iterative, is_primitive, is_enum):\n if value is None:\n if is_iterative:\n value = []\n elif is_primitive == False and is_enum == False:\n if is_iterative:\n value = 
map(lambda i : i.as_dict(), value)\n else:\n value = value.as_dict()\n d[key] = value\n\n # Populate a deep dictionary.\n d = dict()\n append(d, 'file', self.__file, False, False, False)\n append(d, 'file_reference', self.__file_reference, False, False, False)\n append(d, 'name', self.__name, False, True, False)\n return d", "def to_dict(self):", "def test_dictify(self) -> None:\n r = dictify(['a', 'b', 'c'], [1, 2, 3])\n assert r == {'a': 1, 'b': 2, 'c': 3}, r\n\n r = {}\n dictify(['a'], [1], r)\n dictify(['b'], [2], r)\n dictify(['c'], [3], r)\n assert r == {'a': 1, 'b': 2, 'c': 3}, r", "def _space(self) -> Dict:\n return {}", "def stubbornDict(*arg, **kwarg):\n result = {}\n for a in arg:\n result.update(StubbornDict.to_dict(a))\n result.update(kwarg)\n return StubbornDict(result)", "def test_return_as_dictionary(self):\n inventory_dict = Inventory(123, \"product\", 10, 5).return_as_dictionary()\n self.assertEqual(inventory_dict['product_code'], 123)\n self.assertEqual(inventory_dict['description'], \"product\")\n self.assertEqual(inventory_dict['market_price'], 10)\n self.assertEqual(inventory_dict['rental_price'], 5)", "def to_dict(cls) -> dict:\n raise NotImplementedError()", "def makeDictionary(self):\n self.dictionary = {}\n for i in range(len(self.movie)):\n if self.movie[i] in self.dictionary:\n vectors = self.dictionary[self.movie[i]]\n vectors[self.user[i]] = self.rating[i]\n self.dictionary[self.movie[i]] = vectors\n else:\n newMovie = dict([(self.user[i], self.rating[i])])\n self.dictionary[self.movie[i]] = newMovie\n return self.dictionary", "def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in dictionary\n return d", "def def_dict():\n d1 = c.defaultdict(dict)\n print '\\nInitially first dictionary is: ', dict(d1)\n l = [('a', 1), ('b', 2), ('a', 27), ('c', 3), ('d', 4), ('c', 29), ('e', 5)]\n for a, b in l:\n d1[a] = b\n print '\\nAfter adding elements to the default dictionary, it is: ', dict(d1)\n print '\\nItems in the first dictionary is: ', d1.items()\n st = 'malayalam'\n print '\\n\\nCreating dictionary from the string', st\n d2 = c.defaultdict(int)\n print '\\nInitially second dict is:', dict(d2)\n for i in st:\n d2[i] = st.count(i)\n print \"After updation, the second dictionary becomes: \", dict(d2)", "def buildCheckRequestToDict(self, uID, request, firstname, lastname):\n result = {}\n result['uID'] = uID\n result['request'] = request\n result['firstname'] = firstname\n result['lastname'] = lastname\n return result", "def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict", "def _to_dict(self, remove_name=True):\n keys = [\"name\", \"path\", \"type\", \"mode\", \"description\", \"default\", \"min\", \"max\", \"enum\", \"optional\"]\n if remove_name:\n keys.remove(\"name\")\n result = {key: getattr(self, key) for key in keys}\n return _remove_empty_values(result)", "def createMap(*values):\n\tresult = dict()\n\tfor i in range(0, len(values), 2):\n\t\tresult[values[i]] = values[i+1]\n\treturn result", "def form_dictionary_by_diffrn(data_obj) -> dict:\n if isinstance(data_obj, Diffrn):\n ddict = data_obj.get_dictionary()\n else:\n ddict = {}\n\n return ddict", "def 
as_create_dict(self) -> dict:\n out_dict = self.as_dict()\n try:\n out_dict.pop('numShards', None)\n out_dict.pop('mongoURI', None)\n out_dict.pop('mongoDBVersion', None)\n out_dict.pop('mongoURIUpdated', None)\n out_dict.pop('mongoURIWithOptions', None)\n out_dict.pop('paused', None)\n out_dict.pop('srvAddress', None)\n out_dict.pop('links', None)\n out_dict.pop('state_name', None)\n except KeyError:\n pass\n try:\n out_dict['replicationSpecs'][0].__delitem__('id')\n except KeyError:\n pass\n return out_dict", "def create_dict(rows, tag, tag_id=None, start=0, enroll=False):\n enrollment_info_map = {\n 'Enrollment Requirement': 'requirements',\n 'Add Consent': 'add_consent',\n 'Drop Consent': 'drop_consent',\n }\n\n data = {}\n\n for row in rows:\n name_raw, desc_raw = row.find_all(tag, id=tag_id)[start:]\n name = name_raw.text.strip()\n desc = desc_raw.text.encode('ascii', 'ignore').decode().strip()\n\n if enroll:\n name = enrollment_info_map[name]\n else:\n name = name.lower().replace(' / ', '_')\n\n data.update({name: desc})\n\n return data", "def base_dict():\n out = OrderedDict()\n ao(out, 'name', 'String', 'Name', name='Name')\n ao(out, 'mro', 'List', name='mro', attr=['Hidden'])\n ao(out, 'comment', 'String', '')\n ao(out, 'preset', 'Preset', '', attr=['Hidden'])\n ao(out, 'dev', 'String', attr=['Hidden'])\n ao(out, 'devpath', 'String', attr=['Hidden'])\n ao(out, 'fullpath', 'String', attr=['Hidden'])\n ao(out, 'zerotime', 'Float', name='Start time', attr=['Hidden'])\n ao(out, 'initInstrument', 'Progress', attr=['Hidden'])\n return out", "def createDictInstance(self):\n\n dictinstance = {}\n for i in range(len(self.instancenumbers)):\n dictinstance.setdefault(self.instancenumbers[i], []).append(i)\n\n return dictinstance", "def get_data_to_create_object(self):\n return {}", "def as_dict(self, **kwargs):\n m = {}\n self.add_to_dict(m, **kwargs)\n return m", "def get_dict(**kwargs):\n return kwargs", "def gen_dict(keys, vals):\n retVal = {}\n for i in range(len(keys)):\n if i > len(vals):\n retVal[keys[i]] = \"\"\n continue\n retVal[keys[i]] = vals[i]\n return retVal", "def generate(self) -> Dict[str, Any]:\n raise NotImplementedError", "def asdict(v: Any) -> Dict[Any, Any]:\n return to_dict(v, reuse_instances=False, convert_sets=False)", "def _new_wos_dict():\n wos_dict = {\n 'DI': None,\n 'TI': None,\n 'PY': None,\n 'SO': None,\n 'UT': None,\n 'DE': None,\n }\n\n return wos_dict", "def to_dict(funs):\n def to_dict_funs(an_object):\n return dict((k, f(an_object)) for (k, f) in funs.items())\n return to_dict_funs", "def as_dict(self):\n def append(d, key, value, is_iterative, is_primitive, is_enum):\n if value is None:\n if is_iterative:\n value = []\n elif is_primitive == False and is_enum == False:\n if is_iterative:\n value = map(lambda i : i.as_dict(), value)\n else:\n value = value.as_dict()\n d[key] = value\n\n # Populate a deep dictionary.\n d = dict()\n append(d, 'has_constant_offset', self.__has_constant_offset, False, True, False)\n append(d, 'length', self.__length, False, True, False)\n append(d, 'uom', self.__uom, False, True, False)\n return d", "def pare_dict(*a, **kw):\n return pare_dict(*a, **kw)", "def to_dictionary(self):\n dict_contents = [\"id\", \"size\", \"x\", \"y\"]\n new_dict = {}\n for key in dict_contents:\n new_dict[key] = getattr(self, key)\n return new_dict", "def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = 
entry.__getattribute__(data)\n i = i + 1\n return d", "def create(self, vocabulary=list) -> dict:\n try:\n out = {}\n for i in range(len(vocabulary)):\n out[vocabulary[i]] = i\n return(out)\n except Exception as error:\n print(f\"Error: self.create([...]) -> {error}\")", "def _to_request_dict(self):\n return {\"attr1\": self.attr1, \"attr2\": \"test\"}", "def create_template_dict(name, cat, boilerplate_name=None, is_common=False):\r\n return {\r\n \"display_name\": name,\r\n \"category\": cat,\r\n \"boilerplate_name\": boilerplate_name,\r\n \"is_common\": is_common\r\n }", "def to_dictionary(self):\n new_dictionary = {}\n for key, value in self.__dict__.items():\n new_dictionary[key.split(\"__\")[-1]] = value\n new_dictionary['size'] = new_dictionary['width']\n del new_dictionary['width']\n del new_dictionary['height']\n return new_dictionary", "def _convert_to_dict(r):\n if not r:\n return r\n else:\n return dict(token=r[0], code=r[2], value=r[1], address='-')", "def build_params_dict(params, param_names):\n if len(params) != len(param_names):\n raise ValueError('Parameter and parameter name length mismatch.')\n return dict(zip(param_names, params))", "def to_obj(self):\n return dict()", "def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def __load(self) -> Dict:\n return dict()", "def create_dict(dictionary, user, father, angle, i=0):\n data = {}\n data[\"name\"] = user[\"person\"][\"name\"]\n data[\"id\"] = user[\"person\"][\"publicId\"]\n try:\n data[\"img\"] = user[\"person\"][\"picture\"]\n except:\n pass\n if father == \"self\":\n data[\"posx\"] = 0\n data[\"posy\"] = 0\n data[\"related_to\"] = \"self\"\n else:\n data[\"posx\"] = math.cos(math.radians(angle * i))\n data[\"posy\"] = math.sin(math.radians(angle * i))\n data[\"related_to\"] = father[\"person\"][\"publicId\"]\n data[\"angle\"] = angle\n dictionary[data[\"id\"]] = data.copy()", "def create_dictionary(self,*key_value_pairs,**items):\r\n if len(key_value_pairs)%2 !=0:\r\n raise ValueError(\"create dictionary failed. 
there should be\"\r\n \"an even number of key-value-pairs\")\r\n return self.set_to_dictionary({},*key_value_pairs,**items)", "def DictFunction2():\r\n print \"Create Second Dictionary\"\r\n NumberDict = dict(zip((i for i in range(16)), (hex(i) for i in range(16))))\r\n print NumberDict", "def test_to_dict(self):\n Square.reset_objects()\n s1 = Square(10, 2, 1)\n s1_dictionary = s1.to_dictionary()\n self.assertEqual(s1_dictionary, {'id': 1, 'x': 2, 'size': 10, 'y': 1})", "def make_employee_dict(names, ID_numbers, salaries, email_addresses):\r\n d = dict()\r\n for i in range(len(names)):\r\n d[ID_numbers[i]] = Employee(names[i], ID_numbers[i], salaries[i], email_addresses[i])\r\n return d", "def get_dict(self):\n return", "def todict(self) -> dict:\n return_dict = {\"t\": self.t, \"s\": self.s, \"v\": self.v, \"p\": self.p}\n if not self.c:\n return_dict[\"c\"] = self.c\n return return_dict", "def _to_dict(self, remove_name=True):\n keys = [\"name\", \"path\", \"type\", \"mode\", \"description\"]\n if remove_name:\n keys.remove(\"name\")\n result = {key: getattr(self, key) for key in keys}\n return _remove_empty_values(result)", "def _tuples_to_dict(self, tuples):\n d = {}\n for key, value in tuples:\n d[key] = value\n return d", "def return_as_dictionary(self):\n out_put_dict = {}\n out_put_dict['productCode'] = self.product_code\n out_put_dict['description'] = self.description\n out_put_dict['marketPrice'] = self.market_price\n out_put_dict['rentalPrice'] = self.rental_price\n\n return out_put_dict", "def build_person(first_name,last_name, age =''):\n person = { 'first': first_name.title(), 'last' : last_name.title()}\n if age:\n person['age'] = age\n return person", "def toDict(self):\n\n aDict = {}\n\n # Required Keys\n try:\n aDict[self.E0_KEY] = self.e0.toDict()\n aDict[self.E1_KEY] = self.e1.toDict()\n aDict[self.E2_KEY] = self.e2.toDict()\n aDict[self.MAXIMUM_HORIZONTAL_KEY] = self.maximumHorizontalProjection\n aDict[self.MAXIMUM_VERTICAL_KEY] = self.maximumVerticalProjection\n aDict[self.EQUIVALENT_HORIZONTAL_KEY] = self.equivalentHorizontalRadius\n\n except (NameError, AttributeError) as e:\n print(\"Missing required data error: %s\" % e)\n\n return aDict", "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'value') and self.value is not None:\n _dict['value'] = self.value\n return _dict", "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'value') and self.value is not None:\n _dict['value'] = self.value\n return _dict", "def crear():\n\n return dict(form=form)", "def build_person(first_name, last_name, age=''):\n person = {'first': first_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def build_person(first_name, last_name, age=''):\n person = {'first': first_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []", "def initialize_gensim_dictionary(text):\n dct = Dictionary(text)\n return dct", "def get_dic(self):\n dic = {\n 'size': self.size,\n 'bounds': self.bounds,\n 'visible': self.visible,\n 'is_static': self.is_static,\n 'options': self.options,\n 'primitive_type': self.primitive_type,\n 'constrain_ratio': self.constrain_ratio,\n 'constrain_navigation': self.constrain_navigation,\n 'framebuffer': 
self.framebuffer,\n # 'beforeclear': self.beforeclear,\n 'variables': self.get_variables_list(),\n 'vertex_shader': self.vertex_shader,\n 'fragment_shader': self.fragment_shader,\n }\n return dic", "def test_f2_to_dictionary(self):\n new_dict = {'x': 14, 'y': 5, 'id': 10, 'width': 22, 'height': 25}\n r1 = Rectangle(10, 2, 1, 9)\n r1.update(**new_dict)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n print(type(r1.to_dictionary()))\n self.assertEqual(f.getvalue(), \"<class 'dict'>\\n\")", "def to_dict(self):\n return {}", "def makeDict(result_list):\n \n result_dict = dict()\n for line in result_list:\n if line[0] == 'set_property' and line[3] == 'get_ports':\n if line[4] not in result_dict:\n result_dict[line[4]] = dict()\n result_dict[line[4]][line[1]] = line[2]\n\n return result_dict", "def _to_dict(self, **kwargs):\n pass", "def buildApprovalToDict(self, uID, approval, firstname, lastname):\n result = {}\n result['uID'] = uID\n result['approval'] = approval\n result['firstname'] = firstname\n result['lastname'] = lastname\n return result", "def make_dict(row):\n return dict((key[0], value) for key, value in zip(colnames, row))", "def dict(self):\r\n d = {\r\n \"key\": self.field,\r\n \"value_count\": self.value_count,\r\n \"record_count\": self.record_count,\r\n \"value_ratio\": self.value_ratio,\r\n \"storage_types\": list(self.storage_types),\r\n \"null_count\": self.null_count,\r\n \"null_value_ratio\": self.null_value_ratio,\r\n \"null_record_ratio\": self.null_record_ratio,\r\n \"empty_string_count\": self.empty_string_count,\r\n \"unique_storage_type\": self.unique_storage_type\r\n }\r\n\r\n if self.distinct_overflow:\r\n d[\"distinct_overflow\"] = self.distinct_overflow,\r\n d[\"distinct_values\"] = []\r\n else:\r\n d[\"distinct_values\"] = list(self.distinct_values)\r\n\r\n return d", "def create_dictionary():\n d = {}\n for y in range(HEIGHT):\n if (y % 2) != 0:\n pos = (10*y)+10\n else:\n pos =((10*y)-9)+10 \n for x in range(WIDTH):\n xy_tuple = (x,y)\n d[pos] = xy_tuple\n if (y % 2) != 0:\n pos = pos - 1\n else:\n pos = pos + 1\n \n return d" ]
[ "0.71769756", "0.70463216", "0.69759154", "0.68891", "0.68752956", "0.6867047", "0.67607605", "0.6733505", "0.66315436", "0.6606104", "0.65966946", "0.6580841", "0.6546656", "0.65415686", "0.64865774", "0.64681804", "0.64519143", "0.6376904", "0.63741493", "0.6370084", "0.6347823", "0.63420904", "0.6341082", "0.63154566", "0.63113165", "0.629125", "0.62857085", "0.62391", "0.6237504", "0.62027305", "0.6199328", "0.61928034", "0.618758", "0.6179331", "0.6175777", "0.61590266", "0.6157189", "0.61517656", "0.6151753", "0.6138255", "0.6135293", "0.6124283", "0.6121961", "0.6119057", "0.6114239", "0.61077875", "0.61057645", "0.6102112", "0.6089755", "0.6089749", "0.6086317", "0.608189", "0.60788864", "0.60734206", "0.6071836", "0.60663915", "0.60657185", "0.6062433", "0.60540557", "0.6053476", "0.6042091", "0.60338587", "0.6023258", "0.60169256", "0.6007467", "0.6003187", "0.6001376", "0.60010976", "0.5992613", "0.598158", "0.597921", "0.5965837", "0.5963442", "0.5961862", "0.59539354", "0.5951668", "0.5948983", "0.59478194", "0.5945922", "0.59417635", "0.59357566", "0.59283304", "0.59278387", "0.5921863", "0.59199846", "0.59199846", "0.59196407", "0.5919304", "0.5919304", "0.59168184", "0.5915727", "0.59121585", "0.5910749", "0.5910423", "0.59085935", "0.589803", "0.58949673", "0.5891277", "0.5890203", "0.5887569" ]
0.6324139
23
Function to write to file
def write_to_file(info, mode='w', file="output4.txt"):
    with open(file, mode, encoding='utf-8') as f:
        for line in info:
            f.write(' '.join(map(str, line)) + '\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_to_file(self, filename: str) -> None:", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, fname):\n pass", "def file_write(stuff, file_path):\n with open(file_path, \"wt\") as fo:\n fo.write(stuff)", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def fileWrite(content):\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()", "def write(filename):\n print(uc.write(filename))", "def write_to_file(filename, content):\n with open(filename, 'w') as f:\n f.write(content)", "def write_to_file(file_name, content):\n with open(file_name, \"w\") as text_file:\n text_file.write(str(content))", "def write():\n pass", "def write(cls, file, data):\n file.write(data)", "def write_to_file(filepath, data):\n\n with open(filepath, 'w') as f:\n f.write(str(data))", "def write_to_file(info: List[str]) -> None:\n return", "def filewrite(self, filename):\n io.write(self, filename)", "def write_file(file, content):\n with open(file, \"w\") as fid:\n fid.write(content)", "def write(self, filename, data):\n raise NotImplementedError", "def file_writer(path, data):\n with open(path, \"a\") as file:\n file.write(data + \"\\n\")", "def write(file, text):\n with open(file, 'w') as f:\n f.write(text)", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def writefile(filename, content):\n with open(Path(os.path.expanduser(filename)), 'w') as outfile:\n outfile.write(content)", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def write_file(filename):\r\n if Py3:\r\n return open(filename, \"w\", newline='')\r\n return open(filename, \"wb\")", "def write_to_file(self, filepath, mode = \"a\"): \n if \"r\" in mode: \n print(\"Only accepts write and append modes\")\n return \n with open(filepath, mode) as f: \n f.write(\"{}\\n\".format(self.title))\n verified, seen, ratio = self.get_verified_ratio()\n f.write(\"Verified Names: {}\\n\".format(str(verified)))\n f.write(\"Names: {}\\n\".format(str(seen)))\n f.write(\"Ratio: {}\\n\".format(str(ratio)))", "def write(message):\n\n with open(str(path), 'a') as fp:\n fp.write(message)", "def _write_file(output_path: str, file_content: Iterable[str]) -> None:\n with open(output_path, \"w+\", encoding=\"utf-8\") as f:\n f.writelines(file_content)\n\n logging.info(f\"wrote to '{output_path}'\")", "def write(data):", "def writeToFile(self, basedir, write_code=0):", "def write_file(filename=\"\", text=\"\"):\n with open(filename, \"w\") as f:\n return(f.write(text))", "def write( data ):", "def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w') as f:\n return f.write(text)", "def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)", "def writeFile(self, name, text):\n\t\ttry:\n\t\t\tf = open(name, 'w')\n\t\t\tf.write (text)\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\tprint \"Error writing file %s\" % name", "def write_file(path, contents, mode=\"w\"):\n with open(path, 
mode) as f:\n f.write(contents)", "def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))", "def write_file(filename=\"\", text=\"\"):\n with open(filename, mode=\"w\", encoding=\"utf-8\") as m:\n return m.write(text)", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def write(self, out):", "def write(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_write(self, *args)", "def write_file(file_path, contents):\n logger.debug(f'write to file:{file_path}')\n with open(file_path, 'w') as outfile:\n outfile.write(contents)", "def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()", "def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w') as fl:\n wr = fl.write(text)\n return wr", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def writeFile(file_name, file_text, mode='w+'):\n with open(file_name, mode) as file:\n file.write(file_text)", "def writeToFile(fileName, content, encoding = \"UTF-8\"):\n file = io.open(fileName, mode = \"w\", encoding = encoding)\n file.write(content)\n file.close()", "def fwrite(filename, text):\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n\n with open(filename, 'w') as f:\n f.write(text)", "def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w', encoding='utf-8') as f:\n return f.write(text)", "def write_file(self, directory, name, content):\n\n try:\n f = open(os.path.join(directory, name), 'w')\n f.write(content)\n f.close()\n except:\n print \"Content not written to file: %s\" % name", "def write_file(filename, data):\n file = open(filename, \"a\")\n file.write(data)\n file.close()", "def write(cls, path, text):\n with cls.open(path, 'wt') as fd:\n return fd.write(text)", "def spew(path, data):\n with open(path, 'w+') as f:\n f.write(data)", "def _write_to_file(self, string):\n with open(self.p.base_dir + '/' + self.p.filename, 'w') as f:\n f.write(string)", "def write(self, filename):\n with open(filename, \"w\") as f:\n f.write(self.get_string())", "def write(self, filename):\n with open(filename, \"w\") as f:\n f.write(self.get_string())", "def write_file(filename: str, content: str, mode: str = \"w\") -> IO:\n with open(filename, mode) as file:\n file.write(content)\n return file", "def out(filename, s):\n\tf = open(filename, 'w')\n\tf.write(s)\n\tf.close()", "def _write_file(self, filename, content, mode=None):\n with open(filename, 'w') as fp:\n fp.write(dedent(content).strip())\n fp.write('\\n')\n\n if mode is not None:\n os.chmod(filename, mode)", "def write_file(name_file, string):\n with open(name_file, 'w') as file:\n file.write(string)", "def _Write(buf, filename):\n with open(filename, 'wb') as f:\n f.write(buf)", "def write_file ( file_name, contents ):\n vlog(4, 'Writing File: %s SIZE=%s' % (file_name, len(contents)))\n with open(file_name, 'w') as file:\n file.write(contents)", "def write_file(data, filename):\n file = open(filename, \"wb\")\n file.write(data)\n file.close()", "def write_file(filename, content):\n codecs.open(filename, \"w\", encoding='utf-8').writelines(content)", "def write(self, args, file_dat):\n assert self.checker_(file_dat)\n file_path = self.path(args)\n file_str = self.writer_(file_dat)\n autofile.write_file(file_path, file_str)", "def writeFile(self, filename):\n\n s = 
self.asString()\n if os.access(filename, os.F_OK):\n raise RuntimeError(\"file %s already exists -- not overwritten.\" % (filename))\n \n f = file(filename, \"w\")\n f.write(s)\n f.close()", "def write(data, filename, mode='a'):\r\n f = open(filename, mode, encoding='utf8')\r\n f.write(data + '\\n')\r\n f.close()", "def writeFile( str_, *args ):\n filePath = path.join( *args )\n with open( filePath, 'w' ) as fd:\n fd.write(str_)", "def write(self, filename, data):\n owner_rw = 0600\n fd = os.open(filename, os.O_WRONLY | os.O_CREAT, owner_rw)\n # In case file existed already with wrong permissions, fix them.\n os.chmod(filename, owner_rw)\n os.write(fd, data)\n os.close(fd)", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def write_id_to_file(file, id):\n output = open(file, 'w')\n output.write(id)\n output.close()", "def write_content_to_file(filename, content, options=\"w\"):\n with open(filename, options) as f:\n f.write(content)", "def write(self, content):\n ...", "def write_data():", "def WriteFile(fname, data):\n #self._out.Info(\"Write file '%s' size %d (%#0x)\" %\n #(fname, len(data), len(data)))\n with open(Filename(fname), 'wb') as fd:\n fd.write(data)", "def write(self, filename, data, hdr):\n pass", "def w(self, value):\n self.oFile.write(value)", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def to_file(self, f: str) -> None:\n with open(f, \"w\") as open_file:\n open_file.write(\"\\n\".join(self.itos) + \"\\n\")", "def to_file(self, filename=None):\n name = None\n if filename is not None:\n name = filename\n elif self.name:\n name = self.name\n\n if name:\n #f = open(self.name, 'w')\n f = codecs.open(name, 'w', encoding='utf-8')\n self.seek(0)\n f.write(self.read())\n f.close()\n else:\n print \"No log_name for this log\"", "def write(self, arg, **kwargs):\r\n if hasattr(arg, 'seek'):\r\n self._tofile(arg, **kwargs)\r\n else:\r\n with open(arg, 'wb') as fid:\r\n self._tofile(fid, **kwargs)", "def write_file(content, file_path, mode='w', encoding='utf-8'):\n with codecs.open(file_path, mode, encoding=encoding) as fid:\n fid.write(content)", "def writeFile(fileName, text):\n with open(fileName, 'w', encoding='utf-8') as f:\n f.write(text)", "def _toFile(self):\n pass", "def write_file(data, file_path):\n try:\n with open(file_path, \"w\") as file_obj:\n file_obj.write(data)\n\n except OSError:\n writer(f\"\\nwarning: Unable to write backup file {file_path}\\n\", FORMAT[\"WARNING\"])", "def string_to_file(path_to_file, string_to_write):\n\t\twith open(path_to_file, 'w+') as f:\n\t\t\tf.write(string_to_write)", "def write(self, str: str, /) -> None:", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def write_data_to_file(data, filename):\n with open(filename, 'wb') as outfile:\n outfile.write(data)", "def write_file(self, filename, **kwargs):\n with zopen(filename, \"wt\") as f:\n f.write(self.get_string(**kwargs))", "def write (self, file):\n\t\tfile.write (self.pack ())", "def write_to_file(self, papers, filename):\n\t\tpass", "def write_to_file(self, content):\n try:\n with open(self.full_path_to_file, \"wb\") as fp:\n fp.write(content)\n except PermissionError:\n logging.error(\n \"Conversion cannot be performed. 
Permission denied for this directory\"\n )\n sys.exit()\n self.logger.info(\"News has been successfully converted\")", "def to_file(self, file_path, smirnoff_data):\n pass", "def writefile(name, instream, start=None, end=None, append=False):", "def write(self):", "def write(self):", "def export_to_file(self, fp, *args, **kwargs):\n with open(fp, 'w') as fh:\n self._to_str(fh)", "def write_data_to_file(data1, data2, data3, data4, data5):\n with data_file as open('data_file.txt','w'):\n data_file.write(data1 +'\\n')\n data_file.write(data2 +'\\n')\n data_file.write(data3 +'\\n')\n data_file.write(data4 +'\\n')\n data_file.write(data5 +'\\n')" ]
[ "0.8413077", "0.80287683", "0.80287683", "0.80265", "0.79803306", "0.7701467", "0.7610143", "0.75384223", "0.7487648", "0.7396836", "0.73585635", "0.730112", "0.72875077", "0.7278125", "0.7264448", "0.72381777", "0.7235967", "0.7231617", "0.7231545", "0.7180415", "0.7160275", "0.7153931", "0.71467125", "0.71418595", "0.71376044", "0.71311224", "0.7130906", "0.71200585", "0.7105089", "0.70994014", "0.7097008", "0.705593", "0.7047945", "0.7032436", "0.70191395", "0.7017645", "0.70153356", "0.6965302", "0.69610775", "0.69584304", "0.6949199", "0.6938881", "0.6919934", "0.6901406", "0.6891893", "0.6887819", "0.6879704", "0.68764335", "0.68735033", "0.6870311", "0.6855213", "0.6851906", "0.6845397", "0.68453455", "0.6836495", "0.68337655", "0.6831884", "0.6831884", "0.6811996", "0.6807764", "0.6802243", "0.6800349", "0.6796242", "0.6791982", "0.6790799", "0.67851126", "0.6783622", "0.6783365", "0.67756104", "0.67746574", "0.67716825", "0.67707115", "0.67656434", "0.67631674", "0.6756621", "0.6751324", "0.67440027", "0.67435336", "0.6738719", "0.6737444", "0.67181176", "0.67164314", "0.67157286", "0.6714506", "0.67101735", "0.66973406", "0.66873944", "0.6685674", "0.66844296", "0.6680974", "0.66764003", "0.66716635", "0.6669091", "0.6668915", "0.6659028", "0.66544616", "0.66519326", "0.6648103", "0.6648103", "0.6645787", "0.66429293" ]
0.0
-1
Function to create the resulting list
def result_info(file="input4.txt"):
    dict_info = create_dict(read_file())
    result = []
    dict_of_hours = dict()
    for ip, info in dict_info.items():  # I go through the dictionary with information
        for i in info['hours']:  # I consider for each hour the number of visits
            if i not in dict_of_hours:
                dict_of_hours[i] = 0
            dict_of_hours[i] += 1
        most_frequent_day = 0  # counter for the most frequent for for each ip
        most_frequent_word = info['weekdays'][0]
        for i in info['weekdays']:
            if most_frequent_day < info['weekdays'].count(i):
                most_frequent_day = info['weekdays'].count(i)
                most_frequent_word = i
        result.append([ip, info['counter'], most_frequent_word])
    max_number = 0
    most_frequent_hour = 0
    for hour, number in dict_of_hours.items():  # I go through the dict with the hours to find the most popular
        if number > max_number:
            max_number = number
            most_frequent_hour = hour
    result.append(['Самый популярный час на сайте:', most_frequent_hour])
    write_to_file(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_result_list(self,res):\n res_list = []\n for r in res:\n res_list.append(r)\n\n return res_list", "def X(self)->list:", "def make_list(unused_s, unused_l, toks):\n result = []\n for item in toks:\n result.append(item.asList())\n return result", "def list():", "def list():", "def create_list(self, data):\n\n temp = []\n for item in data:\n if len(item) > 2:\n i = 0\n while i < len(item):\n temp.append(item[i:min(i+2, len(item))])\n i += 2\n else:\n temp.append(item)\n data = temp\n\n temp = []\n for item in data:\n if item[-1] == \"{\":\n temp.append(\"[\" + item[0][:-1] + \",\")\n elif item[-1] == \"}\":\n temp.append(\"],\")\n else:\n temp.append(\"[\" + \" \".join(item).replace(\":\", \",\") + \"],\")\n return ast.literal_eval(\"\".join(temp))", "def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]", "def mk_lst_atnum(self):\n\t\telem_rnge=[]\n\t\tfor i in self.atom_num_lst:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements\n\t\tprint elements", "def generate(self):\n return []", "def getList(self):", "def getList(self):", "def _build_iterable(self):", "def lst() :\n return s.lst()", "def create_list(starting, ending):\n pass # remove this line when starting your function", "def list(self):", "def get_list_of_str2(self):\n pass", "def af_list(self) -> List:\n ...", "def construct_sequence_list(self):\n return list(self.iter_sequence())", "def make_list(sv, piece):\r\n li=[tree_build(sv,x) for x in piece.split(Comma)] # process each element RECURSIVE\r\n res=(Comma, li, None) # triplet for a list: (\",\", [list of elements], None)\r\n return res", "def tolist(self, flat=0):\n pass", "def make_list(self, a, b, LIMIT):\n\t\tprint \"I just made a list\"\n\t\tlist_a_b = []\n\t\tfor i in range(a,b + 1):\n\t\t\tif (i % 2 != 0 and i < LIMIT):\n\t\t\t\tlist_a_b.append(i)\n\t\treturn list_a_b", "def mk_lst_trans_met(self):\n\t\telem_rnge_I = [[21,30],[39,44],[46,48],[74,76],[78,80]]\n\t\telem_rnge=[]\n\t\tfor i in elem_rnge_I:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements", "def to_list_flat(self):\n return self.rep.entries()", "def _list(self, exprs):\n require(exprs, len(exprs)!=0)\n result = Pair(exprs[-1], [])\n for i in reversed(exprs[:-1]):\n result = Pair(i, result)\n return result", "def get_obstList(self,X,Y,Z):\n return []", "def get_obstList(self,X,Y,Z):\n return []", "def create_list(oldList):\n\n #If list is empty...\n #return empty list\n if(oldList == []):\n return []\n\n #Index is assumed to be last value in list\n index = len(oldList[0])-1\n\n #Create new list\n testList = []\n for value in oldList:\n testList.append(value[index])\n\n return testList", "def create_list(cls, *args):\n return _create_list(cls, *args)", "def create_list(cls, *args):\n return _create_list(cls, *args)", "def generate_input_list(groups):\n lst = []\n name_map = {0: target0_path, 1: target1_path, 2:target2_path, 3:target3_path, 4:target4_path}\n for i in groups:\n lst += name_map[i]\n return lst", "def resulttolist(result, feedback = 0):\n\n newlist = []\n\n if feedback == 2:\n for i in result:\n j = \" \".join(i)\n k = 
list(j.split(\" \"))\n newlist.append(k)\n elif feedback == 3:\n for i in result:\n j = \" \".join(i)\n k = list(j.split(\" \"))\n newlist.append(k)\n else:\n for i in result:\n j = \"\".join(i)\n newlist.append(j)\n\n return newlist", "def getNewCodeList(self):\n tmp = []\n for child in self.children:\n tmp.extend(child.getNewCodeList())\n return tmp", "def lego_sets():\n # you must replace this line and return your own list\n return []", "def list(self, *args):\n return []", "def CreateList(self, bottom_range, top_range):\n print(f\"Creating a list from {bottom_range} to {top_range}\")\n cluster_list = [item for item in range(bottom_range, top_range+1)]\n print(f\"cluster_list: {cluster_list}\")\n return cluster_list", "def create_list(vals):\n my_list = LinkedList()\n for i in vals:\n my_list.insert(i)\n return my_list", "def create_list(self):\n\n\t\trandom_list = random.sample(range(0, 500), 10)\n\n\t\treturn random_list", "def lists(self):\r\n return Lists(self)", "def create_list(row):\n name = row['Name']\n cd = row['Condition description']\n br0 = row['birad[0]']\n br1 = row['birad[1]']\n br2 = row['birad[2]']\n br3 = row['birad[3]']\n br4 = row['birad[4]']\n br5 = row['birad[5]']\n br6 = row['birad[6]']\n rm = row['Relevant modalities']\n rf = row['Relevant findings']\n uf = row['Unique findings']\n ai = row['Additional info']\n params = row['Parameters']\n try:\n t = row['Typical']\n except:\n t = ''\n try:\n p = row['Possible']\n except:\n p = ''\n try:\n a = row['Atypical']\n except:\n a = ''\n try:\n gen = row['None']\n except:\n gen = ''\n try:\n u = row['Unrelated']\n except:\n u = ''\n try:\n pat = row['Pathogenomonic']\n except:\n pat = ''\n try:\n n = row['Negative']\n except:\n n = ''\n try:\n i = row['Ignore']\n except:\n i = ''\n try:\n notes = row['Notes']\n except:\n notes = ''\n ac = row['Associated conditions']\n dd = row['Differential diagnosis']\n return [name, cd, br0, br1, br2, br3, br4, br5, br6, rm, rf, uf, ai, params, t, p, a, gen, u, pat, n, i, ac, dd, notes]", "def to_list(self):\n return self.main_list[:self.num_elements]", "def simulation_to_lines(data: List(Float))->List(Tuple(Int, Float)):\n result = []\n counter = 0\n for payoff in data:\n result = result + [(counter, payoff)]\n counter+=1\n return result\n\n #print(str(result))", "def create_date_list(start_date = start_date, end_date = end_date):", "def make_lists(sv):\r\n \r\n mark_delayed(sv) # identify delayed objects\r\n make_pin_list(sv) # detect and initialize inputs (to false) \r\n make_old_list(sv) # create a list of used old/old \r", "def to_list(self):\n t = ([],) * self.size\n for x in range(self.size):\n t[x].extend(self.get_links(x))\n\n return t1", "def make_list(x, m, modelcut, errorcut, CorD):\n if x==[]:\n return []\n\n result_names_list = []\n count = 0\n for i in range(len(x.NAME)):\n bn = x.NAME[i].upper()\n if bn in m.indx:\n i_x = x.indx[bn]\n i_m = m.indx[bn]\n if CorD == 'C':\n if ((abs(x.F1001W[i_x]-m.F1001W[i_m]) < modelcut) and (x.FWSTD1[i_x] < errorcut)):\n result_names_list.append(x.NAME[i])\n elif CorD == 'D':\n if ((abs(x.DY[i_x]-m.DY[i_m]) < modelcut) and (x.STDDY[i_x] < errorcut)):\n result_names_list.append(x.NAME[i])\n else:\n print \"Not in Response:\", bn\n count += 1\n \n if count > 0:\n print \"Warning: \", count, \"BPMs removed from data for not beeing in the model\"\n \n return result_names_list", "def as_list(gen):\n return list(gen())", "def create_list(self, args, l_type):\n\n scraper_types = [\n \"subreddit\",\n \"redditor\",\n \"comments\"\n ]\n\n index 
= scraper_types.index(l_type)\n item_list = [item[0] for item in self._list_switch(args, index)]\n\n return item_list", "def list(self) -> list:\n return list(self)", "def build_list(self, l):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n repr_elems = self.concatenate(l, comma)\n return self.build_container(\n repr_elems, self.left_square_bracket, self.right_square_bracket)", "def make_list( elements ):\n if isinstance(elements, (list, tuple)):\n return elements\n else:\n return [elements]", "def get_list(self):\n a = []\n l = self\n while l.is_block():\n a.append(l.field(0))\n l = l.field(1)\n return a", "def Obtener_Lista(self):\n\t\treturn [self,self.Nombre+\" \"+self.Apellido,self.ID,self.Fecha, \n\t\tself.Edad,self.Test,self.Posicion,self.Prioridad,self.Progreso,self.Informe]", "def generate_all_symptoms(symptoms):\n result = []\n generate_helper(result, symptoms, 0)\n return result #result is a list of lists", "def make_list(products):\n heading_products = []\n genres = set([p.genre for p in products])\n for genre in genres:\n this_heading_product = HeadingProduct(genre, products)\n if len(this_heading_product.products):\n heading_products.append(this_heading_product)\n\n return heading_products", "def to_list(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n export_list = self.output_div('list')\n else:\n export_list = self.output('list')\n return export_list", "def add_res_list(res_list):", "def _list(self):\n raise NotImplementedError", "def make_list(num, val):\n return List([val for i in range(num)])", "def gen_date_list(begin_date, end_date):\n begin_tm = time.strptime(begin_date, \"%Y%m%d\")\n end_tm = time.strptime(end_date, \"%Y%m%d\")\n begin_tv = calendar.timegm(begin_tm)\n end_tv = calendar.timegm(end_tm)\n date_list = []\n for tv in xrange(begin_tv, end_tv+86400, 86400):\n date_list.append(time.strftime(\"%Y%m%d\", time.gmtime(tv)))\n return date_list", "def gen_list(self, x_list, z, s, nsamp):\n x_list = self.transform_xin_list(x_list)\n pred_list = self.sample_gp_pred(nsamp, x_list)\n pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]\n return pred_list", "def __init__(self,resultList):\n self.writeList = list()\n self.readList = list()\n self.xList = list()\n for row in resultList:\n self.xList.append(row[0])\n self.writeList.append(row[1])\n self.readList.append(row[2])", "def list() -> List:\n pass", "def make_variations(self, input, start, end, elements):\n out = []\n for e in elements:\n out.append(e)\n return out", "def getList(self):\n\treturn self.sorted_list.getList()", "def __create_list(self, tokens : List[Union[str,int]]) -> List[List[Union[str,int]]]:\n if tokens:\n return [self.__add_instruction(cp(tokens[:1+syntaxParametersDict.get(tokens[0])]))] + self.__create_list(cp(tokens[1+syntaxParametersDict.get(tokens[0]):]))\n return []", "def ex_list(data):\n return tuple(data)", "def get_list(self):\r\n return self.numbers", "def result2list(foo):\n if isinstance(foo, ParseResults):\n return [result2list(bar) for bar in foo]\n else:\n return foo", "def getaslist(self):\n l = []\n l.append(self.title.strip().encode('utf-8'))\n l.append(self.desc.strip().encode('utf-8'));\n l.append(self.course_number.strip().encode('utf-8'))\n l.append(self.duration.strip().encode('utf-8'))\n l.append(self.difficulty.strip().encode('utf-8'))\n l.append(self.instructors.strip().encode('utf-8'))\n l.append(self.url.strip().encode('utf-8'))\n return l", "def 
get_sub_values(self):\n return list()", "def makelist(count, lista):\n if count <= 8:\n return makelist(count+1, lista+[(int(input()))])\n print(*list(filter(lambda x: x%2 == 0, lista)))", "def list(self):\n return [self[i,j] for i in range(self._d) for j in range(self._d)]", "def to_list(self) -> list:\n result = []\n for asset in self.asset_collection:\n insort_left(result, asset.to_list())\n return result", "def __to_list__(self):\r\n out = []\r\n node = self.head\r\n while node:\r\n out.append(node.value)\r\n node = node.next\r\n return out", "def _to_cc_list(collection):\n return \"{\" + \", \".join(collection) + \"}\"", "def create_list() -> List[Optional[float]]:\n return [None] * num_stations", "def makelist(input):\n if isinstance(input, list) or isinstance(input, np.array):\n output = input\n else:\n output = [input]\n return output", "def createDistanceList(gmaps, cityList, convert = 1):\n #First create a list\n distances = []\n print \"Cities Calculated:\"\n for i in range(len(cityList)):\n distances.append([])\n for j in range(len(cityList)):\n d = getDistance(gmaps, cityList[i], cityList[j])\n distances[i].append(d/(convert * 1.0))\n \n print cities[i]\n \n return distances", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n output = input[0]\n if not isinstance(output, list):\n output = [output]\n else:\n output = list(input)\n return output", "def _make_list_islot_otuples_from_nodelist(self):\n raise NotImplementedError", "def get_lists(self):\n return [{\"id\": lst[\"list_id\"], \"name\": lst[\"name\"]}\n for lst in List.objects(user_id=self.user_id, active=True)]", "def fn(n):\n if n == len(graph)-1: return [[n]]\n ans = []\n for nn in graph[n]: \n ans.extend([[n] + x for x in fn(nn)])\n return ans", "def _distribute(p,t):\r\n\t\t\r\n\t\t# begin list with power\r\n\t\tl = [[p]]\r\n\t\tfor i in range(t - 1):\r\n\t\t\t\r\n\t\t\t# expand the first members\r\n\t\t\tfor n,j in enumerate(l):\r\n\t\t\t\tf = Li._fracture(j[0])\r\n\t\t\t\t\r\n\t\t\t\t# recombine with tails\r\n\t\t\t\tl[n] = [k + j[1:] for k in f]\r\n\t\t\t\r\n\t\t\t# unpack lists\r\n\t\t\tl = [k for j in l for k in j]\r\n\t\t\t\r\n\t\t# make tuples\r\n\t\tl = [tuple(i) for i in l]\r\n\t\t\t\t\r\n\t\treturn l", "def make_list(l: int = 10) -> list[int]:\n\n arr = [ ri(0, 9) for _ in range(l)]\n arr.sort()\n return arr", "def _getListing(self):\n\n # lets assure consistent litsting order\n items = self._items.items()\n items.sort()\n return [ \"%s%s%s: %s\" % (_def_sep, str(x[1]), _def_sep, x[1].__doc__)\n for x in items ]", "def get_items(self):\r\n combined_list = []\r\n for prefix, item_list in self.class_map.values():\r\n combined_list.extend(zip(self._get_id_range(prefix, len(item_list)),\r\n item_list))\r\n return combined_list", "def genSubset2(L):\n import itertools\n result = []\n for i in range(len(L) + 1):\n result += list(itertools.combinations(L, i))\n return result", "def build_extracted_list(input_list, subinterval):\n out = []\n wait = subinterval\n for i in input_list:\n if wait == subinterval:\n out.append(i)\n wait = 0\n else:\n wait += 1\n return out", "def dataPrep(mydata: list) -> list:\n mylist = [int(elm) for elm in mydata]\n\n volt = int(max(mylist)) + 3\n start = 0\n\n mylist.extend([volt, start])\n mylist.sort()\n\n return mylist", "def generate_a_values() -> List[str]:\n return [\"A_1\", \"A_2\", \"A_3\"]", "def test_create_results_data_list(self):\n user_created = self.create_user()\n list_return = 
self.new_calculation.create_results_data_list(user_created)\n\n list_data = [['Semaine', 'Poids'], [0.0, 100.0], [1.0, 95.0]]\n\n self.assertEqual(list, type(list_return))\n self.assertEqual(str, type(list_return[0][0]))\n self.assertEqual(str, type(list_return[0][1]))\n self.assertEqual(list_data, list_return)\n for elt in list_return:\n self.assertEqual(list, type(elt))\n self.assertEqual(2, len(elt))", "def _make_node_list(child_node, list_count):\n parent = GroupNode(child_node.node.parentnode) \n parent.add_child(child_node)\n if list_count == 0:\n return parent\n else:\n list_count -= 1\n return _make_node_list(parent, list_count)", "def tolist (self) :\r\n if self.complex :\r\n result = []\r\n for x in xrange(0,len(self)) :\r\n result.append(self[x])\r\n return result\r\n else :\r\n return self.impl.tolist()", "def expand(self) -> List[Classifier]:\n list2d = [[cl] * cl.num for cl in self]\n return list(chain.from_iterable(list2d))", "def newList(self):\n lst = []\n count = 0\n while count < 52:\n lst.append(randint(1, 1500))\n count += 1\n return lst", "def parameter_space():\n return [list(range(7, 17)),\n list(range(17, 27)),\n list(range(27, 37)),\n list(permutations(range(1, 5), 4))]", "def generate_objects(input_data):\n object_list = []\n for match_list in input_data:\n if len(match_list) == 5:\n object_list.append(Match(match_list[0], match_list[1], match_list[2], '', match_list[3], match_list[4]))\n else:\n object_list.append(\n Match(match_list[0], match_list[1], match_list[2], match_list[3], match_list[4], match_list[5]))\n return object_list", "def create(block):\n block_output = []\n N = len(block)\n\n for n_x in range(N):\n for n_y in range(N):\n \n b = block[n_y][n_x]\n if b == '#':\n out = block\n else:\n out = [list('.' * N)] * N\n \n for o_idx in range(len(out)):\n idx = N * n_y + o_idx\n\n if len(block_output) < idx + 1:\n block_output.append([])\n \n block_output[idx].extend(out[o_idx])\n \n return block_output", "def making_dataset_list_y(data_list:list) -> list:\n list_size = len(data_list)\n data_list_y = []\n for i in range(list_size):\n data_list_y.append(data_list[i][[\"W\",\"D\",\"L\"]])\n return data_list_y", "def find_all_ORFs_oneframe(dna): \n i=0\n multiple_list=[]\n while i < len(dna):\n part = dna[i:i+3]\n if part == 'ATG': #if the first indecied are ATG then it runs the code that creates the string of DNA\n ORF=rest_of_ORF(dna[i:]) \n multiple_list.append(ORF)\n i+=len(ORF)\n else:\n i+=3\n # print multiple_list\n return multiple_list\n \n #runs fuinction to mmake list of function\n #need to save to list\n #need to continue to next ATG start \n #need to add that to list\n #need to output new list as commas between lists", "def _lst_of_tpls(step, parsing_function, filt=None):\n lst = []\n for key in step:\n if step[key][0]: # On/Off flag\n if len(step[key]) > 1:\n content_d = step[key][1]\n content_vals = list(values_iterator(content_d))\n for ll in modified_cartesian(*map(ensure_list, content_vals)):\n content = dict(zip(list(content_d), ll))\n if filt is not None and filt(content):\n continue\n lst.append(parsing_function(key, content))\n else:\n lst.append(parsing_function(key, {}))\n return lst" ]
[ "0.72328603", "0.6897152", "0.67518175", "0.6660444", "0.6660444", "0.6498758", "0.6368195", "0.63266546", "0.63058394", "0.62481105", "0.62481105", "0.6236731", "0.62217695", "0.6189998", "0.617926", "0.6137528", "0.6121866", "0.61136776", "0.6106905", "0.60869616", "0.6086415", "0.6038325", "0.599887", "0.598878", "0.59837586", "0.59837586", "0.5955558", "0.5932145", "0.5932145", "0.5896578", "0.5865517", "0.5864389", "0.58615863", "0.58501774", "0.58274746", "0.5824875", "0.58184856", "0.5816791", "0.57964957", "0.57890284", "0.5784736", "0.57843906", "0.5782213", "0.57733893", "0.57726836", "0.57697785", "0.57663155", "0.574728", "0.574481", "0.57396424", "0.57279664", "0.57259554", "0.5725696", "0.57167786", "0.5716698", "0.5709517", "0.57089967", "0.5699025", "0.56924504", "0.56923556", "0.56816024", "0.56803566", "0.5675266", "0.56669015", "0.56634074", "0.565522", "0.564996", "0.5649603", "0.56470376", "0.5645948", "0.56453", "0.564088", "0.56373566", "0.5636183", "0.5631234", "0.56265867", "0.5621243", "0.56113416", "0.5608186", "0.560416", "0.56039625", "0.55890006", "0.55753994", "0.5575128", "0.55746", "0.5572631", "0.55713606", "0.5567357", "0.55570406", "0.55504066", "0.5549486", "0.5547513", "0.55454886", "0.5542685", "0.5542225", "0.55421567", "0.55386126", "0.5538013", "0.553715", "0.55368245", "0.5527665" ]
0.0
-1
Decode serialized example into image and segmentation label.
def decode(value):
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/height': tf.FixedLenFeature((), tf.int64, default_value=0),
        'image/width': tf.FixedLenFeature((), tf.int64, default_value=0),
        'image/segmentation/class/encoded': tf.FixedLenFeature((), tf.string, default_value='')
    }

    data = tf.parse_single_example(value, keys_to_features)

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(self, serialized_example):\n parsed_tensors = tf.io.parse_single_example(\n serialized_example, self.KEYS_TO_FEATURES\n )\n for k in parsed_tensors:\n if isinstance(parsed_tensors[k], tf.SparseTensor):\n if parsed_tensors[k].dtype == tf.string:\n parsed_tensors[k] = tf.sparse.to_dense(\n parsed_tensors[k], default_value=\"\"\n )\n else:\n parsed_tensors[k] = tf.sparse.to_dense(\n parsed_tensors[k], default_value=0\n )\n\n image = self._decode_image(parsed_tensors)\n boxes = self._decode_boxes(parsed_tensors)\n decode_image_shape = tf.logical_or(\n tf.equal(parsed_tensors[\"image/height\"], -1),\n tf.equal(parsed_tensors[\"image/width\"], -1),\n )\n image_shape = tf.cast(tf.shape(image), dtype=tf.int64)\n\n parsed_tensors[\"image/height\"] = tf.where(\n decode_image_shape, image_shape[0], parsed_tensors[\"image/height\"]\n )\n parsed_tensors[\"image/width\"] = tf.where(\n decode_image_shape, image_shape[1], parsed_tensors[\"image/width\"]\n )\n\n decoded_tensors = {\n \"image\": image,\n \"height\": parsed_tensors[\"image/height\"],\n \"width\": parsed_tensors[\"image/width\"],\n \"groundtruth_classes\": parsed_tensors[\"image/object/class/label\"],\n \"groundtruth_boxes\": boxes,\n }\n return decoded_tensors", "def single_example_parser(serialized_example):\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(features['label'], tf.int32)\n \n image = train_preprocess_fn(image)\n label = tf.one_hot(label, NUM_CLASSES)\n \n return image, label", "def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/synset': tf.FixedLenFeature([], tf.string),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(tf.int64),\n 'image/format': tf.FixedLenFeature([], 
tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # VGG preprocessing borrowed from slim; includes data augmentation so train_with_distortion should be set to True.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n assert self.params['train_with_distortion'] == True\n is_training = True\n else:\n is_training = False\n image = vgg_preprocess_image(image, 224, 224, is_training=is_training)\n\n return image, label", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/fixation_pt': tf.FixedLenFeature([2], tf.float32)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # Convert from uint8 -> float32 and map onto range [0, 1].\n image = tf.cast(image, tf.float32) * (1. / 255)\n\n # Standardize image.\n image = tf.image.per_image_standardization(image)\n\n # Apply data augmentation.\n if (self.mode == tf.estimator.ModeKeys.TRAIN\n and self.params['train_with_distortion']):\n # Randomly flip the image, zero-pad with four pixels along\n # each edge, and take a random 32 x 32 crop.\n image = tf.image.random_flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)\n image = tf.image.crop_to_bounding_box(image,\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n 32, 32)\n\n return image, label", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. / 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label", "def _parser(serialized_example):\n\n features = tf.compat.v1.parse_single_example(\n serialized_example,\n features={\n 'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),\n 'label': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'category': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),\n })\n\n img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)\n img = tf.reshape(img, [96, 96, 1])\n img = tf.cast(img, tf.float32) # * (1. 
/ 255) # left unnormalized\n\n lab = tf.cast(features['label'], tf.int32)\n cat = tf.cast(features['category'], tf.int32)\n elv = tf.cast(features['elevation'], tf.int32)\n azi = tf.cast(features['azimuth'], tf.int32)\n lit = tf.cast(features['lighting'], tf.int32)\n\n return img, lab, cat, elv, azi, lit", "def parse_fn(self, example_serialized):\n feature_description = {\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64)\n }\n features = tf.io.parse_single_example(example_serialized, feature_description)\n image = tf.io.decode_raw(features['image_raw'], tf.uint8)\n image = tf.cast(image, dtype='float32') / 255.0\n label = tf.cast(features['label'], dtype=tf.int32)\n image = tf.reshape(image, [32, 32, 3])\n if self.is_training:\n image = tf.image.resize_with_crop_or_pad(image, 32 + 8, 32 + 8)\n image = tf.image.random_crop(image, [32, 32, 3])\n image = tf.image.random_flip_left_right(image)\n return image, label", "def decode(self, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def _prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def parse_sequence_example(serialized, image_id, image_feature, caption_feature):\r\n context, sequence = tf.parse_single_sequence_example(\r\n serialized,\r\n context_features={\r\n image_id : tf.FixedLenFeature([], dtype=tf.int64),\r\n image_feature: tf.FixedLenFeature([], dtype=tf.string)\r\n },\r\n sequence_features={\r\n caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\r\n })\r\n\r\n encoded_image_id = context[image_id]\r\n encoded_image = context[image_feature]\r\n caption = sequence[caption_feature]\r\n return encoded_image_id, encoded_image, caption", "def parse_sequence_example(serialized, image_feature, caption_feature):\n\tcontext, sequence = tf.parse_single_sequence_example(\n\t\t\tserialized,\n\t\t\tcontext_features={\n\t\t\t\t\timage_feature: tf.FixedLenFeature([], dtype=tf.string)\n\t\t\t},\n\t\t\tsequence_features={\n\t\t\t\t\tcaption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n\t\t\t})\n\n\tencoded_image = context[image_feature]\n\tcaption = sequence[caption_feature]\n\treturn encoded_image, caption", "def _decode_record(record):\r\n example = tf.io.parse_single_example(serialized=record, features=feature_description)\r\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\r\n # So cast all int64 to int32.\r\n for key in [k for k in example.keys() if k not in ['example_id', 'unique_ids']]:\r\n example[key] = tf.cast(example[key], dtype=tf.int32)\r\n if is_training:\r\n features = {\r\n 'input_ids': example['input_ids'],\r\n 'input_mask': example['input_mask'],\r\n 'segment_ids': example['segment_ids']\r\n }\r\n labels = {\r\n 'start_logits_or_probs': tf.one_hot(example['start_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'end_logits_or_probs': tf.one_hot(example['end_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'ans_type': tf.one_hot(example['answer_types'],\r\n depth=len(ANSWER_TYPE_ORDER), dtype=tf.float32)\r\n }\r\n return (features, labels)\r\n else:\r\n 
return example", "def _parse_function(self, example_proto):\n\n # Currently only supports jpeg and png.\n # Need to use this logic because the shape is not known for\n # tf.image.decode_image and we rely on this info to\n # extend label if necessary.\n def _decode_image(content, channels):\n return tf.cond(\n tf.image.is_jpeg(content),\n lambda: tf.image.decode_jpeg(content, channels),\n lambda: tf.image.decode_png(content, channels))\n\n features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/segmentation/class/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n image = _decode_image(parsed_features['image/encoded'], channels=3)\n\n label = None\n if self.split_name != common.TEST_SET:\n label = _decode_image(\n parsed_features['image/segmentation/class/encoded'], channels=1)\n\n image_name = parsed_features['image/filename']\n if image_name is None:\n image_name = tf.constant('')\n\n sample = {\n common.IMAGE: image,\n common.IMAGE_NAME: image_name,\n common.HEIGHT: parsed_features['image/height'],\n common.WIDTH: parsed_features['image/width'],\n }\n\n if label is not None:\n if label.get_shape().ndims == 2:\n label = tf.expand_dims(label, 2)\n elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:\n pass\n else:\n raise ValueError('Input label shape must be [height, width], or '\n '[height, width, 1].')\n\n label.set_shape([None, None, 1])\n\n sample[common.LABELS_CLASS] = label\n\n return sample", "def extract(self, source):\n\t\tp = Parser()\n\t\tf = open_pds(source)\n\t\tif self.log: self.log.debug(\"Parsing '%s'\" % (source))\n\t\tself.labels = p.parse(f)\n\t\tif self.log: self.log.debug(\"Found %d labels\" % (len(self.labels)))\n\t\tif self._check_image_is_supported():\n\t\t\tif self.log: self.log.debug(\"Image in '%s' is supported\" % (source))\n\t\t\tdim = self._get_image_dimensions()\n\t\t\tloc = self._get_image_location()\n\t\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\t\t\tmd5Checksum = self._get_image_checksum()\n\t\t\tif self.log: self.log.debug(\"Image dimensions should be %s\" % (str(dim)))\n\t\t\tif self.log: self.log.debug(\"Seeking to image data at %d\" % (loc))\n\t\t\tf.seek(loc)\n\t\t\tif imageSampleBits == 8:\n\t\t\t\treadSize = dim[0] * dim[1]\n\t\t\telif imageSampleBits == 16:\n\t\t\t\treadSize = dim[0] * dim[1] * 2\n\t\t\tprint readSize\n\t\t\tif self.log: self.log.debug(\"Seek successful, reading data (%s)\" % (readSize))\n\t\t\t# rawImageData = f.readline()\n\t\t\t# f.seek(-int(self.labels[\"RECORD_BYTES\"]), os.SEEK_CUR)\n\t\t\trawImageData = f.read(readSize)\n\t\t\tif md5Checksum:\n\t\t\t\trawImageChecksum = hashlib.md5(rawImageData).hexdigest()\n\t\t\t\tchecksumVerificationPassed = rawImageChecksum == md5Checksum and True or False\n\t\t\t\tif not checksumVerificationPassed:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification failed\")\n\t\t\t\t\tif self.raisesChecksumError:\n\t\t\t\t\t\terrorMessage = \"Verification failed! 
Expected '%s' but got '%s'.\" % (md5Checksum, rawImageChecksum)\n\t\t\t\t\t\traise ChecksumError, errorMessage\n\t\t\t\telse:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification passed\")\n\t\t\tif self.log: self.log.debug(\"Read successful (len: %d), creating Image object\" % (len(rawImageData)))\n\t\t\t# The frombuffer defaults may change in a future release;\n\t\t\t# for portability, change the call to read:\n\t\t\t# frombuffer(mode, size, data, 'raw', mode, 0, 1).\n\t\t\tif (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'):\n\t\t\t\t#img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1)\n\t\t\t\timg = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1)\n\t\t\t\timg = ImageMath.eval(\"convert(a/16.0, 'L')\", a=img)\n\t\t\telse:\n\t\t\t\timg = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)\n\t\t\tif self.log:\n\t\t\t\tself.log.debug(\"Image result: %s\" % (str(img)))\n\t\t\t\tself.log.debug(\"Image info: %s\" % (str(img.info)))\n\t\t\t\tself.log.debug(\"Image mode: %s\" % (str(img.mode)))\n\t\t\t\tself.log.debug(\"Image size: %s\" % (str(img.size)))\n\t\telse:\n\t\t\tif self.log: self.log.error(\"Image is not supported '%s'\" % (source))\n\t\t\timg = None\n\t\tf.close()\n\n\t\treturn img, self.labels", "def dataset_parser(self, value):\n keys_to_features = {\n 'image/encoded':\n tf.io.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.io.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.io.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.io.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.io.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.io.parse_single_example(value, keys_to_features)\n image_bytes = tf.reshape(parsed['image/encoded'], shape=[])\n\n tensors_dict = preprocess_image(\n image_bytes=image_bytes,\n is_training=self.is_training,\n augmentation=self.augmentation,\n use_bfloat16=self.use_bfloat16,\n saturate_uint8=self.saturate_uint8,\n scale_and_center=self.scale_and_center,\n use_default_augment=self.use_default_augment)\n\n # Subtract one so that labels are in [0, 1000).\n label = tf.cast(tf.reshape(parsed['image/class/label'], shape=()) - 1,\n dtype=tf.int32)\n tensors_dict['label'] = label\n\n return tensors_dict", "def _convert_raw_example(\n self,\n mode_dict: MutableMapping[str, Any],\n example: Mapping[str, Any]) -> ProcessedExample:\n img_path = example['image_path_or_name']\n base_name = os.path.basename(img_path)\n img_fobj = example.get('image_fobj', tf.io.gfile.GFile(img_path, 'rb'))\n img_bytes, img_shape = image_utils.image_to_jpeg(fobj=img_fobj,\n filename=base_name)\n\n img_format = 'JPEG'\n key = hashlib.sha256(img_bytes.read()).hexdigest()\n img_bytes.seek(0)\n\n bboxes = example['bbox_info']\n processed_bboxes = []\n\n img_height = img_shape[0]\n img_width = img_shape[1]\n\n img_id = example.get('image_id', self._get_id('image'))\n mode_dict['images'].append({\n 'id': img_id,\n 'width': img_width,\n 'height': img_height,\n })\n\n for bbox_info in bboxes:\n annotations_bbox = bbox_info['bbox']\n bbox = bbox_utils.BBox(bbox=annotations_bbox,\n fmt=self.builder_config.bbox_format,\n img_width=img_width,\n img_height=img_height)\n label = bbox_info['label']\n if 
isinstance(label, int):\n text = str(label)\n elif isinstance(label, six.string_types):\n text = label\n label = bbox_info.get('label_id', self._get_label_id(text))\n else:\n raise TypeError(\n 'The provided label was not a string or int. Got: {}'.format(\n type(label)))\n\n if label >= self.builder_config.num_labels:\n raise ValueError('Provided label {} for {} is greater than '\n 'the number of classes specified. num_classes: '\n '{}'.format(label,\n base_name,\n self.builder_config.num_labels))\n\n annotation_id = example.get('annotation_id', self._get_id('annotation'))\n bbox.convert(bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX)\n xmin, xmax, ymin, ymax = bbox.as_tuple()\n bbox = bbox.convert(bbox_utils.BBoxFormat.WIDTH_HEIGHT)\n mode_dict['annotations'].append({\n 'id': annotation_id,\n 'image_id': img_id,\n 'category_id': label,\n 'bbox': annotations_bbox,\n })\n\n processed_bboxes.append({\n 'bbox': tfds.features.BBox(ymin=ymin,\n xmin=xmin,\n ymax=ymax,\n xmax=xmax),\n 'class': {\n 'text': text,\n 'label': label,\n }\n })\n\n return img_id, {\n 'image': {\n 'height': img_width,\n 'width': img_shape[1],\n 'filename': img_path,\n 'source_id': img_id,\n 'encoded': img_bytes,\n 'format': img_format,\n 'key': {\n 'sha256': key,\n },\n 'object': processed_bboxes,\n }\n }", "def read_and_decode(filename, is_train=None):\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example, features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n }\n )\n # You can do more image distortion here for training data\n img = tf.decode_raw(features['img_raw'], tf.float32)\n img = tf.reshape(img, [32, 32, 3])\n # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5\n if is_train ==True:\n # 1. Randomly crop a [height, width] section of the image.\n img = tf.random_crop(img, [24, 24, 3])\n\n # 2. Randomly flip the image horizontally.\n img = tf.image.random_flip_left_right(img)\n\n # 3. Randomly change brightness.\n img = tf.image.random_brightness(img, max_delta=63)\n\n # 4. Randomly change contrast.\n img = tf.image.random_contrast(img, lower=0.2, upper=1.8)\n\n # 5. Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == False:\n # 1. Crop the central [height, width] of the image.\n img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)\n\n # 2. 
Subtract off the mean and divide by the variance of the pixels.\n img = tf.image.per_image_standardization(img)\n\n elif is_train == None:\n img = img\n\n label = tf.cast(features['label'], tf.int32)\n return img, label", "def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example", "def decode_bytes(\n key_bytes: ByteString, serialized_value_bytes: ByteString\n) -> dict[str, tf.Tensor]:\n del key_bytes # Unused.\n feature_description = {\n KEY_IMAGE_BYTES: tf.io.FixedLenFeature([], tf.string, default_value=''),\n KEY_CLASS: tf.io.FixedLenFeature([], tf.int64, default_value=-1),\n }\n example = tf.io.parse_single_example(\n serialized_value_bytes, feature_description\n )\n return collections.OrderedDict([\n (KEY_IMAGE_DECODED, tf.io.decode_jpeg(example[KEY_IMAGE_BYTES])),\n (KEY_CLASS, tf.reshape(example[KEY_CLASS], [1])),\n ])", "def prepare_example(image_path, annotations, label_map_dict):\n print(\"encoding %s\" % image_path)\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_png)\n image = pil.open(encoded_png_io)\n\n if image.format != 'PNG':\n raise ValueError('Image format error')\n\n key = hashlib.sha256(encoded_png).hexdigest()\n # obtain attributes\n width, height = image.size\n img_filename = image_path.split('/')[-1]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n occlud = []\n\n xmin.append(int(annotations[2]) / width)\n ymin.append(int(annotations[3]) / height)\n xmax.append(int(annotations[4]) / width)\n ymax.append(int(annotations[5]) / height)\n class_name = annotations[1]\n classes_text.append(class_name)\n classes.append(label_map_dict[class_name])\n classes_text = [class_text.encode('utf-8') for class_text in classes_text]\n trun, occ = annotations[6].split(',')\n truncated.append(int(trun))\n occlud.append(int(occ))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_png),\n 'image/format': dataset_util.bytes_feature('png'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.int64_list_feature(occlud),\n }))\n return example", "def _dinamic_decode(self):\n raise NotImplementedError", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'env': tf.FixedLenFeature([1, 4], tf.int64),\n # 'env_segment_number': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_cpu': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_mem': tf.FixedLenFeature([], tf.int64),\n # 'query_plan_ops': tf.VarLenFeature(tf.string),\n # 'query_table_size': tf.VarLenFeature(tf.float32),\n # 
'segment_cpu_usage': tf.VarLenFeature(tf.float32),\n 'label': tf.FixedLenFeature([], tf.float32)\n })\n env = tf.cast(features['env'], tf.float32)\n # image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # # Reshape from [depth * height * width] to [depth, height, width].\n # image = tf.cast(\n # tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n # tf.float32)\n label = tf.cast(features['label'], tf.float32)\n reshape_label = tf.reshape(features['label'], (1,1))\n return env, reshape_label", "def decode(self, encoded):", "def parse_example(self, serialized_example):\n # Because of RaggedTensor specs, feature_specs can be a 2-level nested dict,\n # so have to wrap `tf.io.parse_single_example` between\n # `flatten_nest_dict`/`pack_as_nest_dict`.\n # {\n # 'video/image': tf.io.FixedLenSequenceFeature(...),\n # 'video/object/bbox': {\n # 'ragged_flat_values': tf.io.FixedLenSequenceFeature(...),\n # 'ragged_row_lengths_0', tf.io.FixedLenSequenceFeature(...),\n # },\n # }\n example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=self.flat_feature_specs,\n )\n example = utils.pack_as_nest_dict(example, self._nested_feature_specs)\n\n example = { # pylint:disable=g-complex-comprehension\n k: _deserialize_single_field(example_data, tensor_info)\n for k, (example_data, tensor_info) in utils.zip_dict(\n example, self._flat_example_specs\n )\n }\n # Reconstruct all nesting\n example = utils.pack_as_nest_dict(example, self._example_specs)\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n return example", "def _decode_record(record):\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"stroke_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"lmask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"label_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n }\n\n\n example = tf.parse_single_example(record, name_to_features)\n\n #int64 to int32\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n input_ids = example['input_ids']\n input_mask = example['input_mask']\n segment_ids = example['segment_ids']\n stroke_ids = example['stroke_ids']\n label_ids = example['label_ids']\n lmask = example['lmask']\n py_labels = tf.py_func(_get_py_seq, [label_ids], [tf.int32])\n\n return input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels", "def decode(self, encoded_train, encoded_dev):\n decoded_train_A = self.decoder_A.predict(encoded_train)\n decoded_dev_A = self.decoder_A.predict(encoded_dev)\n self.save_reconstruction(decoded_train_A, decoded_dev_A, modality=True, no_modality=0)\n decoded_train_V = self.decoder_V.predict(encoded_train)\n decoded_dev_V = self.decoder_V.predict(encoded_dev)\n self.save_reconstruction(decoded_train_V, decoded_dev_V, modality=True, no_modality=1)", "def get_encoded_label_from_example(example):\n return example.features.feature['label/encoded'].bytes_list.value[0]", "def _parse_tfexample(example):\n\n ## parse\n features = tf.parse_single_example(example, KEYS2FEATURES)\n\n image = tf.image.decode_png(features['image/encoded'])\n label = tf.image.decode_png(features['label/encoded'])\n # label is decoded as a 3-D png image\n label = label[..., 0]\n im_path = 
features['image/path']\n la_path = features['label/path']\n\n return image, label, im_path, la_path", "def decode(p):\n #assert p.endswith('.' + EXTENSION)\n p2 = os.path.basename(p).replace('baseline.png', '.png')\n p2p = os.path.join('/mnt/Volume0/test/clic2020-devkit/result/', p2) #add by me\n pp = os.path.join('/mnt/Volume0/test/clic2020-devkit/targets',p2)\n p2 = os.path.join('/mnt/Volume0/test/clic2020-devkit/inputs/', p2) #add by me\n p1 = pframe_dataset_shared.get_previous_frame_path(p2)\n #p1 = os.path.join('/mnt/Volume0/test/clic2020-devkit/test_data/inputs/', p1)\n #assert os.path.isfile(p1), (p2, p1, p, len(glob.glob('*.png')))\n b = Image.open(p).convert('L')\n f2_reconstructed = decoder(np.array(Image.open(p1)), b)\n Image.fromarray(f2_reconstructed).save(p2p)\n return f2_reconstructed, np.array(Image.open(pp))", "def decoder(self, value) -> Tuple:\n data = self.decode(value)\n # TODO: remove hardcoded value.\n image_id = 1.0\n image = data[\"image\"]\n boxes = data[\"groundtruth_boxes\"]\n classes = data[\"groundtruth_classes\"]\n return (image_id, image, boxes, classes)", "def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)", "def _read_and_decode(example_proto,data_shape,dtypes):\n features = {}\n for name in data_shape:\n features[name] = tf.FixedLenFeature([], tf.string)\n parsed_features = tf.parse_single_example(example_proto, features)\n count = 0\n res = {}\n for name in data_shape:\n res[name] = parsed_features[name]\n if dtypes[count]!=str:\n res[name]=tf.decode_raw(res[name],dtypes[count])\n if dtypes[count]==tf.float32 or dtypes[count]==tf.float64:\n res[name]=tf.convert_to_tensor(res[name],dtype=dtypes[count])\n if data_shape[name]:\n res[name]=tf.reshape(res[name],shape=data_shape[name])\n count += 1\n return res", "def serialize_example(image_inp_string,image_out_string):\n image_inp_shape = tf.image.decode_jpeg(image_inp_string).shape\n image_out_shape = tf.image.decode_jpeg(image_out_string).shape\n feature = {\n\n 'image_input': _bytes_feature(image_inp_string),\n 'image_output':_bytes_feature(image_out_string),\n }\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n\n #--------------------------------------------------------------------------------------\n\n ###process image", "def _parse_example(self, example, scale_to_0_1: bool = False):\n\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string),\n }\n parsed_example = tf.parse_single_example(example, features)\n\n image = tf.decode_raw(parsed_example['image'], self.serialized_image_raw_dtype)\n image = tf.reshape(image, (self.image_width, self.image_width, self.image_channels))\n image = tf.cast(image, tf.float32)\n if scale_to_0_1:\n image /= 255.\n\n mask = tf.decode_raw(parsed_example['mask'], self.serialized_mask_raw_dtype)\n mask = tf.reshape(mask, (self.image_width, self.image_width, self.mask_channels))\n mask = tf.cast(mask, tf.float32) / 255.\n return image, mask", "def _decode_record(self, record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n # tf.logging.info(t)\n # t = tf.sparse.to_dense(t)\n # tf.logging.info(t.get_shape().as_list())\n # assert 
t.get_shape().as_list()[0] is not None\n example[name] = t\n \n del example[\"source_sos_ids\"]\n del example[\"source_sos_mask\"]\n\n return example", "def parse_single_example(serialized_example):\n feature_description = {\n \"immrf/data\": tf.io.FixedLenFeature([], tf.string),\n \"immrf/shape\": tf.io.VarLenFeature(tf.int64),\n \"immrf/path\": tf.io.FixedLenFeature([], tf.string),\n \"tmap/data\": tf.io.FixedLenFeature([], tf.string),\n \"tmap/shape\": tf.io.VarLenFeature(tf.int64),\n \"tmap/path\": tf.io.FixedLenFeature([], tf.string),\n \"mask/data\": tf.io.FixedLenFeature([], tf.string),\n \"mask/shape\": tf.io.VarLenFeature(tf.int64),\n \"mask/path\": tf.io.FixedLenFeature([], tf.string),\n }\n slice = tf.io.parse_single_example(serialized_example, feature_description)\n for key in [\"immrf\", \"tmap\", \"mask\"]:\n slice[key + \"/data\"] = tf.io.decode_raw(slice[key + \"/data\"], out_type=tf.float32)\n slice[key + \"/data\"] = utils.reshape_back(slice, key)\n return slice", "def imagenet_parser(value, image_size, is_training):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.parse_single_example(value, keys_to_features)\n\n image_buffer = tf.reshape(parsed['image/encoded'], shape=[])\n\n xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0)\n xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0)\n # Note that ordering is (y, x)\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(bbox, [0, 2, 1])\n\n image = image_preprocessing(\n image_buffer=image_buffer,\n bbox=bbox,\n image_size=image_size,\n is_training=is_training\n )\n\n # Labels are in [1, 1000] range\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32)\n\n return image, label", "def _decode_record(record, name_to_features):\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "def decode(self, session, dev_example):\n unzipped_dev_example = list(zip(*dev_example))\n input_feed = self.create_feed_dict(unzipped_dev_example[0:4], dropout = 1)\n output_feed = [self.h_s, self.h_e, self.relevence]\n outputs = session.run(output_feed, input_feed)\n h_s = outputs[0]\n h_e = outputs[1]\n rel = outputs[2]\n return h_s, h_e, rel", "def dict_to_tf_example(data, label_map_dict):\n\n encoded_jpg_io = io.BytesIO()\n image = data['image']\n image.save(encoded_jpg_io, \"JPEG\", quality=80)\n encoded_jpg = encoded_jpg_io.getvalue()\n key = 
hashlib.sha256(encoded_jpg).hexdigest()\n\n width, height = image.size\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n rotation = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n masks = []\n difficult_obj = []\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n difficult_obj.append(int(difficult))\n\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n rotation.append(float(obj['rotation']))\n masks.append(obj['mask'])\n classes_text.append(obj['name'].encode('utf8'))\n classes.append(label_map_dict[obj['name']])\n truncated.append(int(obj['truncated']))\n poses.append(obj['pose'].encode('utf8'))\n\n mask = np.stack(masks)\n encoded_mask = pn_encode(mask.flatten()).tolist()\n print('mask encode:', mask.shape, '->', len(encoded_mask)) ###\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/rotation': dataset_util.float_list_feature(rotation),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n 'image/segmentation/object': dataset_util.int64_list_feature(encoded_mask),\n 'image/segmentation/object/class': dataset_util.int64_list_feature(classes),\n }))\n return example", "def parse_record(raw_record, is_training):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image/class/text':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]),\n _NUM_CHANNELS)\n\n # Note that tf.image.convert_image_dtype scales the image data to [0, 1).\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]),\n dtype=tf.int32)\n\n return {\"image\": image}, label", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n 
return labels", "def decode_labels(mask, num_images=1, num_classes=21, task='seg'):\n n, h, w, c = mask.shape\n assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n if task == 'normal':\n outputs[i] = mask[i]\n elif task == 'seg':\n img = Image.new('RGB', (w, h), (255, 255, 255)) # unlabeled part is white (255, 255, 255)\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :, 0]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs[i] = np.array(img)\n else:\n raise Exception('task name is not recognized!')\n\n return outputs", "def predict_image(serialised_image):\n network_input = process_image(serialised_image)\n\n with open(\"trained_network.pickle\", \"r\") as fin:\n trained_network = cPickle.load(fin)\n\n trained_network.forward(network_input)\n output_layer = trained_network.output_layer\n\n return output_layer.index(max(output_layer))", "def decodeFrame(self, image):\n return image", "def decode_loaded(x):\n return cv2.imdecode(x, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)", "def test_decode_labels_from_strings(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n decoded = classes[np.random.randint(0, 5, 100)]\n y = np.array([v.upper() for v in decoded])\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n encoder = {c.upper(): c for c in classes}\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._decode_labels(y), decoded)\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n def inverse_transform(self, y):\n return np.array([yi.lower() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=L2UTransformer())\n npt.assert_array_equal(oz._decode_labels(y), decoded)", "def read_and_decode(filename_queue, shape=None):\n label_bytes = 1\n width = shape[0]\n height = shape[1]\n depth = shape[2]\n record_byte_length = label_bytes + width * height\n\n with tf.name_scope(\"read_and_decode\"):\n # Length of record bytes in the dataset\n # Defined in utils module\n reader = tf.TFRecordReader()\n key, record_string = reader.read(filename_queue)\n\n feature_map = {\n \"image/encoded\": tf.FixedLenFeature(\n shape=[], dtype=tf.string)\n }\n parsed = tf.parse_single_example(record_string, feature_map)\n record_bytes = tf.decode_raw(parsed[\"image/encoded\"], tf.int8)\n\n # first byte is the label\n label = tf.cast(tf.strided_slice(record_bytes,\n begin=[0],\n end=[label_bytes]), tf.int32)\n # label = tf.reshape(label, [1])\n # print(label)\n\n # remaining bytes is the example\n example = tf.reshape(tf.strided_slice(record_bytes,\n begin=[label_bytes],\n end=[record_byte_length]), [width, height, depth])\n example = tf.cast(example, tf.float32)\n example.set_shape([width, height, depth])\n label.set_shape(1)\n label = tf.squeeze(label)\n # print(label)\n # label = tf.reshape(label, [0])\n\n return example, label", "def parse_record_reid(raw_record):\n keys_to_features = {\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'label': tf.FixedLenFeature([], tf.int64)\n }\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n # image = 
tf.image.decode_image(\n # tf.reshape(parsed['image_raw'], shape=[]), _DEPTH)\n\n image = tf.decode_raw(parsed['image_raw'], tf.uint8)\n # image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image = tf.reshape(image, [_HEIGHT, _WIDTH, 3])\n # image = tf.cast(image, tf.float32) * (1. / 255.0)\n image = tf.cast(image,tf.float32)\n\n label = tf.cast(parsed['label'],tf.int32)\n\n label = tf.one_hot(label, labels_nums, 1, 0)\n # labels={\"seg\":None,\"reid\":label}\n return image, label", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example[\"src_ids\"].values, example[\"tgt_ids\"].values, example[\"label\"][0]", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def dict_to_tf_example(data,\n label_map_dict,\n ignore_difficult_instances=False):\n full_path = os.path.join(FLAGS.data_dir, 'IMAGENES', data['filename'])[0:-3] + 'jpg'\n image_ = cv2.imread(full_path)\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n image_id = get_image_id(data['filename'])\n width = int(image_.shape[1])\n height = int(image_.shape[0])\n image_id = get_image_id(data['filename'])\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n area = []\n classes = []\n classes_text = []\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ in label_map_dict:\n x_pos = [int(obj['bndbox']['xmax']), int(obj['bndbox']['xmin'])]\n y_pos = [int(obj['bndbox']['ymax']), int(obj['bndbox']['ymin'])]\n xmin.append((float(min(x_pos))) / width)\n ymin.append((float(min(y_pos))) / height)\n xmax.append((float(max(x_pos))) / width)\n ymax.append((float(max(y_pos))) / height)\n area.append((xmax[-1] - xmin[-1]) * (ymax[-1] - ymin[-1]))\n classes_text.append(name_in_obj_.replace(' ', '').encode('utf8'))\n classes.append(int(label_map_dict[name_in_obj_]))\n\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image/height':\n tfrecord_util.int64_feature(height),\n 'image/width':\n tfrecord_util.int64_feature(width),\n 'image/filename':\n tfrecord_util.bytes_feature(data['filename'].encode('utf8')),\n 'image/source_id':\n tfrecord_util.bytes_feature(str(image_id).encode('utf8')),\n 'image/key/sha256':\n tfrecord_util.bytes_feature(key.encode('utf8')),\n 'image/encoded':\n tfrecord_util.bytes_feature(encoded_jpg),\n 'image/format':\n tfrecord_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin':\n tfrecord_util.float_list_feature(xmin),\n 'image/object/bbox/xmax':\n tfrecord_util.float_list_feature(xmax),\n 'image/object/bbox/ymin':\n tfrecord_util.float_list_feature(ymin),\n 'image/object/bbox/ymax':\n tfrecord_util.float_list_feature(ymax),\n 'image/object/area':\n 
tfrecord_util.float_list_feature(area),\n 'image/object/class/text':\n tfrecord_util.bytes_list_feature(classes_text),\n 'image/object/class/label':\n tfrecord_util.int64_list_feature(classes),\n }))\n return example", "def unpack_data(imagefile, labelfile):\n\t# Open the images with gzip in read binary mode\n\timages = open(imagefile, 'rb')\n\tlabels = open(labelfile, 'rb')\n\t# Read the binary data\n\t# We have to get big endian unsigned int. So we need '>I'\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0]\n\n\tif number_of_images != N:\n\t\traise Exception('number of labels did not match the number of images')\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t\tif i % 1000 == 0:\n\t\t\tprint(\"i: %i\" % i)\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\ttmp_pixel = images.read(1) # Just a single byte\n\t\t\t\ttmp_pixel = unpack('>B', tmp_pixel)[0]\n\t\t\t\tx[i][row][col] = tmp_pixel\n\t\ttmp_label = labels.read(1)\n\t\ty[i] = unpack('>B', tmp_label)[0]\n\treturn x, y", "def _parse(serialized_example):\n\n feature_map = {\n 'dayofweek': tf.io.FixedLenFeature([], tf.int64),\n 'dropofflat': tf.io.FixedLenFeature([], tf.float32),\n 'dropofflon': tf.io.FixedLenFeature([], tf.float32),\n 'fare_amount': tf.io.FixedLenFeature([], tf.float32),\n 'hourofday': tf.io.FixedLenFeature([], tf.int64),\n 'passengers': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplat': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplon': tf.io.FixedLenFeature([], tf.float32)\n }\n\n # Parse the serialized data into a dictionary.\n parsed_example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=feature_map)\n\n features = add_engineered(parsed_example)\n label = features.pop(\"fare_amount\")\n\n return features, label", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name 
in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def parse_example_proto(example_serialized, semantic_level_settings):\n # Dense features in Example proto.\n feature_map = {\n 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n 'image/filename': tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n }\n for level, num in semantic_level_settings:\n feature_map.update({'image/label/%s' % level:\n tf.FixedLenFeature([num], dtype=tf.int64, default_value=[-1] * num)})\n features = tf.parse_single_example(example_serialized, feature_map)\n labels = [tf.cast(features['image/label/%s' % level], dtype=tf.int32)\n for level, _ in semantic_level_settings]\n return features['image/encoded'], labels, features['image/filename']", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def _read_from_file(queue, config, class_label):\n\t\n\tclass SequenceRecord(object):\n\t\tpass\n\tresult = SequenceRecord()\n\t\n\t# Dimensions of the images and the bytes they each take\n\t# up in the binary file\n\tresult.height = config.image_size\n\tresult.width = config.image_size\n\tresult.depth = config.image_depth\n\tresult.sequence_length = config.num_steps\n\tresult.image_bytes = (result.height * result.width * result.depth)\n\n\tresult.patient_ID_bytes = 5 #uint8\n\n\tinitial_image_name_bytes = 92 #uint8\n\tresult.num_features = config.num_features\n\tresult.one_feature_bytes = 8\n\tresult.feature_bytes = config.num_features * result.one_feature_bytes # float64\n\tresult.coord_bytes = config.num_steps*2*6 # x and y coords, uint32\n\n\trecord_bytes = result.image_bytes * result.sequence_length + result.coord_bytes + result.patient_ID_bytes + initial_image_name_bytes + result.feature_bytes\n\t\n\t# The amount of padding on the image_name must be adjusted based on the number of features\n\t# because the overall number of bytes must be a multiple of 8 for float64 processing of raw output.\n\tincrement = 8 - (record_bytes % 8)\n\tresult.image_name_bytes = initial_image_name_bytes + increment\n\trecord_bytes += increment\n\t\n\t# Create reader with the fixed record length and\n\t# read off one record\n\treader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n\tresult.key, value = reader.read(queue)\n\t# Convert from a string to a vector of uint8 that is record_bytes long.\n\trecord_data = tf.decode_raw(value, tf.uint8, name='decode_raw_uint8')\n\tfeature_data = tf.decode_raw(value, tf.float64, name='decode_raw_float64')\n\tindex = 0\n\tnext_index = result.patient_ID_bytes\n\tresult.subject_id, index = process_slice(index, result.patient_ID_bytes, record_data)\n\tresult.image_name, index = process_slice(index, result.image_name_bytes, record_data)\n\tresult.patch_coords, index = process_slice(index, result.coord_bytes, record_data)\n\n\t# features are taken from float64 stream, they are taken out as a single block of data.\n\tfeature_index 
= index // result.one_feature_bytes\n\tresult.features, feature_index = process_removal_slice(feature_index, result.num_features, feature_data, config.remove_feature)\n\n\t_ , index = process_slice(index, result.feature_bytes, record_data)\n\tsequence_data = tf.strided_slice(record_data, [index], [record_bytes])\n\n\t# Treat sequence as an image of dimensions [(steps * patch height), width, depth] and normalize per image\n\t# Then reshape back to a single sequence\n\n\twith tf.device(\"/cpu:0\"):\n\t\tnormalized_sequence = tf.reshape(sequence_data,\n\t\t\t[result.sequence_length*result.height,result.width, result.depth])\n\t\tnormalized_sequence = tf.image.per_image_standardization(normalized_sequence)\n\n\t\tresult.sequence = tf.reshape(normalized_sequence,\n\t\t\t\t\t\t\t\t[result.sequence_length, result.height * result.width * result.depth]) #result.image_bytes])\n\t\t\t\t\t\t\t\t\n\tresult.sequence = tf.cast(result.sequence, tf.float32)\n\tresult.label = tf.constant(class_label, shape=[1])\n\n\treturn result", "def load_encoded_data_segment(self, segment):\n\n # get data locations\n segment_number = self.properties.segments[segment]['segment_number']\n vDeflection_file = 'segments/{}/channels/vDeflection.dat'.format(segment_number)\n height_file = 'segments/{}/channels/height.dat'.format(segment_number)\n\n # load encoded data from archive\n vDeflection = self.archive.read_data(vDeflection_file)\n height = self.archive.read_data(height_file)\n\n return vDeflection, height", "def plot_segmentation_prediction(\n prediction: ndarray,\n label: ndarray,\n raw_img: Image,\n raw_label: Image,\n img_shape: tuple,\n img_name: str,\n save_path: str,\n) -> None:\n raw_img = raw_img.resize(img_shape)\n\n final_mask = mark_boundaries(raw_img, prediction == 1, [255, 0, 0])\n final_mask = mark_boundaries(final_mask, prediction == 2, [0, 255, 0])\n final_mask = mark_boundaries(final_mask, prediction == 3, [0, 0, 255])\n\n final_seg_mask = zeros(img_shape + (3,), uint8)\n final_seg_mask[prediction == 1] = [255, 0, 0]\n final_seg_mask[prediction == 2] = [0, 255, 0]\n final_seg_mask[prediction == 3] = [0, 0, 255]\n\n final_label = mark_boundaries(raw_img, label[1], [255, 0, 0])\n final_label = mark_boundaries(final_label, label[2], [0, 255, 0])\n\n if label.shape[0] == 4:\n final_label = mark_boundaries(final_label, label[3], [0, 0, 255])\n\n fig = plt.figure(figsize=(14, 14))\n\n fig.add_subplot(2, 2, 1)\n plt.imshow(final_mask)\n\n plt.title(\"Prediction\")\n\n fig.add_subplot(2, 2, 2)\n plt.imshow(final_seg_mask)\n plt.title(\"Prediction - mask\")\n\n fig.add_subplot(2, 2, 3)\n plt.imshow(final_label)\n plt.title(\"Reference\")\n\n raw_label = array(raw_label)\n raw_label[(raw_label == [255, 255, 0]).sum(axis=2) == 3] = [255, 0, 0]\n raw_label = Image.fromarray(raw_label)\n raw_label = raw_label.resize(img_shape)\n \n fig.add_subplot(2, 2, 4)\n plt.imshow(raw_label)\n plt.title(\"Reference - mask\")\n\n plt.savefig(join(save_path, img_name))\n plt.close()", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype 
== tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def setup_annotations(self):\n sbd_path = get_data_path('sbd')\n target_path = pjoin(self.root, 'SegmentationClass/pre_encoded')\n if not os.path.exists(target_path): os.makedirs(target_path)\n path = pjoin(sbd_path, 'dataset/train.txt')\n sbd_train_list = tuple(open(path, 'r'))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files['train'] + sbd_train_list\n\n # keep unique elements (stable)\n train_aug = [train_aug[i] for i in \\\n sorted(np.unique(train_aug, return_index=True)[1])]\n self.files['train_aug'] = train_aug\n set_diff = set(self.files['val']) - set(train_aug) # remove overlap\n self.files['train_aug_val'] = list(set_diff)\n\n pre_encoded = glob.glob(pjoin(target_path, '*.png'))\n expected = np.unique(self.files['train_aug'] + self.files['val']).size\n\n if len(pre_encoded) != expected:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, 'dataset/cls', ii + '.mat')\n data = io.loadmat(lbl_path)\n lbl = data['GTcls'][0]['Segmentation'][0].astype(np.int32)\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, ii + '.png'), lbl)\n\n for ii in tqdm(self.files['trainval']):\n fname = ii + '.png'\n lbl_path = pjoin(self.root, 'SegmentationClass', fname)\n lbl = self.encode_segmap(m.imread(lbl_path))\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, fname), lbl)\n\n assert expected == 9733, 'unexpected dataset sizes'", "def decode(cls, flattened):\n if len(flattened) < 8:\n return None\n t = binary_cast(flattened[:8], 'BBBBBBBB', 'd')[0]\n img = cls.decompress(flattened[8:])\n return t, img", "def decode(data): #@NoSelf", "def _decode_record(record, name_to_features):\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example", "def _pickle_load(filename):\n with open(filename, 'rb') as f:\n save = pickle.load(f)\n image = save['image'].astype(np.float32)\n label = np.float32(save['label'])\n label = reformat_labels(label)\n return image, label", "def _decode_train(self, decoder, _encoder_output, _features, labels):\r\n target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,\r\n labels[\"target_ids\"])\r\n\r\n return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels[\"target_len\"]-1)", "def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 
3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n\n return image, label", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(10000)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def decode(data):\n raise NotImplementedError", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.predict is None:\n self.predict = argus_msgs.msg.FilterPredictStep()\n if self.update is None:\n self.update = argus_msgs.msg.FilterUpdateStep()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 26\n (_x.step_num, _x.info_type, _x.predict.step_dt, _x.predict.trans_jacobian.column_major, _x.predict.trans_jacobian.rows, _x.predict.trans_jacobian.cols,) = _get_struct_QBdB2I().unpack(str[start:end])\n self.predict.trans_jacobian.column_major = bool(self.predict.trans_jacobian.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.trans_jacobian.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.predict.trans_noise_cov.column_major, _x.predict.trans_noise_cov.rows, _x.predict.trans_noise_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.trans_noise_cov.column_major = bool(self.predict.trans_noise_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.trans_noise_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.predict.prior_state_cov.column_major, _x.predict.prior_state_cov.rows, _x.predict.prior_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.prior_state_cov.column_major = bool(self.predict.prior_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.prior_state_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.predict.post_state_cov.column_major, _x.predict.post_state_cov.rows, _x.predict.post_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.post_state_cov.column_major = bool(self.predict.post_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.post_state_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.prior_state_cov.column_major, _x.update.prior_state_cov.rows, _x.update.prior_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.prior_state_cov.column_major = bool(self.update.prior_state_cov.column_major)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.prior_state_cov.data = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.prior_obs_error = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.obs_error_cov.column_major, _x.update.obs_error_cov.rows, _x.update.obs_error_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_error_cov.column_major = bool(self.update.obs_error_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_error_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.post_state_cov.column_major, _x.update.post_state_cov.rows, _x.update.post_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.post_state_cov.column_major = bool(self.update.post_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.post_state_cov.data = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.state_delta = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.post_obs_error = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.obs_jacobian.column_major, _x.update.obs_jacobian.rows, _x.update.obs_jacobian.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_jacobian.column_major = bool(self.update.obs_jacobian.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_jacobian.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.obs_noise_cov.column_major, _x.update.obs_noise_cov.rows, _x.update.obs_noise_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_noise_cov.column_major = bool(self.update.obs_noise_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_noise_cov.data = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def create_voc_label(is_training):\n voc_dir = config.voc_dir\n cls_map = {name: i for i, name in enumerate(config.coco_classes)}\n sub_dir = 'train' if is_training else 'eval'\n voc_dir = os.path.join(voc_dir, sub_dir)\n if not os.path.isdir(voc_dir):\n raise ValueError(f'Cannot find {sub_dir} dataset path.')\n\n image_dir = anno_dir = voc_dir\n if os.path.isdir(os.path.join(voc_dir, 'Images')):\n image_dir = os.path.join(voc_dir, 'Images')\n if os.path.isdir(os.path.join(voc_dir, 'Annotations')):\n anno_dir = os.path.join(voc_dir, 'Annotations')\n\n if not is_training:\n data_dir = config.voc_root\n json_file = os.path.join(data_dir, 
config.instances_set.format(sub_dir))\n file_dir = os.path.split(json_file)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [],\n \"categories\": []}\n bnd_id = 1\n\n image_files_dict = {}\n image_anno_dict = {}\n images = []\n for anno_file in os.listdir(anno_dir):\n print(anno_file)\n if not anno_file.endswith('xml'):\n continue\n tree = et.parse(os.path.join(anno_dir, anno_file))\n root_node = tree.getroot()\n file_name = root_node.find('filename').text\n img_id = get_imageId_from_fileName(file_name)\n image_path = os.path.join(image_dir, file_name)\n print(image_path)\n if not os.path.isfile(image_path):\n print(f'Cannot find image {file_name} according to annotations.')\n continue\n\n labels = []\n for obj in root_node.iter('object'):\n cls_name = obj.find('name').text\n if cls_name not in cls_map:\n print(f'Label \"{cls_name}\" not in \"{config.coco_classes}\"')\n continue\n bnd_box = obj.find('bndbox')\n x_min = int(float(bnd_box.find('xmin').text)) - 1\n y_min = int(float(bnd_box.find('ymin').text)) - 1\n x_max = int(float(bnd_box.find('xmax').text)) - 1\n y_max = int(float(bnd_box.find('ymax').text)) - 1\n labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])\n\n if not is_training:\n o_width = abs(x_max - x_min)\n o_height = abs(y_max - y_min)\n ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': \\\n img_id, 'bbox': [x_min, y_min, o_width, o_height], \\\n 'category_id': cls_map[cls_name], 'id': bnd_id, \\\n 'ignore': 0, \\\n 'segmentation': []}\n json_dict['annotations'].append(ann)\n bnd_id = bnd_id + 1\n\n if labels:\n images.append(img_id)\n image_files_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(labels)\n\n if not is_training:\n size = root_node.find(\"size\")\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n image = {'file_name': file_name, 'height': height, 'width': width,\n 'id': img_id}\n json_dict['images'].append(image)\n\n if not is_training:\n for cls_name, cid in cls_map.items():\n cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}\n json_dict['categories'].append(cat)\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\n return images, image_files_dict, image_anno_dict", "def parser(_, serialized_example):\n features = {}\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n features[pose_name] = tf.FixedLenFeature([flags.pose_dim], tf.float32)\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n features[action_name] = tf.FixedLenFeature([flags.pose_dim],\n tf.float32)\n features[joint_pos_name] = tf.FixedLenFeature([flags.joint_pos_dim],\n tf.float32)\n else:\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n\n parsed_input = tf.parse_single_example(serialized_example, features)\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH),\n method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 
255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n return image_seq, action_seq, action_seq, joint_pos_seq", "def _decode(self):\n with tf.variable_scope('same_question_concat'):\n batch_size = tf.shape(self.start_label)[0]\n concat_passage_encodes = tf.reshape(\n self.fuse_p_encodes,\n [batch_size, -1, 2 * self.hidden_size]\n )\n no_dup_question_encodes = tf.reshape(\n self.sep_q_encodes,\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], 2 * self.hidden_size]\n )[0:, 0, 0:, 0:]\n decoder = PointerNetDecoder(self.hidden_size)\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\n no_dup_question_encodes)", "def dict_to_tf_example(data,\n label_map_dict,\n image_subdirectory,\n ignore_difficult_instances=False):\n \n data = data.strip().split()\n \n img_path = os.path.join(image_subdirectory, data[0])\n \n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n\n key = hashlib.sha256(encoded_jpg).hexdigest()\n \n width, height = image.size\n \n num_boxes = len(data[1:])/5\n \n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n \n for i in xrange(num_boxes):\n xmin.append(int(data[1 + 5 * i]))\n ymin.append(int(data[2 + 5 * i]))\n \n xmax.append(int(data[3 + 5 * i]))\n ymax.append(int(data[4 + 5 * i]))\n \n xmin[-1] = float(xmin[-1]) / width\n ymin[-1] = float(ymin[-1]) / height\n xmax[-1] = float(xmax[-1]) / width\n ymax[-1] = float(ymax[-1]) / height\n \n classes.append(int(data[5 + 5 * i]))\n \n classes_text.append(label_map_dict[classes[-1]].encode('utf8'))\n truncated.append(0)\n poses.append('Frontal'.encode('utf8'))\n difficult_obj.append(0)\n \n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data[0].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data[0].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': 
dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\n return example", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def parse_train(self, proto, height, width):\n _, sequence_parsed = tf.io.parse_single_sequence_example(\n proto,\n context_features=self._context_features,\n sequence_features=self._sequence_features)\n\n # Deserialize images to float32 tensors.\n images = tf.map_fn(\n _deserialize_png, sequence_parsed['images'], dtype=tf.float32)\n\n # Resize images.\n if height is not None and width is not None:\n images = smurf_utils.resize(images, height, width, is_flow=False)\n\n return {'images': images}", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def decode_example(\n serialized_proto: str,\n use_example_weight: bool = True) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:\n name_to_features = dict(\n context=tf.io.VarLenFeature(tf.int64),\n label=tf.io.FixedLenFeature([1], tf.int64))\n examples = tf.io.parse_single_example(serialized_proto, name_to_features)\n features = collections.OrderedDict()\n for name in examples:\n feature_content = examples[name]\n if feature_content.dtype == tf.int64:\n tf.cast(feature_content, tf.int32)\n if isinstance(feature_content, tf.SparseTensor):\n feature_content = tf.sparse.to_dense(feature_content)\n features[name] = feature_content\n\n if use_example_weight:\n # The returned example is in the format of ({'context': a list of movie IDs,\n # 'label': next movie ID}, example weight). Using 1.0 as the weight here.\n output = (features, tf.constant(1.0))\n else:\n # If using global similarity and global recall, return (features,\n # features['label']) instead.\n output = (features, features[\"label\"])\n return output", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def deserialize_inference_result(results_b64):\n bytes_io = io.BytesIO(base64.b64decode(results_b64))\n single_pred_dict = dict(np.load(bytes_io))\n if len(single_pred_dict) != 1:\n raise ValueError('Expected exactly one object in the structured np array. 
'\n f'Saw {len(single_pred_dict)}')\n sequence_name = list(single_pred_dict.keys())[0]\n activations = list(single_pred_dict.values())[0]\n return sequence_name, activations", "def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)", "def deserialize(self, blob):\n pass", "def deserialize(self, str):\n try:\n if self.objects is None:\n self.objects = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.objects = []\n for i in range(0, length):\n val1 = vision_msgs.msg.ClassifiedObject()\n _v4 = val1.header\n start = end\n end += 4\n (_v4.seq,) = _struct_I.unpack(str[start:end])\n _v5 = _v4.stamp\n _x = _v5\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v4.frame_id = str[start:end].decode('utf-8')\n else:\n _v4.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object_class = str[start:end].decode('utf-8')\n else:\n val1.object_class = str[start:end]\n start = end\n end += 4\n (val1.confidence,) = _struct_f.unpack(str[start:end])\n _v6 = val1.roi\n _x = _v6\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _struct_4IB.unpack(str[start:end])\n _v6.do_rectify = bool(_v6.do_rectify)\n self.objects.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.image is None:\n self.image = autonavigation.msg.Image()\n end = 0\n _x = self\n start = end\n end += 29\n (_x.unique_key, _x.gps_week, _x.gps_millisecond, _x.video_id, _x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _struct_2IQB3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 212\n (_x.image.localPose.time, _x.image.localPose.dr_x, _x.image.localPose.dr_y, _x.image.localPose.dr_z, _x.image.localPose.dr_heading, _x.image.localPose.dr_roll, _x.image.localPose.dr_pitch, _x.image.localPose.lf_speed, _x.image.localPose.rf_speed, _x.image.localPose.lr_speed, _x.image.localPose.rr_speed, _x.image.localPose.rot_x, _x.image.localPose.rot_y, _x.image.localPose.rot_z, _x.image.localPose.acc_x, _x.image.localPose.acc_y, _x.image.localPose.acc_z, _x.image.localPose.batteryState, _x.image.localPose.batteryEnergy, _x.image.localPose.steer, _x.image.localPose.brake, _x.image.localPose.fuel, _x.image.localPose.trans, _x.image.localPose.VehicleState, _x.image.localPose.mode, _x.image.localPose.drStatus, _x.image.localPose.errorStatus, _x.image.localPose.emergency_flag, _x.image.localPose.hardswitch_on, _x.image.gpsPos.gps_flag, _x.image.gpsPos.gps_week, _x.image.gpsPos.gps_millisecond, 
_x.image.gpsPos.longitude, _x.image.gpsPos.laltitude, _x.image.gpsPos.gaussX, _x.image.gpsPos.gaussY, _x.image.gpsPos.height, _x.image.gpsPos.pitch, _x.image.gpsPos.roll, _x.image.gpsPos.azimuth, _x.image.gpsPos.northVelocity, _x.image.gpsPos.eastVelocity, _x.image.gpsPos.upVelocity, _x.image.gpsPos.positionStatus, _x.image.gpsPos.rot_x, _x.image.gpsPos.rot_y, _x.image.gpsPos.rot_z, _x.image.gpsPos.acc_x, _x.image.gpsPos.acc_y, _x.image.gpsPos.acc_z, _x.image.height, _x.image.width,) = _struct_d21i7bBI6d13i2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _struct_BI.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decode(self,data):\n import yaml\n return yaml.load(data.decode('utf-8'))", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def _decode_as_serialized_example_list(self, serialized):\n return _decode_as_serialized_example_list(serialized)", "def dict_to_tf_example(data,\n label_map_dict,\n image_subdirectory,\n ignore_difficult_instances=False):\n img_path = os.path.join(image_subdirectory, data['filename'])\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width = int(data['size']['width'])\n height = int(data['size']['height'])\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n if data.get('object') != None:\n for obj in data.get('object'):\n difficult_obj.append(int(0))\n\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n\n class_name = obj['name']\n classes_text.append(class_name.encode('utf8'))\n classes.append(label_map_dict[class_name])\n truncated.append(int(0))\n poses.append('Unspecified'.encode('utf8'))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': 
dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\n return example", "def dict_to_tf_example(label_map_dict):\n filename = label_map_dict[0]\n img_path = os.path.join(FLAGS.image_data_dir, filename)\n\n try:\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n except:\n logging.warning('Image Not Found %s', img_path)\n return None\n\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n (witdh, height) = image.size\n\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n sentence_txt = label_map_dict[1]\n\n\n sentences = []\n f = open('dictionary.json', 'r')\n dictionary = f.read()\n dictionary = json.loads(dictionary)\n for index, _ in enumerate(sentence_txt):\n sentence = []\n for sen in sentence_txt[index].split(' '):\n try:\n sentence.append(dictionary[sen])\n except KeyError:\n sentence.append(dictionary['UNK'])\n sentences.append(sentence)\n\n feature_dict = {\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(witdh),\n 'image/filename': dataset_util.bytes_feature(filename.encode('utf8')),\n 'image/score_0': dataset_util.int64_list_feature(sentences[0]),\n 'image/score_1': dataset_util.int64_list_feature(sentences[1]),\n 'image/score_2': dataset_util.int64_list_feature(sentences[2]),\n 'image/score_3': dataset_util.int64_list_feature(sentences[3]),\n 'image/score_4': dataset_util.int64_list_feature(sentences[4]),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8'))\n }\n\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n return example", "def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))", "def _convert_to_example(filename, subset_idx, left_image, right_image, disparity=None, mask=None):\n left_image_raw = left_image.tostring()\n right_image_raw = right_image.tostring()\n if disparity is not None:\n mask_raw = mask.tostring()\n disparity_raw = disparity.tostring()\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(left_image.shape[0]),\n 'width': _int64_feature(left_image.shape[1]),\n 'left_image_raw': _bytes_feature(left_image_raw),\n 'right_image_raw': _bytes_feature(right_image_raw),\n 'mask_raw': _bytes_feature(mask_raw),\n 'disparity_raw': _bytes_feature(disparity_raw),\n 'filename': _bytes_feature(tf.compat.as_bytes(filename)),\n 'subset_idx': _int64_feature(subset_idx)\n }))\n else:\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(left_image.shape[0]),\n 'width': 
_int64_feature(left_image.shape[1]),\n 'left_image_raw': _bytes_feature(left_image_raw),\n 'right_image_raw': _bytes_feature(right_image_raw),\n 'filename': _bytes_feature(tf.compat.as_bytes(filename)),\n 'subset_idx': _int64_feature(subset_idx)\n }))\n return example" ]
[ "0.66928875", "0.65845627", "0.6560088", "0.65355873", "0.64627737", "0.62374485", "0.6098066", "0.6090909", "0.59154075", "0.5877989", "0.58274025", "0.5801115", "0.5764262", "0.574245", "0.5734998", "0.57296777", "0.57066524", "0.56981283", "0.56740636", "0.5664909", "0.5655456", "0.56523055", "0.56272334", "0.5614881", "0.56148213", "0.5602914", "0.5599391", "0.55880606", "0.5583824", "0.5568194", "0.5554434", "0.55532753", "0.5548621", "0.55462146", "0.5537626", "0.55367374", "0.5535247", "0.5531083", "0.55192715", "0.5518978", "0.5516195", "0.5480233", "0.5477825", "0.5473887", "0.5455431", "0.544919", "0.54341805", "0.543358", "0.5420119", "0.5413954", "0.540215", "0.53963906", "0.53918827", "0.5385097", "0.53828263", "0.5378044", "0.53748554", "0.5374362", "0.536976", "0.536976", "0.536976", "0.53586143", "0.53577685", "0.53545636", "0.53534085", "0.53463286", "0.53300065", "0.5323136", "0.53203523", "0.53186756", "0.5309259", "0.5305746", "0.5305015", "0.5303611", "0.5301302", "0.5294431", "0.52943355", "0.5290274", "0.5288098", "0.52866554", "0.5283663", "0.5283085", "0.5277455", "0.5266958", "0.5265498", "0.52617335", "0.52387995", "0.5234086", "0.5233456", "0.5231548", "0.52262807", "0.5223021", "0.52182853", "0.52052236", "0.5200615", "0.5193394", "0.5189977", "0.51887155", "0.5183443", "0.51697916" ]
0.62265766
6
Initializes parameters for parsing annotations in the dataset.
def __init__(self,
             output_size,
             resize_eval=False,
             ignore_label=255,
             aug_rand_hflip=False,
             aug_scale_min=1.0,
             aug_scale_max=1.0,
             aug_policy='',
             use_bfloat16=True,
             mode=None):
    self._mode = mode
    self._is_training = (mode == ModeKeys.TRAIN)
    self._output_size = output_size
    self._resize_eval = resize_eval
    self._ignore_label = ignore_label

    # Data augmentation.
    self._aug_rand_hflip = aug_rand_hflip
    self._aug_scale_min = aug_scale_min
    self._aug_scale_max = aug_scale_max
    self._aug_policy = aug_policy

    # Device.
    self._use_bfloat16 = use_bfloat16

    # Data is parsed depending on the model Modekey.
    if mode == ModeKeys.TRAIN:
        self._parse_fn = self._parse_train_data
    elif mode == ModeKeys.EVAL:
        self._parse_fn = self._parse_eval_data
    elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT:
        self._parse_fn = self._parse_predict_data
    else:
        raise ValueError('mode is not defined.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_all_params(self):\n self.annotations_timestamp = 0\n # self.annotations_offset = 0\n # self.annotation_offset_text.configure(text='Current: %d' % self.annotations_offset)\n self.annotations_timestamp_text.configure(text='Annotation timestamp:\\n %d' % self.annotations_timestamp)\n self.annotations_timestamp_text.grid(sticky=\"W\", row=9, column=0, columnspan=10)\n # set text frames\n # self.annotations_offset_entry.delete(0, 'end')\n # self.annotations_offset_entry.insert(0, str(self.annotations_offset))\n self.current_frame_entry.delete(0, 'end')\n self.current_frame_entry.insert(0, str(self.vid.frame_number))", "def __init__(self, annotation_file=None):\n # load dataset\n self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()", "def __init__(self, root_dir, annotation_file, transform=None):\n self.root_dir = root_dir\n self.annotations = pd.read_csv(annotation_file)\n self.transform = transform", "def _init_annotation(self):\n annotations = []\n for frame in self.frames:\n coordinates, sources, targets, ids = [], [], [], []\n frame_id = set()\n for spot_id, spot_annot in frame.items():\n coordinates.append((spot_annot[\"x\"], spot_annot[\"y\"]))\n sources.append(spot_annot[\"source\"])\n targets.append(spot_annot[\"target\"])\n ids.append(spot_id)\n frame_id.add(spot_annot[\"frame\"])\n if len(frame_id) != 1:\n raise ValueError(f\"Invalid frame number found in spot: {spot_id}\")\n annotations.append((\n np.array(coordinates, dtype=np.float),\n np.array(sources, dtype=np.str),\n targets,\n np.array(ids, dtype=np.str),\n frame_id.pop()))\n self.annotations = annotations", "def __init__(self, namespace, data=None, annotation_metadata=None,\n sandbox=None, time=0, duration=None):\n\n super(Annotation, self).__init__()\n\n if annotation_metadata is None:\n annotation_metadata = AnnotationMetadata()\n\n self.annotation_metadata = AnnotationMetadata(**annotation_metadata)\n\n self.namespace = namespace\n\n self.data = SortedKeyList(key=self._key)\n\n if data is not None:\n if isinstance(data, dict):\n self.append_columns(data)\n else:\n self.append_records(data)\n\n if sandbox is None:\n sandbox = Sandbox()\n\n self.sandbox = Sandbox(**sandbox)\n\n self.time = time\n self.duration = duration", "def __init__(\n self,\n annotations_filepath: str,\n dataset: AnyBaseDataset,\n annotation_index: Union[int, str] = -1,\n label_columns: Union[List[int], List[str]] = None,\n dtype: torch.dtype = torch.float,\n device: torch.device = None,\n **kwargs,\n ) -> None:\n self.annotations_filepath = annotations_filepath\n self.datasource = dataset\n self.dtype = dtype\n device_warning(device)\n\n # processing of the dataframe for dataset setup\n df = pd.read_csv(self.annotations_filepath, **kwargs)\n columns = df.columns\n # handle annotation index\n if isinstance(annotation_index, int):\n self.annotation_index = columns[annotation_index]\n elif isinstance(annotation_index, str):\n self.annotation_index = annotation_index\n else:\n raise RuntimeError('annotation_index should be int or str.')\n\n # handle labels\n if label_columns is None:\n self.labels = [\n column for 
column in columns if column != self.annotation_index\n ]\n elif all([isinstance(column, int) for column in label_columns]):\n self.labels = columns[label_columns]\n elif all([isinstance(column, str) for column in label_columns]):\n self.labels = label_columns\n else:\n raise RuntimeError(\n 'label_columns should be an iterable containing int or str'\n )\n # get the number of labels\n self.number_of_tasks = len(self.labels)\n\n # set the index explicitly, and discard non label columns\n df = df.set_index(self.annotation_index)[self.labels]\n DataFrameDataset.__init__(self, df)", "def __init__(self, curator=None, version='', corpus='', annotator=None,\n annotation_tools='', annotation_rules='', validation='',\n data_source=''):\n super(AnnotationMetadata, self).__init__()\n\n if curator is None:\n curator = Curator()\n\n if annotator is None:\n annotator = JObject()\n\n self.curator = Curator(**curator)\n self.annotator = JObject(**annotator)\n\n self.version = version\n self.corpus = corpus\n self.annotation_tools = annotation_tools\n self.annotation_rules = annotation_rules\n self.validation = validation\n self.data_source = data_source", "def __init__(self, config=None, first_ann_file=None,second_ann_file=None):\n # load dataset\n self.config = config\n\n self.dataset = {}\n self.anns = []\n self.imgToAnns = {}\n self.imgs = {}\n \n if not first_ann_file is None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(first_ann_file, 'r'))\n print('Done (t=%0.2fs)'%(time.time()- tic))\n self.dataset['images']= dataset['images']\n self.dataset['annotations'] = dataset['annotations']\n if 'classifications' in dataset.keys():\n self.dataset['cls_lbls'] = dataset['classifications']\n if not second_ann_file is None:\n dataset_second = json.load(open(second_ann_file, 'r'))\n self.split_second_ann(dataset_second)\n print('Done (t=%0.2fs)'%(time.time()- tic))\n\n self.process_dataset()\n self.createIndex()", "def __init__(self,config,typ='train'):\n\n self._config = config\n self.type = typ\n self.reader = JsonlReader(self._config.annotations.as_dict()[typ])\n self.annotations = self.reader.read()\n self.transform = get_image_processor(self._config.image_processor)", "def __init__(self):\n self.x = {}\n self.len = 0\n self.annotations = {}", "def __init__(self):\n self.dataset_path = input('Enter the path to the root directory of your dataset:\\n')\n self.classes = [c.lower() for c in os.listdir(self.dataset_path)]\n self.year = str(datetime.datetime.now().year)\n self.kit_path = input(\"Enter the path ot your VOCdevkit directory:\\n\")\n self.annotation_path = self.kit_path + '/VOC' + self.year + '/Annotations'\n self.renamer = data_renamer.DataRenamer(self.dataset_path, self.year)\n self.data_splitter = data_splitter.DataSplitter(self.dataset_path, self.classes, self.year, self.kit_path)\n self.annotation_maker = annotation_maker.AnnotationMaker(self.dataset_path, self.kit_path, self.year,\n self.annotation_path)", "def __init__(self, **kwargs):\n dataset_name = kwargs.get('dataset_name', None)\n self.reset()\n image_height, image_width, _ = kwargs['input_shape']\n dataset_info = get_dataset_info(dataset_name=dataset_name)\n self._label_inv_map = {v: k for k, v in dataset_info.label_map.items()}\n self._metric_names = ['bbox AP', 'bbox AP50']\n self._metrics_vals = [0, 0]\n self._gt_ann_file = kwargs['gt_json_path']\n self._coco_gt = COCO(self._gt_ann_file)\n self._last_image_id = None\n self._faster_proposlas_nms = FasterRCNNProposalsNMS(image_height, 
image_width, self._coco_gt,\n kwargs['score_threshold'], kwargs['nms_iou_thresh'],\n label_inv_map=self._label_inv_map,\n detections=self._detections)", "def __init__(self, dictionary: dict[str, t.Any]) -> None:\n missing_keys = self.__annotations__.keys() - dictionary.keys()\n if missing_keys:\n raise KeyError(f\"Fetched object lacks expected keys: {missing_keys}\")\n for annotation in self.__annotations__:\n setattr(self, annotation, dictionary[annotation])", "def __init__(self, cfg, data_dir, train_files):\n self.cfg = cfg\n self.imgs, self.ids, self.anns = None, None, None\n self.data_dir = data_dir\n self.product_labels = {}\n print('loading annotations into memory...')\n tic = time.time()\n self.datasets = []\n if type(train_files) != list:\n train_files = [train_files]\n for train_file in train_files:\n labels_file = os.path.dirname(train_file)\n labels_file = os.path.join(labels_file, 'labels.txt')\n with open(labels_file, 'r') as f:\n self.product_names = {}\n for line in f:\n label, prod_name = line.split()\n self.product_labels[prod_name] = int(label)\n with open(train_file, 'r') as f:\n dataset = {}\n train_file_dir = os.path.dirname(train_file)\n for line in f:\n img, ann_file = line.split()\n img = os.path.join(train_file_dir, 'images',\n os.path.basename(img))\n ann_file = os.path.join(train_file_dir, 'annotations',\n os.path.basename(ann_file))\n dataset[img] = ann_file\n self.datasets.append(dataset)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n self.create_index()", "def __init__(self, dataset):\n self._dataset = dataset", "def init(self, parameters):\n pass", "def annotations(self, annotations):\n self._annotations = annotations", "def __init__(self):\n self.model = None\n self.joined_datasets = None\n self.id_col = None\n self.val_col = None\n self.pop_col = None\n self.total_population_per_unit = None\n self.centroids_of_areal_data = None\n self.prepared_data = None\n self.unknown_area_id = None\n\n # Parameters\n self.lags = None\n self.step = None\n self.min_no_of_observations = None\n self.max_search_radius = None", "def __init__(self, ann_path, train_val_list, test_list, config=None):\n self.ann_path = ann_path\n self.config = config\n self.train_val_list = train_val_list\n self.test_list = test_list", "def __init__(self, dataset: Dataset):\n self.dataset = dataset", "def init_attrs(self):\n raise NotImplementedError", "def __init__(self, **params):\n super(CoOccurrenceMatrixTransformer, self).__init__()\n self.counts = None\n self.count_params = params\n if \"lowercase\" not in params:\n self.count_params[\"lowercase\"] = False\n self.fit_params = None\n self.counter = None\n self.vocabulary = None\n logger.debug(f\"Class {self.__class__} is initialized\")", "def __init__(self, data, labels, emotions):\n self.data = data\n self.labels = labels\n self.emotions = emotions", "def __init__(self, anno):\n self._x1 = float(anno[0])\n self._y1 = float(anno[1])\n self._w = float(anno[2])\n self._h = float(anno[3])\n self._blur = int(anno[4])\n self._expression = int(anno[5])\n self._illumination = int(anno[6])\n self._invalid = int(anno[7])\n self._occlusion = int(anno[8])\n self._pose = int(anno[9])", "def load_annotations(self):\n # get keys\n with open(self.ann_file, 'r') as fin:\n keys = [line.strip().split(' ')[0] for line in fin]\n # get frame index list for LQ frames\n frame_index_list = []\n for i in range(self.num_input_frames):\n # Each clip of Vimeo90K has 7 frames starting from 1. 
So we use 9\n # for generating frame_index_list:\n # N | frame_index_list\n # 1 | 4\n # 3 | 3,4,5\n # 5 | 2,3,4,5,6\n # 7 | 1,2,3,4,5,6,7\n frame_index_list.append(i + (9 - self.num_input_frames) // 2)\n\n data_infos = []\n for key in keys:\n folder, subfolder = key.split('/')\n lq_paths = []\n for i in frame_index_list:\n lq_paths.append(\n osp.join(self.lq_folder, folder, subfolder, f'im{i}.png'))\n gt_paths = [osp.join(self.gt_folder, folder, subfolder, 'im4.png')]\n\n data_infos.append(\n dict(lq_path=lq_paths, gt_path=gt_paths, key=key))\n\n return data_infos", "def __init__(self, annotation_file=None, N=10, kind=\"official\"):\n\n # load dataset\n self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n self.N = N\n self.kind = kind\n if not annotation_file == None:\n print(\"loading annotations into memory...\")\n tic = time.time()\n with open(annotation_file, \"r\") as f:\n dataset = json.load(f)\n assert (\n type(dataset) == dict\n ), \"annotation file format {} not supported\".format(type(dataset))\n print(\"Done (t={:0.2f}s)\".format(time.time() - tic))\n self.dataset = dataset\n self.createIndex()\n\n if getattr(pycocotools, \"__version__\", \"0\") >= \"12.0.2\":\n warnings.warn(\n 'mmpycocotools is deprecated. Please install official pycocotools by \"pip install pycocotools\"', # noqa: E501\n UserWarning,\n )\n self.img_ann_map = self.imgToAnns\n self.cat_img_map = self.catToImgs", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def initialize(self, args):\n # You must parse model_config. JSON string is not parsed here\n self.model_config = json.loads(args['model_config'])\n print(\"model_config:\", self.model_config)\n\n self.input_names = []\n for input_config in self.model_config[\"input\"]:\n self.input_names.append(input_config[\"name\"])\n print(\"postprocess input names:\", self.input_names)\n\n self.output_names = []\n self.output_dtype = []\n for output_config in self.model_config[\"output\"]:\n self.output_names.append(output_config[\"name\"])\n dtype = pb_utils.triton_string_to_numpy(output_config[\"data_type\"])\n self.output_dtype.append(dtype)\n print(\"postprocess output names:\", self.output_names)\n self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()\n self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()\n self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()\n self.cls_threshold = 0.9", "def init(self, *args, **kwargs):\n raise NotImplementedError(\"ErddapArgoDataFetcher.init not implemented\")", "def __init__(self, parameters: jnp.ndarray):\n\n # Note that this method is implicitly overriden by the dataclass decorator and\n # should _not_ be marked abstract.\n raise NotImplementedError()", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def __init__(self, data_dir, mode):\n data = pd.read_csv(path.join(data_dir, mode+\"_data.txt\"), delimiter='|')\n word2idx = np.load(path.join(data_dir, \"word2idx.dict\"))\n sentences = [sentence.split(' ') for sentence in data['sentence']]\n self.data_len = data['sequence_length'].values\n self.labels = data['sentiment_label'].values\n self.sequence_data = np.array([[word2idx[w] for w in s] for s in sentences])", "def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)", "def __init__(self):\n super(sppasAnnotationsSettings, self).__init__()\n self.__dict__ = dict(\n error=-1,\n ok=0,\n 
warning=1,\n ignore=2,\n info=3,\n\n extension=\".xra\",\n\n # all the types of the annotations implemented into SPPAS\n types=(\"STANDALONE\", \"SPEAKER\", \"INTERACTION\"),\n\n # standard iso639-3 code for an undetermined language.\n UNDETERMINED=\"und\"\n\n )", "def init_params(self, parameters):\r\n max_epoch = parameters['num_epoch']\r\n momentum_rate = parameters['momentum']\r\n loss = parameters['loss_function']\r\n accuracy = parameters['accuracy']\r\n regularization = parameters['regularization']\r\n batch_size = parameters['batch_size']\r\n optimizer = parameters['optimizer'] if parameters['optimizer'] is not None else 'batch'\r\n self.__init__(max_epoch, optimizer, loss, accuracy, momentum_rate, regularization, batch_size)", "def __init__(self, dataset_path, use_augmentation, batch_size):\n\n self.dataset_path = dataset_path\n self.train_dictionary = {}\n self.evaluation_dictionary = {}\n self.image_width = 105\n self.image_height = 105\n self.batch_size = batch_size\n self.use_augmentation = use_augmentation\n self._train_alphabets = []\n self._validation_alphabets = []\n self._evaluation_alphabets = []\n self._current_train_alphabet_index = 0\n self._current_validation_alphabet_index = 0\n self._current_evaluation_alphabet_index = 0\n\n self.load_dataset()\n\n if (self.use_augmentation):\n self.image_augmentor = self.createAugmentor()\n else:\n self.use_augmentation = []", "def __init__(self, dataset):\n\n if isinstance(dataset, DataConfig):\n self.__data_config = dataset\n self.dataset = dataset.dataset\n elif isinstance(dataset, str):\n logger.debug(\"Dataset argument {} is string, looking up known dataset.\".format(dataset))\n self.__data_config = DataConfig.known_dataset(dataset)\n self.dataset = dataset\n else:\n raise ValueError(\"Argument 'dataset' must be of type DataConfig or str.\")", "def load_annotations(self):\n fname, aux = QFileDialog.getOpenFileName(self, 'Open file', '', \"(*.csv)\")\n if fname != '':\n self.model.AnnotationLoad(fname=fname)", "def __init__(self, root, which_set, vocab, transform=None):\n self.root = root\n self.img_root = os.path.join(root, 'Img')\n self.ann = json.load(open(os.path.join(root, '{}_labels.json'.format(which_set)),'r'))\n\n self.vocab = vocab\n self.transform = transform\n self.img_list = list(self.ann.keys())\n # transfer categories id to labels\n self.cat2label = {}\n for i, k in enumerate(label_corpus):\n self.cat2label[k] = i\n\n self.num_cats = len(self.cat2label) \n\n # vgnome has varied number of annotations [1, 20], average 5.73\n # we still choose five as the parameter. 
It can be adjusted later on\n self.num_ann_onebatch = 5\n self.ids = [a for a in range(len(self.ann))]\n\n print('\\t {} train samples from {} set'.format(len(self.ids), which_set ))\n print('\\t {} of categories'.format(self.num_cats))", "def __init__(self, config):\n logger.info(f\"{self.__class__.__name__}: Dataset initializing ...\")\n super().__init__(config)", "def postparse_hook_undeclared_parameter_annotation(self, data) -> cmd2.plugin.PostparsingData:\n pass", "def __init__(self, metadata_path, batch_size, val_split):\n self.metadata = scipy.io.loadmat(metadata_path)\n self.img_paths = self.metadata['wiki']['full_path'][0][0][0]\n self.genders = self.metadata['wiki']['gender'][0][0][0]\n self.face_score = self.metadata['wiki']['face_score'][0][0][0]\n self.sec_face_score = self.metadata['wiki']['second_face_score'][0][0][0]\n self.batch_size = batch_size\n self.val_split = val_split\n self.load_keys()", "def __init__(self, rules: Union[Dict[str, dict], Set[str]], separators: str = ''):\n\n if isinstance(rules, dict):\n for pattern, annotation in rules.items():\n if 'start' in annotation or 'end' in annotation:\n raise ValueError(\"Attributes 'start' and 'end' are reserved do not use it inside annotations\")\n elif not isinstance(rules, set):\n raise ValueError('Extraction rules must be specified as Dict[str, dict] or Set[str]')\n\n # Protect parameters against changes\n self.rules = deepcopy(rules)\n self.separators = copy(separators)\n\n # Set up the automaton\n self.automaton = Automaton()\n for pattern in self.rules:\n self.automaton.add_word(pattern, len(pattern))\n self.automaton.make_automaton()", "def _load_annotations(self):\n if self._raw_annotations is not None:\n return self._raw_annotations\n\n dataset_file = os.path.join(self._annotation_path, 'complete_dataset_v{}.pkl'.format(self._version))\n idx_file = os.path.join(self._annotation_path, 'splits_indices_v{}.pkl'.format(self._version))\n\n def get_split_from_ds(ds, idx):\n split = {}\n keys = sorted(ds.keys())\n for j in xrange(len(idx)):\n k = keys[idx[j]]\n split[k] = ds[k]\n return split\n\n with open(idx_file, 'rb') as fid:\n indices = cPickle.load(fid)[self._image_set]\n with open(dataset_file, 'rb') as fid:\n ds = cPickle.load(fid)\n self._raw_annotations = get_split_from_ds(ds, indices)\n\n return self._raw_annotations", "def _setupConfigAnnotation(self):\n annotations = IAnnotations(self)\n settings = annotations.get(\"PLOMINOFIELDCONFIG\", None)\n if not settings:\n annotations[\"PLOMINOFIELDCONFIG\"] = PersistentDict()", "def init(*args):\n global dataset\n dataset = args[0]", "def __init__(self, annotations=None, file_metadata=None, sandbox=None):\n super(JAMS, self).__init__()\n\n if file_metadata is None:\n file_metadata = FileMetadata()\n\n if sandbox is None:\n sandbox = Sandbox()\n\n self.annotations = AnnotationArray(annotations=annotations)\n\n self.file_metadata = FileMetadata(**file_metadata)\n\n self.sandbox = Sandbox(**sandbox)", "def __init__(self):\n\n self.points = None\n self.centroid_activation_frames = None\n self.noiseless_frames = None\n self.frames = None", "def __init__ ( self ) :\n\n self.m_src = self.configSrc ('source', ':Cspad.')\n self.m_key_in = self.configStr ('key_in', 'peaks_nda')\n self.m_print_bits = self.configInt ('print_bits', 1)\n\n self.counter = 0\n self.count_msg = 0\n\n if self.m_print_bits & 1 : self.print_input_pars()\n\n self.list_of_dtypes = [\n psana.ndarray_float32_2,\n psana.ndarray_float64_2\n ]", "def __init__(self, scData, refDataset, refAnnot):\n \n 
self.sc_data = scData\n self.refDataset = refDataset.astype(float)\n self.refAnnot = refAnnot", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def load_annotations(self):\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n if self.multi_class:\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_infos.append(dict(filename=filename, label=label))\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n if self.subset == 'train':\n image_paths, annotation_paths = self.collect_train_paths()\n elif self.subset == 'val':\n image_paths, annotation_paths = self.collect_val_paths()\n\n for image_path, annotation_path in tqdm(zip(image_paths, annotation_paths)):\n word_annotations = []\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n word_annotations.append(self.parse_line(line))\n should_add = not self.is_latin_required\n if self.is_latin_required:\n for word_annotation in word_annotations:\n if word_annotation['attributes']['language'].lower() == 'latin':\n should_add = True\n break\n if should_add:\n for word_annotation in word_annotations:\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def init_params(self):\n blah", "def _init_from_DataArrays(self, data, validate=True):\n self._data_vars = self._DataArrays_as_mapping(data)\n\n if (len(self) > 1) and validate:\n first = self[0]\n for i in range(1, len(self)):\n da = self[i]\n first._is_compatible(da, raise_error=True)\n\n self._check_all_different_ids(self._data_vars.values())\n\n self.__itemattr = []\n for key, value in self._data_vars.items():\n self._set_name_attr(key, value)\n\n self.plot = _DatasetPlotter(self)\n\n if len(self) > 0:\n self._set_spectral_attributes(self.geometry)\n\n # since Dataset is MutableMapping it has values and keys by default\n # but we delete those to avoid confusion\n # self.values = None\n self.keys = None", "def _initialize(self) -> None:\n p = self.params\n # We make self.input public so that users can access its methods like\n # IdsToStrings if needed.\n with py_utils.infeed_context_scope(\n infeed_host_index=p.infeed_host_index,\n num_infeed_hosts=p.num_infeed_hosts):\n self.input = p.input.Instantiate()\n\n if hasattr(self.input, 'datasource') and isinstance(\n self.input.datasource, datasource.TFDatasetSource):\n # For the special case when the input is implemented by a tf.data.Dataset,\n # use it directly. 
Otherwise roundtrip adaptions may result in returning\n # duplciate batches.\n self._get_next_fn = self.input.datasource.GetNext\n else:\n self._get_next_fn = tf.function(self._get_batch)\n self._num_batches_produced = 0", "def _load_data(self, cfg):\r\n\r\n if self._split == \"train\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TRAIN_LISTS)\r\n elif self._split == \"val\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.VAL_LISTS)\r\n else:\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TEST_LISTS)", "def _initialize_model_params(self):\n\n if 'model' not in self._raw_data_dict:\n raise Error('The \"model\" key is not found in the configuration file. Looks like the parsed file is not '\n 'Object Detection API model configuration file.')\n params = list(self._raw_data_dict['model'].values())[0]\n for rule in mapping_rules:\n self._update_param_using_rule(params, rule)", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n image_paths, annotation_paths = self.collect_train_paths()\n\n for image_path, annotation_path in tqdm(zip(image_paths, annotation_paths)):\n word_annotations = []\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n word_annotations.append(self.parse_line(line))\n should_add = not self.is_latin_required\n if self.is_latin_required:\n for word_annotation in word_annotations:\n if word_annotation['attributes']['language'].lower() == 'latin':\n should_add = True\n break\n if should_add:\n for word_annotation in word_annotations:\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def __init__(self):\n super().__init__()\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)\n self.sim_act_diff_mov_tf = SimilarActorsFromDiffMovies()", "def __init__(self):\n super().__init__()\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)\n self.sim_act_diff_mov_tf = SimilarActorsFromDiffMovies()", "def load_annotations(self, ann_file, N, kind):\n\n self.coco = COCOPoint(ann_file, N=N, kind=kind)\n # The order of returned `cat_ids` will not\n # change with the order of the CLASSES\n self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.img_ids = self.coco.get_img_ids()\n data_infos = []\n total_ann_ids = []\n for i in self.img_ids:\n info = self.coco.load_imgs([i])[0]\n info[\"filename\"] = info[\"file_name\"]\n data_infos.append(info)\n ann_ids = self.coco.get_ann_ids(img_ids=[i])\n total_ann_ids.extend(ann_ids)\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{ann_file}' are not unique!\"\n return data_infos", "def __initialize(self):\n\t\tself.matrix = [None] * self.size\n\t\tself.__get_log_values()\n\t\tfor row in range(self.size):\n\t\t\tself.matrix[row] = [None] * self.size\n\t\tmax_len = self.__get_max_length()\n\t\tdata = self.__get_data(self.text,max_len)\n\t\tmpoly = self.__get_mpoly(data)\n\t\tgpoly = self.__get_gploy()\n\t\tself.final_data = self.__get_final_data(mpoly,gpoly)\n\t\tself.__set_FIP(FP_num = 1)\n\t\tself.__set_FIP(FP_num = 2)\n\t\tself.__set_FIP(FP_num = 3)\n\t\tself.__set_AP()\n\t\tself.__fill_format_info_area()\n\t\tself.__set_TP()", "def __init__(self, statistics, 
*histograms, **style):\n \n Annotation.__init__(self, **style)\n\n # Split the specified statistics.\n self.statistics = tuple(statistics)\n self.histograms = histograms", "def __init__(self):\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)", "def __init__(self, **kwargs):\n is_training = kwargs.get('is_training', True)\n rootfolder = kwargs['rootfolder']\n dtype = kwargs.get('dtype', np.float64)\n self._load_mnist(rootfolder, is_training, dtype)\n # normalize data.\n self._data /= 255.\n ndarraydata.NdarrayDataLayer.__init__(\n self, sources=[self._data, self._label], **kwargs)", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def __init__(self):\n self.__dataset = None", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def __init__(self, unencoded_dataset): \n self._unencoded_dataset = unencoded_dataset\n self._plain_train = self._get_plain_train()\n self._plain_test = self._get_plain_test()\n self._cipher_train = self._get_cipher_train()\n self._cipher_test = self._get_cipher_test()", "def __init__(__self__, *,\n metric_annotations_allow_list: Optional[pulumi.Input[str]] = None,\n metric_labels_allowlist: Optional[pulumi.Input[str]] = None):\n if metric_annotations_allow_list is not None:\n pulumi.set(__self__, \"metric_annotations_allow_list\", metric_annotations_allow_list)\n if metric_labels_allowlist is not None:\n pulumi.set(__self__, \"metric_labels_allowlist\", metric_labels_allowlist)", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def init(self, datasetInfo):\n #use user's dataset directory if not specified\n if datasetInfo is None: raise RecognizerError(\"No dataset file specified.\")\n \n if os.path.isfile(datasetInfo):\n datasetInfos = [datasetInfo]\n elif os.path.isdir(datasetInfo):\n datasetInfos = [os.path.join(datasetInfo, f) for f in os.listdir(datasetInfo) \\\n if f.endswith('.xml')]\n elif isinstance(datasetInfo, (types.ListType, types.TupleType)):\n datasetInfos = datasetInfo\n else: raise RecognizerError, \"Unknown datasetInfo type: %s\" % type(datasetInfo)\n \n patterns = {}; startsWiths = []; matchers = {}\n for f in datasetInfos:\n info, ns = getXmlEtree(f)\n if not ns.has_key('_'): ns['_'] = ns['_default']\n for dataset in info:\n \n #skip if comment\n if isinstance(dataset, lxml.etree._Comment): continue\n \n ipath = xpath(dataset, './/_:ipath/text()', ns)\n fileTemplate = xpath(dataset, './/_:fileTemplate/text()', ns)\n startsWith = fileTemplate[0:fileTemplate.index('$')]\n filePattern = xpath(dataset, './/_:filePattern/text()', ns)\n if matchers.has_key(startsWith):\n 
matchers[startsWith].append((re.compile(filePattern), ipath, dataset, ns))\n else: matchers[startsWith] = [(re.compile(filePattern), ipath, dataset, ns)]\n if startsWith != '': startsWiths.append(startsWith)\n startsWithPattern = r'(' + '|'.join(startsWiths) + ')'\n startsWithMatcher = re.compile(startsWithPattern)\n return startsWithMatcher, matchers", "def __init__(self):\n super(BlipDocument, self).__init__()\n self.annotations = Annotations()\n self.rotation = 0", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n data = hload_pkl(self.ann_file)\n\n video_infos = []\n for video_info in data:\n filename = video_info['filename']\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_info['filename'] = filename\n label = video_info['label']\n if self.multi_class and isinstance(label, np.ndarray):\n video_info['label'] = label.astype(np.float32)\n\n video_infos.append(video_info)\n\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos", "def __init__(self, *args, **kwargs):\n super(DeepSpeakerDataLoader, self).__init__(*args, **kwargs)", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos", "def __attrs_post_init__(self):\n # List of sources\n src_list = (\n self._source,\n self._plaintext,\n self._zlib,\n self._fname_plain,\n self._fname_zlib,\n self._dict_json,\n self._url,\n )\n src_count = sum(1 for _ in src_list if _ is not None)\n\n # Complain if multiple sources provided\n if src_count > 1:\n raise RuntimeError(\"At most one data source can be specified.\")\n\n # Leave uninitialized (\"manual\" init) if no source provided\n if src_count == 0:\n self.source_type = SourceTypes.Manual\n return\n\n # If general ._source was provided, run the generalized import\n if self._source is not None:\n self._general_import()\n return\n\n # For all of these below, '()' is passed as 'exc' argument since\n # desire _try_import not to handle any exception types\n\n # Plaintext str or bytes\n # Special case, since preconverting input.\n if self._plaintext is not None:\n self._try_import(\n self._import_plaintext_bytes, _utf8_encode(self._plaintext), ()\n )\n self.source_type = SourceTypes.BytesPlaintext\n return\n\n # Remainder are iterable\n for src, fxn, st in zip(\n (\n self._zlib,\n self._fname_plain,\n self._fname_zlib,\n self._dict_json,\n self._url,\n ),\n (\n self._import_zlib_bytes,\n self._import_plaintext_fname,\n self._import_zlib_fname,\n self._import_json_dict,\n self._import_url,\n ),\n (\n SourceTypes.BytesZlib,\n SourceTypes.FnamePlaintext,\n SourceTypes.FnameZlib,\n SourceTypes.DictJSON,\n SourceTypes.URL,\n ),\n ):\n if src is not None:\n self._try_import(fxn, 
src, ())\n self.source_type = st\n return", "def prepareData(self, *data):\n arguments = 8\n (self.X, self.X_name, self.Y, self.Y_name, self.alignment,\n self.model, self.annotations, self.args) = tuple(data[:arguments])\n \n self.width = self.args.beam_width\n self.mathType = self.args.mathType\n self.io_files = {\n 'input': self.args.intermediate_input_files,\n 'output': self.args.intermediate_output_files\n }\n self.repeat_width = self.args.repeat_width\n self.cons_count = self.args.cons_count\n self.posterior_processors = self.args.posterior_processors \n\n self.positionGenerator = \\\n list(AlignmentBeamGenerator(self.alignment, self.width))\n \n for i in range(len(self.model.states)):\n self.model.states[i].computeHints(self)\n\n return data[arguments:]", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def __init__(self, seq, annotation=False):\n self.seq = seq\n self.length = len(seq)\n self.annotation = annotation", "def __init__(self, train_y, test_id, train_id, tags, data_dir='data/output/'):\n self.train_y = train_y\n self.test_id = test_id\n self.train_id = train_id\n self.TAGS = tags\n self.data_dir = data_dir", "def initialize(self, data_config, params=None):\n if params is None:\n params = {}\n self.params.update(params)\n dropout = self.params.get(\"dropout\")\n if dropout is not None:\n misc.set_dropout(self, dropout)\n self.examples_inputter.initialize(data_config)", "def _setup(self):\n numerator = np.arange(1, MAX_NUMERATOR, dtype=float)\n denominator = np.arange(1, MAX_DENOMINATOR, dtype=float)\n outer = np.outer(numerator, 1/denominator)\n self.ratios = np.unique(outer[outer!=1])\n\n self.known_periods, self.known_dms, self.known_ras, self.known_decls = \\\n np.loadtxt(KNOWNPSR_FILENM, usecols=(0,1,2,3), unpack=True)", "def __init__(self, **kwargs):\n\n for name, attr in kwargs.items():\n setattr(self, name, attr)\n\n if 'scope' in kwargs.keys():\n self.is_main = True\n\n # collect all fields from all configs and regular kwargs\n fields = (_get_fields(attr) for name, attr in\n sorted(kwargs.items(), key=itemgetter(0))\n if not name == \"scope\")\n\n self.identifier_fields = sum(fields, [])", "def __init__(self, labels, tokens, samples, characters=None):\n self._characters = characters\n self._samples = samples\n self._tokens = tokens\n self._labels = labels\n\n # Calculated values\n # shape = (batch size)\n self._sequence_lengths = np.ones(labels.shape[0]) * labels.shape[1]\n if characters is not None:\n # shape = (batch size, sequence length)\n self._word_lengths = np.ones((characters.shape[0], characters.shape[1])) * characters.shape[2]\n else:\n self._word_lengths = None", "def _initialize_parser_keys(self):\n self.parser.source_role_marker = TRANSFER_ROLES.SOURCE\n self.parser.target_role_marker = TRANSFER_ROLES.TARGET\n rack_shape_agg = get_root_aggregate(IRackShape)\n rack_shape_agg.filter = None\n self.parser.allowed_rack_dimensions = [(rs.number_rows,\n rs.number_columns)\n for rs in rack_shape_agg]", "def init_batch(self):\n pass", "def __init__(self):\n this = _libsbml.new_RDFAnnotationParser()\n try: self.this.append(this)\n except: self.this = this", "def _ann_parser(self):\n pd = tfds.core.lazy_imports.pandas\n with tf.io.gfile.GFile(self.ann_path) as csv_f:\n # read file\n df = pd.read_csv(csv_f, sep=',')\n\n # split\n return {'train_val': 
df[df['Image Index'].isin(self.train_val_list)],\n 'test': df[df['Image Index'].isin(self.test_list)]\n }", "def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, root, labels, transform=None):\n self.transform = transform\n self.labels = []\n for label in labels:\n self.labels.append(label)\n self.labels.append(\"Not_\" + label)\n\n _mapper = {}\n with open(\"expr/CelebAMask-HQ-attribute-anno.txt\", \"r\") as fin:\n num = fin.readline()\n headers = fin.readline().split()\n for label in labels:\n assert label in headers\n labels_idx = [headers.index(label) for label in labels]\n\n for line in fin.readlines():\n line = line.split()\n file_name, labels = line[0], line[1:]\n assert len(labels) == len(headers)\n _mapper[file_name] = [2 * num if labels[label_idx] == '1' else 2 * num + 1 for num, label_idx in enumerate(labels_idx)]\n\n self.celeba_mapper = {}\n with open(\"expr/CelebA-HQ-to-CelebA-mapping.txt\", \"r\") as fin:\n header = fin.readline()\n count = 0\n for line in fin.readlines():\n _, _, file_name = line.split()\n org_name = f\"{count}.jpg\"\n if org_name in _mapper:\n self.celeba_mapper[file_name] = _mapper[f\"{count}.jpg\"]\n count += 1\n\n self.samples, self.targets = self._make_dataset(root)\n print(\"len dataset\", len(self.targets))", "def __init__(self, params):\r\n self.Params.update(params)\r\n self._tracked_properties.extend(\r\n ['Application', 'Algorithm', 'Citation'])", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n n_images = 1000 if self.is_train else 500\n for i in tqdm(range(1, n_images + 1)):\n image_path = os.path.join(self.images_folder, 'img_{}.jpg'.format(i))\n annotation_path = os.path.join(self.annotations_folder, 'gt_img_{}.txt'.format(i))\n\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n dataset.add_bbox(image_path, imagesize.get(image_path), self.parse_line(line))\n\n return dataset", "def __init__(self, image_dir, attr_path, selected_attrs, transform, mode):\n self.image_dir = image_dir\n self.attr_path = attr_path\n self.selected_attrs = selected_attrs\n self.transform = transform\n self.mode = mode\n self.train_dataset = []\n self.test_dataset = []\n self.attr2idx = {}\n self.idx2attr = {}\n self.preprocess()\n\n if mode == 'train':\n self.num_images = len(self.train_dataset)\n else:\n self.num_images = len(self.test_dataset)", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def init(self, parameters, agent_parameters):\n pass", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def initialize(self, **kwargs):" ]
[ "0.6673975", "0.65937763", "0.6560803", "0.65485716", "0.65461046", "0.64888483", "0.6485457", "0.64349765", "0.63921803", "0.6385465", "0.63169533", "0.62924635", "0.6280703", "0.6174638", "0.61208785", "0.60870314", "0.6060363", "0.6031667", "0.60257536", "0.60226005", "0.6016223", "0.60115653", "0.5995642", "0.5978536", "0.5966961", "0.5956202", "0.5948495", "0.5943376", "0.5938173", "0.5936827", "0.5928127", "0.5927737", "0.59105784", "0.58964443", "0.5887276", "0.58665425", "0.5865746", "0.58598924", "0.5855532", "0.58480716", "0.58366096", "0.58227754", "0.58161086", "0.5804865", "0.5798942", "0.57870966", "0.5774639", "0.5774166", "0.5772111", "0.57648754", "0.57616323", "0.5759075", "0.57567984", "0.57564145", "0.57551974", "0.5748377", "0.5747569", "0.5729263", "0.5723473", "0.5705622", "0.5705622", "0.5693871", "0.56938434", "0.5691694", "0.56905377", "0.56904155", "0.5688133", "0.56861526", "0.56859785", "0.56836176", "0.56835157", "0.5682505", "0.568152", "0.5679199", "0.5675374", "0.56715477", "0.56698877", "0.5668108", "0.5661746", "0.5654946", "0.5649227", "0.56481075", "0.56348765", "0.5632306", "0.5628584", "0.56244683", "0.5623508", "0.56198215", "0.5614415", "0.56072223", "0.56065804", "0.5606354", "0.56023324", "0.55968267", "0.55919474", "0.5589821", "0.5589352", "0.55879337", "0.5581526", "0.5579606", "0.5579317" ]
0.0
-1
Parses data to an image and associated training labels.
def __call__(self, value):
    with tf.name_scope('parser'):
        data = decode(value)
        return self._parse_fn(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image,\n self._output_size,\n self._output_size,\n aug_scale_min=self._aug_scale_min,\n aug_scale_max=self._aug_scale_max)\n\n # Resizes and crops boxes.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n # Pad label and make sure the padded region assigned to the ignore label.\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n label = input_utils.resize_and_crop_masks(\n label, image_scale, self._output_size, offset)\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def _prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! 
\n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def __parse_image_load(self, image_path: str, image_label: int):\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n if self.rgb:\n flag = cv2.IMREAD_COLOR\n else:\n flag = cv2.IMREAD_GRAYSCALE\n\n img = cv2.imread(image_path, flags=flag)\n img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA).astype(\n np.float32)\n\n if self.normalize_images:\n img_mean = np.mean(img, axis=(0, 1))\n img_std = np.std(img, axis=(0, 1))\n\n img = (img - img_mean) / img_std\n\n return img, one_hot", "def convert_labels() -> None:\n data_folder = 'images'\n validation_split = 0.10\n\n # Convert annotations and split into validation and train set\n number_images = int(len(os.listdir(data_folder)) / 2)\n train_size = int(number_images * (1 - validation_split))\n val_size = number_images - train_size\n\n print(f'Training dataset size: {train_size}')\n print(f'Validation dataset size: {val_size}')\n\n with open('train.txt', 'w') as train_file, open('val.txt', 'w') as val_file:\n files = os.listdir(data_folder)\n print(len(files))\n # shuffle otherwise validation is from the same session\n random.shuffle(files)\n processed = 0\n for file_name in files:\n if file_name.split('.')[1] == 'jpg':\n # if image has no labels\n write = False\n if processed < train_size:\n file_to_write = train_file\n else:\n file_to_write = val_file\n\n with open(f'{data_folder}/{file_name}'.split('.')[0] + '.txt') as label_file:\n labels = []\n for line in label_file:\n line = line.split(' ')\n line[-1] = line[-1].rstrip()\n\n img = cv2.imread(f'{data_folder}/{file_name}')\n img_height = img.shape[0]\n img_width = img.shape[1]\n \n x = float(line[1]) * img_width\n y = float(line[2]) * img_height\n w = float(line[3]) * img_width\n h = float(line[4]) * img_height\n\n xmin = int(x - w/2)\n ymin = int(y - h/2)\n xmax = int(x + w/2)\n ymax = int(y + h/2)\n\n labels.append(f' {xmin},{ymin},{xmax},{ymax},{line[0]}')\n if len(labels) > 0:\n write = True\n file_to_write.write(f'{data_folder}/{file_name}')\n for label in labels:\n file_to_write.write(label)\n if write:\n file_to_write.write('\\n') \n processed += 1\n print(f'[{processed}/{number_images}] Processed {file_name}')", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = 
batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def _parse_eval_data(self, data):\n image, label = self._prepare_image_and_label(data)\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n\n if self._resize_eval:\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image, self._output_size, self._output_size)\n\n # Resizes and crops mask.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n label = input_utils.resize_and_crop_masks(label, image_scale,\n self._output_size, offset)\n else:\n # Pads image and mask to output size.\n image = tf.image.pad_to_bounding_box(image, 0, 0, self._output_size[0],\n self._output_size[1])\n label = tf.image.pad_to_bounding_box(label, 0, 0, self._output_size[0],\n self._output_size[1])\n\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def preprocess_data():\n le = preprocessing.LabelEncoder()\n # Reshape and normalize pixel values to be between 0 and 1\n train_images_reshaped = 
train_images.reshape(len(train_images), 1024, 1024, 1)/255.\n test_images_reshaped = test_images.reshape(len(test_images), 1024, 1024, 1)/255.\n\n return train_images_reshaped, test_images_reshaped, le.fit_transform(train_labels), le.fit_transform(test_labels)", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def parse_data(filename, data_path, ground_truths_path):\n with open(filename) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n\n data = []\n for i, item in enumerate(content):\n if i == 0:\n continue\n parametres = item.split(',')\n\n image = cv2.imread(os.path.join(data_path, parametres[0]), -1)\n image_processed = image * np.uint16(65535.0 / max(image.ravel()))\n image_processed = cv2.resize(image_processed, (960, 960), interpolation = cv2.INTER_AREA)\n\n ground_truth = cv2.imread(os.path.join(ground_truths_path, parametres[0][:parametres[0].rfind('.')] + \".png\"), -1)\n ground_truth_processed = np.uint16(np.copy(ground_truth))\n indices = np.where(np.any(ground_truth_processed != [0, 0, 255], axis = -1))\n ground_truth_processed[indices] = [0, 0, 0]\n indices = 
np.where(np.all(ground_truth_processed == [0, 0, 255], axis = -1))\n ground_truth_processed[indices] = [65535, 65535, 65535]\n ground_truth_processed = cv2.cvtColor(ground_truth_processed, cv2.COLOR_BGR2GRAY)\n ground_truth_processed = cv2.resize(ground_truth_processed, (960, 960), interpolation = cv2.INTER_AREA)\n \n img = Image(image, image_processed, ground_truth, ground_truth_processed,\n parametres[0], parametres[1], parametres[2], parametres[3], \n parametres[4], parametres[5], parametres[6], parametres[7], \n parametres[8])\n data.append(img)\n\n return data", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def train(training_labels_filename='train.tsv', training_image_filename='train.png', num_training_cards=56):\r\n model = {}\r\n\r\n labels = {}\r\n with open(training_labels_filename, 'r') as file:\r\n for line in file:\r\n key, num, suit = line.strip().split()\r\n labels[int(key)] = (num, suit)\r\n\r\n training_img = cv2.imread(training_image_filename)\r\n for i, card in enumerate(extract_cards(training_img, num_training_cards)):\r\n model[i] = (labels[i], preprocess(card))\r\n\r\n return model", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def preprocess_images_and_labels(config,\n image_file_data,\n train_image_id_labels,\n val_image_id_labels):\n # Commenting out the line below in keeping with the comment block above the function. 
We don't\n # actually need to do this, because safe_create_dir will never overwrite an existing directory,\n # but better to be safe.\n # safe_create_dir(config.ImageDataConfig.preprocessed_image_path)\n\n # Add in a counter for tracking progress via the console\n counter = 0\n\n train_image_metadata, val_image_metadata = [], []\n for image_data in image_file_data:\n\n if image_data.image_id in train_image_id_labels:\n image_metadata = train_image_metadata\n image_label = train_image_id_labels[image_data.image_id]\n else:\n image_metadata = val_image_metadata\n image_label = val_image_id_labels[image_data.image_id]\n\n # TODO Stop squishing the image and handle cropping correctly sized windows at sample time.\n\n # Comment out the chunk below to avoid re-saving our images, which we have\n # already preprocessed. We just want to generate metadata for them.\n\t\"\"\"\n image = load_img(\n image_data.original_file_path,\n target_size=config.ImageDataConfig.size)\n new_file_path = os.path.join(\n config.ImageDataConfig.preprocessed_image_path,\n image_data.original_filename.upper().replace('PNG', 'JPG')) # Convert all images to jpegs.\n image.save(new_file_path, format='JPEG', quality=85)\n\t\"\"\"\n\n # We generate metadata, setting the image filepath as the original filepath, as we\n # have already preprocessed beforehand.\n original_file_path = image_data.original_file_path\n\n image_metadata.append(ProcessedImageMetadata(image_data.image_id, original_file_path, image_label))\n\n return train_image_metadata, val_image_metadata", "def read_training_pixels(image_path, label_path):\n\n if io_function.is_file_exist(image_path) is False or io_function.is_file_exist(label_path) is False:\n return False\n\n # check: they are from the same polygons\n polygon_index_img = os.path.basename(image_path).split('_')[-3]\n polygon_index_label = os.path.basename(label_path).split('_')[-3]\n if polygon_index_img != polygon_index_label:\n raise ValueError(\"%s and %s are not from the same training polygons\" % (image_path, label_path))\n\n with rasterio.open(image_path) as img_obj:\n # read the all bands\n indexes = img_obj.indexes\n nbands = len(indexes)\n img_data = img_obj.read(indexes)\n\n with rasterio.open(label_path) as img_obj:\n # read the all bands (only have one band)\n indexes = img_obj.indexes\n if len(indexes) != 1:\n raise ValueError('error, the label should only have one band')\n\n label_data = img_obj.read(indexes)\n\n # check the size\n # print(img_data.shape)\n # print(label_data.shape)\n if img_data.shape[1] != label_data.shape[1] or img_data.shape[2] != label_data.shape[2]:\n raise ValueError('the image and label have different size')\n\n X_arr = img_data.reshape(nbands, -1)\n y_arr = label_data.reshape(-1)\n\n basic.outputlogMessage(str(X_arr.shape))\n basic.outputlogMessage(str(y_arr.shape))\n # sys.exit(1)\n\n return X_arr, y_arr", "def Read_Raw_Images(path_data,path_labels):\n \n data = skimage.io.imread(path_data).astype(np.float32)\n for i in range(data.shape[0]):\n data[i,...] 
= skimage.exposure.rescale_intensity(data[i,...], out_range=(0,1))\n data_labels = skimage.io.imread(path_labels) > 0\n \n training_data=data[0:25,:,:]\n training_labels=data_labels[0:25,:,:]\n \n testing_data=data[25:data.shape[0],:,:]\n testing_labels=data_labels[25:data.shape[0],:,:]\n \n np.save(\"data.npy\",training_data)\n np.save(\"labels.npy\",training_labels)\n np.save(\"data_validation.npy\",testing_data)\n np.save(\"labels_validation.npy\",testing_labels)\n \n return()", "def __parse_image(self, image_path: str, image_label: int) -> tuple:\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n img_file = tf.read_file(image_path)\n img_decoded = tf.image.decode_jpeg(img_file, channels=self.image_shape[2])\n img_decoded = tf.image.resize_images(img_decoded, self.image_shape[0:2])\n img_decoded = tf.cast(img_decoded, tf.float32)\n if self.normalize_images:\n img_decoded = tf.image.per_image_standardization(img_decoded)\n\n return img_decoded, one_hot", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def prepare_data(data):\n\n image_array = np.zeros(shape=(len(data), 48, 48))\n image_label = np.array(list(map(int, data['emotion'])))\n\n for i, row in enumerate(data.index):\n image = np.fromstring(data.loc[row, 'pixels'], dtype=int, sep=' ')\n image = np.reshape(image, (48, 48))\n\n image = face_detection(image.astype(np.uint8))\n\n image_array[i] = image\n\n return image_array, image_label", "def __multilabel_processing(self):\n # read the raw dataset\n self.data['image_name'] = self.data['image_name'].map(lambda x: '{}.{}'.format(x, img_format))\n self.data['tags'] = self.data['tags'].map(lambda x: x.split())\n\n # create a df with the same number of rows as the dataset filled with the name of the unique values in tags\n label_names = self.data['tags'].explode().unique().tolist()\n label_df = pd.DataFrame([label_names] * self.data.shape[0], columns=label_names)\n\n # binarize the labels according to if they exist for each image or not\n self.data = pd.concat([self.data, label_df], axis=1)\n self.data[['image_name'] + label_names] = self.data.apply(lambda x: pd.Series([x[0]] + [1 if label in x[1] else 0 for label in x[2:]]), axis=1)", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as 
file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def parse_function(images, labels, n_classes, resized_shape, palette):\r\n images = load_image(images, resized_shape)\r\n labels = load_label(labels, n_classes, resized_shape, palette)\r\n return images, labels", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)", "def create_XY(self, data, min_dimension, label_names):\n # Create empty array, X, for the images, and an empty list, y, for the image labels\n X = np.empty((0, min_dimension, min_dimension, 3))\n Y = []\n \n # For each artist name listed in label_names\n for name in label_names:\n \n # Get all images for each artist\n images = glob.glob(os.path.join(data, name, \"*.jpg\"))\n \n # For each image in images \n for image in tqdm(images): # I use tqdm() to allow the user to follow along\n \n # Load image\n loaded_img = cv2.imread(image)\n \n # Resize image to the specified dimensions\n resized_img = cv2.resize(loaded_img, (min_dimension, min_dimension), interpolation = cv2.INTER_AREA) # INTER_AREA means that it is resizing using pixel-area relation which was a suggested method by Ross\n \n # Create array of image\n image_array = np.array([np.array(resized_img)])\n \n # Append to trainX array and trainY list\n X = np.vstack((X, image_array))\n Y.append(name)\n \n return X, Y", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def load_data(datafile, num_class, save=False, save_path='dataset.pkl'):\n train_list = open(datafile, 'r')\n labels = []\n images = []\n for line in train_list:\n tmp = line.strip().split(' ')\n filepath = tmp[0]\n print(filepath)\n img = Image.open(filepath)\n img = prep.resize_image(img, 224, 224)\n np_img = prep.pil_to_nparray(img)\n images.append(np_img)\n\n # one-hot encoder\n index = int(tmp[1])\n label = np.zeros(num_class)\n label[index] = 1\n labels.append(label)\n if save:\n pickle.dump((images, labels), open(save_path, 'wb'))\n return images, labels", "def _parse_predict_data(self, data):\n image, labels = self._parse_eval_data(data)\n return {\n 'images': image,\n 'labels': labels\n }", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", 
"def data_splits(im_dir='/media/ignacio/Datos/plant_net/images_ori', tag=False):\n homedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n splits_dir = os.path.join(homedir, 'data', 'data_splits')\n print(\"Loading data...\")\n file_list = os.listdir(splits_dir)\n\n # Metadata labels\n metadata = np.genfromtxt(os.path.join(splits_dir, 'synsets.txt'), dtype='str', delimiter='/n')\n\n # Training splits\n train = np.genfromtxt(os.path.join(splits_dir, 'train.txt'), dtype='str', delimiter=' ')\n y_train = train[:, -1].astype(np.int32)\n if tag:\n X_train = train[:, 0:2].astype(object)\n X_train[:, 0] = np.array([os.path.join(im_dir, i) for i in X_train[:, 0]])\n else:\n X_train = np.array([os.path.join(im_dir, i) for i in train[:, 0]])\n\n # Validation splits\n if 'val.txt' in file_list:\n val = np.genfromtxt(os.path.join(splits_dir, 'val.txt'), dtype='str', delimiter=' ')\n y_val = val[:, -1].astype(np.int32)\n if tag:\n X_val = val[:, 0:2].astype(object)\n X_val[:, 0] = np.array([os.path.join(im_dir, i) for i in X_val[:, 0]])\n else:\n X_val = np.array([os.path.join(im_dir, i) for i in val[:, 0]])\n else:\n print 'Training with no validation data.'\n X_val, y_val = None, None\n\n return X_train, y_train, X_val, y_val, metadata", "def populate_data(self):\n training_labels = open('./digitdata/traininglabels', 'r')\n training_images = open('./digitdata/trainingimages', 'r')\n count = 0\n with training_images as ti:\n data = list(csv.reader(ti))\n data = [i for i in data if i]\n for label in training_labels:\n l = 0\n while l < 28:\n coord = count + l\n w = 0\n while w < 28:\n int_label = int(label)\n if data[coord][0][w] == \"+\":\n self.class_probabilities[int_label][l][w][0] += 1\n if data[coord][0][w] == \"#\":\n self.class_probabilities[int_label][l][w][1] += 1\n if data[coord][0][w] == \" \":\n self.class_probabilities[int_label][l][w][2] += 1 \n w += 1\n l += 1\n count += 28 \n print self.class_probabilities", "def extract(self, source):\n\t\tp = Parser()\n\t\tf = open_pds(source)\n\t\tif self.log: self.log.debug(\"Parsing '%s'\" % (source))\n\t\tself.labels = p.parse(f)\n\t\tif self.log: self.log.debug(\"Found %d labels\" % (len(self.labels)))\n\t\tif self._check_image_is_supported():\n\t\t\tif self.log: self.log.debug(\"Image in '%s' is supported\" % (source))\n\t\t\tdim = self._get_image_dimensions()\n\t\t\tloc = self._get_image_location()\n\t\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\t\t\tmd5Checksum = self._get_image_checksum()\n\t\t\tif self.log: self.log.debug(\"Image dimensions should be %s\" % (str(dim)))\n\t\t\tif self.log: self.log.debug(\"Seeking to image data at %d\" % (loc))\n\t\t\tf.seek(loc)\n\t\t\tif imageSampleBits == 8:\n\t\t\t\treadSize = dim[0] * dim[1]\n\t\t\telif imageSampleBits == 16:\n\t\t\t\treadSize = dim[0] * dim[1] * 2\n\t\t\tprint readSize\n\t\t\tif self.log: self.log.debug(\"Seek successful, reading data (%s)\" % (readSize))\n\t\t\t# rawImageData = f.readline()\n\t\t\t# f.seek(-int(self.labels[\"RECORD_BYTES\"]), os.SEEK_CUR)\n\t\t\trawImageData = f.read(readSize)\n\t\t\tif md5Checksum:\n\t\t\t\trawImageChecksum = hashlib.md5(rawImageData).hexdigest()\n\t\t\t\tchecksumVerificationPassed = rawImageChecksum == md5Checksum and True or False\n\t\t\t\tif not checksumVerificationPassed:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification failed\")\n\t\t\t\t\tif self.raisesChecksumError:\n\t\t\t\t\t\terrorMessage = \"Verification failed! 
Expected '%s' but got '%s'.\" % (md5Checksum, rawImageChecksum)\n\t\t\t\t\t\traise ChecksumError, errorMessage\n\t\t\t\telse:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification passed\")\n\t\t\tif self.log: self.log.debug(\"Read successful (len: %d), creating Image object\" % (len(rawImageData)))\n\t\t\t# The frombuffer defaults may change in a future release;\n\t\t\t# for portability, change the call to read:\n\t\t\t# frombuffer(mode, size, data, 'raw', mode, 0, 1).\n\t\t\tif (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'):\n\t\t\t\t#img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1)\n\t\t\t\timg = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1)\n\t\t\t\timg = ImageMath.eval(\"convert(a/16.0, 'L')\", a=img)\n\t\t\telse:\n\t\t\t\timg = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)\n\t\t\tif self.log:\n\t\t\t\tself.log.debug(\"Image result: %s\" % (str(img)))\n\t\t\t\tself.log.debug(\"Image info: %s\" % (str(img.info)))\n\t\t\t\tself.log.debug(\"Image mode: %s\" % (str(img.mode)))\n\t\t\t\tself.log.debug(\"Image size: %s\" % (str(img.size)))\n\t\telse:\n\t\t\tif self.log: self.log.error(\"Image is not supported '%s'\" % (source))\n\t\t\timg = None\n\t\tf.close()\n\n\t\treturn img, self.labels", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def load_data(train_test_ratio = 0.8, class_range = 8, randomised = True):\n\n # Get image filenames, labels, and the number of classification classes\n filenames = glob.glob(\"../img/*.png\")\n if randomised:\n random.shuffle(filenames)\n\n img_labels = []\n for filename in filenames:\n label = int(filename.split(\"-d\",1)[1].split('-',1)[0])\n label = max(0, (label - 1) // (class_range))\n img_labels.append(label)\n\n num_classes = max(img_labels) + 1 # E.g. 
max label 5 -> 0-5 inclusive\n num_total_samples = len(filenames)\n num_train_samples = int(num_total_samples * train_test_ratio)\n num_test_samples = num_total_samples - num_train_samples\n\n training_images = np.empty(\n (num_train_samples, OUTPUT_RES, OUTPUT_RES, 3), dtype='uint8'\n )\n training_labels = np.asarray(img_labels[:num_train_samples], dtype='uint8')\n\n for i in range(0, num_train_samples):\n training_images[i] = parse_img(filenames[i])\n\n test_images = np.empty(\n (num_test_samples, OUTPUT_RES, OUTPUT_RES, 3), dtype='uint8'\n )\n test_labels = np.asarray(img_labels[num_train_samples:], dtype='uint8')\n\n for i in range(0, num_test_samples):\n test_images[i] = parse_img(filenames[i + num_train_samples])\n\n return ((training_images, training_labels),\n (test_images, test_labels),\n num_classes)", "def _load_images_labels(self):\n path_dataset_file = self.path_model_id.joinpath(f'{self.set_name}_set.csv')\n \n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n if self.shuffle:\n rng = default_rng(self.seed)\n rng.shuffle(rows)\n \n self.n_examples = len(rows)\n\n ds_files = tf.data.Dataset.from_tensor_slices(\n [path.join(str(self.path_data), f'label_{row[1]}', row[0])\n for row in rows])\n \n ds_images = ds_files.map(self._load_preprocess_image)\n\n class_labels_enc = self.class_le.fit_transform(\n [row[1] for row in rows])\n\n ds_labels = tf.data.Dataset.from_tensor_slices(\n class_labels_enc)\n\n return ds_images, ds_labels", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def read_images(self, img_name, label_name):\n image_string = tf.read_file(img_name)\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n label_string = tf.read_file(label_name)\n label_decoded = tf.image.decode_jpeg(label_string, channels=1)\n return image_decoded, label_decoded", "def parse(self, row, training=False):\n record = {}\n for alias, key in self.key_map.items():\n if key not in row:\n continue\n if key == 'image':\n image_raw = row[key]\n pil_img = Image.open(BytesIO(image_raw)).convert('RGB')\n img_tensor = self.transformer(pil_img)\n\n elif key == 'bbox/class':\n obj_cls = row[key]\n elif key == 'bbox/xmin':\n obj_xmin = row[key]\n elif key == 'bbox/ymin':\n obj_ymin = row[key]\n elif key == 'bbox/xmax':\n obj_xmax = row[key]\n elif key == 'bbox/ymax':\n obj_ymax = row[key]\n\n bboxes = []\n labels = []\n\n for i in range(len(obj_cls)):\n label = obj_cls[i]\n bbox = [\n float(obj_xmin[i]),\n float(obj_ymin[i]),\n float(obj_xmax[i]),\n float(obj_ymax[i])\n ]\n\n ignore = False\n if self.min_size:\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n if w < self.min_size or h < self.min_size:\n ignore = True\n\n if not ignore:\n bboxes.append(bbox)\n labels.append(label)\n\n if not bboxes:\n bboxes = np.zeros((0, 4))\n labels = np.zeros((0,))\n else:\n bboxes = np.array(bboxes, ndmin=2)\n labels = np.array(labels).astype(np.int64)\n\n width = pil_img.size[0]\n height = pil_img.size[1]\n\n h_scale = 1.0 * self.img_shape[0] / height\n w_scale = 1.0 * self.img_shape[1] / width\n\n scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], dtype=np.float32)\n\n bboxes = self._resize_bboxes(bboxes, scale_factor)\n\n record['image'] = img_tensor\n record['gt_bboxes'] = 
torch.from_numpy(bboxes)\n record['gt_labels'] = torch.from_numpy(labels)\n\n return record", "def load_imagenet(directory):\n path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'\n train_labels = os.listdir(path_train)\n train_data = []\n for label in train_labels:\n imgs_path = os.path.join(path_train, label)\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_labels = os.listdir(path_val)\n test_data = []\n for label in test_labels:\n imgs_path = os.path.join(path_val, label)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, imgs_path, img_name, img, imgs\n \n return train_data, train_labels, test_data, test_labels", "def load_images(test_data_dir, image_size = (300, 300)):\n # loop over the input images\n images_data = []\n labels = []\n imagePaths = sorted(list(paths.list_images(test_data_dir)))\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, image_size)\n image = img_to_array(image)\n images_data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n return images_data, sorted(labels)", "def reformat(x, y, img_size, num_ch, num_class):\n dataset = x.reshape(\n (-1, img_size, img_size, num_ch)).astype(np.float32)\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32)\n return dataset, labels", "def get_training_data(data_dir):\n data = []\n for label in labels:\n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n img_set = os.listdir(path)\n n = len(img_set)\n for i in range(n):\n try:\n img = img_set[i]\n img_arr = cv2.imread(os.path.join(path, img))\n resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size\n data.append([resized_arr, class_num])\n if i % 100 == 0:\n print(\"Processing images: {}/{}\".format(i + 1, n))\n except Exception as e:\n print(e)\n return np.array(data)", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n 
mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def read_data_sets_label(data_dir, label):\n train_data, test_data = read_data_sets(data_dir, one_hot=False)\n train_mask = create_mask(train_data, label)\n test_mask = create_mask(test_data, label)\n return (train_data.images[train_mask], test_data.images[test_mask])", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = 
os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels", "def parse_inputs(filename, label_dict):\n training_instances = dict()\n with open(filename, \"r\") as f:\n reader = csv.reader(f)\n for line in islice(reader, 1, None):\n if not line:\n continue # Ignore empty line\n\n img_path = line[0]\n cls_name = line[-1]\n x1, y1, x2, y2 = [float(x) for x in line[1:-1]]\n an_object = [y1, x1, y2, x2, label_dict[cls_name]]\n\n if img_path in training_instances:\n training_instances[img_path].append(an_object)\n else:\n training_instances[img_path] = [an_object]\n inputs = training_instances.keys()\n labels = {k: np.stack(v).flatten() for k, v in training_instances.items()}\n\n return inputs, labels", "def __init__(self, image_root, label_root, img_x, img_y):\n self.images_path = image_root\n self.labels_path = label_root\n self.data_len = 0\n self.images = []\n self.labels = open(self.labels_path, \"r\").readlines()\n self.transform = transforms.Compose([\n transforms.Resize((img_x, img_y)), \n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n for file in self.labels:\n self.data_len += 1\n tem = file.split(\" \")[0]\n temp = tem.split(\"-\")\n self.images.append(self.images_path + temp[0] + '/' + temp[0] + \"-\" + temp[1] + \"/\" + tem + \".png\")", "def prep_data(labels, image_root):\n labels = split_description(labels)\n labels = convert_plastics(labels)\n\n # Encoding shape and color data\n labels['Shape'] = encode_column(labels[['Shape']])\n labels['Color'] = encode_column(labels[['Color']])\n labels['isPlastic'] = encode_column(labels[['isPlastic']])\n labels = add_filenames(labels, image_root)\n labels = labels.dropna().reset_index()\n\n return labels", "def load_next_image(self):\n # Did we finish an epoch?\n if self._cur == len(self.indexlist):\n self._cur = 0\n shuffle(self.indexlist)\n\n # Load an image\n index = self.indexlist[self._cur] # Get the image index\n # Load and prepare ground truth\n multilabel = np.zeros(20).astype(np.int32)\n anns = load_pascal_annotation(index, self.pascal_root)\n for label in anns['gt_classes']:\n # in the multilabel problem we don't care how MANY instances\n # there are of each class. 
Only if they are present.\n # The \"-1\" is b/c we are not interested in the background\n # class.\n multilabel[label - 1] = 1\n\n self._cur += 1\n return index, multilabel", "def extract_data(train_path, label_path=None, rgb=False):\n\n # Import images as a collection\n X_train = io.ImageCollection(train_path).concatenate()\n \n # Reshape the array in case it is needed\n if rgb:\n # First check if for whatever reason the array is not RGB already\n if not X_train.shape[-1] == 3:\n X_train = X_train[:, :, :, 3]\n else:\n # If not RGB, just reshape to having just one channel, grayscale\n X_train = X_train[:, :, :, np.newaxis]\n \n # Always convert to a valid type and normalize\n X_train = X_train.astype(\"float32\")\n X_train /= 255.0\n\n # Do the same to the segmentation maps, if passed\n if not label_path is None:\n y_train = io.ImageCollection(label_path).concatenate()\n # The segmentation maps should always be shape (:, :, :, 1)\n y_train = y_train[:, :, :, np.newaxis]\n # Convert to a valid type and normalize\n y_train = y_train.astype(\"float32\")\n y_train /= 255.0\n\n return X_train, y_train\n\n return X_train", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap", "def load_data(data_file):\n data = pickle.load(open(data_file, \"rb\"))\n images = data[\"images\"]\n labels = data[\"labels\"]\n\n return images, labels", "def load_dataset():\n # Get the start time\n start_time = time.time()\n\n # Load dataset YAML file\n # This contains all of our image labels, as well as locations of the images themself\n print(\"Reading dataset/dataset.yaml... 
\", end=\"\")\n with open(\"dataset/dataset.yaml\", \"r\") as file:\n dataset = yaml.safe_load(file)\n\n # Get paths, labels\n paths = []\n labels = []\n for sample in dataset:\n # Assign a \"1\" label if we're looking at the ground\n # 0 for everything else: trees, buildings, cars, etc\n label_semantic = max(sample[\"labels\"].keys(), key=sample[\"labels\"].get)\n if max(sample[\"labels\"].values()) < 0.80:\n # Samples that are not obviously in any one category: unsafe\n label=0\n elif label_semantic == \"GROUND\":\n # Safe if >80% ground\n label = 1\n else:\n # Unsafe otherwise, this is usually water\n label = 0\n\n paths.append(sample[\"path\"])\n labels.append(label)\n print(\"done!\", flush=True)\n\n print(\"Loading images\", end=\"\")\n # Get images\n images = np.zeros((len(paths), 128, 128, 3), dtype=np.float32)\n progress = 0.0\n for i, path in enumerate(paths):\n images[i] = np.array(PIL.Image.open(path).resize((128, 128))) / 255.0\n if i / len(paths) > progress:\n progress += 1.0 / 20.0\n print(\".\", end=\"\", flush=True)\n print(\" done!\")\n labels = np.array(labels, dtype=np.int)\n\n # Return\n print(f\"Loaded {len(images)} images in {time.time() - start_time} seconds!\")\n return images, labels", "def dataset_parser(self, value):\n keys_to_features = {\n 'image/encoded':\n tf.io.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.io.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.io.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.io.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.io.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.io.parse_single_example(value, keys_to_features)\n image_bytes = tf.reshape(parsed['image/encoded'], shape=[])\n\n tensors_dict = preprocess_image(\n image_bytes=image_bytes,\n is_training=self.is_training,\n augmentation=self.augmentation,\n use_bfloat16=self.use_bfloat16,\n saturate_uint8=self.saturate_uint8,\n scale_and_center=self.scale_and_center,\n use_default_augment=self.use_default_augment)\n\n # Subtract one so that labels are in [0, 1000).\n label = tf.cast(tf.reshape(parsed['image/class/label'], shape=()) - 1,\n dtype=tf.int32)\n tensors_dict['label'] = label\n\n return tensors_dict", "def load_data():\n # Dictionary mapping image names to labels\n image_name_to_label = dict()\n\n # Store labels associated with image names\n notifier.send(\" Reading metadata...\")\n with open(\"data/metadata.csv\") as file: # Original dataset\n # Use images for normal, virus (unknown type), COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"Label\"].lower() == \"normal\":\n label = 2\n elif row[\"Label_2_Virus_category\"].lower() == \"covid-19\":\n label = 0\n elif row[\"Label_1_Virus_category\"].lower() == \"virus\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"X_ray_image_name\"]] = label\n with open(\"data/metadata2.csv\") as file: # GitHub dataset\n # Use COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"filename\"] in image_name_to_label: # Image already added\n continue\n if \"covid-19\" in row[\"finding\"].lower():\n label = 0\n elif row[\"finding\"].lower() == \"sars\":\n label = 1\n else:\n 
continue\n image_name_to_label[row[\"filename\"]] = label\n with open(\"data/metadata_COVID-19.csv\") as file: # Additional COVID-19 images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"COVID-19/\" + row[\"FILE NAME\"] + \".\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 0\n with open(\"data/metadata_ViralPneumonia.csv\") as file: # Additional virus images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"ViralPneumonia/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 1\n with open(\"data/metadata_Normal.csv\") as file: # Additional normal images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"Normal/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 2\n\n notifier.send(\" Loading images...\")\n images, labels = load_images(image_name_to_label)\n\n notifier.send(\" Splitting data...\")\n return split_data(images, labels)", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def split_data(images, labels):\n images, labels = shuffle_data_pair(images, labels)\n\n num_covid_points = sum(map(lambda label: label == 0, labels))\n\n # Calculate split\n num_test = int(num_covid_points * 0.1)\n num_covid_train = num_covid_points - num_test * 2\n num_other_train = int(num_covid_train * 1.1)\n\n # (train, validate, test) points added\n num_points_added = [\n [0, 0, 0], # COVID-19\n [0, 0, 0], # Viral pneumonia\n [0, 0, 0] # Normal\n ]\n\n # Datasets\n images_train = []\n labels_train = []\n images_validate = []\n labels_validate = []\n images_test = []\n labels_test = []\n\n # Add images and labels to datasets\n notifier.send(\" Adding images and labels to dataset...\")\n for i, label in enumerate(labels):\n print(f\" Point: {i} / {len(labels)}\")\n completed_labels = [False, False, False] # Enough of label added\n if all(completed_labels):\n break\n for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal\n if completed_labels[j]:\n continue\n if label == j:\n # Add training data\n can_add_training = False\n if j == 0: # COVID-19\n if num_points_added[j][0] < num_covid_train:\n can_add_training = True\n num_points_added[j][0] += 1\n elif num_points_added[j][0] < num_other_train: # Not COVID-19\n can_add_training = True\n num_points_added[j][0] += 1\n if can_add_training:\n images_train.append(images[i])\n labels_train.append(labels[i])\n break\n\n # Add validation data\n if num_points_added[j][1] < num_test:\n num_points_added[j][1] += 1\n 
images_validate.append(images[i])\n labels_validate.append(labels[i])\n break\n\n # Add testing data\n if num_points_added[j][2] < num_test:\n num_points_added[j][2] += 1\n images_test.append(images[i])\n labels_test.append(labels[i])\n break\n\n # Point couldn't be added anywhere: label is complete\n completed_labels[j] = True\n break\n\n # Shuffle all data\n notifier.send(\" Shuffling data...\")\n images_train, labels_train = shuffle_data_pair(\n images_train, labels_train\n )\n images_validate, labels_validate = shuffle_data_pair(\n images_validate, labels_validate\n )\n images_test, labels_test = shuffle_data_pair(\n images_test, labels_test\n )\n\n if PLOT_LABELS:\n # Plot data frequencies\n plt.hist(labels, bins=3)\n plt.title(\"Labels\")\n\n plt.hist(labels_train, bins=3)\n plt.title(\"Train Labels\")\n\n plt.hist(labels_validate, bins=3)\n plt.title(\"Validate Labels\")\n\n plt.hist(labels_test, bins=3)\n plt.title(\"Test Labels\")\n\n plt.show()\n\n # Make labels categorical\n notifier.send(\" Making labels categorical: train...\")\n labels_train = tf.keras.utils.to_categorical(labels_train)\n notifier.send(\" Making labels categorical: validate...\")\n labels_validate = tf.keras.utils.to_categorical(labels_validate)\n notifier.send(\" Making labels categorical: test...\")\n labels_test = tf.keras.utils.to_categorical(labels_test)\n\n notifier.send(\" Converting data to NumPy arrays...\")\n return \\\n np.array(images_train), np.array(images_validate), np.array(images_test), \\\n np.array(labels_train), np.array(labels_validate), np.array(labels_test)", "def read_files_and_visualize(data):\n\n image = cv2.imread(data[0])\n label = cv2.imread(data[1], 0)\n name = data[1].split('/')[-1].split('.')[0]\n obj_label = None\n\n if generator_options.save_label_preview:\n obj_label = []\n if os.path.isfile(data[2]):\n with open(data[2], 'r') as f:\n obj = csv.reader(f, delimiter=',')\n for row in obj:\n row = [int(r.split('.')[0]) if index != 0 else r\n for index, r in enumerate(row)]\n obj_label.append(row)\n\n else:\n label_vals = np.unique(label)\n for val in label_vals:\n obj_label.append([_LABEL_DEF_FULL[val], 0, 0, 0, 0])\n\n save_visuals(image, label, obj_label, name)", "def format_dataset(dataset_path, image_path_prefix):\n\n image_paths = load_image_paths(dataset_path, image_path_prefix)\n image_sizes = load_image_sizes(dataset_path)\n image_bboxes = load_bounding_box_annotations(dataset_path)\n image_parts = load_part_annotations(dataset_path)\n image_labels, new_label_to_original_label_map = format_labels(load_image_labels(dataset_path))\n class_names = load_class_names(dataset_path)\n train_images, test_images = load_train_test_split(dataset_path)\n\n train_data = []\n test_data = []\n\n for image_ids, data_store in [(train_images, train_data), (test_images, test_data)]:\n for image_id in image_ids:\n\n width, height = image_sizes[image_id]\n width = float(width)\n height = float(height)\n\n x, y, w, h = image_bboxes[image_id]\n x1 = max(x / width, 0.)\n x2 = min((x + w) / width, 1.)\n y1 = max(y / height, 0.)\n y2 = min((y + h) / height, 1.)\n\n parts_x = []\n parts_y = []\n parts_v = []\n parts = image_parts[image_id]\n for part_index in range(0, len(parts), 3):\n parts_x.append(max(parts[part_index] / width, 0.))\n parts_y.append(max(parts[part_index + 1] / height, 0.))\n parts_v.append(int(parts[part_index + 2]))\n\n data_store.append({\n \"filename\": image_paths[image_id],\n \"id\": image_id,\n \"class\": {\n \"label\": image_labels[image_id],\n \"text\": 
class_names[new_label_to_original_label_map[image_labels[image_id]]]\n },\n \"object\": {\n \"count\": 1,\n \"bbox\": {\n \"xmin\": [x1],\n \"xmax\": [x2],\n \"ymin\": [y1],\n \"ymax\": [y2],\n \"label\": [image_labels[image_id]],\n \"text\": [class_names[new_label_to_original_label_map[image_labels[image_id]]]]\n },\n \"parts\": {\n \"x\": parts_x,\n \"y\": parts_y,\n \"v\": parts_v\n },\n \"id\": [image_id],\n \"area\": [w * h]\n }\n })\n\n return train_data, test_data", "def prepare_train_img(self, index):\n img_ann_info = self.data_infos[index]\n img_info = {\n 'filename': img_ann_info['file_name'],\n 'height': img_ann_info['height'],\n 'width': img_ann_info['width']\n }\n ann_info = self._parse_anno_info(img_ann_info['annotations'])\n results = dict(img_info=img_info, ann_info=ann_info)\n results['bbox_fields'] = []\n results['mask_fields'] = []\n results['seg_fields'] = []\n self.pre_pipeline(results)\n\n return self.pipeline(results)", "def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def load_png_data():\n m=1 #训练文件个数\n n=1 #测试文件个数\n train_set_x=[]#训练数据集\n train_set_y=[]#训练标签集\n\n test_set_x=[]#测试数据集\n test_set_y=[]#测试标签集\n\n train_data={}\n\n train_path=r\".\\dataset\\train_label\\\\\"\n dirs=os.listdir(train_path)\n\n for file in dirs:\n srcImg=cv2.imread(train_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(train_path+str(m)+'.npy',npImg)\n train_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\trainset\\\\\"+str(m)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\trainset\\\\\" + str(m) + '.npy', npNoiseImg)\n train_set_y.append(npNoiseImg)\n m=m+1\n train_data['train_set_x']=train_set_x\n train_data['train_set_y']=train_set_y\n\n test_path = r\".\\dataset\\test_label\\\\\"\n dirs_test = os.listdir(test_path)\n for file in dirs_test:\n srcImg=cv2.imread(test_path+file)\n 
#将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(test_path+str(n)+'.npy',npImg)\n test_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\testset\\\\\"+str(n)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\testset\\\\\" + str(n) + '.npy', npNoiseImg)\n test_set_y.append(npNoiseImg)\n n=n+1\n train_data['test_set_x']=test_set_x\n train_data['test_set_y']=test_set_y\n\n np.savez(r\"E:\\DeepLearning\\CNNDenoiser\\dataset\\train_data.npz\",**train_data)", "def _process_dataset(all_train_img, all_train_label, all_test_img, all_test_label):\n # Read all training and test images and set the correct path\n train_files = tf.io.gfile.listdir(all_train_img)\n test_files = tf.io.gfile.listdir(all_test_img)\n all_train_class_path = [os.path.join(all_train_img, f) for f in train_files]\n all_test_img_path = [os.path.join(all_test_img, f) for f in test_files]\n # Since Labels start at 1, substract -1 for correct indices with starting '0'\n label_np_test = read_labels_txt(all_test_label) - 1\n synsets_np_train = read_labels_mat(all_train_label)\n\n all_train_img_path = []\n label_np_train = []\n for folder in all_train_class_path:\n img_class_files = tf.io.gfile.listdir(folder)\n synset = os.path.basename(os.path.normpath(folder))\n label_train = synsets_np_train.index(synset)\n for f in img_class_files:\n all_train_img_path.append(os.path.join(folder, f))\n label_np_train.append(label_train)\n\n # Create the Datasets for training and test images with corresponding labels\n path_ds_train = tf.data.Dataset.from_tensor_slices((all_train_img_path, label_np_train))\n img_label_ds_train = path_ds_train.map(_process_image)\n path_ds_test = tf.data.Dataset.from_tensor_slices((all_test_img_path, label_np_test))\n img_label_ds_test = path_ds_test.map(_process_image)\n\n print(img_label_ds_train)\n print(img_label_ds_test)\n\n # Check an example image if necessary\n # example, = img_label_ds_test.take(1)\n for i in range(5):\n example, = img_label_ds_train.take(1)\n image, label = example[0], example[1]\n plt.figure(i)\n if image.shape[2] == 1:\n plt.imshow(tf.squeeze(image), cmap='gray')\n else:\n plt.imshow(image/255)\n print(\"Label: {}\".format(label.numpy()))\n plt.show()\n\n return img_label_ds_train, img_label_ds_test", "def reformat(x, y):\r\n # img_size, num_ch, num_class = int(np.sqrt(x.shape[1])), 1, len(np.unique(np.argmax(y, 1)))\r\n img_size, num_ch, num_class = 14, 1, 16\r\n dataset = x.reshape((-1, img_size, img_size, num_ch)).astype(np.float32)\r\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32) # =[1 2 3 ... 
10]??\r\n return dataset, labels", "def parse_train(self, proto, height, width):\n _, sequence_parsed = tf.io.parse_single_sequence_example(\n proto,\n context_features=self._context_features,\n sequence_features=self._sequence_features)\n\n # Deserialize images to float32 tensors.\n images = tf.map_fn(\n _deserialize_png, sequence_parsed['images'], dtype=tf.float32)\n\n # Resize images.\n if height is not None and width is not None:\n images = smurf_utils.resize(images, height, width, is_flow=False)\n\n return {'images': images}", "def import_minimias_dataset(data_dir: str, label_encoder) -> (np.ndarray, np.ndarray):\n # Initialise variables.\n images = list()\n labels = list()\n\n if not config.is_roi:\n # Loop over the image paths and update the data and labels lists with the pre-processed images & labels.\n print(\"Loading whole images\")\n for image_path in list(paths.list_images(data_dir)):\n images.append(preprocess_image(image_path))\n labels.append(image_path.split(os.path.sep)[-2]) # Extract label from path.\n else:\n # Use the CSV file to get the images and their labels, and crop the images around the specified ROI.\n print(\"Loading cropped ROI images\")\n images, labels = crop_roi_image(data_dir)\n\n # Convert the data and labels lists to NumPy arrays.\n images = np.array(images, dtype=\"float32\") # Convert images to a batch.\n labels = np.array(labels)\n\n # Encode labels.\n labels = encode_labels(labels, label_encoder)\n\n return images, labels", "def preprocessing(fish_class):\n\n\t# encode fish_class into integer\n\tfish_label = fish_labels[fish_class]\n\t\n\t# return a list of image directories for each image\n\timg_handles = glob.glob(data_dir + fish_class + '/' + '*.jpg')\n\t\n\t# build an empty list to store each img as np.ndarray\n\timgs = []\n\t\n\t# build an empty list to store the encoded label for each image\n\tlabels = []\n\t\n\t# iterate through all images in the fish_class folder\n\tfor img_handle in img_handles:\n\t\n\t\t# read img as np.ndarray\n\t\timg = cv2.imread(img_handle)\n\t\t\n\t\t# resize it \n\t\tcv2.resize( img, (new_img_width, new_img_height)\n\t\timg = cv2.resize(img, new_img_size, interpolation=cv2.INTER_CUBIC)\n\t\tstore the img in format of np.ndarray into the imgs \n\t\timgs.append(img)\n\t\t\n\t\t# store a label in labels\n\t\tlabels.append(fish_label)\n\t\n\treturn imgs, labels\n\n# time the preprocessing\nt0 = time.time()\n\n# build an empty list to store preprocessed data\npreprocessed_data = []\n\n# build an empty list to store labels\nencoded_labels = []\n\nfor num, fish in enumerate(classes):\n\tprint num\n\tprint 'Preprocessing imgs of fish %s' %(fish)\n\tprint '----------------------------------------------------------------------'\n\tpreprocessed_imgs, labels = preprocessing(fish)\n\tpreprocessed_data.append(preprocessed_imgs)\n\tencoded_labels.append(labels)", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def 
InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! 
Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def get_train_data(trainlist):\n if not os.path.exists(trainlist):\n raise ValueError('Train data is not exist.')\n\n images = []\n labels = []\n count = 0\n lines = open(trainlist, 'r')\n lines = list(lines)\n for line in lines:\n image_file, label = line.strip('\\n').split('::')\n count += 1\n if count % 100 == 0:\n print('Load {} images.'.format(count))\n image = cv2.imread(image_file)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(image)\n labels.append(label)\n images = np.array(images)\n labels = np.array(labels)\n return images, labels", "def prep_for_train(sample_df):\n # Save the labels\n y = sample_df['label'].to_numpy()\n\n # Path to the data\n DATADIR = Path('data/') / 'real_vs_fake' / 'real-vs-fake'\n\n # Load the sample images\n n = sample_df.shape[0]\n X = np.empty(shape=(n, 256, 256, 3))\n\n # Load in the images to be trained on\n for img_idx, img_path in enumerate(sample_df['path']):\n img = plt.imread(DATADIR / img_path)\n X[img_idx, :, :, :] = img / 255.0\n\n return X, y[np.newaxis].reshape(-1, 1)", "def preprocess_image(image, label, is_training):\n if is_training:\n # Randomly scale the image and label.\n image, label = preprocessing.random_rescale_image_and_label(\n image, label, _MIN_SCALE, _MAX_SCALE)\n\n # Randomly crop or pad a [_HEIGHT, _WIDTH] section of the image and label.\n image, label = preprocessing.random_crop_or_pad_image_and_label(\n image, label, _HEIGHT, _WIDTH, _IGNORE_LABEL)\n\n # Randomly flip the image and label horizontally.\n image, label = preprocessing.random_flip_left_right_image_and_label(\n image, label)\n\n image.set_shape([_HEIGHT, _WIDTH, 3])\n label.set_shape([_HEIGHT, _WIDTH, 1])\n print(\"seg11111111111\",image,label)\n image = preprocessing.mean_image_subtraction(image)\n\n return image, label", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n 
buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def _preprocess(self, features, labels):\n with tf.variable_scope('preprocess'):\n with tf.variable_scope('image'):\n features['image_orig'] = features['image']\n image = tf.image.convert_image_dtype(features['image_orig'],\n dtype=tf.float32)\n if self.mode == ModeKeys.TRAIN:\n images = tf.unstack(image)\n images = [augment_image(img) for img in images]\n image = tf.stack(images)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n features['image'] = image\n\n if labels is None:\n return features, None\n\n with tf.variable_scope('label'):\n # TODO(Shancheng): use start token and end token rather constant 0\n # labels for decoder input\n labels['label_input'] = tf.concat([labels['label'][:, -1:],\n labels['label'][:, 0:-1]], axis=1)\n # from text length to training label length\n labels['length'] = tf.reshape(labels['length'], [-1])\n labels['length'] = labels['length'] + 1\n\n return features, labels", "def format_for_network(image: np.ndarray, label: np.uint8) -> tuple:\n height, width = image.shape\n input_data = np.empty(width * height, dtype=np.float32)\n\n i = 0\n for x in range(width):\n for y in range(height):\n input_data[i] = image[x, y] / 255.0\n i += 1\n\n output_data = np.zeros(10, dtype=np.float32)\n output_data[label] = 1.0\n\n return input_data, output_data", "def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result", "def 
load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)", "def prepare_data(sourcedir):\n # Set up empty lists for storing the data and labels\n data, labels = [], []\n\n # Walk through the source directory\n for (root, subdirs, files) in os.walk(sourcedir):\n # Assign a numerical identifier to each class directory\n for i, class_dir in enumerate(subdirs):\n classes[class_dir] = i\n print(\"[INFO] Found class {}; \"\n \"assigned identifier {}.\".format(class_dir, i))\n\n # Define allowed image extensions\n ext = ['png', 'jpg', 'jpeg']\n\n # Loop over the files in each directory\n for f in files:\n # Check file extension\n if f.split('.')[-1] in ext:\n # Get image path\n path = os.path.join(root, f)\n # Extract class label from path\n label = path.split('/')[-2]\n # Get the corresponding label integer from the classes dict\n numlabel = classes[label]\n # Load image\n image = load_img(path, target_size=target_size)\n # Convert image to numpy array\n features = img_to_array(image)\n\n # Append data and labels to lists\n data.append(features)\n labels.append(numlabel)\n\n # Convert lists to numpy arrays\n data = np.array(data)\n labels = np.array(labels)\n\n # Convert numerical labels into one-hot encoded vectors\n labels = np_utils.to_categorical(labels, len(classes))\n\n # Normalize the RGB values into range 0...1\n data = data.astype('float') / 255.0\n\n # Return data and labels\n return 
data, labels", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. 
So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, labels)", "def load_data(self) -> tuple:\n self.read_path = Path(os.environ[\"DATA_PATH\"]) / \"characters\"\n self.pretrain_path = Path(os.environ[\"FONT_DATA\"]) / \"training\"\n self.dataset_builder.build_data_set()\n X_pretrain, y_pretrain, X_train, y_train, X_dev, y_dev, X_test, y_test = tuple(\n [] for l in range(8)\n )\n\n for letter in self.hebrew.letter_li:\n pretrain_images = glob(f\"{Path(self.pretrain_path/letter)}/*.jpeg\")\n train_images = glob(f'{Path(self.read_path/\"train\"/letter)}/*.jpg')\n dev_images = glob(f'{Path(self.read_path/\"dev\"/letter)}/*.jpg')\n test_images = 
glob(f'{Path(self.read_path/\"test\"/letter)}/*.jpg')\n\n # pretrain data\n for img in pretrain_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_pretrain.append(image)\n y_pretrain.append(self.hebrew.letter_li.index(letter))\n\n # training data\n for img in train_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_train.append(image)\n y_train.append(self.hebrew.letter_li.index(letter))\n\n # dev data\n for img in dev_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_dev.append(image)\n y_dev.append(self.hebrew.letter_li.index(letter))\n\n # test data\n for img in test_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_test.append(image)\n y_test.append(self.hebrew.letter_li.index(letter))\n\n return (\n np.array(X_pretrain),\n np.array(y_pretrain),\n np.array(X_train),\n np.array(y_train),\n np.array(X_dev),\n np.array(y_dev),\n np.array(X_test),\n np.array(y_test),\n )" ]
[ "0.7921838", "0.75767523", "0.7083806", "0.70367414", "0.6984986", "0.6949761", "0.68525404", "0.6838453", "0.68272054", "0.6822626", "0.67811656", "0.67373544", "0.6718452", "0.66978455", "0.6692323", "0.66836244", "0.66790134", "0.66592354", "0.6651176", "0.6622316", "0.66154784", "0.66017926", "0.656892", "0.65528435", "0.6520673", "0.6506123", "0.6494743", "0.6485098", "0.6457212", "0.64487433", "0.64454806", "0.64389503", "0.64304614", "0.6429235", "0.6423581", "0.6421907", "0.6408175", "0.64059234", "0.64048046", "0.6401985", "0.6398148", "0.6378616", "0.6373005", "0.63614106", "0.6352129", "0.63479775", "0.63404673", "0.6338879", "0.6336689", "0.6332821", "0.63325053", "0.6327341", "0.6310643", "0.6297223", "0.62836164", "0.62732756", "0.626967", "0.62661535", "0.6260098", "0.6237032", "0.62317216", "0.6222506", "0.6215759", "0.6211848", "0.6210671", "0.61980635", "0.61969185", "0.6191588", "0.61887866", "0.61877793", "0.6182074", "0.61812097", "0.6179952", "0.6164706", "0.61598676", "0.6158637", "0.61543953", "0.6151639", "0.6151447", "0.6151201", "0.6151043", "0.6149173", "0.61371255", "0.61339784", "0.6133629", "0.6129519", "0.6126827", "0.612086", "0.6120373", "0.6119729", "0.61051136", "0.6094399", "0.6092253", "0.6086239", "0.6085764", "0.6084567", "0.6078971", "0.6078971", "0.6064978", "0.6061851", "0.60577947" ]
0.0
-1
Prepare normalized image and label.
def _prepare_image_and_label(self, data):
    image = tf.io.decode_image(data['image/encoded'], channels=3)
    label = tf.io.decode_image(data['image/segmentation/class/encoded'], channels=1)
    height = data['image/height']
    width = data['image/width']
    image = tf.reshape(image, (height, width, 3))
    label = tf.reshape(label, (1, height, width))
    label = tf.cast(label, tf.float32)
    # Normalizes image with mean and std pixel values.
    image = input_utils.normalize_image(image)
    return image, label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def imagenet_preprocess(image, label):\n i = image\n i = tf.cast(i, tf.float32)\n i = tf.image.resize_with_crop_or_pad(i, 224, 224)\n if model_name == 'ResNet50' or model_name == 'ResNet152':\n i = tf.keras.applications.resnet.preprocess_input(i)\n else:\n i = tf.keras.applications.densenet.preprocess_input(i)\n return (i, label)", "def preprocess_image(image, label, is_training):\n if is_training:\n # Randomly scale the image and label.\n image, label = preprocessing.random_rescale_image_and_label(\n image, label, _MIN_SCALE, _MAX_SCALE)\n\n # Randomly crop or pad a [_HEIGHT, _WIDTH] section of the image and label.\n image, label = preprocessing.random_crop_or_pad_image_and_label(\n image, label, _HEIGHT, _WIDTH, _IGNORE_LABEL)\n\n # Randomly flip the image and label horizontally.\n image, label = preprocessing.random_flip_left_right_image_and_label(\n image, label)\n\n image.set_shape([_HEIGHT, _WIDTH, 3])\n label.set_shape([_HEIGHT, _WIDTH, 1])\n print(\"seg11111111111\",image,label)\n image = preprocessing.mean_image_subtraction(image)\n\n return image, label", "def norm_input(image, label):\n cropped_image = tf.image.resize_image_with_crop_or_pad(image, FLAGS.image_size, FLAGS.image_size)\n\n norm_image = tf.image.per_image_standardization(cropped_image)\n\n return norm_image, label", "def preprocess(path, path2 , scale):\n image = imread(path)\n label_ = imread(path2)\n\n #label_ = modcrop(label, scale)\n\n # Must be normalized\n input_ = image / 255.\n label_ = label_ / 255.\n\n #input_ = scipy.ndimage.interpolation.zoom(label_, (1./scale), prefilter=False)\n #input_ = scipy.ndimage.interpolation.zoom(input_, (scale/1.), prefilter=False)\n\n return input_, label_", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def preprocess_data():\n le = preprocessing.LabelEncoder()\n # Reshape and normalize pixel values to be between 0 and 1\n train_images_reshaped = train_images.reshape(len(train_images), 1024, 1024, 1)/255.\n test_images_reshaped = 
test_images.reshape(len(test_images), 1024, 1024, 1)/255.\n\n return train_images_reshaped, test_images_reshaped, le.fit_transform(train_labels), le.fit_transform(test_labels)", "def __call__(self, src, label):\r\n # img = mx.nd.image.to_tensor(src)\r\n # img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n src = mx.nd.array(src)\r\n img = mx.nd.image.to_tensor(src)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n return img, mx.nd.array(label, dtype=img.dtype)", "def _preprocess_image(self, sample):\n image = sample[common.IMAGE]\n label = sample[common.LABELS_CLASS]\n\n original_image, image, label = input_preprocess.preprocess_image_and_label(\n image=image,\n label=label,\n crop_height=self.crop_size[0],\n crop_width=self.crop_size[1],\n min_resize_value=self.min_resize_value,\n max_resize_value=self.max_resize_value,\n resize_factor=self.resize_factor,\n min_scale_factor=self.min_scale_factor,\n max_scale_factor=self.max_scale_factor,\n scale_factor_step_size=self.scale_factor_step_size,\n ignore_label=self.ignore_label,\n is_training=self.is_training,\n model_variant=self.model_variant)\n\n sample[common.IMAGE] = image\n\n if not self.is_training:\n # Original image is only used during visualization.\n sample[common.ORIGINAL_IMAGE] = original_image\n\n if label is not None:\n sample[common.LABEL] = label\n\n # Remove common.LABEL_CLASS key in the sample since it is only used to\n # derive label and not used in training and evaluation.\n sample.pop(common.LABELS_CLASS, None)\n\n return sample", "def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def normalise(image):", "def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')", "def prepare_data(rawimage, rawlabel, mapping, params):\n # rawimage: TF tensor: H x W x 3, tf.uint8\n # rawlabel: TF tensor: H x W, tf.uint8/16, [0,tf.uint8/16-1]\n # images: TF tensor: Nb x hf x wf x 3, tf.float32 in [0,1)\n # labels: TF tensor: Nb x hf x wf (in case of upsampling), tf.int32, [0, Nclasses] (in case of extra void class)\n\n image = tf.image.convert_image_dtype(rawimage, dtype=tf.float32)\n # resize to learnable system's dimensions\n image = tf.image.resize_images(image, [params.height_network, params.width_network])\n\n label_for_resize = tf.to_int32(rawlabel[tf.newaxis, ..., tf.newaxis])\n label = tf.image.resize_nearest_neighbor(label_for_resize, [params.height_network, params.width_network])\n label = tf.squeeze(label, axis=[0, 3])\n\n label = _lids2cids(mapping, label)\n\n return image, label", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n 
print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def preprocess(path, scale=3):\n image = imread(path, is_grayscale=True)\n label_ = modcrop(image, scale)\n\n # Must be normalized\n \n label_ = label_ / 255.\n \n\n\n input_ = scipy.ndimage.interpolation.zoom(label_, (1. / scale), prefilter=False)\n input_ = scipy.ndimage.interpolation.zoom(input_, (scale / 1.), prefilter=False)\n\n return input_, label_", "def prep_data(labels, image_root):\n labels = split_description(labels)\n labels = convert_plastics(labels)\n\n # Encoding shape and color data\n labels['Shape'] = encode_column(labels[['Shape']])\n labels['Color'] = encode_column(labels[['Color']])\n labels['isPlastic'] = encode_column(labels[['isPlastic']])\n labels = add_filenames(labels, image_root)\n labels = labels.dropna().reset_index()\n\n return labels", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def _prepare_im(self, im):\n # Train and test setups differ\n train_size = cfg.TRAIN.IM_SIZE\n if \"train\" in self._split:\n # Scale and aspect ratio then horizontal flip\n im = transforms.random_sized_crop(im=im, size=train_size, area_frac=0.08)\n im = transforms.horizontal_flip(im=im, p=0.5, order=\"HWC\")\n else:\n # Scale and center crop\n im = transforms.scale(cfg.TEST.IM_SIZE, im)\n im = transforms.center_crop(train_size, im)\n # HWC -> CHW\n im = im.transpose([2, 0, 1])\n # [0, 255] -> [0, 1]\n im = im / 255.0\n # PCA jitter\n if \"train\" in self._split:\n im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)\n # Color normalization\n im = transforms.color_norm(im, _MEAN, _SD)\n return im", "def normalize_labels(self):\n self.y_mean, self.y_std = du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)", "def normalize_image(img):\n arr = np.array(img)\n new_img = Image.fromarray(normalize(arr).astype('uint8'),'L')\n return new_img", "def preprocess(self):\n meta_file_path = os.path.join(database_directory, 'data.txt')\n meta = pd.read_csv(meta_file_path, delimiter=' ', header=None)\n meta = meta[meta[0] != '45567.jpg'] # Corrupt image.\n meta.to_pickle(os.path.join(database_directory, 'meta.pkl'))\n for file_name in meta.iloc[:, 0].values:\n if file_name.endswith('.jpg'):\n file_path = os.path.join(database_directory, file_name)\n image = imageio.imread(file_path).astype(np.uint8)\n image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),\n preserve_range=True)\n image = image.transpose((2, 0, 1))\n np.save(file_path.replace('.jpg', '.npy'), image)", "def __multilabel_processing(self):\n # read the raw dataset\n self.data['image_name'] = 
self.data['image_name'].map(lambda x: '{}.{}'.format(x, img_format))\n self.data['tags'] = self.data['tags'].map(lambda x: x.split())\n\n # create a df with the same number of rows as the dataset filled with the name of the unique values in tags\n label_names = self.data['tags'].explode().unique().tolist()\n label_df = pd.DataFrame([label_names] * self.data.shape[0], columns=label_names)\n\n # binarize the labels according to if they exist for each image or not\n self.data = pd.concat([self.data, label_df], axis=1)\n self.data[['image_name'] + label_names] = self.data.apply(lambda x: pd.Series([x[0]] + [1 if label in x[1] else 0 for label in x[2:]]), axis=1)", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n im_scale = h / float(img.shape[0])\n\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n return img, bbox.astype('float32'), mx.nd.array([im_scale])", "def __init__(self, image_root, label_root, img_x, img_y):\n self.images_path = image_root\n self.labels_path = label_root\n self.data_len = 0\n self.images = []\n self.labels = open(self.labels_path, \"r\").readlines()\n self.transform = transforms.Compose([\n transforms.Resize((img_x, img_y)), \n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n for file in self.labels:\n self.data_len += 1\n tem = file.split(\" \")[0]\n temp = tem.split(\"-\")\n self.images.append(self.images_path + temp[0] + '/' + temp[0] + \"-\" + temp[1] + \"/\" + tem + \".png\")", "def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_", "def imageprepare(self, argv):\n im = Image.open(argv).convert('L')\n img = im.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n tv = list(img.getdata()) \n \n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva", "def prepare_train_img(self, index):\n img_ann_info = self.data_infos[index]\n img_info = {\n 'filename': img_ann_info['file_name'],\n 'height': img_ann_info['height'],\n 'width': img_ann_info['width']\n }\n ann_info = self._parse_anno_info(img_ann_info['annotations'])\n results = dict(img_info=img_info, ann_info=ann_info)\n results['bbox_fields'] = []\n results['mask_fields'] = []\n results['seg_fields'] = []\n self.pre_pipeline(results)\n\n return self.pipeline(results)", "def __call__(self, src, label, mask):\n # resize shorter side but keep in max_size\n h, _, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n im_scale = float(img.shape[0]) / h\n\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n return img, mx.nd.array([img.shape[-2], img.shape[-1], im_scale])", "def _preprocessing(self, input_image):\n if self.resize:\n input_image = self._np_resize_image(input_image,\n self.input_size,\n dtype='int')\n image = self._np_transpose(input_image)\n image = self._np_normalize(image)\n image = self._np_flip_n_cat(image)\n return image", "def _prepare_image(self, image, initial_shape, gt_shape=None):\n image.landmarks['initial_shape'] = initial_shape\n image = image.rescale_to_reference_shape(\n self.reference_shape, group='initial_shape',\n interpolator=self.interpolator)\n\n if gt_shape:\n image.landmarks['gt_shape'] = initial_shape\n\n if self.n_levels > 1:\n if self.scaled_levels:\n pyramid = image.gaussian_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n else:\n pyramid = image.smoothing_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n images = [compute_features(i, self.feature_type)\n for i in pyramid]\n images.reverse()\n else:\n images = [compute_features(image, self.feature_type)]\n\n return images", "def img_prep(img, shape=(128, 128)):\n # Resize\n img = resize_volume(img, (shape[0], shape[1]))\n\n img = numpy.multiply(255, _normalize(img)).astype(numpy.uint8)\n\n return img", "def prepare_images(images):\n images = color.rgb2lab(images)\n\n l = images[:,:,:,:1]/100.\n ab = images[:,:,:,1:]/200. 
+ 0.5\n\n return l, ab", "def preprocess_image_meta(\n image_labels: pd.DataFrame, save_dir: Optional[str] = None\n) -> pd.DataFrame:\n image_labels[\"image_name\"] = image_labels[\"image\"]\n image_labels[\"x\"] = image_labels[\"left\"]\n image_labels[\"y\"] = image_labels[\"top\"]\n image_labels[\"w\"] = image_labels[\"width\"]\n image_labels[\"h\"] = image_labels[\"height\"]\n print(image_labels.head())\n if save_dir:\n image_labels.to_csv(f\"{save_dir}/image_meta.csv\", index=False)\n\n return image_labels", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n im_scale = h / float(img.shape[0])\n\n img = vf.to_tensor(img)\n img = vf.normalize(img, mean=self._mean, std=self._std)\n return img, bbox.astype('float32'), torch.tensor([im_scale], dtype=torch.float32)", "def _preprocess(self, features, labels):\n with tf.variable_scope('preprocess'):\n with tf.variable_scope('image'):\n features['image_orig'] = features['image']\n image = tf.image.convert_image_dtype(features['image_orig'],\n dtype=tf.float32)\n if self.mode == ModeKeys.TRAIN:\n images = tf.unstack(image)\n images = [augment_image(img) for img in images]\n image = tf.stack(images)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n features['image'] = image\n\n if labels is None:\n return features, None\n\n with tf.variable_scope('label'):\n # TODO(Shancheng): use start token and end token rather constant 0\n # labels for decoder input\n labels['label_input'] = tf.concat([labels['label'][:, -1:],\n labels['label'][:, 0:-1]], axis=1)\n # from text length to training label length\n labels['length'] = tf.reshape(labels['length'], [-1])\n labels['length'] = labels['length'] + 1\n\n return features, labels", "def image_preprocess(image, image_size, mean_rgb, stddev_rgb):\n input_processor = dataloader.DetectionInputProcessor(image, image_size)\n input_processor.normalize_image(mean_rgb, stddev_rgb)\n input_processor.set_scale_factors_to_output_size()\n image = input_processor.resize_and_crop_image()\n image_scale = input_processor.image_scale_to_original\n return image, image_scale", "def add_image_normalization(self):\n self.methods.append(self._normalize_image)\n self.args.append(None)", "def __call__(self, src, label, mask):\n # resize shorter side but keep in max_size\n h, _, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n im_scale = float(img.shape[0]) / h\n\n img = vf.to_tensor(img)\n img = vf.normalize(img, mean=self._mean, std=self._std)\n return img, torch.tensor([img.shape[-2], img.shape[-1], im_scale], dtype=torch.float32)", "def normalize_dataset(self):", "def normalize(image, target=None):\n if target is None:\n target = np.array([[148.60, 41.56], [169.30, 9.01], [105.97, 6.67]])\n\n whitemask = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n whitemask = whitemask > 215\n\n imagelab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)\n\n imageL, imageA, imageB = cv2.split(imagelab)\n\n # mask is valid when true\n imageLM = np.ma.MaskedArray(imageL, whitemask)\n imageAM = np.ma.MaskedArray(imageA, whitemask)\n imageBM = np.ma.MaskedArray(imageB, whitemask)\n\n # Sometimes STD is near 0, or 0; add epsilon to avoid div by 0 -NI\n epsilon = 1e-11\n\n 
imageLMean = imageLM.mean()\n imageLSTD = imageLM.std() + epsilon\n\n imageAMean = imageAM.mean()\n imageASTD = imageAM.std() + epsilon\n\n imageBMean = imageBM.mean()\n imageBSTD = imageBM.std() + epsilon\n\n # normalization in lab\n imageL = (imageL - imageLMean) / imageLSTD * target[0][1] + target[0][0]\n imageA = (imageA - imageAMean) / imageASTD * target[1][1] + target[1][0]\n imageB = (imageB - imageBMean) / imageBSTD * target[2][1] + target[2][0]\n\n imagelab = cv2.merge((imageL, imageA, imageB))\n imagelab = np.clip(imagelab, 0, 255)\n imagelab = imagelab.astype(np.uint8)\n\n # Back to RGB space\n returnimage = cv2.cvtColor(imagelab, cv2.COLOR_LAB2RGB)\n # Replace white pixels\n returnimage[whitemask] = image[whitemask]\n\n return returnimage", "def preprocess(exam, data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' + image + '.' + image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def prep_for_train(sample_df):\n # Save the labels\n y = sample_df['label'].to_numpy()\n\n # Path to the data\n DATADIR = Path('data/') / 'real_vs_fake' / 'real-vs-fake'\n\n # Load the sample images\n n = sample_df.shape[0]\n X = np.empty(shape=(n, 256, 256, 3))\n\n # Load in the images to be trained on\n for img_idx, img_path in enumerate(sample_df['path']):\n img = plt.imread(DATADIR / img_path)\n X[img_idx, :, :, :] = img / 255.0\n\n return X, y[np.newaxis].reshape(-1, 1)", "def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2", "def preprocess_images_and_labels(config,\n image_file_data,\n train_image_id_labels,\n val_image_id_labels):\n # Commenting out the line below in keeping with the comment block above the function. 
We don't\n # actually need to do this, because safe_create_dir will never overwrite an existing directory,\n # but better to be safe.\n # safe_create_dir(config.ImageDataConfig.preprocessed_image_path)\n\n # Add in a counter for tracking progress via the console\n counter = 0\n\n train_image_metadata, val_image_metadata = [], []\n for image_data in image_file_data:\n\n if image_data.image_id in train_image_id_labels:\n image_metadata = train_image_metadata\n image_label = train_image_id_labels[image_data.image_id]\n else:\n image_metadata = val_image_metadata\n image_label = val_image_id_labels[image_data.image_id]\n\n # TODO Stop squishing the image and handle cropping correctly sized windows at sample time.\n\n # Comment out the chunk below to avoid re-saving our images, which we have\n # already preprocessed. We just want to generate metadata for them.\n\t\"\"\"\n image = load_img(\n image_data.original_file_path,\n target_size=config.ImageDataConfig.size)\n new_file_path = os.path.join(\n config.ImageDataConfig.preprocessed_image_path,\n image_data.original_filename.upper().replace('PNG', 'JPG')) # Convert all images to jpegs.\n image.save(new_file_path, format='JPEG', quality=85)\n\t\"\"\"\n\n # We generate metadata, setting the image filepath as the original filepath, as we\n # have already preprocessed beforehand.\n original_file_path = image_data.original_file_path\n\n image_metadata.append(ProcessedImageMetadata(image_data.image_id, original_file_path, image_label))\n\n return train_image_metadata, val_image_metadata", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def preprocess_image(image, model_image_size):\n #resized_image = cv2.resize(image, tuple(reversed(model_image_size)), cv2.INTER_AREA)\n resized_image = letterbox_resize(image, tuple(reversed(model_image_size)))\n image_data = np.asarray(resized_image).astype('float32')\n image_data = normalize_image(image_data)\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n return image_data", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, 
data_prefix + 'y_' + split + '.npy'), y)", "def reshape_and_normalize_image(image):\n # Reshape image to mach expected input of VGG16\n image = np.reshape(image, ((1,) + image.shape))\n # Substract the mean to match the expected input of VGG16\n image = image - CONFIG.MEANS\n \n return image", "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "def prepare_images(self):\n\n qt_original_image = self.convert_image_to_QTformat(self.original_image)\n self.send_original_photo_to_gui.emit(qt_original_image)\n\n self.processed_image = self.procces_image(self.original_image)\n qt_processed_image = self.convert_image_to_QTformat(self.processed_image)\n self.send_processed_photo_to_gui.emit(qt_processed_image)", "def create_label(self, loaded_img, loaded_label):\n _, label = cv2.threshold(loaded_label, 120, 255, cv2.THRESH_BINARY)\n kernel = np.ones((5, 5), np.uint8)\n label = cv2.dilate(label, kernel, iterations=1)\n _, contours, _ = cv2.findContours(label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if contours:\n areas = [cv2.contourArea(cnt) for cnt in contours]\n x, y, w, h = cv2.boundingRect(contours[np.argmax(areas)])\n label = label[y:y + h, x:x + w]\n return loaded_img.astype(np.float32) / 255, cv2.resize(label, (self.label_w, self.label_h)).astype(np.float32) / 255\n else:\n return loaded_img.astype(np.float32) / 255, np.zeros([self.label_h, self.label_w], dtype=np.float32)", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. 
Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def prepare_image(img):\n img = img.filter(ImageFilter.SMOOTH_MORE)\n img = img.filter(ImageFilter.SMOOTH_MORE)\n if 'L' != img.mode:\n img = img.convert('L')\n return img", "def preprare_Xy(directory, img_dim, names):\n # Create empty array for images, with dimensions to which all images will be resized and 3 color channels\n X = np.empty((0, img_dim, img_dim, 3))\n # Create empty list for corresponding labels\n y = []\n \n # For each label name (artist)\n for name in names:\n # Get the paths of all images \n img_paths = glob.glob(os.path.join(directory, name, \"*.jpg\"))\n \n # For each image for the given artist, load the image and append image array and label\n for img_path in tqdm(img_paths):\n img = load_img(img_path, target_size=(img_dim,img_dim))\n img_array = np.array([img_to_array(img)])\n X = np.vstack([X, img_array])\n y.append(name)\n\n # Normalize images using min max regularisation\n X_scaled = (X - X.min())/(X.max() - X.min())\n \n # Binarize labels\n lb = LabelBinarizer()\n y_binary = lb.fit_transform(y)\n \n return X_scaled, y_binary", "def preprocess(self, data, label):\n\t\traise NotImplementedError", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image,\n self._output_size,\n self._output_size,\n aug_scale_min=self._aug_scale_min,\n aug_scale_max=self._aug_scale_max)\n\n # Resizes and crops boxes.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n # Pad label and make sure the padded region assigned to the ignore label.\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n label = input_utils.resize_and_crop_masks(\n label, image_scale, self._output_size, offset)\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def sanitise_inputs(self):\n img_paths_var_names = ('dataset_img_path', 'atlas_img_path', 'brain_of_atlas_img_path')\n for img_path_var_name in img_paths_var_names:\n img_path = getattr(self, img_path_var_name)\n if not os.path.exists(img_path):\n sys.exit('Cannot perform registration, image {} not found'.format(img_path))\n if not img_path.endswith('.nii'):\n if img_path.endswith(('.tiff', '.tif')):\n nii_path = '{}{}'.format(os.path.splitext(img_path)[0], '.nii')\n bio.tiff_to_nii(img_path, nii_path)\n setattr(self, img_path_var_name, nii_path)\n else:\n raise RegistrationError('Cannot perform registration, image {} not in supported format'\n .format(img_path))", "def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / 
std\n data['X_train'] = normalize_other_inputs(data['X_train'], Args)\n data['X_val'] = normalize_other_inputs(data['X_val'], Args)\n for key in data['Y_train'].keys():\n data['Y_train'][key] = (data['Y_train'][key] - mean) / std\n data['Y_val'][key] = (data['Y_val'][key] - mean) / std\n blend_cat['std'] = std\n blend_cat['mean'] = mean\n return data", "def preprocess(img):\n # standard mean and std for the model\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n # resize\n img = img.resize(size = (224, 224))\n # transforms to numpy\n img = np.array(img, dtype = np.float64)\n # Mean and Std\n img = (img - mean)/std\n # transpose [channels first]\n img = img.transpose((2, 0, 1))\n # conver to Tensor\n img = torch.from_numpy(img)\n return img", "def preprocess_example_input(input_config):\n\n input_path = input_config[\"input_path\"]\n input_shape = input_config[\"input_shape\"]\n one_img = imread(input_path)\n if \"normalize_cfg\" in input_config.keys():\n normalize_cfg = input_config[\"normalize_cfg\"]\n mean = np.array(normalize_cfg[\"mean\"], dtype=np.float32)\n std = np.array(normalize_cfg[\"std\"], dtype=np.float32)\n one_img = imnormalize(one_img, mean, std)\n one_img = imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1)\n one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(True)\n (_, C, H, W) = input_shape\n one_meta = {\n \"img_shape\": (H, W, C),\n \"ori_shape\": (H, W, C),\n \"pad_shape\": (H, W, C),\n \"filename\": \"<demo>.png\",\n \"scale_factor\": 1.0,\n \"flip\": False,\n }\n\n return one_img, one_meta", "def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. 
but get {}'.format(file.split('.')[1]))", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def preprocess(img, min_size, max_size):\n if(min_size > max_size):\n raise Exception('min_size should not exceed max_size')\n \n width, height = img.size\n minDim = min(width,height)\n maxDim = max(width,height)\n scale_shorter_side = min_size/minDim\n scale_longer_side = maxDim * scale_shorter_side\n if(scale_longer_side > max_size):\n scale = max_size/maxDim\n else:\n scale = scale_shorter_side\n transform = transforms.Compose([\n transforms.Resize((round(img.height*scale),round(img.width * scale))),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n img = transform(img)\n return scale,img", "def _preprocess_image(self, image_raw):\n\n image = tf.io.decode_raw(image_raw, tf.float64)\n\n return image * self.rescale", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images_aug = [x[\"image_color\"].to(self.device) for x in batched_inputs]\n\n images = [self.normalizer(x) for x in images]\n images_aug = [self.normalizer(x) for x in images_aug]\n\n 
images = ImageList.from_tensors(images,\n self.backbone.size_divisibility)\n images_aug = ImageList.from_tensors(images_aug,\n self.backbone.size_divisibility)\n return images, images_aug", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def imageprepare(self,argv):\r\n\t\tim = Image.open(argv).convert('L')\r\n\t\twidth = float(im.size[0])\r\n\t\theight = float(im.size[1])\r\n\t\tnewImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\r\n\r\n\t\tif width > height: # check which dimension is bigger\r\n\t\t\t# Width is bigger. Width becomes 20 pixels.\r\n\t\t\tnheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\r\n\t\t\tif nheight == 0: # rare case but minimum is 1 pixel\r\n\t\t\t\tnheight = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twtop = int(round(((28 - nheight) / 2), 0)) # caculate horizontal pozition\r\n\t\t\tnewImage.paste(img, (4, wtop)) # paste resized image on white canvas\r\n\t\telse:\r\n\t\t\t# Height is bigger. Heigth becomes 20 pixels.\r\n\t\t\tnwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\r\n\t\t\tif (nwidth == 0): # rare case but minimum is 1 pixel\r\n\t\t\t\tnwidth = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\r\n\t\t\tnewImage.paste(img, (wleft, 4)) # paste resized image on white canvas\r\n\r\n\t\t# newImage.save(\"sample.png\")\r\n\r\n\t\ttv = list(newImage.getdata()) # get pixel values\r\n\r\n\t\t# normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n\t\ttva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n\t\treturn tva", "def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return np.array(images) # todo back to array\n return images", "def normalize_other_inputs(X, Args):\n other_keys = list(X.keys())\n other_keys.remove(\"blend_image\")\n for key in other_keys:\n X[key] = (X[key] - np.mean(X[key])) / np.std(X[key])\n if Args.model == \"orchid\":\n loc_im = np.zeros_like(X[other_keys[0]])\n for i, key in enumerate(other_keys):\n im = X.pop(key)\n maximum = np.min((im.max(axis=2).max(axis=1)))\n im[im < maximum / 1.5] = 0\n im[im >= maximum / 1.5] = i + 1\n loc_im += im\n X['loc_im'] = loc_im\n return X", "def _preprocess_input(self, dataset):\n masker = self.masker or dataset.masker\n\n mask_img = masker.mask_img or masker.labels_img\n if isinstance(mask_img, str):\n mask_img = nib.load(mask_img)\n\n # Ensure that protected values are not included among _required_inputs\n assert \"aggressive_mask\" not in self._required_inputs.keys(), \"This is a protected name.\"\n\n if \"aggressive_mask\" in self.inputs_.keys():\n LGR.warning(\"Removing existing 'aggressive_mask' from Estimator.\")\n self.inputs_.pop(\"aggressive_mask\")\n\n # A dictionary to collect masked image data, to be further reduced by the aggressive mask.\n temp_image_inputs = {}\n\n for name, (type_, _) in self._required_inputs.items():\n if type_ == \"image\":\n # If no resampling is requested, check if resampling is required\n if not self.resample:\n check_imgs = {img: nib.load(img) for img in self.inputs_[name]}\n _check_same_fov(**check_imgs, reference_masker=mask_img, raise_error=True)\n imgs = list(check_imgs.values())\n else:\n # resampling will only occur if shape/affines are different\n # making this harmless if all img shapes/affines are the same as the reference\n imgs = [\n resample_to_img(nib.load(img), mask_img, **self._resample_kwargs)\n for img in self.inputs_[name]\n ]\n\n # input to NiFtiLabelsMasker must be 4d\n img4d = concat_imgs(imgs, ensure_ndim=4)\n\n # Mask required input images using either the dataset's mask or the estimator's.\n temp_arr = masker.transform(img4d)\n\n # An intermediate step to mask out bad voxels.\n # Can be dropped once PyMARE is able to handle masked arrays or missing data.\n nonzero_voxels_bool = np.all(temp_arr != 0, axis=0)\n nonnan_voxels_bool = np.all(~np.isnan(temp_arr), axis=0)\n good_voxels_bool = np.logical_and(nonzero_voxels_bool, nonnan_voxels_bool)\n\n data = masker.transform(img4d)\n\n temp_image_inputs[name] = data\n if \"aggressive_mask\" not 
in self.inputs_.keys():\n self.inputs_[\"aggressive_mask\"] = good_voxels_bool\n else:\n # Remove any voxels that are bad in any image-based inputs\n self.inputs_[\"aggressive_mask\"] = np.logical_or(\n self.inputs_[\"aggressive_mask\"],\n good_voxels_bool,\n )\n\n # Further reduce image-based inputs to remove \"bad\" voxels\n # (voxels with zeros or NaNs in any studies)\n if \"aggressive_mask\" in self.inputs_.keys():\n n_bad_voxels = (\n self.inputs_[\"aggressive_mask\"].size - self.inputs_[\"aggressive_mask\"].sum()\n )\n if n_bad_voxels:\n LGR.warning(\n f\"Masking out {n_bad_voxels} additional voxels. \"\n \"The updated masker is available in the Estimator.masker attribute.\"\n )\n\n for name, raw_masked_data in temp_image_inputs.items():\n self.inputs_[name] = raw_masked_data[:, self.inputs_[\"aggressive_mask\"]]", "def __init__(self, train_path=\"./data/train/image\", label_path=\"./data/train/label\",\n\t\t\t\t merge_path=\"./data/train/merge\", aug_merge_path=\"./data/train/aug_merge\", \n\t\t\t\t aug_train_path=\"./data/train/aug_images\", \n\t\t\t\t aug_label_path=\"./data/train/aug_masks\", img_type=\"tif\"):\n\n\t\tself.train_imgs = glob.glob(train_path+\"/*.\"+img_type)\n\t\tself.label_imgs = glob.glob(label_path+\"/*.\"+img_type)\n\t\tself.train_path = train_path\n\t\tself.label_path = label_path\n\t\tself.merge_path = merge_path\n\t\tself.img_type = img_type\n\t\tself.aug_merge_path = aug_merge_path\n\t\tself.aug_train_path = aug_train_path\n\t\tself.aug_label_path = aug_label_path\n\n\t\tif not os.path.exists(merge_path):\n\t\t\tos.mkdir(merge_path)\n\t\t\tos.mkdir(aug_merge_path)\n\t\t\tos.mkdir(aug_train_path)\n\t\t\tos.mkdir(aug_label_path)\n\n\t\tself.slices = len(self.train_imgs)\n\t\tself.datagen = ImageDataGenerator(\n\t\t\t\t\t\t\t\t\tpreprocessing_function=self.preprocess,\n\t\t\t\t\t\t\t\t\trotation_range=0.2,\n\t\t\t\t\t\t\t\t\twidth_shift_range=0.1,\n\t\t\t\t\t\t\t\t\theight_shift_range=0.1,\n\t\t\t\t\t\t\t\t\tshear_range=0.05,\n\t\t\t\t\t\t\t\t\tzoom_range=0.05,\n\t\t\t\t\t\t\t\t\thorizontal_flip=True,\n\t\t\t\t\t\t\t\t\tfill_mode='nearest')", "def imageprepare(argv):\n im = Image.open(argv).convert('L')\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # newImage.save(\"sample.png\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n print(tva)\n return tva", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img = img.astype(np.float32)\n\n if self.augmentation:\n img = self.random_color_aug(img)\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n\n # random horizontal flip\n h, w, _ = img.shape\n img, flips = timage.random_flip(img, px=0.5)\n bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])\n\n # to tensor\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n\n if self._anchors is None:\n return img, bbox.astype(img.dtype)\n\n # generate RPN target so cpu workers can help reduce the workload\n # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)\n oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]\n anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))\n gt_bboxes = mx.nd.array(bbox[:, :4])\n cls_target, box_target, box_mask = self._target_generator(\n gt_bboxes, anchor, img.shape[2], img.shape[1])\n return img, bbox.astype(img.dtype), cls_target, box_target, box_mask", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def _postprocess(img):\n img = _scale_to_zero_one(img)\n img = img.reshape(1, -1) # to avoid a scikit-learn deprecation warning later\n return img", "def preprocess(img):\n dim=(227,227)\n resize_width = 224\n resize_height = 224\n\n img=cv2.resize(img,dim)\n #img=cv2.normalize(img,None,alpha=0,beta=1,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F)\n img = img.astype(numpy.float32)\n\n #Preprocess image changing the RGB pixel values to\t the values the network needs\n # to do this we subtract the mean and multiply the std for each channel (R, G and B)\n # these mean and std values come from the stat.txt file that must accompany the\n # graph file for the network.\n \n img[:,:,0] = (img[:,:,0] - gNetworkMean[0])\n img[:,:,1] = (img[:,:,1] - gNetworkMean[1])\n img[:,:,2] = (img[:,:,2] - gNetworkMean[2])\n \n\n # Finally we return the values as Float16 rather than Float32 as that is what the network expects.\n cv2.imshow(\"Frame\", img)\n return img.astype('float16') #age_float_array.astype(numpy.float16)", "def prepare_train_img(self, idx):\n\n img_info = self.data_infos[idx]\n ann_info = self.get_ann_info(idx)\n results = dict(img_info=img_info, ann_info=ann_info)\n if self.proposals is not None:\n results['proposals'] = self.proposals[idx]\n \n self.pre_pipeline(results)\n \n if self.pre_train_pipeline is not None:\n self.pre_train_pipeline(results)\n \n results_original, results_augment = deepcopy(results), deepcopy(results)\n return self.pipeline(results_original), self.pipeline_multiscale(results_augment)", "def compute_img(self):\r\n self.load_img()\r\n self.check_shape()\r\n self.convert_img()\r\n self.img_computed = True", "def __parse_image_load(self, image_path: str, image_label: int):\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n if self.rgb:\n flag = cv2.IMREAD_COLOR\n else:\n flag = 
cv2.IMREAD_GRAYSCALE\n\n img = cv2.imread(image_path, flags=flag)\n img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA).astype(\n np.float32)\n\n if self.normalize_images:\n img_mean = np.mean(img, axis=(0, 1))\n img_std = np.std(img, axis=(0, 1))\n\n img = (img - img_mean) / img_std\n\n return img, one_hot", "def preprocess_image(self, inputs):\n return utils.preprocess_image(inputs, mode='custom_tf')", "def _initialize_attributes(self):\n height, width = self.image.shape[:2]\n\n self.confidence = (1 - self.mask).astype(float)\n self.data = np.zeros([height, width])\n\n self.working_image = np.copy(self.image)\n self.working_mask = np.copy(self.mask)", "def _compute_normalization(self, normalize=True):\n self._normalization_constant = 1.0 / self._normalization_correction\n\n if normalize:\n # compute normalization constant so that\n # N*C*sum(data) = 1:\n if self._img_norm is None:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._normalization_constant /= self._img_norm\n self._normalization_status = 0\n\n else:\n self._normalization_constant = 1.0\n self._normalization_status = 1\n warnings.warn(\"Overflow encountered while computing \"\n \"normalization constant. Normalization \"\n \"constant will be set to 1.\", NonNormalizable)\n\n else:\n self._normalization_status = 2", "def prepare_for_predict(self) -> None:\n _, self.all_labels_embed = self._create_all_labels_embed()", "def preprocess(img):\n if img.ndim != 3:\n raise TypeError('bad ndim of img')\n if img.dtype != np.uint8:\n raise TypeError('bad dtype of img')\n img = cv2.resize(img, (224, 224))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(np.float32)\n img *= (2.0/255) # normalize to: 0.0~2.0\n img -= 1.0 # subtract mean to make it: -1.0~1.0\n img = np.expand_dims(img, axis=0)\n return img", "def imageprepare(filename):\n img = Image.open(filename).convert('L')\n rect = img.getbbox()\n im = img.crop(rect)\n im.save(filename + '_pressprocessed.png')\n\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (0)) #creates white canvas of 28x28 pixels\n if width > height: #check which dimension is bigger\n #Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\n if (nheight == 0): #rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n else:\n #Height is bigger. Heigth becomes 20 pixels. \n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\n if (nwidth == 0): #rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n tv = list(newImage.getdata()) #get pixel values\n #normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [ (x)*1.0/255.0 for x in tv] \n return tva", "def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))", "def preprocess_input(img):\n img /= 255.\n img -= 0.5\n img *= 2.\n return img", "def preprocess_image(self, batched_inputs):\n images = [x.to(self.device) for x in batched_inputs]\n norms = [self.normalizer(x) for x in images]\n size = (norms[0].shape[1],norms[0].shape[2])\n images = ImageList.from_tensors(norms, self.backbone.size_divisibility)\n return images, size", "def _image_transform(self, img, source, title):\n conf = source.conf[title]\n \n xmin = conf.get('xmin', 0)\n ymin = conf.get('ymin', 0)\n\n xmax = img.shape[-1] + xmin\n ymax = img.shape[-2] + ymin\n if \"xmax\" in conf:\n if(conf['xmax'] <= xmin):\n logging.warning(\"xmax <= xmin for title %s on %s. Ignoring xmax\", title, source.name())\n else:\n xmax = conf['xmax']\n if \"ymax\" in conf:\n if(conf['ymax'] <= ymin):\n logging.warning(\"ymax <= ymin for title %s on %s. 
Ignoring xmax\", title, source.name())\n else:\n ymax = conf['ymax']\n\n \n translate_transform = QtGui.QTransform().translate(ymin, xmin)\n\n # The order of dimensions in the scale call is (y,x) as in the numpy\n # array the last dimension corresponds to the x.\n scale_transform = QtGui.QTransform().scale((ymax-ymin)/img.shape[-2],\n (xmax-xmin)/img.shape[-1])\n \n #rotate_transform = QtGui.QTransform()\n #if source.data_type[title] == 'image':\n # if \"angle\" in conf:\n # rotate_transform = QtGui.QTransform(numpy.cos(conf[\"angle\"]), numpy.sin(conf[\"angle\"]), -numpy.sin(conf[\"angle\"]), numpy.cos(conf[\"angle\"]), 0, 0)\n\n transpose_transform = QtGui.QTransform()\n if source.data_type[title] == 'image':\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n if(self.settingsWidget.ui.transpose.currentText() == 'Yes' or\n (self.settingsWidget.ui.transpose.currentText() == 'Auto' \n and \"transpose\" in conf)):\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n \n transform = scale_transform * translate_transform * transpose_transform\n #transform = scale_transform * translate_transform * rotate_transform * transpose_transform\n \n # print('|%f %f %f|' % (transform.m11(), transform.m12(), transform.m13()))\n # print('|%f %f %f|' % (transform.m21(), transform.m22(), transform.m23()))\n # print('|%f %f %f|' % (transform.m31(), transform.m32(), transform.m33()))\n return transform", "def normalize(image):\r\n return image / 127.5 - 1.", "def preprocessing(image_data, final_height, final_width, label_id, apply_augmentation=False, evaluate=False):\n img = image_data[\"image\"]\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = tf.cast(image_data[\"objects\"][\"label\"] + 1, tf.int32) # add 1 for background\n\n # delete gt_boxe and gt_label entrys that do not belong to label_id\n person_or_not = gt_labels == (label_id + 1) # + 1 since the lable background is added\n gt_boxes = gt_boxes[person_or_not]\n gt_labels = gt_labels[person_or_not]\n gt_labels = gt_labels - label_id # since just one lable is used it is identified with 1\n\n if evaluate:\n not_diff = tf.logical_not(image_data[\"objects\"][\"is_difficult\"])\n gt_boxes = gt_boxes[not_diff]\n gt_labels = gt_labels[not_diff]\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = tf.image.resize(img, (final_height, final_width))\n if apply_augmentation:\n img, gt_boxes = randomly_apply_operation(flip_horizontally, img, gt_boxes)\n return img, gt_boxes, gt_labels", "def prep_image(img, inp_dim):\n\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = (letterbox_image(orig_im, (inp_dim, inp_dim)))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim", "def prep_image(img, inp_dim):\n\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = (letterbox_image(orig_im, (inp_dim, inp_dim)))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim" ]
[ "0.70676106", "0.68583", "0.66364515", "0.65736663", "0.6464647", "0.64554685", "0.6352765", "0.6297984", "0.625139", "0.62383085", "0.62295324", "0.62290204", "0.62271994", "0.6218579", "0.61705977", "0.6143832", "0.61042327", "0.60809606", "0.60745084", "0.60027677", "0.5987943", "0.596762", "0.596578", "0.5960089", "0.59530795", "0.59527105", "0.594513", "0.5929658", "0.5923151", "0.58968425", "0.5894969", "0.5864793", "0.58558285", "0.58312595", "0.58240247", "0.58041376", "0.57999086", "0.5787549", "0.57819974", "0.57740843", "0.57409334", "0.57371247", "0.5733294", "0.5731052", "0.5727785", "0.571692", "0.57130194", "0.5707001", "0.5690691", "0.56738746", "0.56720334", "0.5657934", "0.5656631", "0.56523746", "0.56468797", "0.56411904", "0.56359255", "0.5630628", "0.562547", "0.56219286", "0.5612002", "0.5606631", "0.5605539", "0.5605352", "0.55956924", "0.5590065", "0.5585454", "0.5583583", "0.5563354", "0.5547374", "0.5541643", "0.5535976", "0.5527953", "0.55255026", "0.5518934", "0.55181515", "0.55092674", "0.55079234", "0.550655", "0.5498854", "0.549861", "0.5496175", "0.54850876", "0.5483568", "0.5479615", "0.54779613", "0.54733694", "0.5467792", "0.54671323", "0.545674", "0.5451546", "0.5440451", "0.54404175", "0.543622", "0.54330194", "0.5431241", "0.54307693", "0.5424405", "0.5415989", "0.5415989" ]
0.72255284
0
Parses data for training and evaluation.
def _parse_train_data(self, data):
    image, label = self._prepare_image_and_label(data)

    # Flips image randomly during training.
    if self._aug_rand_hflip:
        image, label = input_utils.random_horizontal_flip(image, masks=label)

    # Resizes and crops image.
    image, image_info = input_utils.resize_and_crop_image(
        image,
        self._output_size,
        self._output_size,
        aug_scale_min=self._aug_scale_min,
        aug_scale_max=self._aug_scale_max)

    # Resizes and crops boxes.
    image_scale = image_info[2, :]
    offset = image_info[3, :]

    # Pad label and make sure the padded region assigned to the ignore label.
    # The label is first offset by +1 and then padded with 0.
    label += 1
    label = tf.expand_dims(label, axis=3)
    label = input_utils.resize_and_crop_masks(
        label, image_scale, self._output_size, offset)
    label -= 1
    label = tf.where(tf.equal(label, -1),
                     self._ignore_label * tf.ones_like(label),
                     label)
    label = tf.squeeze(label, axis=0)
    valid_mask = tf.not_equal(label, self._ignore_label)
    labels = {
        'masks': label,
        'valid_masks': valid_mask
    }

    # If bfloat16 is used, casts input image to tf.bfloat16.
    if self._use_bfloat16:
        image = tf.cast(image, dtype=tf.bfloat16)

    return image, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_data():\n n_train, n_test = 15000, 4996\n n_features = 1355191\n\n print('- parsing train data')\n X_train = sp.lil_matrix((n_train, n_features))\n y_train = np.zeros(n_train)\n with open('/Users/kitazawa/data/news20.train') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_train[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_train[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_train.nnz / (n_train * n_features)))\n\n print('- parsing test data')\n X_test = sp.lil_matrix((n_test, n_features))\n y_test = np.zeros(n_test)\n with open('/Users/kitazawa/data/news20.test') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_test[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_test[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_test.nnz / (n_test * n_features)))\n\n return X_train, y_train, X_test, y_test", "def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def parse_dataset(self, data):\n pass", "def parse_train_data(training_set, language):\n print \"Reading training set: \" + training_set\n xmldoc = minidom.parse(training_set)\n lex_list = xmldoc.getElementsByTagName('lexelt')\n training_output = {}\n\n print \"Processing training set and training models...\"\n for node in lex_list:\n lexelt = node.getAttribute('item')\n training_output[lexelt] = {}\n inst_list = node.getElementsByTagName(\"instance\")\n # setup the neighbor_word_list within k distance of the word\n neighbor_word_list = []\n senseid_set = set()\n for inst in inst_list:\n sentence = inst.getElementsByTagName('context')[0]\n senseid_set.add(inst.getElementsByTagName('answer')[0].getAttribute('senseid'))\n neighbor_word_list = list(set(neighbor_word_list + get_neighbor_words_list(sentence, language)))\n senseid_list = list(senseid_set)\n training_output[lexelt][\"neighbor_word_list\"] = neighbor_word_list\n _4c_4d_feature = extract_4c_4d_feature(neighbor_word_list, senseid_list, inst_list, language)\n training_output[lexelt][\"4c_4d_feature\"] = _4c_4d_feature\n x_list = []\n y_list = []\n for inst in inst_list:\n y = inst.getElementsByTagName('answer')[0].getAttribute('senseid')\n if 
ignore_U_activated and y.__eq__('U'):\n continue\n y_list.append(str(replace_accented(y)))\n x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language)\n x_list.append(x)\n # for each node, build a classifier\n if language.__eq__(\"English\"):\n #clf = RandomForestClassifier(n_estimators=10) 58.9\n #clf = SGDClassifier() 61.1\n #clf = MultinomialNB() 62.9\n #clf = BernoulliNB() 55.8\n #clf = Perceptron() 60.4\n #clf = PassiveAggressiveClassifier() 62.1\n #clf = RidgeClassifier() 62.7\n #clf = svm.LinearSVC() 62.5\n #clf = KNeighborsClassifier()\n #clf = GaussianNB()\n clf = MultinomialNB(alpha=0.95) #+ alpha=0.95 + k=13 + left_right_order + vector_0_1 off = 64.7\n elif language.__eq__(\"Spanish\"):\n #clf = svm.LinearSVC() 82.0\n #clf = MultinomialNB() 82.2\n #clf = RidgeClassifier() 81.5\n #clf = PassiveAggressiveClassifier() 81.9\n #clf = BernoulliNB() 72.4\n clf = MultinomialNB(alpha=0.50) #0.25:82.6 0.4:83.1 0.45:83.2 0.5: 83.2 0.55:83.2 0.6:82.8 0.75:82.7\n elif language.__eq__(\"Catalan\"):\n #clf = svm.LinearSVC() # 82.8\n #clf = MultinomialNB() # 80.8\n #clf = RidgeClassifier() 82.6\n #clf = svm.LinearSVC(C=1.5) 82.9\n clf = MultinomialNB(alpha=0.25) # 0.5:84.3 0.35:84.6 0.3:84.8 0.25:85.4 0.2:85.3\n else:\n clf = svm.LinearSVC()\n clf.fit(x_list, y_list)\n training_output[lexelt][\"Classifier\"] = clf\n print \"Models trained.\"\n return training_output", "def parse_training_data(data_dir, task):\n\n COMMENTS_FILE = \"%s_annotated_comments.tsv\" % task\n LABELS_FILE = \"%s_annotations.tsv\" % task\n\n print(os.path.join(Project_Path, data_dir, COMMENTS_FILE))\n comments = pd.read_csv(os.path.join(Project_Path, data_dir, COMMENTS_FILE), sep = '\\t', index_col = 0)\n # remove special newline and tab tokens\n comments['comment'] = comments['comment'].apply(lambda x: x.replace(\"NEWLINE_TOKEN\", \" \"))\n comments['comment'] = comments['comment'].apply(lambda x: x.replace(\"TAB_TOKEN\", \" \"))\n\n annotations = pd.read_csv(os.path.join(Project_Path, data_dir, LABELS_FILE), sep = '\\t', index_col = 0)\n labels = empirical_dist(annotations[task])\n X = comments.sort_index()['comment'].values\n y = labels.sort_index().values\n\n assert(X.shape[0] == y.shape[0])\n return X, y", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def process_data(data, labels):\n\t\n\t# Split the dataset of string into train, validation, and test \n\t# Use a 70/15/15 split\n\t# train_test_split shuffles the data before splitting it \n\t# Stratify keeps the proportion of labels the same in each split\n\n\t# -- WRITE THE SPLITTING CODE HERE --\n\t# Split the data into 70 percent train and 30 percent test and validate data\n\ttrain_X, test_X_split, train_Y, test_Y_split = train_test_split(data, labels, test_size=0.30, stratify=labels,random_state= 1)\n\t# Split the remaining 30 percent data into 15 percent test and validate data each\n\ttest_X, val_X, test_Y, val_Y = train_test_split(test_X_split, test_Y_split, test_size=0.50, stratify=test_Y_split, random_state= 1)\n\n\t# Preprocess each dataset of strings into a dataset of 
feature vectors\n\t# using the CountVectorizer function. \n\t# Note, fit the Vectorizer using the training set only, and then\n\t# transform the validation and test sets.\n\n\t# -- WRITE THE PROCESSING CODE HERE --\n\t# Preprocess dataset using CountVectorizer from ngram range of 1 to 3\n\tvector = CountVectorizer(ngram_range=(1,3))\n\t# Fit data on train dataset\n\ttrain_X = vector.fit_transform(train_X)\n\t# Transform data on test dataset\n\ttest_X = vector.transform(test_X)\n\t# Transform data on validate dataset.\n\tval_X = vector.transform(val_X)\n\t# Return the training, validation, and test set inputs and labels\n\treturn train_X, train_Y, val_X, val_Y, test_X, test_Y\n\t# -- RETURN THE ARRAYS HERE -- ", "def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask", "def parse_data(self):\n vehicle_data = self.data.get('vehicles')\n for vehicle_datum in vehicle_data:\n self.vehicles.append(Vehicle(**vehicle_datum))\n\n job_data = self.data.get('jobs')\n for job_datum in job_data:\n self.jobs.append(Job(**job_datum))\n\n self.matrix = self.add_dummy_location_to_matrix(self.data.get('matrix'))", "def load_data(self, training_data):\n \"\"\"training data format [(instance, label),(instance, label),...]\"\"\"\n self.training_data = training_data", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def parse_file():\r\n if len(sys.argv) < 2:\r\n print(\"Need a file\")\r\n sys.exit(1)\r\n\r\n data_input = open(sys.argv[1])\r\n\r\n data = []\r\n for line in data_input: #for each of these lines\r\n if(len(line) == 0): pass #skip empty lines\r\n split_within_line = line.split(\"\\t\") #split by tabs\r\n new_datum = Datum(split_within_line[0], split_within_line[1], split_within_line[2]) #feed splits into a Datum object\r\n data.append(new_datum) #add Datum to list of data\r\n\r\n #make a list of characters representing the issues\r\n for i in range(len(data[0].dat_votes)-1): #from 0 to the end of the list of issues from the first datum\r\n original_issues.append(chr(i+97))\r\n\r\n\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n pair = _count_parties(training_set)\r\n\r\n unpruned = induce_node_tree(training_set, original_issues,\"D\",-1)\r\n # print(\"\\n#### UNPRUNED TREE ####\\n\")\r\n # print(unpruned)\r\n\r\n unprune_acc = calc_accuracy(unpruned, 
tuning_set)\r\n\r\n pruned = prune_tree(unpruned, tuning_set)\r\n print(\"\\n#### PRUNED TREE ####\\n\")\r\n print(pruned)\r\n\r\n acc = calc_accuracy(pruned, training_set)\r\n\r\n # print(\"Accuracy of unpruned tree with tuning_set: \" + str(unprune_acc))\r\n print(\"Accuracy of pruned tree with tuning_set: \" + str(acc))\r\n leave_one_out_cross_validation(data)", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 
'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def load_data(data_prefix, dataset_str, precalc):\n (num_data, train_adj, full_adj, feats, train_feats, test_feats, labels,\n train_data, val_data,\n test_data) = utils.load_graphsage_data(data_prefix, dataset_str)\n visible_data = train_data\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_data, :] = labels[train_data, :]\n y_val[val_data, :] = labels[val_data, :]\n y_test[test_data, :] = labels[test_data, :]\n\n train_mask = utils.sample_mask(train_data, labels.shape[0])\n val_mask = utils.sample_mask(val_data, labels.shape[0])\n test_mask = utils.sample_mask(test_data, labels.shape[0])\n\n if precalc:\n train_feats = train_adj.dot(feats)\n train_feats = np.hstack((train_feats, feats))\n test_feats = full_adj.dot(feats)\n test_feats = np.hstack((test_feats, feats))\n\n return (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, train_data, val_data, test_data,\n num_data, visible_data)", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, 
test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def train(self, data):\n pass", "def train(self, training_data):\n pass", "def load_data_wrapper():\r\n\r\n train_data, valid_data, tst_data = load_data()\r\n ## calling the function load_data()\r\n ## will return a tuple with three values for train, validation and test data\r\n ## storing the tuple values in separate three variables\r\n\r\n ## training_data:\r\n training_inputs = [np.reshape(x, (784,1)) for x in train_data[0]]\r\n ## reshaping the training inputs to 784x1 vector\r\n ## the required format for our neural network's input layer\r\n ## ---\r\n training_results = [vectorized_result(y) for y in train_data[1]]\r\n ## calling vectorized_result() function(see below)\r\n ## will convert the digit value in 10-dimensional vector\r\n ## the required format for our neural network's output layer\r\n ## ---\r\n training_data = zip(training_inputs, training_results)\r\n ## zipping together the training_inputs and training_results\r\n\r\n ## validation_data:\r\n validation_inputs = [np.reshape(x, (784,1)) for x in valid_data[0]]\r\n ## reshaping the validation inputs to 784x1 vector\r\n ## ---\r\n validation_data = zip(validation_inputs, valid_data[1])\r\n ## zipping together the validation_inputs and it's corresponding outputs\r\n\r\n ## test_data:\r\n test_inputs = [np.reshape(x, (784,1)) for x in tst_data[0]]\r\n ## reshaping the test inputs to 784x1 vector\r\n ## ---\r\n test_data = zip(test_inputs, tst_data[1])\r\n ## zipping together the test_inputs and it's corresponding outputs\r\n\r\n return (training_data, validation_data, test_data)", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def loadData ( self ) :\n df = pd.read_json ( self.dataset )\n df = df[pd.notnull ( df[2] )]\n df[1] = df[1].apply ( self.clean_text )\n\n self.X = df[1]\n self.y = df[2]", "def load_data(params):\n train_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_train_' + params['stemming'] + '.csv']))\n dev_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_dev_' + 
params['stemming'] + '.csv']))\n train_data, label_encode = data_prep(train_df, params, if_resample=True)\n dev_data, _ = data_prep(dev_df, params)\n return train_data, dev_data, label_encode", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def _load_data(self):\n data_x, data_y = make_classification(n_samples=5000, n_features=20,\n n_informative=10,\n n_redundant=0, n_repeated=0,\n n_classes=2,\n n_clusters_per_class=4,\n weights=None, flip_y=0.01,\n class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0,\n shuffle=True,\n random_state=self.args.rand_seed)\n\n self.orig_column_names = np.arange(data_x.shape[-1])\n self.data_x = data_x\n self.data_y = self.to_one_hot_encoding(data_y)\n self.numerical_idx = np.arange(data_x.shape[-1])\n self.non_num_idx = None\n self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = data_x[:, :1].astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def _load_training_data(self):\n self._save_training_data()", "def process_raw_data(self):\n \n # Define some variables of interest.\n vor = [\"n_sentences\", \"n_correct\", \"p_correct\", \"median_RT\", \\\n \"mean_RT\", \"stdev_RT\", \"scaled_stdev_RT\"]\n \n # Get all participant names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] 
= []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n self.data[\"n_sentences\"][i] = len(self.raw[ppname][\"Sentence\"])\n self.data[\"n_correct\"][i] = numpy.sum(self.raw[ppname][\"correct\"])\n self.data[\"p_correct\"][i] = float(self.data[\"n_correct\"][i]) \\\n / float(self.data[\"n_sentences\"][i])\n self.data[\"median_RT\"][i] = numpy.nanmedian(self.raw[ppname][\"RT\"])\n self.data[\"mean_RT\"][i] = numpy.nanmean(self.raw[ppname][\"RT\"])\n self.data[\"stdev_RT\"][i] = numpy.nanstd(self.raw[ppname][\"RT\"])\n # Compute a scaled standard deviation of the response time, scaled to the\n # median response time to remove the correlation between the two.\n self.data[\"scaled_stdev_RT\"] = self.data[\"stdev_RT\"] / self.data[\"median_RT\"]", "def load(self):\n\n X_train, y_train, X_test, y_test, variable_types, name = _load_data(\n self.task_id)\n\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.variable_types = variable_types\n self.name = name\n\n return self.X_train, self.y_train, self.X_test, self.y_test", "def get_data():\n\n pathxtrain = sys.argv[1]\n pathxtest = sys.argv[2]\n pathlabeltrain = sys.argv[3]\n pathlabeltest = sys.argv[4]\n\n xtrain = p.read_csv(pathxtrain, header=None)\n xtest = p.read_csv(pathxtest, header=None)\n label_train = p.read_csv(pathlabeltrain, header=None)\n label_test = p.read_csv(pathlabeltest, header=None)\n\n xtrain_mx = xtrain.values\n xtest_mx = xtest.values\n\n label_train = label_train.values.reshape(label_train.shape[0])\n label_test = label_test.values.reshape(label_test.shape[0])\n\n return xtrain_mx, xtest_mx, label_train, label_test", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, 
sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def validate(self, data_loader=None):\n if data_loader is None:\n data_loader = self.dataset.val_data_loader\n m = self.model\n m.eval()\n\n batch_matrix_list = []\n for idx, data in tqdm(enumerate(data_loader), total=len(data_loader)):\n if type(data) is dict:\n for key, value in data.items():\n data[key] = value.to(self.device)\n pred = m.predict(data)\n batch_matrix = self.evaluator.collect(data, pred)\n batch_matrix_list.append(batch_matrix)\n\n if self.single:\n result = self.evaluator.evaluate(batch_matrix_list, groupby=False)\n else:\n result = self.evaluator.evaluate(batch_matrix_list, groupby=True)\n return result", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a 
training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n 
f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def split_data_into_training_and_validation(self, data):\n training_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def run_epoch(self, epoch, data_loader, training=False):\n if training:\n self.model.train()\n else:\n self.model.eval()\n\n epoch_metrics = {\"loss\": 0.0}\n overall_parsing_counts = {\"correct\": 0, \"predicted\": 0, \"gold\": 0}\n num_evaluated_batches = 0\n\n with torch.set_grad_enabled(training):\n for sentences, target in data_loader:\n # Run model\n target = self._to_device(target)\n output, parsing_counts = self.parser.evaluate_batch(sentences)\n\n # Compute loss\n output, target = self._unroll_sequence_batch(output), self._unroll_sequence_batch(target)\n loss = self.criterion(output, target)\n\n # Add metrics to overall total\n epoch_metrics[\"loss\"] += loss.item()\n for count in \"gold\", \"predicted\", \"correct\":\n overall_parsing_counts[count] += parsing_counts[count]\n\n # Perform backpropagation (when training)\n if training:\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Print progress\n num_evaluated_batches += 1\n self.logger.debug('{} Epoch: {} {} Loss: {:.6f}'.format(\n \"Training\" if training else \"Validation\",\n epoch,\n self._progress(num_evaluated_batches, data_loader),\n loss.item()))\n\n epoch_metrics.update(self.compute_prf(overall_parsing_counts))\n\n return epoch_metrics", "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n labels = [] # targets we are predicting for each input\n\n for file_path in glob.glob(self.train_dir + '*.txt'):\n tokens = read_tokens(file_path)\n unique = list(set(tokens))\n x_count = round(len(unique) * 0.85)\n\n for _ in range(self.samples_per_doc):\n random.shuffle(unique)\n x.append(' '.join(unique[:x_count]))\n labels.append(' '.join(unique[x_count:]))\n\n # make x and y\n pkl = open('Model/tokenizer.p', 'rb')\n self.tokenizer = pickle.load(pkl)\n x = self.tokenizer.texts_to_matrix(x, mode='binary')\n y = self.tokenizer.texts_to_matrix(labels, mode='binary')\n\n # column zero is empty\n return x, y[:,1:]", "def load_data():\n train = 
pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}", "def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids", "def data_parser(data, snp_neighbour):\n\n # Get only the features of the SNP of interest\n if snp_neighbour == 0:\n # The SNP of interest samples are located at the middle position of the data sequence\n index_SNPi = (data.shape[2] - 1) / 2 # -1 for the SNP of interest\n samples = data[:, :, int(index_SNPi)]\n # Define the number of considered nucleotide positions\n n_positions = 1\n\n # Get the features of the SNP of interest and neighbouring positions\n else:\n # The data should fit in a 2D array for performing neural network. 
The number of samples should be stay, and\n # the number of features will be the number of features times the number of nucleotides\n samples = data.reshape([data.shape[0], -1])\n # Define the number of considered nucleotide positions\n n_positions = data.shape[2]\n\n # Get the number of used features\n n_features = data.shape[1]\n\n return samples, n_features, n_positions", "def train_with_loader(self, data, validating_data=None, scheduler=None, epochs=1):\n print('Training...')\n for epoch in range(epochs):\n self.train()\n for train_in, train_out in data:\n self.compute_loss(train_in, train_out, is_guess=False, training=True)\n self.eval()\n if validating_data:\n with torch.no_grad():\n valid_loss = self.compute_loss_loader(validating_data).item()\n print('Average validation error at step ',epoch+1,': ', valid_loss)\n if scheduler and valid_loss:\n scheduler.step()", "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def _get_data(self):\n\n # Grab the data. Note, the separator is actually ', ', not just a\n # comma, so specify. Also, recognize the \"?\" as an NA value\n # (I think it is easier to have pandas catch the NA values instead\n # of manually searching for and parsing these in the future).\n # Finally, set the engine to python, since having a separator greater\n # than one character automatically does this, and prints a warning\n # message. By explicitly telling it to use python, we suppress the\n # warning.\n self.train_df = pd.read_csv(self.train_url, sep=', ', header=None,\n na_values='?', engine='python')\n\n # For the training data, have one comment row, so need to ignore\n self.test_df = pd.read_csv(self.test_url, sep=', ', header=None,\n skiprows=1, na_values='?', engine='python')\n\n # Get the header data\n response = requests.get(self.head_url)\n header = response.text.split('\\n')\n\n # Now, filter to grab the header lines:\n # First, make sure there is at least one character for the line, and\n # ignore lines that start with the comment character for the file \"|\"\n header = [row for row in header if len(row) > 0 and row[0] != '|']\n\n # Ignore the first row, since it is just identifying the classifier\n # task and, get just the header values\n header = [head.split(':')[0] for head in header[1:]]\n\n # Finally, we need to add a header name for the last column (if <= or >\n # income of 50k)\n header.append('income')\n\n # Now, set the header for the data sets\n self.train_df.columns = header\n self.test_df.columns = header", "def train(self):\n # 1. 
Extracting details of attributes\n\n self.get_attribute_data()\n if self.train_data is None and self.train_data_file is None:\n raise ValueError(\"Neither training data not training file provided\")\n\n self.get_train_data()\n self.classifier = self.build_tree(rows=self.train_data, attribute_list=self.attribute_names)", "def read_data(self, p_data=''):\n\n _header_ = self._header_ + 'read_data(): '\n\n if p_data:\n self.p_data = p_data\n\n if not self.p_data:\n raise ValueError(_header_ + 'No data to read.')\n\n if not os.path.isfile(self.p_data):\n raise FileNotFoundError(_header_ + 'No such file: %s' % self.p_data)\n\n if self.verbose:\n print(_header_ + 'Reading data from %s ...' % self.p_data)\n\n if self.nidx_pred:\n # If there are nodes already in .nidx_pred, then they are likely copied over from the train data\n # So, these must be purged prior to reading new data\n print(_header_ + 'Excluding %d predicting nodes transfered from training dataset ...' % len(self.nidx_pred))\n self.nidx_exclude += self.nidx_pred\n self.nidx_pred = []\n\n # Extract data\n all_links = []\n all_labels = []\n has_other = False\n self.df = pd.read_table(self.p_data)\n df = self.df.applymap(func=lambda x: [i for i in x.strip().split('/') if i] if isinstance(x, str) else [])\n has_node = self.columns['nodes'] in df\n has_layer = self.columns['layers'] in df\n\n for i_row in range(len(df)):\n if has_layer:\n sp = df[self.columns['layers']][i_row][0]\n if sp in self.masklayer:\n continue\n if sp in self.layer2nidx:\n self.layer2nidx[sp] |= {i_row}\n else:\n self.layer2nidx[sp] = {i_row}\n self.nidx2layer.append(sp)\n labs = df[self.columns['labels']][i_row]\n if self.lab_other:\n node_lab = [x if (not self.labels or x in self.labels) else 'other' for x in labs]\n if not has_other and 'other' in node_lab:\n has_other = True\n else:\n node_lab = [x for x in labs if (not self.labels or x in self.labels)]\n if labs:\n all_labels += labs\n if not node_lab:\n self.nidx_exclude.append(i_row)\n self.nidx_pred.append(i_row)\n self.node_links.append([x for x in list(set(df[self.columns['links']][i_row])) if x not in self.exclude_links])\n self.node_labels.append(node_lab)\n if has_node:\n self.nodes.append(df[self.columns['nodes']][i_row])\n\n all_links += self.node_links[-1]\n\n # track link frequency\n for link in self.node_links[-1]:\n if link in self.link2freq:\n self.link2freq[link] += 1\n else:\n self.link2freq[link] = 1\n\n self.links += sorted(set(all_links) - set(self.links))\n set_all_labels = set(all_labels)\n if self.labels:\n if self.lab_other and 'other' not in self.labels and has_other:\n self.labels.append('other')\n\n if self.verbose:\n if self.lab_other:\n print(_header_ + 'Other labels: %s' % (','.join(set_all_labels - set(self.labels))))\n else:\n print(_header_ + 'Excluded labels: %s' % (','.join(set_all_labels - set(self.labels))))\n else:\n self.labels = sorted(list(set_all_labels))\n\n self.n_labels = len(self.labels)\n\n for idx, link in enumerate(self.links):\n self.link2lidx[link] = idx\n\n if self.verbose:\n print(' Found %d nodes' % len(self.node_links))\n print(' Found %d links' % len(self.links))\n\n return self", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for 
sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def _parse_data(self):\n for i, val in enumerate(self.values.keys()):\n x_, y_ = [], []\n xy = self.values[val]\n for value in self.values.index:\n x_.append(xy[value][0])\n y_.append(xy[value][1])\n\n self.set_and_get(\"x_\", val, x_)\n self.set_and_get(\"y_\", val, y_)", "def parse_data( self, data ):\n data = data.split( ',' )\n data = list( map( lambda x: x.strip(), data ) ) # remove white space\n # create data structure\n fields = [\n 'time',\n 'value'\n ] \n Reading = namedtuple( 'Reading', fields )\n \n try:\n return [ Reading( time = float( data[ i + 1 ] ), value = float( data[ i ] ) ) for i in range( 0, len( data ), len( fields ) ) ]\n \n except ValueError as err:\n raise err", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def get_data(self):\n\n self.read_expression()\n self.read_tfs()\n self.read_metadata()\n self.set_gold_standard_and_priors()", "def process_data(train_file, test_file):\n y_train, tx_train, ids_train, y_test, tx_test, ids_test = load(train_file, test_file)\n header_train = get_header(train_file)\n header_test = get_header(test_file)\n print('\\nData set will be split into four, each representing data with different jet numbers.')\n for jet_num in range(4):\n print('\\nProcess training set with jet number = ' + str(jet_num) + '...')\n y_train_jet, tx_train_jet, ids_train_jet = split_data(y_train, tx_train, ids_train, jet_num)\n columns_to_remove = analyze(tx_train_jet)\n tx_train_jet, header_train_jet = remove_columns(tx_train_jet, header_train, columns_to_remove)\n create_csv('train_jet_' + str(jet_num) + '.csv', y_train_jet, tx_train_jet, ids_train_jet, header_train_jet, False)\n print('\\n... created train_jet_' + str(jet_num) + '.csv file.')\n print('\\nProcess test set with jet number = ' + str(jet_num) + '...')\n y_test_jet, tx_test_jet, ids_test_jet = split_data(y_test, tx_test, ids_test, jet_num)\n columns_to_remove = analyze(tx_test_jet)\n tx_test_jet, header_test_jet = remove_columns(tx_test_jet, header_test, columns_to_remove)\n create_csv('test_jet_' + str(jet_num) + '.csv', y_test_jet, tx_test_jet, ids_test_jet, header_test_jet, True)\n print('\\n... 
created test_jet_' + str(jet_num) + '.csv file.')", "def parse(cls, data):\n raise NotImplementedError", "def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train", "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def train(X : list, y : list, model_name : str, data, mode,*vals,**args):\n # 훈련 결과 dictionary\n scores = {}\n\n # 훈련 모드 선택\n\n model = MODELS[mode][model_name]\n\n X_train, X_val, y_train, y_val = train_test_split(data[X], data[y], train_size=0.7)\n \n model.fit(X_train, y_train)\n\n scores['train'] = validation(model, X_train, y_train, mode)\n scores['validation'] = validation(model, X_val, y_val, mode)\n\n return scores", "def readData(path_to_dataset, train_size=0.8, validation_size=0.2):\n data = pd.read_csv(os.path.join(path_to_dataset, 'training_set_rel3.tsv'), sep='\\t', encoding='ISO-8859-1')\n # Drop columns that has null value \n data = data.dropna(axis=1)\n # Only take 4 columns of data from the dataset: essay_id, essay_set, essay, domain1_score\n data = data[['essay_id', 'essay_set', 'essay', 'domain1_score']]\n # Perform 80:20 train-test split on the training data\n train_set, test_set = train_test_split(data, train_size=train_size, random_state=0)\n # Split the 80% training set further into 60:20\n training_set, 
validation_set = train_test_split(train_set, test_size=validation_size, random_state=0)\n return training_set, test_set, validation_set", "def __init__(self):\n\n print '-'*60\n #self.train_folder = '../data/preprocess_nonstopword_nonstemming/train_clean/' # folder\n #self.test_folder = '../data/preprocess_nonstopword_nonstemming/test_clean/' # folder\n self.train_folder = '../data/preprocess_6/train_clean/' # folder\n self.test_folder = '../data/preprocess_6/test_clean/' # folder\n self.label_file = '../data/train_labels.csv' # path\n #pred_file = './submission_NB.csv' # predicitons\n self.pred_file = './submission_pre_6_t0.6.csv'\n\n\n self.train_ans = []\n self.test_index = []", "def _extract_results(self) -> None:\n metric_name = self.metric.name\n for inference_name in ['train', 'test', 'opt']:\n # TODO: Extract information from self.search_results\n data = getattr(self.search_results, f'{inference_name}_metric_dict')[metric_name]\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'single::{inference_name}::{metric_name}'] = np.array(data)\n\n if self.ensemble_results.empty() or inference_name == 'opt':\n continue\n\n data = getattr(self.ensemble_results, f'{inference_name}_scores')\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'ensemble::{inference_name}::{metric_name}'] = np.array(data)", "def _load_data(self, save_temp=False):\n # directly read processed data and encode\n print ('Start tokenizing data...')\n self.data = json.loads(\n open(self.cfg.data_path+self.cfg.data_file, 'r', encoding='utf-8').read().lower())\n self.train, self.dev, self.test = [], [], []\n print ('Start encoding data...')\n p = progressbar.ProgressBar(len(self.data))\n p.start()\n p_idx = 0\n for fn, dial in self.data.items():\n p.update(p_idx)\n p_idx += 1\n if '.json' in fn:\n fn = fn.replace('.json', '')\n if 'all' in self.cfg.exp_domains or self.exp_files.get(fn):\n if self.dev_files.get(fn):\n self.dev.append(self._get_encoded_data(fn, dial))\n elif self.test_files.get(fn):\n self.test.append(self._get_encoded_data(fn, dial))\n else:\n if self.data_mode == 'train':\n self.train.append(self._get_encoded_data(fn, dial))\n elif self.data_mode == 'test':\n pass\n else:\n raise Exception('Wrong Reader Data Mode!!!')\n p.finish()", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load_data(path_to_dir):\n train_pos = []\n train_neg = []\n test_pos = []\n test_neg = []\n with open(path_to_dir+\"train-pos.txt\", \"r\") as f:\n for i,line in enumerate(f):\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_pos.append(words)\n with open(path_to_dir+\"train-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_neg.append(words)\n with open(path_to_dir+\"test-pos.txt\", \"r\") 
as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_pos.append(words)\n with open(path_to_dir+\"test-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_neg.append(words)\n\n return train_pos, train_neg, test_pos, test_neg", "def read_data(self):\r\n IS_REMAPPED = 1\r\n if IS_REMAPPED:\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TRAINING_DATA = [[remap(float(f1),float(f2))[0],remap(float(f1),float(f2))[1],\\\r\n int(c)] for [f1, f2, c] in data_as_strings]\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TESTING_DATA = [[remap(float(f1),float(f2))[0],remap(float(f1),float(f2))[1],\\\r\n int(c)] for [f1, f2, c] in data_as_strings]\r\n else:\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TRAINING_DATA = [[float(f1), float(f2), int(c)] for [f1, f2, c] in data_as_strings]\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TESTING_DATA = [[float(f1), float(f2), int(c)] for [f1, f2, c] in data_as_strings]", "def run_parse(self):\n # Data set already has source file names from load_inputs\n parsedset = {}\n parsedset['data_set'] = []\n for log in self.input_files:\n parsemodule = self.parse_modules[self.args.parser]\n try:\n if self.args.tzone:\n parsemodule.tzone = self.args.tzone\n except NameError: pass\n parsedset['data_set'].append(parsemodule.parse_file(log))\n self.data_set = parsedset\n del(parsedset)", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! 
\n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def getProcessedData(self, data, labels):\n if self.underSamplePercentage != 0:\n data, labels = self.underSample(data, labels)\n if self.beta != 0: \n synData, synLabels = self.adaSynAdd(data, labels)\n if synData is not None:\n data, labels = combineTestSets(data, labels, synData, synLabels)\n return data, labels", "def trainData(self, X, y, NeuralNet, epochs):", "def train(self, train_data_loader, n_epochs, valid_data_loader=None):\n\n # Loop over epochs\n best_valid_loss = 99999\n for i in range(n_epochs):\n self.logger.info('Epoch %i' % i)\n summary = dict(epoch=i)\n # Train on this epoch\n sum_train = self.train_epoch(train_data_loader)\n summary.update(sum_train)\n # Evaluate on this epoch\n sum_valid = None\n if valid_data_loader is not None:\n sum_valid = self.evaluate(valid_data_loader)\n summary.update(sum_valid)\n \n if sum_valid['valid_loss'] < best_valid_loss:\n best_valid_loss = sum_valid['valid_loss']\n self.logger.debug('Checkpointing new best model with loss: %.3f', best_valid_loss)\n self.write_checkpoint(checkpoint_id=i,best=True)\n \n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n # Save summary, checkpoint\n self.save_summary(summary)\n if self.output_dir is not None:\n self.write_checkpoint(checkpoint_id=i)\n\n return self.summaries", "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' 
% self.name)", "def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]", "def trainAndCalculate(self):\n f = open(\"congressional_voting_dataset.csv\")\n data = np.genfromtxt(fname = f, delimiter=',', dtype=str, encoding=None)\n X = data[:, :-1]\n y = data[:, -1]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)\n self.svclassifier.fit(X_train, y_train)", "def _unpack_training_data(data, val=None):\n if isinstance(data, TrainingData):\n assert val is None\n return data\n\n if val is not None:\n x, y = data\n return TrainingData.from_x_y(x, y, val)\n\n train, val = data\n if not isinstance(train, Dataset):\n xx, yy = train\n train = RamDataset(xx, yy)\n if not isinstance(val, Dataset):\n xx, yy = val\n val = RamDataset(xx, yy)\n return TrainingData(train, val)", "def process_raw_data(self):\n \n # Get all participant names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Find out how many questions there were.\n n_questions = 0\n for i, ppname in enumerate(participants):\n if self.raw[ppname] is None:\n continue\n if len(self.raw[ppname][\"QuestionNumber\"]) > n_questions:\n n_questions = len(self.raw[ppname][\"QuestionNumber\"])\n \n # Define some variables of interest.\n vor = []\n for i in range(n_questions):\n vor.append(\"Q%d_resp\" % (i+1))\n vor.append(\"Q%d_RT\" % (i+1))\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] = []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n for j, qnr in enumerate(self.raw[ppname][\"QuestionNumber\"]):\n # Split Questionnaire 3, Q13 and Q14 into sub-questions\n if \"Q3\" in self._task_name and int(qnr) in [13,14]:\n # These questions split out into two parts: A description\n # of what each sub-part is, and a Boolean response for\n # each sub-part in the question. 
Example:\n # \"1_1_1_1_1_0//Television_VideogameConsole(suchas:WiiUPlayStationXboxorNintendoDS)_Tablet(likeanIPad)_Smartphone_LaptoporDesktopComputer_Noneofthese\"\n bool_resp, descr = self.raw[ppname][\"Response\"][j].split(\"//\")\n bool_resp = map(int, bool_resp.split(\"_\"))\n descr = descr.split(\"_\")\n # Store the data in the dict.\n for k, item in enumerate(descr):\n # Clean up the item name.\n if \"(\" in item:\n item = item[:item.find(\"(\")]\n var = \"Q%s_%s_resp\" % (int(qnr), item)\n # Create a new entry in the dict for this variable, if\n # one doesn't exist yet.\n if var not in self.data.keys():\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n # Store the data in the dict.\n self.data[var][i] = bool_resp[k]\n # Store response time for the whole item.\n self.data[\"Q%s_RT\" % (int(qnr))][i] = \\\n float(self.raw[ppname][\"TimeEndQuestion\"][j]) \\\n - float(self.raw[ppname][\"TimeStartQuestion\"][j])\n # All other questions are one-question one-response:\n else:\n # Store the data in the dict.\n self.data[\"Q%s_resp\" % (int(qnr))][i] = \\\n float(self.raw[ppname][\"Response\"][j])\n self.data[\"Q%s_RT\" % (int(qnr))][i] = \\\n self.raw[ppname][\"TimeEndQuestion\"][j] \\\n - self.raw[ppname][\"TimeStartQuestion\"][j]", "def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data", "def _parse_data(self, proto_data):\n logger.info(\"Start to parse graph proto data.\")\n\n self._parse_op_nodes(proto_data.node)\n self._parse_parameters(proto_data.parameters)\n self._parse_consts(proto_data.const_vals)\n\n self._update_input_after_create_node()\n self._update_output_after_create_node()\n\n logger.info(\"Parse proto data end, normal node count(only contain op node, \"\n \"parameter, const): %s.\", self.normal_node_count)", "def load_data(args):\n data_df = pd.read_csv(os.path.join(args.data_dir, 'driving_log.csv'))\n\n X = data_df[['center', 'left', 'right']].values\n y = data_df['steering'].values\n\n X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=args.test_size, random_state=0)\n\n return X_train, X_valid, y_train, y_valid", "def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], 
items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_loss)\n\n # Compute for validation set\n validation_loss, validation_acc = compute_loss_and_accuracy(\n self.dataloader_val, self.model, self.loss_criterion\n )\n self.VALIDATION_ACC.append(validation_acc)\n self.VALIDATION_LOSS.append(validation_loss)\n print(\"Current validation loss:\", validation_loss, \" Accuracy:\", validation_acc)\n # Compute for testing set\n test_loss, test_acc = compute_loss_and_accuracy(\n self.dataloader_test, self.model, self.loss_criterion\n )\n self.TEST_ACC.append(test_acc)\n self.TEST_LOSS.append(test_loss)\n\n self.model.train()", "def preprocess_valid_data(self):\r\n print(\"* Preprocessing validation data.\", flush=True)\r\n prep.create_HDF_file(self.C.validation_set)\r\n\r\n self.print_time_elapsed()" ]
[ "0.7268114", "0.6815508", "0.6703304", "0.6685302", "0.65728796", "0.64867747", "0.6361533", "0.63356423", "0.6325312", "0.6323176", "0.631201", "0.63008344", "0.6293298", "0.6284289", "0.62585795", "0.62380934", "0.6225368", "0.61996716", "0.6173986", "0.6165433", "0.6161794", "0.61498725", "0.61315715", "0.61308855", "0.61296684", "0.6125358", "0.61229086", "0.6121578", "0.611656", "0.6112187", "0.6098594", "0.608134", "0.60681504", "0.6065634", "0.60535246", "0.6050409", "0.60445637", "0.6034898", "0.6033067", "0.6030241", "0.6030138", "0.6022303", "0.60201806", "0.6012816", "0.6006439", "0.60040545", "0.5993545", "0.59922206", "0.59903586", "0.5982171", "0.5977945", "0.5976669", "0.59721935", "0.59621257", "0.59617805", "0.59593004", "0.59539396", "0.5949479", "0.5942769", "0.59258115", "0.5915548", "0.5912975", "0.5910868", "0.5909248", "0.59071517", "0.5904734", "0.5899824", "0.5892247", "0.5891006", "0.5889492", "0.5885907", "0.588562", "0.58814955", "0.5881388", "0.58801186", "0.5872034", "0.58636487", "0.58620596", "0.5853847", "0.5853474", "0.58502847", "0.5844645", "0.58421695", "0.58415735", "0.5824611", "0.5822588", "0.58223057", "0.58220625", "0.5820757", "0.58187205", "0.5818547", "0.5815506", "0.5814723", "0.58088493", "0.580581", "0.58055425", "0.5796215", "0.57962066", "0.5795028", "0.5790806" ]
0.60057056
45
Parses data for training and evaluation.
def _parse_eval_data(self, data):
    image, label = self._prepare_image_and_label(data)

    # The label is first offset by +1 and then padded with 0.
    label += 1
    label = tf.expand_dims(label, axis=3)

    if self._resize_eval:
        # Resizes and crops image.
        image, image_info = input_utils.resize_and_crop_image(
            image, self._output_size, self._output_size)

        # Resizes and crops mask.
        image_scale = image_info[2, :]
        offset = image_info[3, :]
        label = input_utils.resize_and_crop_masks(
            label, image_scale, self._output_size, offset)
    else:
        # Pads image and mask to output size.
        image = tf.image.pad_to_bounding_box(
            image, 0, 0, self._output_size[0], self._output_size[1])
        label = tf.image.pad_to_bounding_box(
            label, 0, 0, self._output_size[0], self._output_size[1])

    label -= 1
    label = tf.where(tf.equal(label, -1),
                     self._ignore_label * tf.ones_like(label), label)
    label = tf.squeeze(label, axis=0)
    valid_mask = tf.not_equal(label, self._ignore_label)
    labels = {
        'masks': label,
        'valid_masks': valid_mask
    }

    # If bfloat16 is used, casts input image to tf.bfloat16.
    if self._use_bfloat16:
        image = tf.cast(image, dtype=tf.bfloat16)
    return image, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_data():\n n_train, n_test = 15000, 4996\n n_features = 1355191\n\n print('- parsing train data')\n X_train = sp.lil_matrix((n_train, n_features))\n y_train = np.zeros(n_train)\n with open('/Users/kitazawa/data/news20.train') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_train[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_train[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_train.nnz / (n_train * n_features)))\n\n print('- parsing test data')\n X_test = sp.lil_matrix((n_test, n_features))\n y_test = np.zeros(n_test)\n with open('/Users/kitazawa/data/news20.test') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_test[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_test[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_test.nnz / (n_test * n_features)))\n\n return X_train, y_train, X_test, y_test", "def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def parse_dataset(self, data):\n pass", "def parse_train_data(training_set, language):\n print \"Reading training set: \" + training_set\n xmldoc = minidom.parse(training_set)\n lex_list = xmldoc.getElementsByTagName('lexelt')\n training_output = {}\n\n print \"Processing training set and training models...\"\n for node in lex_list:\n lexelt = node.getAttribute('item')\n training_output[lexelt] = {}\n inst_list = node.getElementsByTagName(\"instance\")\n # setup the neighbor_word_list within k distance of the word\n neighbor_word_list = []\n senseid_set = set()\n for inst in inst_list:\n sentence = inst.getElementsByTagName('context')[0]\n senseid_set.add(inst.getElementsByTagName('answer')[0].getAttribute('senseid'))\n neighbor_word_list = list(set(neighbor_word_list + get_neighbor_words_list(sentence, language)))\n senseid_list = list(senseid_set)\n training_output[lexelt][\"neighbor_word_list\"] = neighbor_word_list\n _4c_4d_feature = extract_4c_4d_feature(neighbor_word_list, senseid_list, inst_list, language)\n training_output[lexelt][\"4c_4d_feature\"] = _4c_4d_feature\n x_list = []\n y_list = []\n for inst in inst_list:\n y = inst.getElementsByTagName('answer')[0].getAttribute('senseid')\n if 
ignore_U_activated and y.__eq__('U'):\n continue\n y_list.append(str(replace_accented(y)))\n x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language)\n x_list.append(x)\n # for each node, build a classifier\n if language.__eq__(\"English\"):\n #clf = RandomForestClassifier(n_estimators=10) 58.9\n #clf = SGDClassifier() 61.1\n #clf = MultinomialNB() 62.9\n #clf = BernoulliNB() 55.8\n #clf = Perceptron() 60.4\n #clf = PassiveAggressiveClassifier() 62.1\n #clf = RidgeClassifier() 62.7\n #clf = svm.LinearSVC() 62.5\n #clf = KNeighborsClassifier()\n #clf = GaussianNB()\n clf = MultinomialNB(alpha=0.95) #+ alpha=0.95 + k=13 + left_right_order + vector_0_1 off = 64.7\n elif language.__eq__(\"Spanish\"):\n #clf = svm.LinearSVC() 82.0\n #clf = MultinomialNB() 82.2\n #clf = RidgeClassifier() 81.5\n #clf = PassiveAggressiveClassifier() 81.9\n #clf = BernoulliNB() 72.4\n clf = MultinomialNB(alpha=0.50) #0.25:82.6 0.4:83.1 0.45:83.2 0.5: 83.2 0.55:83.2 0.6:82.8 0.75:82.7\n elif language.__eq__(\"Catalan\"):\n #clf = svm.LinearSVC() # 82.8\n #clf = MultinomialNB() # 80.8\n #clf = RidgeClassifier() 82.6\n #clf = svm.LinearSVC(C=1.5) 82.9\n clf = MultinomialNB(alpha=0.25) # 0.5:84.3 0.35:84.6 0.3:84.8 0.25:85.4 0.2:85.3\n else:\n clf = svm.LinearSVC()\n clf.fit(x_list, y_list)\n training_output[lexelt][\"Classifier\"] = clf\n print \"Models trained.\"\n return training_output", "def parse_training_data(data_dir, task):\n\n COMMENTS_FILE = \"%s_annotated_comments.tsv\" % task\n LABELS_FILE = \"%s_annotations.tsv\" % task\n\n print(os.path.join(Project_Path, data_dir, COMMENTS_FILE))\n comments = pd.read_csv(os.path.join(Project_Path, data_dir, COMMENTS_FILE), sep = '\\t', index_col = 0)\n # remove special newline and tab tokens\n comments['comment'] = comments['comment'].apply(lambda x: x.replace(\"NEWLINE_TOKEN\", \" \"))\n comments['comment'] = comments['comment'].apply(lambda x: x.replace(\"TAB_TOKEN\", \" \"))\n\n annotations = pd.read_csv(os.path.join(Project_Path, data_dir, LABELS_FILE), sep = '\\t', index_col = 0)\n labels = empirical_dist(annotations[task])\n X = comments.sort_index()['comment'].values\n y = labels.sort_index().values\n\n assert(X.shape[0] == y.shape[0])\n return X, y", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def process_data(data, labels):\n\t\n\t# Split the dataset of string into train, validation, and test \n\t# Use a 70/15/15 split\n\t# train_test_split shuffles the data before splitting it \n\t# Stratify keeps the proportion of labels the same in each split\n\n\t# -- WRITE THE SPLITTING CODE HERE --\n\t# Split the data into 70 percent train and 30 percent test and validate data\n\ttrain_X, test_X_split, train_Y, test_Y_split = train_test_split(data, labels, test_size=0.30, stratify=labels,random_state= 1)\n\t# Split the remaining 30 percent data into 15 percent test and validate data each\n\ttest_X, val_X, test_Y, val_Y = train_test_split(test_X_split, test_Y_split, test_size=0.50, stratify=test_Y_split, random_state= 1)\n\n\t# Preprocess each dataset of strings into a dataset of 
feature vectors\n\t# using the CountVectorizer function. \n\t# Note, fit the Vectorizer using the training set only, and then\n\t# transform the validation and test sets.\n\n\t# -- WRITE THE PROCESSING CODE HERE --\n\t# Preprocess dataset using CountVectorizer from ngram range of 1 to 3\n\tvector = CountVectorizer(ngram_range=(1,3))\n\t# Fit data on train dataset\n\ttrain_X = vector.fit_transform(train_X)\n\t# Transform data on test dataset\n\ttest_X = vector.transform(test_X)\n\t# Transform data on validate dataset.\n\tval_X = vector.transform(val_X)\n\t# Return the training, validation, and test set inputs and labels\n\treturn train_X, train_Y, val_X, val_Y, test_X, test_Y\n\t# -- RETURN THE ARRAYS HERE -- ", "def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask", "def parse_data(self):\n vehicle_data = self.data.get('vehicles')\n for vehicle_datum in vehicle_data:\n self.vehicles.append(Vehicle(**vehicle_datum))\n\n job_data = self.data.get('jobs')\n for job_datum in job_data:\n self.jobs.append(Job(**job_datum))\n\n self.matrix = self.add_dummy_location_to_matrix(self.data.get('matrix'))", "def load_data(self, training_data):\n \"\"\"training data format [(instance, label),(instance, label),...]\"\"\"\n self.training_data = training_data", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def parse_file():\r\n if len(sys.argv) < 2:\r\n print(\"Need a file\")\r\n sys.exit(1)\r\n\r\n data_input = open(sys.argv[1])\r\n\r\n data = []\r\n for line in data_input: #for each of these lines\r\n if(len(line) == 0): pass #skip empty lines\r\n split_within_line = line.split(\"\\t\") #split by tabs\r\n new_datum = Datum(split_within_line[0], split_within_line[1], split_within_line[2]) #feed splits into a Datum object\r\n data.append(new_datum) #add Datum to list of data\r\n\r\n #make a list of characters representing the issues\r\n for i in range(len(data[0].dat_votes)-1): #from 0 to the end of the list of issues from the first datum\r\n original_issues.append(chr(i+97))\r\n\r\n\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n pair = _count_parties(training_set)\r\n\r\n unpruned = induce_node_tree(training_set, original_issues,\"D\",-1)\r\n # print(\"\\n#### UNPRUNED TREE ####\\n\")\r\n # print(unpruned)\r\n\r\n unprune_acc = calc_accuracy(unpruned, 
tuning_set)\r\n\r\n pruned = prune_tree(unpruned, tuning_set)\r\n print(\"\\n#### PRUNED TREE ####\\n\")\r\n print(pruned)\r\n\r\n acc = calc_accuracy(pruned, training_set)\r\n\r\n # print(\"Accuracy of unpruned tree with tuning_set: \" + str(unprune_acc))\r\n print(\"Accuracy of pruned tree with tuning_set: \" + str(acc))\r\n leave_one_out_cross_validation(data)", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 
'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def load_data(data_prefix, dataset_str, precalc):\n (num_data, train_adj, full_adj, feats, train_feats, test_feats, labels,\n train_data, val_data,\n test_data) = utils.load_graphsage_data(data_prefix, dataset_str)\n visible_data = train_data\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_data, :] = labels[train_data, :]\n y_val[val_data, :] = labels[val_data, :]\n y_test[test_data, :] = labels[test_data, :]\n\n train_mask = utils.sample_mask(train_data, labels.shape[0])\n val_mask = utils.sample_mask(val_data, labels.shape[0])\n test_mask = utils.sample_mask(test_data, labels.shape[0])\n\n if precalc:\n train_feats = train_adj.dot(feats)\n train_feats = np.hstack((train_feats, feats))\n test_feats = full_adj.dot(feats)\n test_feats = np.hstack((test_feats, feats))\n\n return (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, train_data, val_data, test_data,\n num_data, visible_data)", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, 
test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def train(self, data):\n pass", "def train(self, training_data):\n pass", "def load_data_wrapper():\r\n\r\n train_data, valid_data, tst_data = load_data()\r\n ## calling the function load_data()\r\n ## will return a tuple with three values for train, validation and test data\r\n ## storing the tuple values in separate three variables\r\n\r\n ## training_data:\r\n training_inputs = [np.reshape(x, (784,1)) for x in train_data[0]]\r\n ## reshaping the training inputs to 784x1 vector\r\n ## the required format for our neural network's input layer\r\n ## ---\r\n training_results = [vectorized_result(y) for y in train_data[1]]\r\n ## calling vectorized_result() function(see below)\r\n ## will convert the digit value in 10-dimensional vector\r\n ## the required format for our neural network's output layer\r\n ## ---\r\n training_data = zip(training_inputs, training_results)\r\n ## zipping together the training_inputs and training_results\r\n\r\n ## validation_data:\r\n validation_inputs = [np.reshape(x, (784,1)) for x in valid_data[0]]\r\n ## reshaping the validation inputs to 784x1 vector\r\n ## ---\r\n validation_data = zip(validation_inputs, valid_data[1])\r\n ## zipping together the validation_inputs and it's corresponding outputs\r\n\r\n ## test_data:\r\n test_inputs = [np.reshape(x, (784,1)) for x in tst_data[0]]\r\n ## reshaping the test inputs to 784x1 vector\r\n ## ---\r\n test_data = zip(test_inputs, tst_data[1])\r\n ## zipping together the test_inputs and it's corresponding outputs\r\n\r\n return (training_data, validation_data, test_data)", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def loadData ( self ) :\n df = pd.read_json ( self.dataset )\n df = df[pd.notnull ( df[2] )]\n df[1] = df[1].apply ( self.clean_text )\n\n self.X = df[1]\n self.y = df[2]", "def load_data(params):\n train_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_train_' + params['stemming'] + '.csv']))\n dev_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_dev_' + 
params['stemming'] + '.csv']))\n train_data, label_encode = data_prep(train_df, params, if_resample=True)\n dev_data, _ = data_prep(dev_df, params)\n return train_data, dev_data, label_encode", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def _load_data(self):\n data_x, data_y = make_classification(n_samples=5000, n_features=20,\n n_informative=10,\n n_redundant=0, n_repeated=0,\n n_classes=2,\n n_clusters_per_class=4,\n weights=None, flip_y=0.01,\n class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0,\n shuffle=True,\n random_state=self.args.rand_seed)\n\n self.orig_column_names = np.arange(data_x.shape[-1])\n self.data_x = data_x\n self.data_y = self.to_one_hot_encoding(data_y)\n self.numerical_idx = np.arange(data_x.shape[-1])\n self.non_num_idx = None\n self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = data_x[:, :1].astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def _load_training_data(self):\n self._save_training_data()", "def process_raw_data(self):\n \n # Define some variables of interest.\n vor = [\"n_sentences\", \"n_correct\", \"p_correct\", \"median_RT\", \\\n \"mean_RT\", \"stdev_RT\", \"scaled_stdev_RT\"]\n \n # Get all participant names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] 
= []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n self.data[\"n_sentences\"][i] = len(self.raw[ppname][\"Sentence\"])\n self.data[\"n_correct\"][i] = numpy.sum(self.raw[ppname][\"correct\"])\n self.data[\"p_correct\"][i] = float(self.data[\"n_correct\"][i]) \\\n / float(self.data[\"n_sentences\"][i])\n self.data[\"median_RT\"][i] = numpy.nanmedian(self.raw[ppname][\"RT\"])\n self.data[\"mean_RT\"][i] = numpy.nanmean(self.raw[ppname][\"RT\"])\n self.data[\"stdev_RT\"][i] = numpy.nanstd(self.raw[ppname][\"RT\"])\n # Compute a scaled standard deviation of the response time, scaled to the\n # median response time to remove the correlation between the two.\n self.data[\"scaled_stdev_RT\"] = self.data[\"stdev_RT\"] / self.data[\"median_RT\"]", "def load(self):\n\n X_train, y_train, X_test, y_test, variable_types, name = _load_data(\n self.task_id)\n\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.variable_types = variable_types\n self.name = name\n\n return self.X_train, self.y_train, self.X_test, self.y_test", "def get_data():\n\n pathxtrain = sys.argv[1]\n pathxtest = sys.argv[2]\n pathlabeltrain = sys.argv[3]\n pathlabeltest = sys.argv[4]\n\n xtrain = p.read_csv(pathxtrain, header=None)\n xtest = p.read_csv(pathxtest, header=None)\n label_train = p.read_csv(pathlabeltrain, header=None)\n label_test = p.read_csv(pathlabeltest, header=None)\n\n xtrain_mx = xtrain.values\n xtest_mx = xtest.values\n\n label_train = label_train.values.reshape(label_train.shape[0])\n label_test = label_test.values.reshape(label_test.shape[0])\n\n return xtrain_mx, xtest_mx, label_train, label_test", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, 
sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def validate(self, data_loader=None):\n if data_loader is None:\n data_loader = self.dataset.val_data_loader\n m = self.model\n m.eval()\n\n batch_matrix_list = []\n for idx, data in tqdm(enumerate(data_loader), total=len(data_loader)):\n if type(data) is dict:\n for key, value in data.items():\n data[key] = value.to(self.device)\n pred = m.predict(data)\n batch_matrix = self.evaluator.collect(data, pred)\n batch_matrix_list.append(batch_matrix)\n\n if self.single:\n result = self.evaluator.evaluate(batch_matrix_list, groupby=False)\n else:\n result = self.evaluator.evaluate(batch_matrix_list, groupby=True)\n return result", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image,\n self._output_size,\n self._output_size,\n 
aug_scale_min=self._aug_scale_min,\n aug_scale_max=self._aug_scale_max)\n\n # Resizes and crops boxes.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n # Pad label and make sure the padded region assigned to the ignore label.\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n label = input_utils.resize_and_crop_masks(\n label, image_scale, self._output_size, offset)\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n 
logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def split_data_into_training_and_validation(self, data):\n training_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... 
finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def run_epoch(self, epoch, data_loader, training=False):\n if training:\n self.model.train()\n else:\n self.model.eval()\n\n epoch_metrics = {\"loss\": 0.0}\n overall_parsing_counts = {\"correct\": 0, \"predicted\": 0, \"gold\": 0}\n num_evaluated_batches = 0\n\n with torch.set_grad_enabled(training):\n for sentences, target in data_loader:\n # Run model\n target = self._to_device(target)\n output, parsing_counts = self.parser.evaluate_batch(sentences)\n\n # Compute loss\n output, target = self._unroll_sequence_batch(output), self._unroll_sequence_batch(target)\n loss = self.criterion(output, target)\n\n # Add metrics to overall total\n epoch_metrics[\"loss\"] += loss.item()\n for count in \"gold\", \"predicted\", \"correct\":\n overall_parsing_counts[count] += parsing_counts[count]\n\n # Perform backpropagation (when training)\n if training:\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Print progress\n num_evaluated_batches += 1\n self.logger.debug('{} Epoch: {} {} Loss: {:.6f}'.format(\n \"Training\" if training else \"Validation\",\n epoch,\n self._progress(num_evaluated_batches, data_loader),\n loss.item()))\n\n epoch_metrics.update(self.compute_prf(overall_parsing_counts))\n\n return epoch_metrics", "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n labels = [] # targets we are predicting for each input\n\n for file_path in glob.glob(self.train_dir + '*.txt'):\n tokens = read_tokens(file_path)\n unique = list(set(tokens))\n x_count = round(len(unique) * 0.85)\n\n for _ in range(self.samples_per_doc):\n random.shuffle(unique)\n x.append(' '.join(unique[:x_count]))\n labels.append(' '.join(unique[x_count:]))\n\n # make x and y\n pkl = open('Model/tokenizer.p', 'rb')\n self.tokenizer = pickle.load(pkl)\n x = self.tokenizer.texts_to_matrix(x, mode='binary')\n y = self.tokenizer.texts_to_matrix(labels, mode='binary')\n\n # column zero is empty\n return x, y[:,1:]", "def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}", "def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n 
training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids", "def data_parser(data, snp_neighbour):\n\n # Get only the features of the SNP of interest\n if snp_neighbour == 0:\n # The SNP of interest samples are located at the middle position of the data sequence\n index_SNPi = (data.shape[2] - 1) / 2 # -1 for the SNP of interest\n samples = data[:, :, int(index_SNPi)]\n # Define the number of considered nucleotide positions\n n_positions = 1\n\n # Get the features of the SNP of interest and neighbouring positions\n else:\n # The data should fit in a 2D array for performing neural network. The number of samples should be stay, and\n # the number of features will be the number of features times the number of nucleotides\n samples = data.reshape([data.shape[0], -1])\n # Define the number of considered nucleotide positions\n n_positions = data.shape[2]\n\n # Get the number of used features\n n_features = data.shape[1]\n\n return samples, n_features, n_positions", "def train_with_loader(self, data, validating_data=None, scheduler=None, epochs=1):\n print('Training...')\n for epoch in range(epochs):\n self.train()\n for train_in, train_out in data:\n self.compute_loss(train_in, train_out, is_guess=False, training=True)\n self.eval()\n if validating_data:\n with torch.no_grad():\n valid_loss = self.compute_loss_loader(validating_data).item()\n print('Average validation error at step ',epoch+1,': ', valid_loss)\n if scheduler and valid_loss:\n scheduler.step()", "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def _get_data(self):\n\n # Grab the data. Note, the separator is actually ', ', not just a\n # comma, so specify. Also, recognize the \"?\" as an NA value\n # (I think it is easier to have pandas catch the NA values instead\n # of manually searching for and parsing these in the future).\n # Finally, set the engine to python, since having a separator greater\n # than one character automatically does this, and prints a warning\n # message. 
By explicitly telling it to use python, we suppress the\n # warning.\n self.train_df = pd.read_csv(self.train_url, sep=', ', header=None,\n na_values='?', engine='python')\n\n # For the training data, have one comment row, so need to ignore\n self.test_df = pd.read_csv(self.test_url, sep=', ', header=None,\n skiprows=1, na_values='?', engine='python')\n\n # Get the header data\n response = requests.get(self.head_url)\n header = response.text.split('\\n')\n\n # Now, filter to grab the header lines:\n # First, make sure there is at least one character for the line, and\n # ignore lines that start with the comment character for the file \"|\"\n header = [row for row in header if len(row) > 0 and row[0] != '|']\n\n # Ignore the first row, since it is just identifying the classifier\n # task and, get just the header values\n header = [head.split(':')[0] for head in header[1:]]\n\n # Finally, we need to add a header name for the last column (if <= or >\n # income of 50k)\n header.append('income')\n\n # Now, set the header for the data sets\n self.train_df.columns = header\n self.test_df.columns = header", "def train(self):\n # 1. Extracting details of attributes\n\n self.get_attribute_data()\n if self.train_data is None and self.train_data_file is None:\n raise ValueError(\"Neither training data not training file provided\")\n\n self.get_train_data()\n self.classifier = self.build_tree(rows=self.train_data, attribute_list=self.attribute_names)", "def read_data(self, p_data=''):\n\n _header_ = self._header_ + 'read_data(): '\n\n if p_data:\n self.p_data = p_data\n\n if not self.p_data:\n raise ValueError(_header_ + 'No data to read.')\n\n if not os.path.isfile(self.p_data):\n raise FileNotFoundError(_header_ + 'No such file: %s' % self.p_data)\n\n if self.verbose:\n print(_header_ + 'Reading data from %s ...' % self.p_data)\n\n if self.nidx_pred:\n # If there are nodes already in .nidx_pred, then they are likely copied over from the train data\n # So, these must be purged prior to reading new data\n print(_header_ + 'Excluding %d predicting nodes transfered from training dataset ...' 
% len(self.nidx_pred))\n self.nidx_exclude += self.nidx_pred\n self.nidx_pred = []\n\n # Extract data\n all_links = []\n all_labels = []\n has_other = False\n self.df = pd.read_table(self.p_data)\n df = self.df.applymap(func=lambda x: [i for i in x.strip().split('/') if i] if isinstance(x, str) else [])\n has_node = self.columns['nodes'] in df\n has_layer = self.columns['layers'] in df\n\n for i_row in range(len(df)):\n if has_layer:\n sp = df[self.columns['layers']][i_row][0]\n if sp in self.masklayer:\n continue\n if sp in self.layer2nidx:\n self.layer2nidx[sp] |= {i_row}\n else:\n self.layer2nidx[sp] = {i_row}\n self.nidx2layer.append(sp)\n labs = df[self.columns['labels']][i_row]\n if self.lab_other:\n node_lab = [x if (not self.labels or x in self.labels) else 'other' for x in labs]\n if not has_other and 'other' in node_lab:\n has_other = True\n else:\n node_lab = [x for x in labs if (not self.labels or x in self.labels)]\n if labs:\n all_labels += labs\n if not node_lab:\n self.nidx_exclude.append(i_row)\n self.nidx_pred.append(i_row)\n self.node_links.append([x for x in list(set(df[self.columns['links']][i_row])) if x not in self.exclude_links])\n self.node_labels.append(node_lab)\n if has_node:\n self.nodes.append(df[self.columns['nodes']][i_row])\n\n all_links += self.node_links[-1]\n\n # track link frequency\n for link in self.node_links[-1]:\n if link in self.link2freq:\n self.link2freq[link] += 1\n else:\n self.link2freq[link] = 1\n\n self.links += sorted(set(all_links) - set(self.links))\n set_all_labels = set(all_labels)\n if self.labels:\n if self.lab_other and 'other' not in self.labels and has_other:\n self.labels.append('other')\n\n if self.verbose:\n if self.lab_other:\n print(_header_ + 'Other labels: %s' % (','.join(set_all_labels - set(self.labels))))\n else:\n print(_header_ + 'Excluded labels: %s' % (','.join(set_all_labels - set(self.labels))))\n else:\n self.labels = sorted(list(set_all_labels))\n\n self.n_labels = len(self.labels)\n\n for idx, link in enumerate(self.links):\n self.link2lidx[link] = idx\n\n if self.verbose:\n print(' Found %d nodes' % len(self.node_links))\n print(' Found %d links' % len(self.links))\n\n return self", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, 
Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def _parse_data(self):\n for i, val in enumerate(self.values.keys()):\n x_, y_ = [], []\n xy = self.values[val]\n for value in self.values.index:\n x_.append(xy[value][0])\n y_.append(xy[value][1])\n\n self.set_and_get(\"x_\", val, x_)\n self.set_and_get(\"y_\", val, y_)", "def parse_data( self, data ):\n data = data.split( ',' )\n data = list( map( lambda x: x.strip(), data ) ) # remove white space\n # create data structure\n fields = [\n 'time',\n 'value'\n ] \n Reading = namedtuple( 'Reading', fields )\n \n try:\n return [ Reading( time = float( data[ i + 1 ] ), value = float( data[ i ] ) ) for i in range( 0, len( data ), len( fields ) ) ]\n \n except ValueError as err:\n raise err", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def get_data(self):\n\n self.read_expression()\n self.read_tfs()\n self.read_metadata()\n self.set_gold_standard_and_priors()", "def process_data(train_file, test_file):\n y_train, tx_train, ids_train, y_test, tx_test, ids_test = load(train_file, test_file)\n header_train = get_header(train_file)\n header_test = get_header(test_file)\n print('\\nData set will be split into four, each representing data with different jet numbers.')\n for jet_num in range(4):\n print('\\nProcess training set with jet number = ' + str(jet_num) + '...')\n y_train_jet, tx_train_jet, ids_train_jet = split_data(y_train, tx_train, ids_train, jet_num)\n columns_to_remove = analyze(tx_train_jet)\n tx_train_jet, header_train_jet = remove_columns(tx_train_jet, header_train, columns_to_remove)\n create_csv('train_jet_' + str(jet_num) + '.csv', y_train_jet, tx_train_jet, ids_train_jet, header_train_jet, False)\n print('\\n... created train_jet_' + str(jet_num) + '.csv file.')\n print('\\nProcess test set with jet number = ' + str(jet_num) + '...')\n y_test_jet, tx_test_jet, ids_test_jet = split_data(y_test, tx_test, ids_test, jet_num)\n columns_to_remove = analyze(tx_test_jet)\n tx_test_jet, header_test_jet = remove_columns(tx_test_jet, header_test, columns_to_remove)\n create_csv('test_jet_' + str(jet_num) + '.csv', y_test_jet, tx_test_jet, ids_test_jet, header_test_jet, True)\n print('\\n... 
created test_jet_' + str(jet_num) + '.csv file.')", "def parse(cls, data):\n raise NotImplementedError", "def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train", "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def train(X : list, y : list, model_name : str, data, mode,*vals,**args):\n # 훈련 결과 dictionary\n scores = {}\n\n # 훈련 모드 선택\n\n model = MODELS[mode][model_name]\n\n X_train, X_val, y_train, y_val = train_test_split(data[X], data[y], train_size=0.7)\n \n model.fit(X_train, y_train)\n\n scores['train'] = validation(model, X_train, y_train, mode)\n scores['validation'] = validation(model, X_val, y_val, mode)\n\n return scores", "def readData(path_to_dataset, train_size=0.8, validation_size=0.2):\n data = pd.read_csv(os.path.join(path_to_dataset, 'training_set_rel3.tsv'), sep='\\t', encoding='ISO-8859-1')\n # Drop columns that has null value \n data = data.dropna(axis=1)\n # Only take 4 columns of data from the dataset: essay_id, essay_set, essay, domain1_score\n data = data[['essay_id', 'essay_set', 'essay', 'domain1_score']]\n # Perform 80:20 train-test split on the training data\n train_set, test_set = train_test_split(data, train_size=train_size, random_state=0)\n # Split the 80% training set further into 60:20\n training_set, 
validation_set = train_test_split(train_set, test_size=validation_size, random_state=0)\n return training_set, test_set, validation_set", "def __init__(self):\n\n print '-'*60\n #self.train_folder = '../data/preprocess_nonstopword_nonstemming/train_clean/' # folder\n #self.test_folder = '../data/preprocess_nonstopword_nonstemming/test_clean/' # folder\n self.train_folder = '../data/preprocess_6/train_clean/' # folder\n self.test_folder = '../data/preprocess_6/test_clean/' # folder\n self.label_file = '../data/train_labels.csv' # path\n #pred_file = './submission_NB.csv' # predicitons\n self.pred_file = './submission_pre_6_t0.6.csv'\n\n\n self.train_ans = []\n self.test_index = []", "def _extract_results(self) -> None:\n metric_name = self.metric.name\n for inference_name in ['train', 'test', 'opt']:\n # TODO: Extract information from self.search_results\n data = getattr(self.search_results, f'{inference_name}_metric_dict')[metric_name]\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'single::{inference_name}::{metric_name}'] = np.array(data)\n\n if self.ensemble_results.empty() or inference_name == 'opt':\n continue\n\n data = getattr(self.ensemble_results, f'{inference_name}_scores')\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'ensemble::{inference_name}::{metric_name}'] = np.array(data)", "def _load_data(self, save_temp=False):\n # directly read processed data and encode\n print ('Start tokenizing data...')\n self.data = json.loads(\n open(self.cfg.data_path+self.cfg.data_file, 'r', encoding='utf-8').read().lower())\n self.train, self.dev, self.test = [], [], []\n print ('Start encoding data...')\n p = progressbar.ProgressBar(len(self.data))\n p.start()\n p_idx = 0\n for fn, dial in self.data.items():\n p.update(p_idx)\n p_idx += 1\n if '.json' in fn:\n fn = fn.replace('.json', '')\n if 'all' in self.cfg.exp_domains or self.exp_files.get(fn):\n if self.dev_files.get(fn):\n self.dev.append(self._get_encoded_data(fn, dial))\n elif self.test_files.get(fn):\n self.test.append(self._get_encoded_data(fn, dial))\n else:\n if self.data_mode == 'train':\n self.train.append(self._get_encoded_data(fn, dial))\n elif self.data_mode == 'test':\n pass\n else:\n raise Exception('Wrong Reader Data Mode!!!')\n p.finish()", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load_data(path_to_dir):\n train_pos = []\n train_neg = []\n test_pos = []\n test_neg = []\n with open(path_to_dir+\"train-pos.txt\", \"r\") as f:\n for i,line in enumerate(f):\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_pos.append(words)\n with open(path_to_dir+\"train-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n train_neg.append(words)\n with open(path_to_dir+\"test-pos.txt\", \"r\") 
as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_pos.append(words)\n with open(path_to_dir+\"test-neg.txt\", \"r\") as f:\n for line in f:\n words = [w.lower() for w in line.strip().split() if len(w)>=3]\n test_neg.append(words)\n\n return train_pos, train_neg, test_pos, test_neg", "def read_data(self):\r\n IS_REMAPPED = 1\r\n if IS_REMAPPED:\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TRAINING_DATA = [[remap(float(f1),float(f2))[0],remap(float(f1),float(f2))[1],\\\r\n int(c)] for [f1, f2, c] in data_as_strings]\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TESTING_DATA = [[remap(float(f1),float(f2))[0],remap(float(f1),float(f2))[1],\\\r\n int(c)] for [f1, f2, c] in data_as_strings]\r\n else:\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TRAINING_DATA = [[float(f1), float(f2), int(c)] for [f1, f2, c] in data_as_strings]\r\n data_as_strings = list(csv.reader(open('ring-data.csv'), delimiter=','))\r\n self.TESTING_DATA = [[float(f1), float(f2), int(c)] for [f1, f2, c] in data_as_strings]", "def run_parse(self):\n # Data set already has source file names from load_inputs\n parsedset = {}\n parsedset['data_set'] = []\n for log in self.input_files:\n parsemodule = self.parse_modules[self.args.parser]\n try:\n if self.args.tzone:\n parsemodule.tzone = self.args.tzone\n except NameError: pass\n parsedset['data_set'].append(parsemodule.parse_file(log))\n self.data_set = parsedset\n del(parsedset)", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! 
\n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def getProcessedData(self, data, labels):\n if self.underSamplePercentage != 0:\n data, labels = self.underSample(data, labels)\n if self.beta != 0: \n synData, synLabels = self.adaSynAdd(data, labels)\n if synData is not None:\n data, labels = combineTestSets(data, labels, synData, synLabels)\n return data, labels", "def trainData(self, X, y, NeuralNet, epochs):", "def train(self, train_data_loader, n_epochs, valid_data_loader=None):\n\n # Loop over epochs\n best_valid_loss = 99999\n for i in range(n_epochs):\n self.logger.info('Epoch %i' % i)\n summary = dict(epoch=i)\n # Train on this epoch\n sum_train = self.train_epoch(train_data_loader)\n summary.update(sum_train)\n # Evaluate on this epoch\n sum_valid = None\n if valid_data_loader is not None:\n sum_valid = self.evaluate(valid_data_loader)\n summary.update(sum_valid)\n \n if sum_valid['valid_loss'] < best_valid_loss:\n best_valid_loss = sum_valid['valid_loss']\n self.logger.debug('Checkpointing new best model with loss: %.3f', best_valid_loss)\n self.write_checkpoint(checkpoint_id=i,best=True)\n \n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n # Save summary, checkpoint\n self.save_summary(summary)\n if self.output_dir is not None:\n self.write_checkpoint(checkpoint_id=i)\n\n return self.summaries", "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' 
% self.name)", "def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]", "def trainAndCalculate(self):\n f = open(\"congressional_voting_dataset.csv\")\n data = np.genfromtxt(fname = f, delimiter=',', dtype=str, encoding=None)\n X = data[:, :-1]\n y = data[:, -1]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)\n self.svclassifier.fit(X_train, y_train)", "def _unpack_training_data(data, val=None):\n if isinstance(data, TrainingData):\n assert val is None\n return data\n\n if val is not None:\n x, y = data\n return TrainingData.from_x_y(x, y, val)\n\n train, val = data\n if not isinstance(train, Dataset):\n xx, yy = train\n train = RamDataset(xx, yy)\n if not isinstance(val, Dataset):\n xx, yy = val\n val = RamDataset(xx, yy)\n return TrainingData(train, val)", "def process_raw_data(self):\n \n # Get all participant names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Find out how many questions there were.\n n_questions = 0\n for i, ppname in enumerate(participants):\n if self.raw[ppname] is None:\n continue\n if len(self.raw[ppname][\"QuestionNumber\"]) > n_questions:\n n_questions = len(self.raw[ppname][\"QuestionNumber\"])\n \n # Define some variables of interest.\n vor = []\n for i in range(n_questions):\n vor.append(\"Q%d_resp\" % (i+1))\n vor.append(\"Q%d_RT\" % (i+1))\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] = []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n for j, qnr in enumerate(self.raw[ppname][\"QuestionNumber\"]):\n # Split Questionnaire 3, Q13 and Q14 into sub-questions\n if \"Q3\" in self._task_name and int(qnr) in [13,14]:\n # These questions split out into two parts: A description\n # of what each sub-part is, and a Boolean response for\n # each sub-part in the question. 
Example:\n # \"1_1_1_1_1_0//Television_VideogameConsole(suchas:WiiUPlayStationXboxorNintendoDS)_Tablet(likeanIPad)_Smartphone_LaptoporDesktopComputer_Noneofthese\"\n bool_resp, descr = self.raw[ppname][\"Response\"][j].split(\"//\")\n bool_resp = map(int, bool_resp.split(\"_\"))\n descr = descr.split(\"_\")\n # Store the data in the dict.\n for k, item in enumerate(descr):\n # Clean up the item name.\n if \"(\" in item:\n item = item[:item.find(\"(\")]\n var = \"Q%s_%s_resp\" % (int(qnr), item)\n # Create a new entry in the dict for this variable, if\n # one doesn't exist yet.\n if var not in self.data.keys():\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n # Store the data in the dict.\n self.data[var][i] = bool_resp[k]\n # Store response time for the whole item.\n self.data[\"Q%s_RT\" % (int(qnr))][i] = \\\n float(self.raw[ppname][\"TimeEndQuestion\"][j]) \\\n - float(self.raw[ppname][\"TimeStartQuestion\"][j])\n # All other questions are one-question one-response:\n else:\n # Store the data in the dict.\n self.data[\"Q%s_resp\" % (int(qnr))][i] = \\\n float(self.raw[ppname][\"Response\"][j])\n self.data[\"Q%s_RT\" % (int(qnr))][i] = \\\n self.raw[ppname][\"TimeEndQuestion\"][j] \\\n - self.raw[ppname][\"TimeStartQuestion\"][j]", "def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data", "def _parse_data(self, proto_data):\n logger.info(\"Start to parse graph proto data.\")\n\n self._parse_op_nodes(proto_data.node)\n self._parse_parameters(proto_data.parameters)\n self._parse_consts(proto_data.const_vals)\n\n self._update_input_after_create_node()\n self._update_output_after_create_node()\n\n logger.info(\"Parse proto data end, normal node count(only contain op node, \"\n \"parameter, const): %s.\", self.normal_node_count)", "def load_data(args):\n data_df = pd.read_csv(os.path.join(args.data_dir, 'driving_log.csv'))\n\n X = data_df[['center', 'left', 'right']].values\n y = data_df['steering'].values\n\n X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=args.test_size, random_state=0)\n\n return X_train, X_valid, y_train, y_valid", "def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], 
items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_loss)\n\n # Compute for validation set\n validation_loss, validation_acc = compute_loss_and_accuracy(\n self.dataloader_val, self.model, self.loss_criterion\n )\n self.VALIDATION_ACC.append(validation_acc)\n self.VALIDATION_LOSS.append(validation_loss)\n print(\"Current validation loss:\", validation_loss, \" Accuracy:\", validation_acc)\n # Compute for testing set\n test_loss, test_acc = compute_loss_and_accuracy(\n self.dataloader_test, self.model, self.loss_criterion\n )\n self.TEST_ACC.append(test_acc)\n self.TEST_LOSS.append(test_loss)\n\n self.model.train()", "def preprocess_valid_data(self):\r\n print(\"* Preprocessing validation data.\", flush=True)\r\n prep.create_HDF_file(self.C.validation_set)\r\n\r\n self.print_time_elapsed()" ]
[ "0.7268114", "0.6815508", "0.6703304", "0.6685302", "0.65728796", "0.64867747", "0.6361533", "0.63356423", "0.6325312", "0.6323176", "0.631201", "0.63008344", "0.6293298", "0.6284289", "0.62585795", "0.62380934", "0.6225368", "0.61996716", "0.6173986", "0.6165433", "0.6161794", "0.61498725", "0.61315715", "0.61308855", "0.61296684", "0.6125358", "0.61229086", "0.6121578", "0.611656", "0.6112187", "0.6098594", "0.608134", "0.60681504", "0.6065634", "0.60535246", "0.6050409", "0.60445637", "0.6034898", "0.6033067", "0.6030241", "0.6030138", "0.6022303", "0.60201806", "0.6012816", "0.6006439", "0.60057056", "0.60040545", "0.5993545", "0.59922206", "0.59903586", "0.5982171", "0.5977945", "0.5976669", "0.59721935", "0.59621257", "0.59617805", "0.59593004", "0.59539396", "0.5949479", "0.5942769", "0.59258115", "0.5915548", "0.5912975", "0.5910868", "0.5909248", "0.59071517", "0.5904734", "0.5899824", "0.5892247", "0.5891006", "0.5889492", "0.5885907", "0.588562", "0.58814955", "0.5881388", "0.58801186", "0.5872034", "0.58636487", "0.58620596", "0.5853847", "0.5853474", "0.58502847", "0.5844645", "0.58421695", "0.58415735", "0.5824611", "0.5822588", "0.58223057", "0.58220625", "0.5820757", "0.58187205", "0.5818547", "0.5815506", "0.5814723", "0.58088493", "0.580581", "0.58055425", "0.5796215", "0.57962066", "0.5795028", "0.5790806" ]
0.0
-1
Parses data for prediction.
def _parse_predict_data(self, data): image, labels = self._parse_eval_data(data) return { 'images': image, 'labels': labels }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_dataset(self, data):\n pass", "def predict(self, datafile):", "def predict(self, data: List):", "def predict(self, data):\n\t\traise NotImplementedError", "def postprocess(self, data):\n all_predictions, all_nbest_json, scores_diff_json = predictions(self._dev_dataset,\n data,\n self._tokenizer)\n\n if len(all_nbest_json) == 0 or len(all_nbest_json[0]) == 0:\n return [{'predicted': '',\n 'confidence': 0}]\n\n return [{'predicted': all_nbest_json[0][0]['text'],\n 'confidence': all_nbest_json[0][0]['probability']}]", "def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)", "def process(self, data):\n return self.estimator.predict(data)", "def _parse_fit_and_predict_result(result):\n if len(result) > 1 and result[1] and not isinstance(result[1], str):\n # Scores object does not resemble a label prediction (always string)\n y = result[0]\n scores = result[1]\n else:\n y = result\n scores = None\n return y, scores", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def predict(self, data_in):\n pass", "def predictSet(self, testData=\"\"):\n rawTestDataDump = self._read_file(testData)\n formattedTestData = [line.split(' ') for line in rawTestDataDump.split('\\n')]\n for test in formattedTestData:\n self._predictions.append(self.predict(test))\n return self._predictions", "def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def _parse_raw_predictions(self, raw_predictions):\n result = []\n for line in raw_predictions.split(\"\\n\"):\n line_parts = line.split(\"|\")\n type = line_parts[0]\n assert type.lstrip(\"*\") in (\n \"FP\", \"TP\", \"TN\", \"FN\"), 'Expected {} to be in (FP, TP, TN, FN), {}'.format(line[0], line)\n\n docid = line_parts[1]\n start_end = line_parts[2]\n entity_name = 
line_parts[3]\n alt_gene = None\n alt_gene_start_end = None\n\n if type.lstrip(\"*\") == \"TP\":\n start_end = line_parts[3]\n entity_name = line_parts[2]\n alt_gene = line_parts[4]\n alt_gene_start_end = line_parts[5]\n\n result.append({\n \"type\": type,\n \"docid\": docid,\n \"start_end\": start_end,\n \"entity_name\": entity_name,\n \"alt_gene\": alt_gene,\n \"alt_gene_start_end\": alt_gene_start_end,\n })\n return result", "def parse_prediction(self, predictions):\n\t\tusers = list()\n\t\tprint(predictions)\n\t\tfor prediction in predictions:\n\t\t\tfor email in prediction:\n\t\t\t\tusers.append(email)\n\t\t\t\t\n\t\treturn users", "def parse(cls, data):\n raise NotImplementedError", "def predict(self, data):\n return self.result.predict(data)", "def load_data(self):\r\n self.logger.log(self.log_file, 'Loading prediction data!')\r\n try:\r\n prediction_data = self.aws_operations.read_csv(self.prediction_file_path)\r\n if prediction_data is None:\r\n return None\r\n self.logger.log(self.log_file, 'Prediction data loaded successfully!')\r\n return prediction_data\r\n except Exception as e:\r\n self.logger.log(self.log_file, 'Error occurred while loading prediction data: %s' % e)\r\n raise e", "def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result", "def parse(self, data):\n raise NotImplementedError", "def predict(self, x):\n \n\n return predictions", "def postprocess(self, data):\n if self.error is not None:\n return [self.error]\n\n # Iterating over inference results to render the normalized probabilities\n response = []\n for inference_result in data:\n softmax_result = inference_result.softmax().asnumpy()\n for idx, label in enumerate(self.labels):\n response.append({label: float(softmax_result[0][idx])})\n return [response]", "def _predict(self, data):\n # make sure we're talking about arrays\n data = N.asarray(data)\n\n # checks only in debug mode\n if __debug__:\n if not data.ndim == 2:\n raise ValueError, \"Data array must be two-dimensional.\"\n\n if not data.shape[1] == self.__data.nfeatures:\n raise ValueError, \"Length of data samples (features) does \" \\\n \"not match the classifier.\"\n\n # compute the distance matrix between training and test data with\n # distances stored row-wise, ie. distances between test sample [0]\n # and all training samples will end up in row 0\n dists = self.__dfx(self.__data.samples, data).T\n\n # determine the k nearest neighbors per test sample\n knns = dists.argsort(axis=1)[:, :self.__k]\n\n # predicted class labels will go here\n predicted = []\n\n if self.__voting == 'majority':\n vfx = self.getMajorityVote\n elif self.__voting == 'weighted':\n vfx = self.getWeightedVote\n else:\n raise ValueError, \"kNN told to perform unknown voting '%s'.\" \\\n % self.__voting\n\n # perform voting\n results = [vfx(knn) for knn in knns]\n\n # extract predictions\n predicted = [r[0] for r in results]\n\n # store the predictions in the state. 
Relies on State._setitem to do\n # nothing if the relevant state member is not enabled\n self.predictions = predicted\n self.values = [r[1] for r in results]\n\n return predicted", "def _predict_all(self, data):\n preds = np.zeros(len(data))\n for row in data.itertuples():\n index, item, _, user = row\n preds[index] = self.predict(user, item)\n return preds", "def validate(self, validate_data):\n with open(validate_data, 'r') as validate_data:\n true_positive = 0\n true_negative = 0\n false_positive = 0\n false_negative = 0\n result = {}\n for type in self.label_type_map:\n result[type] = []\n while True:\n tokens = validate_data.readline().split()\n pos = validate_data.readline().split()\n labels = validate_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Classify all named entities in a sentence 85\n curr_results = self.viterbi(tokens)\n for i in range(0, len(labels)):\n if curr_results[i] != 'O':\n if labels[i] == 'O':\n false_positive += 1 # Not 'O', but should be 'O'\n else:\n if self.label_type_map[labels[i]] == self.label_type_map[curr_results[i]]:\n true_positive += 1 # Correct prediction\n else:\n if labels[i] == 'O':\n true_negative += 1 # Correct prediction of 'O'\n else:\n false_negative += 1 # Predicted 'O', not 'O'\n # Calculate precision - TP / (TP + FP)\n precision = float(true_positive) / float(true_positive + false_positive)\n # Calculate recall - TP / (TP + FN)\n recall = float(true_positive) / float(true_positive + false_negative)\n # Calculate F-Score - 2 * P * R / (P + R)\n f_score = float(2*precision * recall) / float(precision + recall)\n print \"Precision: \" + str(precision)\n print \"Recall: \" + str(recall)\n print \"F-score: \" + str(f_score)", "def predict(self, data):\n data['predicted'] = self.sentiment_classifier.predict_estimator(data)\n return data", "def parse_data(self):\n vehicle_data = self.data.get('vehicles')\n for vehicle_datum in vehicle_data:\n self.vehicles.append(Vehicle(**vehicle_datum))\n\n job_data = self.data.get('jobs')\n for job_datum in job_data:\n self.jobs.append(Job(**job_datum))\n\n self.matrix = self.add_dummy_location_to_matrix(self.data.get('matrix'))", "def analyze_data(data_path):\n # Check whether this data is a prediction file or not\n is_prediction, is_not_prediction = is_prediction_file(data_path)\n assert (is_prediction or is_not_prediction) and not (is_prediction and is_not_prediction), \\\n \"The file should be either a prediction file or not, i.e. 
it should contain either 2 or 3 columns\"\n sequence_pairs = []\n # if prediction, recover the original data and also compute accuracy\n per_instance_accuracy = -1\n per_sequence_accuracy = -1\n if is_prediction:\n per_instance_accuracy, per_sequence_accuracy = recover_original_data(data_path, sequence_pairs)\n # Construct sequence data\n data = SequenceData(sequence_pairs) if is_prediction else SequenceData(data_path)\n if is_prediction:\n print(\"A prediction data file:\", data_path)\n else:\n print(\"A non-prediction data file:\", data_path)\n print(\"{0} sequences (average length: {1:.1f})\".format(\n len(data.sequence_pairs), data.get_sequence_average_length()))\n print(\"{0} words\".format(data.num_of_words))\n print(\"{0} labeled words\".format(data.num_labeled_words))\n print(\"{0} word types\".format(len(data.word_count)))\n print(\"{0} label types\".format(len(data.label_count)))\n if is_prediction:\n print(\"Per-instance accuracy: {0:.3f}%\".format(per_instance_accuracy))\n print(\"Per-sequence accuracy: {0:.3f}%\".format(per_sequence_accuracy))", "def predict(self,data):\n results = []\n predict_instances = np.shape(data)[0]\n stored_instances = np.shape(self.data)[0]\n for predict_index in range(predict_instances):\n neighbors = [] # dist, label\n for stored_index in range(stored_instances):\n neighbors.append((self._distance(self.data[stored_index], data[predict_index]), self.data_labels[stored_index][0], data[predict_index]))\n neighbors = sorted(neighbors, key=lambda x: x[0])[:self.k]\n results.append(self._analyze_neighbors(neighbors))", "def predict(self, predPoints=None):", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def predict(self, X):", "def predict(self, X):", "def predict():\n data = request.json\n\n if data:\n predict = bool(data[\"predict\"])\n\n if predict:\n if predictor.pred_dict[\"model\"] == 0:\n # ARIMA\n arima_forecast = predictor.get_prediction_arima()\n plots.arima_df = arima_forecast\n elif predictor.pred_dict[\"model\"] == 1:\n # Prophet\n prophet_forecast = predictor.get_prediction_prophet()\n plots.prophet_df = prophet_forecast\n elif predictor.pred_dict[\"model\"] == 2:\n # LSTM\n lstm_forecast = predictor.get_prediction_bidirectlstm()\n plots.lstm_df = lstm_forecast\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'", "def predict(self, data):\n feat_df = data[['value']].copy()\n feat_df['length'] = feat_df['value'].apply(lambda val: len(val))\n feat_df['digit_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isdigit() for char in val) / len(val))\n feat_df['digit_num'] = feat_df['value'].apply(\n lambda val: sum(char.isdigit() for char in val))\n 
feat_df['alpha_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isalpha() for char in val) / len(val))\n feat_df['alpha_num'] = feat_df['value'].apply(\n lambda val: sum(char.isalpha() for char in val))\n feat_df['space_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isspace() for char in val) / len(val))\n feat_df['space_num'] = feat_df['value'].apply(\n lambda val: sum(char.isspace() for char in val))\n features = feat_df.ix[:, 1:].as_matrix()\n return self.clf.predict_proba(features)", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def _parse_data(self):\n for i, val in enumerate(self.values.keys()):\n x_, y_ = [], []\n xy = self.values[val]\n for value in self.values.index:\n x_.append(xy[value][0])\n y_.append(xy[value][1])\n\n self.set_and_get(\"x_\", val, x_)\n self.set_and_get(\"y_\", val, y_)", "def parse_data():\n n_train, n_test = 15000, 4996\n n_features = 1355191\n\n print('- parsing train data')\n X_train = sp.lil_matrix((n_train, n_features))\n y_train = np.zeros(n_train)\n with open('/Users/kitazawa/data/news20.train') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_train[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_train[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_train.nnz / (n_train * n_features)))\n\n print('- parsing test data')\n X_test = sp.lil_matrix((n_test, n_features))\n y_test = np.zeros(n_test)\n with open('/Users/kitazawa/data/news20.test') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_test[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_test[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_test.nnz / (n_test * n_features)))\n\n return X_train, y_train, X_test, y_test", "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. 
Please create an issue on Github\").format(self.self.model))", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def predict(self, data):\n\n prediction = None\n if self.model is not None:\n prediction = self.model.predict(data)\n return prediction", "def oldPredict(self, data):\n\n predictions = []\n\n if len(self.observations) < self.k_neighbors:\n print(f\"Data length ({len(data)}) was too small.\")\n\n for row in data:\n neighbors_info = {}\n\n for row_index in range(len(self.observations)):\n distance = self.calcualteEuclideanDistance(self.observations[row_index], row)\n if len(neighbors_info) > self.k_neighbors - 1:\n largest_distance = max(neighbors_info.keys())\n if distance < largest_distance:\n neighbors_info[distance] = self.labels[row_index]\n del neighbors_info[largest_distance]\n else:\n neighbors_info[distance] = self.labels[row_index]\n\n unique_values = set(neighbors_info.values())\n if len(unique_values) == 1:\n value = unique_values.pop()\n predictions.append(value)\n else:\n best_value = 0\n best_value_weight = 0\n for label in unique_values:\n weight = 0\n for distance in neighbors_info.keys():\n if label == neighbors_info[distance]:\n if 'inverse_distance' == self.weight_type:\n weight += self.calulateWeightedVote(distance)\n elif 'no_weight' == self.weight_type:\n weight += 1\n else:\n print(\"Not a valid_weight_type.\")\n\n if weight > best_value_weight:\n best_value_weight = weight\n best_value = label\n\n predictions.append(best_value)\n # print(f\"Neighbors Info: {neighbors_info}\")\n\n return predictions", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict_classification(self, data, current_timestamp):\n\n latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)\n\n predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(\n data)\n predict_x = self._preprocess_inputs(predict_x)\n\n if self._topology is None:\n n_timesteps = predict_x.shape[2]\n self.initialise_topology(n_timesteps)\n\n # Verify data is the correct shape\n network_input_shape = self._topology.get_network_input_shape()\n data_input_shape = predict_x.shape[-3:]\n\n if data_input_shape != network_input_shape:\n err_msg = 'Data shape' + str(data_input_shape) + \" doesnt match network input \" + str(\n network_input_shape)\n raise ValueError(err_msg)\n\n predict_y = cromulon_eval.eval_neural_net(\n predict_x, self._topology,\n self._tensorflow_flags,\n latest_train_file\n )\n\n if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position\n predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)\n predict_y = np.squeeze(predict_y, axis=1)\n\n target_timestamps = []\n for i in range(self._topology.n_forecasts):\n temp_timestamp = deepcopy(target_timestamp)\n 
target_timestamps.append(temp_timestamp)\n target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)\n\n return predict_y, symbols, target_timestamps", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def predict(data, model_predict):\n # Execute any steps you need to do before scoring\n\n # This method makes predictions against the raw, deserialized model\n #predictions = model_predict(data)\n\n data.to_csv(\"/opt/code/chemprop_folder/for_scoring.csv\", index=False)\n\n args = PredictArgs().parse_args([\n '--test_path', '/opt/chemprop_folder/for_scoring.csv',\n '--checkpoint_path', '/opt/code/model.pth',\n '--preds_path', '/opt/chemprop_folder/preds.csv'\n ])\n\n make_predictions(args)\n\n preds_df = pds.read_csv(\"/opt/chemprop_folder/preds.csv\")\n sh = str(preds_df.shape)\n print(sh)\n\n preds_df = preds_df.rename(columns = {\"p_np\": \"positive_class_label\"})\n preds_df = preds_df.drop(columns=['smiles'])\n preds_df[\"negative_class_label\"] = 1 - preds_df[\"positive_class_label\"]\n\n print(preds_df.head())\n\n # Execute any steps you need to do after scoring\n # Note: To properly send predictions back to DataRobot, the returned DataFrame should contain a\n # column for each output label for classification or a single value column for regression\n return preds_df", "def predict(self, first_preprocessed_inputs,second_preprocessed_inputs,third_preprocessed_inputs):\r\n pass", "def load_data_to_predict(self, filepath, sep=\",\"):\n if filepath.split('.')[-1] == 'csv':\n self.data_to_predict = pd.read_csv(filepath, sep=sep)\n elif filepath.split('.')[-1] == 'json':\n self.data_to_predict = pd.read_json(filepath)\n else:\n print 'Please select a csv or json file'", "def get_prediction_data(self):\n df = pd.read_csv(f'realtime_data/data.csv', names=['date', 'open', 'high', 'low', 'close', 'volume', 'barCount', 'average'])\n df = self.data_util_ref.pre_data_process(df)\n output = self.visual_ref.get_prediction_visual_data(df)\n\n # return json.dumps([output['data'][-30:], output['annotations']])\n return output", "def predict(self, data: Union[Any, List[Any]]):\n # predict without labels\n self._engine.eval()\n\n # prepare a list of (data, label) to make it iterable\n # for compatibility with schedule\n simple_dataloader = [(data, None)]\n data_iter = iter(simple_dataloader)\n output, _, _ = self.engine.execute_schedule(data_iter, forward_only=True, return_loss=False)\n return output", "def update_predictions(data):\n # TODO: Priority 1 - update predictions with inference results\n # TODO: Understand from a research team exactly what the data is going to look like\n trackID = data[0]\n prediction = data[1]\n confidence = data[2]\n to_Insert_Array = [trackID, prediction, confidence]\n OutPut_Data[trackID] = to_Insert_Array", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' 
% self.name)", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def prediction_data(loan_data):\n return loan_data[loan_data.status == \"LIVE\"]", "def predict(self, data: np.array) -> np.array:\n raise NotImplementedError", "def preprocess(self, data, label):\n\t\traise NotImplementedError", "def _get_prediction(self):\n raise NotImplementedError", "def get_predictions():\n\n print(\"OK1\");\n print(\"OK2\");\n return;", "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image,\n self._output_size,\n self._output_size,\n aug_scale_min=self._aug_scale_min,\n aug_scale_max=self._aug_scale_max)\n\n # Resizes and crops boxes.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n # Pad label and make sure the padded region assigned to the ignore label.\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n label = input_utils.resize_and_crop_masks(\n label, image_scale, self._output_size, offset)\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n # print len(data[0])\n # print type(data[0])\n # print data.shape\n return self.model.predict(data, 1, verbose) # ,steps)", "def _postprocess(self, preds):\n ntok = preds.pop(\"ntok\")\n ids = preds.pop(\"input_ids\")[:ntok]\n preds[\"tokens\"] = self._detokenize(ids)\n\n # Decode predicted top-k tokens.\n # token_topk_preds will be a List[List[(word, prob)]]\n # Initialize prediction for 0th token as N/A.\n token_topk_preds = [[(\"N/A\", 1.)]]\n pred_ids = preds.pop(\"top_k_indices\")[:ntok] # <int>[num_tokens, k]\n pred_probs = preds.pop(\"top_k_probs\")[:ntok] # <float32>[num_tokens, k]\n for token_pred_ids, token_pred_probs in zip(pred_ids, pred_probs):\n token_pred_words = self._detokenize(token_pred_ids)\n token_topk_preds.append(list(zip(token_pred_words, token_pred_probs)))\n preds[\"pred_tokens\"] = token_topk_preds\n\n # Process attention.\n for key in preds:\n if not re.match(r\"layer_(\\d+)/attention\", key):\n continue\n # Select only real tokens, since most of this matrix is padding.\n # <float32>[num_heads, max_seq_length, max_seq_length]\n # -> <float32>[num_heads, num_tokens, num_tokens]\n preds[key] = preds[key][:, :ntok, :ntok].transpose((0, 2, 1))\n # Make a copy of this array to avoid memory leaks, since NumPy otherwise\n # keeps a pointer around that prevents the source array from being GCed.\n preds[key] = preds[key].copy()\n\n return preds", "def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data", "def predict(self, data):\n try:\n getattr(self, \"tree\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n\n predicts_proba = self.predict_proba(data)\n predicts = _classify_from_probs(predicts_proba)\n return predicts", "def prediction():\n # retweets_only = request.args.get('retweets_only')\n # api.set_retweet_checking(strtobool(retweets_only.lower()))\n # with_sentiment = request.args.get('with_sentiment')\n # api.set_with_sentiment(strtobool(with_sentiment.lower()))\n # query = request.args.get('query')\n # api.set_query(query)\n\n # tweets = api.get_tweets()\n perdiction = api.get_perdiction()\n\n return perdiction", "def test_prediction(model, raw_data):\n data = PredictionData(**raw_data)\n formated_data = {}\n for key, value in data.dict().items():\n formated_data[key] = [value]\n df = pd.DataFrame(formated_data)\n res = model.predict(df)[0]\n assert type(res) == np.float64", "def loadData ( self ) :\n df = pd.read_json ( self.dataset )\n df = df[pd.notnull ( df[2] )]\n df[1] = df[1].apply ( self.clean_text )\n\n self.X = df[1]\n self.y = df[2]", "def data_parser(data, snp_neighbour):\n\n # Get only the features of the SNP of interest\n if snp_neighbour == 0:\n # The SNP of interest samples are located at the middle position of the data sequence\n index_SNPi = (data.shape[2] - 1) / 2 # -1 for the SNP of interest\n samples = data[:, :, int(index_SNPi)]\n # Define the number of considered nucleotide positions\n n_positions = 1\n\n # Get the features of the SNP of interest and neighbouring positions\n else:\n # The data should fit in a 2D array for performing neural network. 
The number of samples should be stay, and\n # the number of features will be the number of features times the number of nucleotides\n samples = data.reshape([data.shape[0], -1])\n # Define the number of considered nucleotide positions\n n_positions = data.shape[2]\n\n # Get the number of used features\n n_features = data.shape[1]\n\n return samples, n_features, n_positions", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def predict(self, sess, img_data):\n\n with sess.as_default():\n new_image = self.preprocess(img_data, self.input_shape)\n input_feed = self.create_input_feed(sess, new_image, img_data)\n output_fetch = self.create_output_fetch(sess)\n all_classes, all_scores, all_bboxes = sess.run(output_fetch, input_feed)\n\n return all_classes, all_scores, all_bboxes", "def predict(self, data):\n length = len(data)\n\n \"\"\"\n We check if the size of the data is equal to the number of input neurons\n \"\"\"\n assert length == self.structure[0], 'ERROR: the length of the input list is not equal to the number of input neurons'\n\n data = np.reshape(data, (length, 1)).astype(float)\n\n # print(type(data))\n\n \"\"\"\n We loop over all the transitions between the layers of our brain\n \"\"\"\n for i in range(self.number_of_transitions):\n if self.activation_function == 'sigmoid':\n data = self.sigmoid(np.dot(self.weights[i], data) + self.biases[i])\n elif self.activation_function == 'ReLU':\n data = self.ReLU(np.dot(self.weights[i], data) + self.biases[i])\n elif self.activation_function == 'tanh':\n data = self.tanh(np.dot(self.weights[i], data) + self.biases[i])\n\n \"\"\"\n We allow our brain to store the last prediction. 
This might be helpful for printing it out on the screen for the user to investigate\n \"\"\"\n self.output = data\n\n return data", "def extract_pred_from_estimator_predictions(predictions):\n # print('predictions:', predictions)\n pred = np.array([])\n for prediction in predictions:\n pred = np.append(pred, prediction['predictions'])\n num_samples = len(pred)\n pred = pred.reshape((num_samples, ))\n return pred", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def _parse_eval_data(self, data):\n image, label = self._prepare_image_and_label(data)\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n\n if self._resize_eval:\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image, self._output_size, self._output_size)\n\n # Resizes and crops mask.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n label = input_utils.resize_and_crop_masks(label, image_scale,\n self._output_size, offset)\n else:\n # Pads image and mask to output size.\n image = tf.image.pad_to_bounding_box(image, 0, 0, self._output_size[0],\n self._output_size[1])\n label = tf.image.pad_to_bounding_box(label, 0, 0, self._output_size[0],\n self._output_size[1])\n\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def predict_proba(self, data):\n try:\n getattr(self, \"tree\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n\n predicts = [self.tree.traverse(row) for name, row in data.iterrows()]\n return predicts", "def rank_pre_extract(self, mention_data, predictions):\n mdata = pd.DataFrame(mention_data)\n\n\n\n\n pass", "def get_predictions(year, month):\n \n start_date = str(year)+\"-\"+str(month)+\"-01\"\n end_date = str(year)+\"-\"+str(month)+\"-\"+str(monthrange(year, month)[1])\n\n date_range = pd.date_range(start_date,end_date, freq='D').strftime(\"%Y-%m-%d\").tolist()\n\n # predictfunction \n # do predictions\n pred_arr = []\n file_name = '../predictions/model_'+str(year)+'_'+str(month)+'.csv'\n \n try:\n predictions = load_predictions(file_name)\n predictions = predictions.round()\n except:\n print(\"An exception occurred\")\n predictions = pd.DataFrame(data = date_range,columns=['Datum'])\n \n \n for index,row in predictions.iterrows():\n \n pred_mail = 0\n pred_counter = 0\n pred_tel = 0\n \n # check predictions dataframe for 'Datum'\n if 'Datum' in predictions.columns:\n date = row['Datum']\n else:\n break;\n\n # check predictions dataframe for 'Mail'\n if 'Mail' in predictions.columns:\n pred_mail = row['Mail']\n\n # check predictions dataframe for 'Schalter'\n if 'Schalter' in predictions.columns:\n pred_counter = row['Schalter']\n\n # check predictions dataframe for 'Tel'\n if 'Tel' in predictions.columns:\n pred_tel = row['Tel']\n \n \n pred_dict = {'date': date, \n 'predictions':{'mail' : pred_mail, \n 'tel' : pred_tel, \n 'counter' : pred_counter\n }\n }\n\n pred_arr.append(pred_dict)\n\n print(pred_arr) \n \n return pred_arr", "def 
process_raw_data(self):\n \n # Define some variables of interest.\n vor = [\"n_sentences\", \"n_correct\", \"p_correct\", \"median_RT\", \\\n \"mean_RT\", \"stdev_RT\", \"scaled_stdev_RT\"]\n \n # Get all participant names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] = []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n self.data[\"n_sentences\"][i] = len(self.raw[ppname][\"Sentence\"])\n self.data[\"n_correct\"][i] = numpy.sum(self.raw[ppname][\"correct\"])\n self.data[\"p_correct\"][i] = float(self.data[\"n_correct\"][i]) \\\n / float(self.data[\"n_sentences\"][i])\n self.data[\"median_RT\"][i] = numpy.nanmedian(self.raw[ppname][\"RT\"])\n self.data[\"mean_RT\"][i] = numpy.nanmean(self.raw[ppname][\"RT\"])\n self.data[\"stdev_RT\"][i] = numpy.nanstd(self.raw[ppname][\"RT\"])\n # Compute a scaled standard deviation of the response time, scaled to the\n # median response time to remove the correlation between the two.\n self.data[\"scaled_stdev_RT\"] = self.data[\"stdev_RT\"] / self.data[\"median_RT\"]", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, review):\n raise NotImplementedError", "def predict(self, test_file_path: str) -> List[Dict[str, float]]:\n # TODO write code to extract features from test_file_path and \n # predict the labels for the model.\n pass", "def predict(self, test_data):\n return self.leader.predict(test_data)", "def predict(self, data: pd.DataFrame):\n \n # call pipeline on question and text\n return self._pipeline(question=data.iloc[0].question, context=data.iloc[0].context)", "def test_service_api_predict_multiple_raw_classified(service_app):\n response = service_app.post('/predict',\n data=json.dumps(data),\n content_type='application/json')\n response_data = json.loads(response.data)\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert response_data['message'] == 'Records successfully classified'\n assert len(response_data['prediction'].keys()) == 102\n assert response_data['prediction']['business_outcome'] == [4, 5]\n assert response_data['prediction']['phat'] == [0.8228085289874678, 0.753958838418463]\n assert all(len(value) == 2 for value in response_data['prediction'].values())", "def predict_data(self, data, context = {}):\n datapoints = policy_model.policy2datapoint(data)\n result = self.predict_datapoint(datapoints, context)\n return result", "def predict(self, X_val):\n \n # Get scores\n preds = list()\n scores = self.get_scores(X_val)\n\n # Round to predictions\n for score in scores:\n preds.append(round(score))\n \n # Read as numpy array\n preds = np.array(preds).astype('int32')\n \n return preds", "def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels", "def doPredict(self, data: StockData) -> float:\r\n pass", "def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)", "def parse_data( self ):\n super( PivotGraph, self ).parse_data()\n new_parsed_data = {}\n parsed_data = getattr( self, 'parsed_data', self.results )\n for pivot, data in parsed_data.items():\n new_pivot = self.parse_pivot( pivot )\n data = self.parse_datum( data )\n if data != None:\n new_parsed_data[ new_pivot ] = data\n self.parsed_data = new_parsed_data", "def extract_predictions(dataset):\n return dataset.Prediction.apply(lambda x: -1 if x == 'b' else 1)", "def predict(self):\n raise NotImplementedError", "def analyze_predictions(submission_model_path, csv_result):\n all_data_info = pd.read_csv(path.join(DATA_PATH, 'all_data_info.csv'))\n submission_model = pd.read_csv(submission_model_path)\n\n genre_label = get_genre_labels(True)\n null_genre = 0\n null_label = 0\n exact_labels = []\n errors = 0\n progress = progressbar.ProgressBar(\n 
maxval=len(submission_model),\n widgets=[progressbar.Bar('=', '[', ']'), ' ',\n progressbar.Percentage()])\n progress.start()\n for i, row in submission_model.iterrows():\n progress.update(i + 1)\n genre = all_data_info[all_data_info['new_filename'] == row['img']][\n 'genre'].dropna()\n # some paintings don't have a genre. Checking it\n if len(genre) < 1:\n null_genre += 1\n exact_labels.append(row['label'])\n continue\n if genre.values[0] not in genre_label:\n # No label. It's strange, but let's go on...\n null_label += 1\n exact_labels.append(row['label'])\n continue\n exact_labels.append(int(genre_label[genre.values[0]]['label']))\n if row['label'] != exact_labels[-1]:\n errors += 1\n progress.finish()\n\n submission_model['exact_label'] = exact_labels\n submission_model.to_csv(csv_result, index=False)\n\n # plot_confusion_matrix(submission_model)\n\n print('Genre is null: ' + str(null_genre))\n print('Label is null: ' + str(null_label))\n print('Errors: ' + str(errors) + ' (out of ' + str(len(submission_model)) +\n ')')\n print('Accuracy: ' + str(1 - (errors / float(len(submission_model)))))", "def test_service_api_predict_single_raw_classified(service_app):\n response = service_app.post('/predict',\n data=json.dumps(data[1:2]),\n content_type='application/json')\n\n response_data = json.loads(response.data)\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert response_data['message'] == 'Records successfully classified'\n assert len(response_data['prediction'].keys()) == 102\n assert response_data['prediction']['business_outcome'] == [4]\n assert response_data['prediction']['phat'] == [0.8228085289874678]\n assert all(len(value) == 1 for value in response_data['prediction'].values())", "def predict(self, model, context, data):\n pass" ]
[ "0.70179003", "0.68369985", "0.67741996", "0.6700655", "0.665754", "0.6550395", "0.6539271", "0.6422481", "0.6383678", "0.63100535", "0.6299874", "0.62833273", "0.6269634", "0.6264631", "0.6253759", "0.62460303", "0.62442476", "0.621106", "0.6204033", "0.6197395", "0.61885244", "0.61729884", "0.6166981", "0.6099921", "0.60890776", "0.6082593", "0.60725206", "0.6065796", "0.60638803", "0.606002", "0.6052431", "0.60374534", "0.60374534", "0.60333294", "0.6028566", "0.6006943", "0.6004488", "0.60018224", "0.5997795", "0.5982781", "0.5981183", "0.5972839", "0.5972368", "0.5972368", "0.5972368", "0.59716463", "0.5960382", "0.59467345", "0.5941316", "0.5936213", "0.5933607", "0.59200203", "0.59192395", "0.59123516", "0.5904881", "0.5881015", "0.5877306", "0.58729154", "0.58674747", "0.58614767", "0.5857272", "0.5856593", "0.5852862", "0.5838566", "0.5828276", "0.58271235", "0.5827057", "0.5826266", "0.582074", "0.580643", "0.5801234", "0.57955956", "0.57938045", "0.5793481", "0.5793481", "0.5793481", "0.5792057", "0.5786852", "0.5785903", "0.5775865", "0.57727", "0.576439", "0.5763659", "0.5763659", "0.5763494", "0.5763182", "0.5760263", "0.57599396", "0.5756828", "0.5754924", "0.5736747", "0.57339907", "0.5722416", "0.57215965", "0.57208276", "0.57125527", "0.57123166", "0.5711367", "0.5708668", "0.5708538" ]
0.7601515
0
If an iteriter_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output.
def test_iteriter_op_1():

    @ops.iteriter_op
    def f(x):
        return iter([4, 5, 6])

    result = f(iter([1, 2, 3]))  # Passing in an iterator, as expected

    assert(isinstance(result, collections.abc.Iterator)), f"{result}"
    assert(list(result) == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def __iter__(self):\n return iter(())", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def next(space, w_iterator, w_default=None):\n try:\n return space.next(w_iterator)\n except OperationError as e:\n if w_default is not None and e.match(space, space.w_StopIteration):\n return w_default\n raise", "def __iter__(self):\r\n return self._iterate()", "def safe_iterator(i):\n return i or []", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def _iterator_codegen(resty):\n\n def codegen(context, builder, sig, args):\n [d] = args\n [td] = sig.args\n iterhelper = context.make_helper(builder, resty)\n iterhelper.parent = d\n iterhelper.state = iterhelper.state.type(None)\n return impl_ret_borrowed(\n context,\n builder,\n resty,\n iterhelper._getvalue(),\n )\n\n return codegen", "def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item 
in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def iter_except(function, exception):\n try:\n while True:\n yield function()\n except exception:\n return", "def next(self, in_op):\n raise NotImplementedError", "def cotakewhile(function, iterator):\n results = []\n\n def checkTake(shouldTake, item):\n if shouldTake == True:\n results.append(item)\n return item\n\n def dotake(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkTake, item)\n return d\n\n def dostop(takeResult):\n return takeResult is None\n\n cfc = _CoFunCaller(resultCollector=dotake, stopFunction=dostop)\n return cfc.coiterate(iterator).addCallback(lambda _: results)", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def wrap_generator(generator, wrapper_function):\n for item in generator:\n yield wrapper_function(item)", "def generator_wrapper(iterable):\n\n num_items = len(iterable)\n for idx in range(num_items):\n yield iterable[idx]", "def get_next_as_optional(iterator):\n return iterator.get_next_as_optional()", "def __next__(self):\n if(self._isDone()):\n raise StopIteration\n return self._next()", "def getIter(object):\n iterator = None\n try:\n iterator = iter(object)\n except TypeError:\n pass\n return iterator", "async def anext(iterator):\n return await iterator.__anext__()", "async def anext(iterator):\n return await iterator.__anext__()", "def testIterWithException(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.__iter__()\n\t\tc.setReturn(1)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ti = x.__iter__()\n\t\tself.failUnless(i.next() == 1)\n\t\tself.failUnlessRaises(Exception, i.next)", "def on_operation(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None", "def __call__(self, func, *args, **kwargs):\n\n @wraps(func) # To keep its own namespace\n def wrapper(*args, **kwargs):\n gener = self.__iter__()\n return func(gener, *args, **kwargs)\n return wrapper", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def threadsafe(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def _iter_return(tup, fxn, invalid_error):\n\n try:\n val = fxn(*tup)\n except (IndexError, ValueError):\n if invalid_error:\n # Raise the exception if invalid_error indicates\n raise\n else:\n # Otherwise, just return a 'None' value\n return None\n ## end if\n else:\n # Good value; just generate it\n return val\n ## end try", "def test_invoke_error():\n\n with pywren.invokers.LocalInvoker(\"/tmp/task\") as iv:\n\n wrenexec = pywren.local_executor(iv)\n\n with pywrenext.iterwren.IterExec(wrenexec) as IE:\n\n iter_futures = IE.map(except_func, 10, [2])\n print(\"mapped\")\n pywrenext.iterwren.wait_exec(IE)\n assert iter_futures[0].current_iter == 2", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def yield_wrapped_ops(\n 
self,\n fn: Union[\n Callable,\n Tuple[Union[str, Collection[str]], Union[Callable, Collection[Callable]]],\n ],\n exclude=(),\n domain: Union[str, int, Collection] = None,\n ) -> Iterable[FnOp]:\n if isinstance(fn, tuple):\n name_path, fn_path = fn\n else:\n name_path, fn_path = (), fn\n\n fun_path = cast(Tuple[Callable, ...], astuple(fn_path, None))\n fun = fun_path[-1]\n\n if isinstance(fun, Operation):\n ## pass-through operations\n yield fun\n return\n\n def param_to_modifier(name: str, param: inspect.Parameter) -> str:\n return (\n optional(name)\n # is optional?\n if param.default is not inspect._empty # type: ignore\n else keyword(name)\n if param.kind == Parameter.KEYWORD_ONLY\n else name\n )\n\n given_name_path = astuple(name_path, None)\n\n decors_by_name = get_autograph_decors(fun, {}, domain or self.domain)\n\n for decor_name, decors in decors_by_name.items() or ((None, {}),):\n if given_name_path and not decor_name:\n name_path = decor_path = given_name_path\n else: # Name in decors was \"default\"(None).\n name_path = decor_path = astuple(\n (decor_name if decor_name else func_name(fun, fqdn=1)).split(\".\"),\n None,\n )\n assert decor_path, locals()\n\n if given_name_path:\n # Overlay `decor_path` over `named_path`, right-aligned.\n name_path = tuple(*name_path[: -len(decor_path)], *decor_path)\n\n fn_name = str(name_path[-1])\n if fn_name in exclude:\n continue\n overrides = self._from_overrides(decor_path)\n\n # TODO: support an extra overrides source, in ``wrap_funcs()``.\n op_data = (\n ChainMap(overrides, decors)\n if (overrides and decors)\n else overrides\n if overrides\n else decors\n )\n if op_data:\n log.debug(\"Autograph overrides for %r: %s\", name_path, op_data)\n\n op_props = \"needs provides renames, inp_sideffects out_sideffects\".split()\n needs, provides, override_renames, inp_sideffects, out_sideffects = (\n op_data.get(a, UNSET) for a in op_props\n )\n\n if needs is UNSET:\n needs = [...]\n needs = aslist(needs, \"needs\")\n if ... in needs:\n sig = inspect.signature(fun)\n fun_needs = [\n param_to_modifier(name, param)\n for name, param in sig.parameters.items()\n if name != \"self\" and param.kind is not Parameter.VAR_KEYWORD\n ]\n ## Insert object as 1st need for object-methods.\n #\n if len(fun_path) > 1:\n clazz = fun_path[-2]\n # TODO: respect autograph decorator for object-names.\n class_name = name_path[-2] if len(name_path) > 1 else clazz.__name__\n if is_regular_class(class_name, clazz):\n log.debug(\"Object-method %s.%s\", class_name, fn_name)\n fun_needs.insert(0, camel_2_snake_case(class_name))\n\n needs = [\n fneed if n is ... 
else n\n for n, fneed in itt.zip_longest(needs, fun_needs, fillvalue=...)\n ]\n\n if provides is UNSET:\n if is_regular_class(fn_name, fun):\n ## Convert class-name into object variable.\n provides = camel_2_snake_case(fn_name)\n elif self.out_patterns:\n provides = self._deduce_provides_from_fn_name(fn_name) or UNSET\n if provides is UNSET:\n provides = ()\n provides = aslist(provides, \"provides\")\n\n needs, provides = self._apply_renames(\n (override_renames, self.renames), (needs, provides)\n )\n\n if inp_sideffects is not UNSET:\n needs.extend(\n (\n i\n if is_sfx(i)\n else sfxed(*i)\n if isinstance(i, tuple)\n else token(i)\n )\n for i in aslist(inp_sideffects, \"inp_sideffects\")\n )\n\n if out_sideffects is not UNSET:\n provides.extend(\n (\n i\n if is_sfx(i)\n else sfxed(*i)\n if isinstance(i, tuple)\n else token(i)\n )\n for i in aslist(out_sideffects, \"out_sideffects\")\n )\n\n if self.full_path_names:\n fn_name = self._join_path_names(*name_path)\n\n op_kws = self._collect_rest_op_args(decors)\n\n yield FnOp(fn=fun, name=fn_name, needs=needs, provides=provides, **op_kws)", "def consumer(func):\n\n from functools import wraps\n\n @wraps(func)\n def wrapper(*args,**kw):\n gen = func(*args, **kw)\n gen.next()\n return gen\n return wrapper", "def _iter(*args, **kwargs):\n if ProgressBar:\n return ProgressBar()(_Iter(*args, **kwargs))\n else:\n return _Iter(*args, **kwargs)", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return NotImplemented", "def test_invoke_error_map():\n\n with pywren.invokers.LocalInvoker(\"/tmp/task\") as iv:\n\n wrenexec = pywren.local_executor(iv)\n\n with pywrenext.iterwren.IterExec(wrenexec) as IE:\n\n iter_futures = IE.map(except_func, 10, [12, 3, 5, 20])\n print(\"mapped\")\n pywrenext.iterwren.wait_exec(IE)\n all_final_iters = [f.current_iter for f in iter_futures]\n print(all_final_iters)\n assert all_final_iters == [10, 3, 5, 10]", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def iterate(itr: AnyIterable) -> AsyncIterator[T]:\n if isinstance(itr, AsyncIterator):\n return itr\n\n async def gen():\n for i in itr:\n yield i\n\n return gen()", "def __next__(self):\n return next(self.iterator)", "def test_iter_method(self):\n ref = mock.Mock()\n ref.side_effect = [{'rows': [1,2,3]}, {'rows': []}]\n rslt = Result(ref)\n collection = [x for x in rslt]\n self.assertEqual(collection, [1,2,3])\n\n run_iter = lambda x: [y for y in x]\n\n rslt = Result(ref, skip=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)\n\n rslt = Result(ref, limit=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)", "def mkiter(item):\n # FIXME: don't really need to construct a list\n if item is None:\n return iter(())\n elif isIterable(item):\n return iter(item)\n else:\n return iter([item])", "def threadsafe_generator(f):\n\tdef g(*a, **kw):\n\t\treturn threadsafe_iter(f(*a, **kw))\n\treturn g", "def _get_outputs(self, iterator, pre_dispatch):\n dispatch_thread_id = threading.get_ident()\n detach_generator_exit = False\n try:\n self._start(iterator, pre_dispatch)\n # first yield returns None, for internal use only. This ensures\n # that we enter the try/except block and start dispatching the\n # tasks.\n yield\n\n with self._backend.retrieval_context():\n yield from self._retrieve()\n\n except GeneratorExit:\n # The generator has been garbage collected before being fully\n # consumed. 
This aborts the remaining tasks if possible and warn\n # the user if necessary.\n self._exception = True\n\n # In some interpreters such as PyPy, GeneratorExit can be raised in\n # a different thread than the one used to start the dispatch of the\n # parallel tasks. This can lead to hang when a thread attempts to\n # join itself. As workaround, we detach the execution of the\n # aborting code to a dedicated thread. We then need to make sure\n # the rest of the function does not call `_terminate_and_reset`\n # in finally.\n if dispatch_thread_id != threading.get_ident():\n if not IS_PYPY:\n warnings.warn(\n \"A generator produced by joblib.Parallel has been \"\n \"gc'ed in an unexpected thread. This behavior should \"\n \"not cause major -issues but to make sure, please \"\n \"report this warning and your use case at \"\n \"https://github.com/joblib/joblib/issues so it can \"\n \"be investigated.\"\n )\n\n detach_generator_exit = True\n _parallel = self\n\n class _GeneratorExitThread(threading.Thread):\n def run(self):\n _parallel._abort()\n if _parallel.return_generator:\n _parallel._warn_exit_early()\n _parallel._terminate_and_reset()\n\n _GeneratorExitThread(\n name=\"GeneratorExitThread\"\n ).start()\n return\n\n # Otherwise, we are in the thread that started the dispatch: we can\n # safely abort the execution and warn the user.\n self._abort()\n if self.return_generator:\n self._warn_exit_early()\n\n raise\n\n # Note: we catch any BaseException instead of just Exception instances\n # to also include KeyboardInterrupt\n except BaseException:\n self._exception = True\n self._abort()\n raise\n finally:\n # Store the unconsumed tasks and terminate the workers if necessary\n _remaining_outputs = ([] if self._exception else self._jobs)\n self._jobs = collections.deque()\n self._running = False\n if not detach_generator_exit:\n self._terminate_and_reset()\n\n while len(_remaining_outputs) > 0:\n batched_results = _remaining_outputs.popleft()\n batched_results = batched_results.get_result(self.timeout)\n for result in batched_results:\n yield result", "def with_iter(contextmanager):\n with contextmanager as iterable:\n for item in iterable:\n yield item", "def __iter__(self):\n return iter(self.__iter())", "def mapg(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n for x in C:\n yield f(x)", "def __call__(self, iterable):\n if self._ordered:\n imap = self._distrubtor.imap\n else:\n imap = self._distrubtor.imap_unordered\n\n for result in imap(iterable):\n yield result", "def dispatch_next(self):\r\n self._dispatch_amount += 1\r\n while self._dispatch_amount:\r\n try:\r\n # XXX: possible race condition shuffling the order of\r\n # dispatches in the next two lines.\r\n func, args, kwargs = next(self._original_iterable)\r\n self.dispatch(func, args, kwargs)\r\n self._dispatch_amount -= 1\r\n except ValueError:\r\n \"\"\" Race condition in accessing a generator, we skip,\r\n the dispatch will be done later.\r\n \"\"\"\r\n except StopIteration:\r\n self._iterating = False\r\n self._original_iterable = None\r\n return", "def threadsafe_generator(f):\n\n def g(*a, **kw):\n return ThreadsafeIter(f(*a, **kw))\n\n return g", "def _iter2aiter(iter):\n\n def _consume(loop, iter, q):\n for item in iter:\n q.put(item)\n q.put(SENTINEL)\n\n async def _aiter():\n loop = asyncio.get_running_loop()\n q = janus.Queue(maxsize=DEFAULT_INFLIGHT_CHUNKS)\n try:\n fut = loop.run_in_executor(None, lambda: _consume(loop, iter, q.sync_q))\n while True:\n item = await q.async_q.get()\n if item is SENTINEL:\n 
break\n yield item\n q.async_q.task_done()\n await fut\n finally:\n q.close()\n await q.wait_closed()\n\n return _aiter()", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def iter_except(func, exception, first=None):\n try:\n if first is not None:\n yield first()\n while True:\n yield func()\n except exception:\n pass", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n\n return g", "def wrapit(fn):\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print(\"Error in XSLT extension: %s\" % e)\n raise\n return inside", "def __iter__(self):\n return self.next()", "def for_each(f: Callable[[A], Maybe[B]], iterable: Iterable[A]\n ) -> Maybe[Iterable[B]]:\n return cast(Maybe[Iterable[B]], map_m_(Just, f, iterable))", "def __iter__(self):\n raise NotImplementedError(\"__iter__\")", "def no_none(decorated):\n def _func(*args, **kwargs):\n \"\"\"wrap generator\"\"\"\n for value in decorated(*args, **kwargs):\n if value is not None:\n yield value\n return _func", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def iterateInReactor(self, iterator):\n d = defer.Deferred()\n self.iterators.append((iterator, d))\n return d", "def iter_except(cls, func, exception, first=None):\n try:\n if first is not None:\n yield first() # For database APIs needing an initial cast to db.first()\n while True:\n yield func()\n except exception:\n pass", "def threadsafe_generator(f):\n\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n\n return g", "def __call__(self, input=None): # pragma: no cover\n while False:\n yield None", "def __next__(self):\n return next(self.iter)", "def __iter__(self):\n for o in self._iter:\n yield o", "def pipeline(func):\n @wraps(func)\n def process(img_or_iterable, *args, **kwargs):\n if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):\n _len = len(img_or_iterable)\n s = SliceableIterable(img_or_iterable, range(_len), _len)\n s._proc_func = lambda image: func(image, *args, **kwargs)\n return s\n else:\n # Fall back on normal behavior of func, interpreting input\n # as a single image.\n return func(img_or_iterable)\n\n if process.__doc__ is None:\n process.__doc__ = ''\n process.__doc__ = (\"This function has been made pims-aware. When passed\\n\"\n \"a pims reader or SliceableIterable, it will return a \\n\"\n \"new SliceableIterable of the results. 
When passed \\n\"\n \"other objects, its behavior is \"\n \"unchanged.\\n\\n\") + process.__doc__\n return process", "def get_iterator(dataset):\n if context.executing_eagerly():\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n else:\n iterator = dataset_ops.make_initializable_iterator(dataset)\n initialize_iterator(iterator)\n return iterator", "def __iter__(self):\n return self.new_generator()", "def test_do_non_gf():\n f = lambda: None\n with raises(TypeError) as err_info:\n perf(do(f)())\n assert str(\n err_info.value\n ) == \"%r is not a generator function. It returned None.\" % (f,)", "def _wrapper(func, args):\n return func(*args)", "def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg", "def test_unwrap_or_else(\n self, start: Result[int, int], fn: t.Callable[[int], int], exp: int\n ) -> None:\n assert start.unwrap_or_else(fn) == exp", "def wrap_generator(func):\n\n async def _wrapped(*a, **k):\n r, ret = None, []\n gen = func(*a, **k)\n while True:\n try:\n item = gen.send(r)\n except StopIteration:\n break\n if inspect.isawaitable(item):\n r = await item\n else:\n r = item\n ret.append(r)\n\n if len(ret) == 1:\n return ret.pop()\n return ret\n\n return _wrapped", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def _wrap_generator(self, func):\n @functools.wraps(func)\n def generator_context(*args, **kwargs):\n gen = func(*args, **kwargs)\n\n # Generators are suspended and unsuspended at `yield`, hence we\n # make sure the grad mode is properly set every time the execution\n # flow returns into the wrapped generator and restored when it\n # returns through our `yield` to our caller (see PR #49017).\n cls = type(self)\n try:\n # Issuing `None` to a generator fires it up\n with cls():\n response = gen.send(None)\n\n while True:\n try:\n # Forward the response to our caller and get its next request\n request = yield response\n\n except GeneratorExit:\n # Inform the still active generator about its imminent closure\n with cls():\n gen.close()\n raise\n\n except BaseException:\n # Propagate the exception thrown at us by the caller\n with cls():\n response = gen.throw(*sys.exc_info())\n\n else:\n # Pass the last request to the generator and get its response\n with cls():\n response = gen.send(request)\n\n # We let the exceptions raised above by the generator's `.throw` or\n # `.send` methods bubble up to our caller, except for StopIteration\n except StopIteration as e:\n # The generator informed us that it is done: take whatever its\n # returned value (if any) was and indicate that we're done too\n # by returning it (see docs for python's return-statement).\n return e.value\n\n return generator_context", "def comap(function, iterator):\n results = []\n cfc = _CoFunCaller(function, resultCollector=results.append)\n d = cfc.coiterate(iterator)\n d.addCallback(lambda _: results)\n return d", "def flow_to_iter(flow):\n if ((sys.version_info.major == 3 and hasattr(flow, \"__next__\"))\n or (sys.version_info.major == 2 and hasattr(flow, \"next\"))):\n return flow\n else:\n return iter(flow)", "def __rshift__(self, next: 'IO[TResult]') -> 'IO[TResult]':\n return self.bind(lambda _: next)", "def __iter__(self):\n makeit = getattr(self._data, '__inverted__', self.__next__)\n return makeit()", "def cooperative_iter(citer):\n try:\n for chunk in citer:\n sleep(0)\n yield chunk\n except Exception as err:\n msg = (_(\"Error: cooperative_iter exception %(error)s\") %\n dict(error=err))\n LOG.error(msg)\n raise" ]
[ "0.734651", "0.7288327", "0.6928596", "0.6799886", "0.66857606", "0.6591529", "0.6551025", "0.6511721", "0.63353145", "0.59754914", "0.5973058", "0.59268594", "0.5852119", "0.58467436", "0.5843037", "0.583689", "0.57858115", "0.5740564", "0.5712443", "0.56730723", "0.5633042", "0.55738294", "0.55536985", "0.55025804", "0.54791933", "0.5469948", "0.545959", "0.54455984", "0.5414505", "0.5411123", "0.5407896", "0.5407219", "0.5398889", "0.5398889", "0.53980994", "0.5397663", "0.53838456", "0.53771687", "0.5364043", "0.5356085", "0.5355088", "0.5347635", "0.5334193", "0.5331089", "0.53195065", "0.53193593", "0.53193593", "0.53193593", "0.53101945", "0.53024596", "0.52930987", "0.5287613", "0.52858907", "0.52818704", "0.52679336", "0.5264984", "0.52641815", "0.52622706", "0.52527964", "0.52523494", "0.524936", "0.52422124", "0.52400047", "0.52326167", "0.52244973", "0.52244973", "0.52244973", "0.52244973", "0.52244973", "0.52244973", "0.52244973", "0.5222956", "0.5222383", "0.5216477", "0.52057725", "0.5201175", "0.51902676", "0.51855415", "0.51848245", "0.5178654", "0.51703244", "0.51662767", "0.516618", "0.51641464", "0.5147765", "0.51447546", "0.51399386", "0.5137993", "0.5135935", "0.5134609", "0.51249546", "0.5124945", "0.51223844", "0.51175046", "0.51113397", "0.51017183", "0.50876695", "0.5084327", "0.5081159", "0.5073131" ]
0.74068475
0
If an iteriter_op is given something besides an iterator as input, raise a ValueError.
def test_iteriter_op_2():

    @ops.iteriter_op
    def f(x):
        return iter([4, 5, 6])

    with pytest.raises(ValueError):
        f([1, 2, 3])  # Passing in a list instead of an iterator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def test_iter(\n self, start: Result[int, int], exp: t.Tuple[int, ...]\n ) -> None:\n assert tuple(start.iter()) == exp", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def _raise_if(predicate, *args):\n if predicate:\n raise InvalidChunk(*args)", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def safe_iterator(i):\n return i or []", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def test_Validator_iter_errors_two_arguments(self):\n\n validator = validators.Draft7Validator({})\n with self.assertWarns(DeprecationWarning) as w:\n error, = validator.iter_errors(\"foo\", {\"type\": \"number\"})\n\n self.assertEqual(error.validator, \"type\")\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Passing a schema to Validator.iter_errors is deprecated \",\n ),\n )", "def EOF_or_raise(f):\n try:\n f.next()\n except StopIteration:\n return\n else:\n raise Exception(str(f))", "def is_iterator(x):\n if sys.version_info >= (2, 7):\n return isinstance(x, collections.Iterator)\n return isinstance(x, collections.Iterator) and hasattr(x, '__iter__')", "def _is_iterable_non_string(arg):\n return (hasattr(arg, \"__iter__\") or hasattr(arg, \"__getattr__\")) and not isinstance(arg, str)", "def _iterator_unknown_size(self) -> Iterator[int]:\n raise NotImplementedError", "def test_invalid(self):\n a = np.ones((10, 10))\n ai = np.ones((10, 2), dtype=np.intp)\n\n # sanity check\n take_along_axis(a, ai, axis=1)\n\n # not enough indices\n assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)\n # bool arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)\n # float arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)\n # invalid axis\n assert_raises(AxisError, take_along_axis, a, ai, axis=10)", "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def next(self, in_op):\n raise NotImplementedError", "def is_iterator(obj):\n return isinstance(obj, collections.Iterator)", "def test_make_np_iterable_type_error():\n with pytest.raises(TypeError):\n _ = uc._make_np_iterable(\"s\")", "def test_iterator_input():\n empty_iterator = iter(())\n transactions = empty_iterator\n itemsets, rules = apriori(transactions, 0.2, 0.2)\n assert itemsets == {} and rules 
== []\n\n transactions = [(1, 2), (1, 2), (1, 3), (1, 4), (1, 3)]\n transactions_iter = iter(transactions)\n itemsets1, rules1 = apriori(transactions_iter, 0.2, 1)\n itemsets2, rules2 = apriori(transactions, 0.2, 1)\n assert len(rules1) == len(rules2)\n for i in range(len(rules1)):\n assert rules1[i] == rules2[i]", "def test_invalid_op_inputs_with_wrong_types(self, data, description):\n with self.assertRaises(TypeError, msg=description):\n tfx.get_op(data, tf.Graph())", "def _to_int(maybe_iter):\n if not isinstance(maybe_iter, str) and isinstance(maybe_iter, abc.Iterable):\n return tuple([_to_int(a) for a in maybe_iter])\n try:\n return int(maybe_iter)\n except ValueError:\n return maybe_iter", "def is_iterable(obj):\n return isinstance(obj, (list, tuple, types.GeneratorType)) or \\\n (not isinstance(obj, (int, str, dict)) and\n bool(getattr(obj, \"next\", False)))", "def is_iterable(arg):\n return (\n isinstance(arg, collections.Iterable)\n and not isinstance(arg, str)\n )", "def isiter(obj):\r\n \r\n try:\r\n iter(obj)\r\n return True\r\n except TypeError:\r\n pass\r\n return False", "def unpack_iterator_input(iterator):\n try:\n next_element = iterator.get_next()\n except errors.OutOfRangeError:\n raise RuntimeError('Your dataset iterator ran out of data; '\n 'Make sure that your dataset can generate '\n 'required number of samples.')\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n 'Please provide model inputs as a list or tuple of 2 or 3 '\n 'elements: (input, target) or (input, target, sample_weights) '\n 'Received %s' % next_element)\n if len(next_element) == 2:\n x, y = next_element\n weights = None\n else:\n x, y, weights = next_element\n else:\n x = next_element\n y = None\n weights = None\n return x, y, weights", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def testIterWithException(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.__iter__()\n\t\tc.setReturn(1)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ti = x.__iter__()\n\t\tself.failUnless(i.next() == 1)\n\t\tself.failUnlessRaises(Exception, i.next)", "def test_initializing_deque_with_non_iterable_raises_error():\n from deque import Deque\n with pytest.raises(TypeError):\n new_deque = Deque(interable=123456)", "def is_iterable(obj):\n try:\n itr = iter(obj)\n del itr\n return True\n except:\n return False", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def is_iterable(value):\n # noinspection PyUnresolvedReferences\n return hasattr(value, '__iter__') and hasattr(value, '__getitem__')", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def next(space, w_iterator, w_default=None):\n try:\n return space.next(w_iterator)\n except OperationError as e:\n if w_default is not None and e.match(space, space.w_StopIteration):\n return w_default\n raise", "def testExplicitGeneratorConvenienceFunctionExceptionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.generator(x.g(8, 9), [10], Exception(\"bogus\"))\n\t\tc.replay()\n\t\tg = x.g(8, 9)\n\t\tself.failUnless(g.next() == 10)\n\t\tself.failUnlessRaises(Exception, g.next)", "def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception 
as e:\n pass\n\n return False", "def _check_iterable(self):\n if self.theoretical_size is None:\n raise TypeError(\"This `fixture_ref` has not yet been initialized, so it cannot be unpacked/iterated upon. \"\n \"This is not supposed to happen when a `fixture_ref` is used correctly, i.e. as an item in\"\n \" the `argvalues` of a `@parametrize` decorator. Please check the documentation for \"\n \"details.\")\n if self.theoretical_size == 1:\n raise TypeError(\"This fixture_ref does not represent a tuple of arguments, it is not iterable\")", "def test_invalid_process(self):\n with self.assertRaises(TypeError):\n self.encoder.process([1, 2, 3, 4])", "def is_iterable(x: Any) -> bool:\r\n return isinstance(x, collections.abc.Iterable) and not isinstance(x, (str, bytes))", "def is_iterable(element):\n return isinstance(element, (set, list, tuple))", "def test_badargs(self):\n self.assertRaises(TypeError, isint, [])\n self.assertRaises(TypeError, isint, {})\n self.assertRaises(TypeError, isint, None)\n return", "def is_iterator(obj: Any) -> bool:\n if isinstance(obj, collections.abc.Iterator):\n return True\n else:\n try:\n return iter(obj) is obj\n except TypeError:\n return False", "def test_G_2_by_2_bad_data(self):\r\n self.assertRaises(ValueError, G_2_by_2, 1, -1, 1, 1)", "def __iter__(self):\n return NotImplemented", "def is_iterator(obj):\n cls = obj.__class__\n return hasattr(cls, '__next__') and not hasattr(cls, '__len__')", "def test_bad_iterations(self):\r\n with pytest.raises(ValueError, match=\"Number of iterations must be a positive int\"):\r\n clique.search(clique=[0, 1, 2, 3], graph=nx.complete_graph(5), iterations=-1)", "def is_iterable(x):\n if isinstance(x, six.string_types):\n return False\n return hasattr(x, '__iter__')", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def test_type_error(self):\n with self.assertRaises(TypeError):\n function_inclusion_filter_builder(5)", "def _next_exhausted(self):\n\n raise StopIteration() from None", "def test_filter_args_error_msg():\r\n nose.tools.assert_raises(ValueError, filter_args, f, [])", "def nonstringiter(obj):\n return not isinstance(obj, string_types) and isinstance(obj, Iterable)", "def test_invoke_processor_errors():\n\n def processor(app, documents):\n raise ValueError(\"something bad happened\")\n yield\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\"}])\n\n stream = testapp.invoke(\"test\")\n\n with pytest.raises(ValueError, match=r\"^something bad happened$\"):\n next(stream)\n\n with pytest.raises(StopIteration):\n next(stream)", "def _is_good_iterable(obj):\n return _is_iterable(obj) and _has_str_elems(obj)", "def test_foreach_in_non_first_position_raises_error(self):\n with pytest.raises(AssertionError) as exc_info:\n list(parser.generate_commands(yaml.load(\"\"\"\n - something\n - foreach: [A,B]\n \"\"\")))\n assert (\"'foreach' may only be specified at the beginning of a sequence\" in\n str(exc_info.value))", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def _is_non_string_iterable(value):\n if isinstance(value, str):\n return False\n if hasattr(value, '__iter__'):\n return True\n if isinstance(value, 
collections.abc.Sequence):\n return True\n return False", "def test_stop_iteration_in_generators_yield_from(\n assert_errors,\n parse_ast_tree,\n code,\n statement,\n exception,\n default_options,\n):\n tree = parse_ast_tree(code.format(statement, exception))\n\n visitor = FunctionDefinitionVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [StopIterationInsideGeneratorViolation])", "def check_throw():\n while True:\n try:\n yield\n except ValueError:\n pass", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def test_using_non_query_operators_in_query_args_raises_error(self):\r\n with self.assertRaises(query.QueryException):\r\n TestModel.objects(5)", "def test_sum_integer_should_raise_exception(self):\n\n with self.assertRaises(TypeError):\n sum(1)", "def py_raise(*xs):\n raise NotImplemented", "def test_queryOrIllegalQuery(self):\n self.assertRaises(imap4.IllegalQueryError,\n imap4.Or, imap4.Query(messages=1))", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def __iter__(self):\n raise NotImplementedError(\"__iter__\")", "def raise_with_op(node, thunk=None, exc_info=None):\r\n if exc_info is None:\r\n exc_info = sys.exc_info()\r\n exc_type, exc_value, exc_trace = exc_info\r\n if exc_type == KeyboardInterrupt:\r\n # print a simple traceback from KeyboardInterrupt\r\n raise exc_type, exc_value, exc_trace\r\n try:\r\n trace = node.tag.trace\r\n except AttributeError:\r\n try:\r\n trace = node.op.tag.trace\r\n except AttributeError:\r\n trace = ()\r\n exc_value.__thunk_trace__ = trace\r\n exc_value.__op_instance__ = node\r\n if node in node.fgraph.toposort():\r\n exc_value.__applynode_index__ = node.fgraph.toposort().index(node)\r\n else:\r\n exc_value.__applynode_index__ = None\r\n\r\n # nose and unittest catch the exception and do not run th thunk_hook\r\n # so it can be useful to just blurt out errors right here\r\n if raise_with_op.print_thunk_trace:\r\n log_thunk_trace(exc_value)\r\n\r\n hints = []\r\n detailed_err_msg = \"\\nApply node that caused the error: \" + str(node)\r\n\r\n types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]\r\n detailed_err_msg += \"\\nInputs types: %s\\n\" % types\r\n\r\n if thunk is not None:\r\n if hasattr(thunk, 'inputs'):\r\n shapes = [getattr(ipt[0], 'shape', 'No shapes')\r\n for ipt in thunk.inputs]\r\n strides = [getattr(ipt[0], 'strides', 'No strides')\r\n for ipt in thunk.inputs]\r\n scalar_values = []\r\n for ipt in thunk.inputs:\r\n if getattr(ipt[0], \"size\", -1) == 1:\r\n scalar_values.append(ipt[0])\r\n else:\r\n scalar_values.append(\"not scalar\")\r\n else:\r\n shapes = \"The thunk don't have an inputs attributes.\"\r\n strides = \"So we can't access the strides of inputs values\"\r\n scalar_values = \"And can't print its inputs scalar value\"\r\n\r\n detailed_err_msg += (\"Inputs shapes: %s\" % shapes +\r\n \"\\nInputs strides: %s\" % strides +\r\n \"\\nInputs scalar values: %s\\n\" % scalar_values)\r\n else:\r\n hints.append(\r\n \"HINT: Use another linker then the c linker to\"\r\n \" have the inputs shapes and strides printed.\")\r\n\r\n # Print node backtrace\r\n tr = getattr(node.tag, 'trace', None)\r\n if tr:\r\n sio = StringIO.StringIO()\r\n 
traceback.print_list(tr, sio)\r\n tr = sio.getvalue()\r\n detailed_err_msg += \"\\nBacktrace when the node is created:\"\r\n detailed_err_msg += str(tr)\r\n else:\r\n hints.append(\r\n \"HINT: Re-running with most Theano optimization disabled could\"\r\n \" give you a back-traces when this node was created. This can\"\r\n \" be done with by setting the Theano flags\"\r\n \" optimizer=fast_compile\")\r\n\r\n if theano.config.exception_verbosity == 'high':\r\n f = StringIO.StringIO()\r\n theano.printing.debugprint(node, file=f, stop_on_name=True,\r\n print_type=True)\r\n detailed_err_msg += \"\\nDebugprint of the apply node: \\n\"\r\n detailed_err_msg += f.getvalue()\r\n\r\n else:\r\n hints.append(\r\n \"HINT: Use the Theano flag 'exception_verbosity=high'\"\r\n \" for a debugprint of this apply node.\")\r\n\r\n exc_value = exc_type(str(exc_value) + detailed_err_msg +\r\n '\\n' + '\\n'.join(hints))\r\n raise exc_type, exc_value, exc_trace", "def yield_in_except_throw_exc_type():\n try:\n raise ValueError\n except ValueError as exc:\n assert sys.exc_info()[1] is exc, sys.exc_info()\n yield\n assert sys.exc_info()[1] is exc, sys.exc_info()", "def _validate_from_args(self, fget=None, fset=None, fdel=None, fval=None, fitr=None, doc=None):\n if fitr is None:\n fitr = iter\n if doc is None and fget is not None:\n doc = fget.__doc__\n return fget, fset, fdel, fval, fitr, doc", "def test_iterable_len_doesnt_match_input_size(self):\n num_args = 10\n for iter_len in [5, 10, 20]:\n expected_args_sum = min(iter_len, num_args)\n\n # Test for normal list (range is considered a normal list as it implements __len__ and such)\n with self.subTest(iter_len=iter_len, input='list'):\n chunks = list(chunk_tasks(range(num_args), iterable_len=iter_len, n_splits=1))\n total_args = sum(map(len, chunks))\n self.assertEqual(total_args, expected_args_sum)\n self.assertEqual(list(range(expected_args_sum)), list(chain.from_iterable(chunks)))\n\n # Test for an actual generator (range does not really behave like one)\n with self.subTest(iter_len=iter_len, input='generator/iterator'):\n chunks = list(chunk_tasks(iter(range(num_args)), iterable_len=iter_len, n_splits=1))\n total_args = sum(map(len, chunks))\n self.assertEqual(total_args, expected_args_sum)\n self.assertEqual(list(range(expected_args_sum)), list(chain.from_iterable(chunks)))", "def __ne__(self, *args):\n return _ida_hexrays.user_numforms_iterator_t___ne__(self, *args)", "def __ne__(self, *args):\n return _SALOMERuntime.SALOMERuntime_PySwigIterator___ne__(self, *args)", "def test_args_bad_value(testapp, args, error):\n\n with pytest.raises(ValueError) as excinfo:\n next(sitemap.process(testapp, [], **args))\n assert str(excinfo.value) == error", "def __check_train_args__(reader, event_handler, **kwargs):\n if not callable(reader) or not isinstance(reader(), collections.Iterator):\n raise TypeError('train_data_reader should be a function, '\n 'which can return a iterator')\n if not callable(event_handler):\n raise TypeError('event handler should be a function')", "def __ne__(self, *args):\n return _ida_hexrays.user_unions_iterator_t___ne__(self, *args)", "def yield_and_raise(data, exc):\n yield from data\n raise exc", "def test_parsingRaisesIllegalOperationResponse(self):\n self.assertParseExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def test_collect(\n self, iterable: t.Iterable[Result[int, str]], exp: Result[int, str]\n ) -> None:\n assert Result.collect(iterable) == exp", "def 
test_clumerge_exceptions(prng: Generator):\n # Data item does not contain required field `unknown`\n nd = 3\n npts = prng.integers(10, high=101)\n ds = {\n \"points\": prng.random((npts, nd)),\n \"clusters\": prng.integers(1, high=6, size=npts),\n }\n with pytest.raises(\n ValueError,\n match=re.escape(\"Data item does not contain required field `unknown`\"),\n ):\n clumerge(ds, fields=(\"clusters\", \"unknown\"))\n\n # \"`{clusters_field}` must contain integer types\n nd = 4\n npts = prng.integers(10, high=101)\n ds = {\"points\": prng.random((npts, nd)), \"clusters\": prng.random(npts)}\n with pytest.raises(\n ValueError,\n match=re.escape(\"`clusters` must contain integer types\"),\n ):\n clumerge(ds)\n\n # Data item contains fields with different sizes (npts != npts / 2)\n nd = 2\n npts = prng.integers(10, high=101)\n ds = {\n \"points\": prng.random((npts, nd)),\n \"clusters\": prng.integers(1, high=6, size=npts // 2),\n }\n with pytest.raises(\n ValueError,\n match=r\"Data item contains fields with different sizes \\([0-9]+ != [0-9]+\\)\",\n ):\n clumerge(ds)\n\n # Dimension mismatch in field `points`\n nd1 = 2\n nd2 = 3\n npts = prng.integers(10, high=101)\n ds1 = {\n \"points\": prng.random((npts, nd1)),\n \"clusters\": prng.integers(1, high=6, size=npts),\n }\n ds2 = {\n \"points\": prng.random((npts, nd2)),\n \"clusters\": prng.integers(1, high=6, size=npts),\n }\n with pytest.raises(\n ValueError,\n match=re.escape(\"Dimension mismatch in field `points`\"),\n ):\n clumerge(ds1, ds2)" ]
[ "0.7217243", "0.7193178", "0.6957775", "0.68198645", "0.6498388", "0.6485671", "0.63076574", "0.6105469", "0.58576494", "0.5822482", "0.57532716", "0.57530195", "0.5725821", "0.56677824", "0.5644975", "0.5638798", "0.5572163", "0.5566992", "0.5561028", "0.5558914", "0.5509184", "0.54428643", "0.54236645", "0.54004943", "0.5400248", "0.536194", "0.53423", "0.5340234", "0.5337365", "0.5331542", "0.53239185", "0.53061986", "0.52940875", "0.52760535", "0.5241881", "0.52055", "0.52035034", "0.51926553", "0.5175868", "0.51581544", "0.5156555", "0.51354253", "0.51273227", "0.5119725", "0.50778604", "0.50572425", "0.50420773", "0.50408196", "0.49948844", "0.49914283", "0.49812287", "0.49592716", "0.49516863", "0.49503434", "0.4949139", "0.49437508", "0.4931055", "0.4924687", "0.4924674", "0.49161598", "0.49089283", "0.4907819", "0.4883887", "0.4879093", "0.48675054", "0.48452437", "0.48431602", "0.48425987", "0.48376048", "0.48371243", "0.483458", "0.48322937", "0.4822326", "0.482173", "0.48053524", "0.48034394", "0.4802909", "0.48002106", "0.4799892", "0.4799892", "0.47937673", "0.4792995", "0.4792714", "0.47862476", "0.4783688", "0.47652498", "0.4764758", "0.4763761", "0.47584432", "0.4756643", "0.47555307", "0.47536623", "0.47443584", "0.47405997", "0.47376135", "0.47263563", "0.47262838", "0.47215176", "0.4719629", "0.47194302" ]
0.75408363
0
If an iteriter_op returns something besides an iterator as output, raise a ValueError.
def test_iteriter_op_3(): @ops.iteriter_op def f(x): return [4, 5, 6] # Returning a list instead of an iterator with pytest.raises(ValueError): result = f(iter([1, 2, 3]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def test_iter(\n self, start: Result[int, int], exp: t.Tuple[int, ...]\n ) -> None:\n assert tuple(start.iter()) == exp", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def safe_iterator(i):\n return i or []", "def _iterator_unknown_size(self) -> Iterator[int]:\n raise NotImplementedError", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n 
else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def __iter__(self):\n return NotImplemented", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def is_iterator(x):\n if sys.version_info >= (2, 7):\n return isinstance(x, collections.Iterator)\n return isinstance(x, collections.Iterator) and hasattr(x, '__iter__')", "def testIterWithException(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.__iter__()\n\t\tc.setReturn(1)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ti = x.__iter__()\n\t\tself.failUnless(i.next() == 1)\n\t\tself.failUnlessRaises(Exception, i.next)", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def _next_exhausted(self):\n\n raise StopIteration() from None", "def EOF_or_raise(f):\n try:\n f.next()\n except StopIteration:\n return\n else:\n raise Exception(str(f))", "def is_iterator(obj):\n return isinstance(obj, collections.Iterator)", "def __iter__(self):\n return iter(())", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def _iter_return(tup, fxn, invalid_error):\n\n try:\n val = fxn(*tup)\n except (IndexError, ValueError):\n if invalid_error:\n # Raise the exception if invalid_error indicates\n raise\n else:\n # Otherwise, just return a 'None' value\n return None\n ## end if\n else:\n # Good value; just generate it\n return val\n ## end try", "def isiter(obj):\r\n \r\n try:\r\n iter(obj)\r\n return True\r\n except TypeError:\r\n pass\r\n return False", "def __iter__(self):\n raise NotImplementedError(\"__iter__\")", "def test_collect(\n self, iterable: t.Iterable[Result[int, str]], exp: Result[int, str]\n ) -> None:\n assert Result.collect(iterable) == exp", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def is_iterator(obj):\n cls = obj.__class__\n return hasattr(cls, '__next__') and not hasattr(cls, '__len__')", "def is_iterable(obj):\n return isinstance(obj, (list, tuple, types.GeneratorType)) or \\\n (not isinstance(obj, (int, str, dict)) and\n bool(getattr(obj, \"next\", False)))", "def next(space, w_iterator, w_default=None):\n try:\n return space.next(w_iterator)\n except OperationError as e:\n if w_default is not None and e.match(space, space.w_StopIteration):\n return w_default\n raise", "def test_make_np_iterable_type_error():\n with pytest.raises(TypeError):\n _ = uc._make_np_iterable(\"s\")", "def test_Validator_iter_errors_two_arguments(self):\n\n validator = validators.Draft7Validator({})\n with self.assertWarns(DeprecationWarning) as w:\n error, = validator.iter_errors(\"foo\", {\"type\": \"number\"})\n\n self.assertEqual(error.validator, \"type\")\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Passing a schema to Validator.iter_errors is deprecated \",\n ),\n )", "def is_iterable(obj):\n try:\n itr = iter(obj)\n del itr\n return True\n except:\n return False", "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def get_next_as_optional(iterator):\n return iterator.get_next_as_optional()", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def __iter__(self):\n raise NotImplementedError", "def test_iterator_input():\n empty_iterator = iter(())\n transactions = empty_iterator\n itemsets, rules = apriori(transactions, 0.2, 0.2)\n assert itemsets == {} and rules == []\n\n transactions = [(1, 2), (1, 2), (1, 3), (1, 4), (1, 3)]\n transactions_iter = iter(transactions)\n itemsets1, rules1 = apriori(transactions_iter, 0.2, 1)\n itemsets2, rules2 = apriori(transactions, 0.2, 1)\n assert len(rules1) == len(rules2)\n for i in range(len(rules1)):\n assert rules1[i] == rules2[i]", "def test_enforce_iterable():\n formatter = TabularOutputFormatter()\n loremipsum = (\n \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod\".split(\n \" \"\n )\n )\n\n for format_name in formatter.supported_formats:\n formatter.format_name = format_name\n try:\n formatted = next(formatter.format_output(zip(loremipsum), [\"lorem\"]))\n except TypeError:\n assert False, \"{0} doesn't return iterable\".format(format_name)", "def is_iterable(value):\n # noinspection PyUnresolvedReferences\n return hasattr(value, '__iter__') and hasattr(value, '__getitem__')", "def check_throw():\n while True:\n try:\n yield\n except ValueError:\n pass", "def testExplicitGeneratorConvenienceFunctionExceptionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.generator(x.g(8, 9), [10], Exception(\"bogus\"))\n\t\tc.replay()\n\t\tg = x.g(8, 9)\n\t\tself.failUnless(g.next() == 10)\n\t\tself.failUnlessRaises(Exception, g.next)", "def _to_int(maybe_iter):\n if not isinstance(maybe_iter, str) and isinstance(maybe_iter, abc.Iterable):\n return tuple([_to_int(a) for a in maybe_iter])\n try:\n return int(maybe_iter)\n except ValueError:\n return maybe_iter", "def next(self, in_op):\n raise NotImplementedError", "def get_next_as_optional(self):\n raise NotImplementedError(\"Iterator.get_next_as_optional()\")", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either 
Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def test_collect_short_circuits(self) -> None:\n until_err: t.List[Result[int, str]] = [Ok(1), Ok(2), Err(\"no\")]\n\n def _iterable() -> t.Iterable[Result[int, str]]:\n yield from until_err\n # If we continue iterating after the err, we will raise a\n # runtime Error.\n assert False, \"Result.collect() did not short circuit on err!\"\n\n assert Result.collect(_iterable()) == Err(\"no\")", "def stop():\n raise StopIteration", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def test_iter_operation(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n year_iter = ar.__iter__()\n self.assertTrue(isinstance(year_iter, types.GeneratorType))", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def nonstringiter(obj):\n return not isinstance(obj, string_types) and isinstance(obj, Iterable)", "def test_stop_iteration_in_generators_yield_from(\n assert_errors,\n parse_ast_tree,\n code,\n statement,\n exception,\n default_options,\n):\n tree = parse_ast_tree(code.format(statement, exception))\n\n visitor = FunctionDefinitionVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [StopIterationInsideGeneratorViolation])", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def is_iterator(obj: Any) -> bool:\n if isinstance(obj, collections.abc.Iterator):\n return True\n else:\n try:\n return iter(obj) is obj\n except TypeError:\n return False", "def is_iterator(self):\n return self._is_iterator", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def test_stop_iteration_inside_generators(\n assert_errors,\n parse_ast_tree,\n code,\n statement,\n exception,\n default_options,\n mode,\n):\n tree = parse_ast_tree(mode(code.format(statement, exception)))\n\n visitor = FunctionDefinitionVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [StopIterationInsideGeneratorViolation])", "def test_repr_not_called(self):\n class x(object):\n def __repr__(self):\n raise StopIteration('repr should not be called')\n\n try:\n sys.exit((x(), x()))\n except SystemExit:\n pass", "def _is_iterable_non_string(arg):\n return (hasattr(arg, \"__iter__\") or hasattr(arg, \"__getattr__\")) and not isinstance(arg, str)", "def __iter__(self):\n\n raise NotImplementedError()", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", "def test_iter(self):\n obs = self.tester.__iter__()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", "def yield_in_except_throw_exc_type():\n try:\n raise ValueError\n except ValueError as exc:\n assert sys.exc_info()[1] is exc, sys.exc_info()\n yield\n assert sys.exc_info()[1] is exc, sys.exc_info()", "def __ne__(self, *args):\n return _libsbml.SwigPyIterator___ne__(self, *args)", "def test_invoke_processor_errors():\n\n def processor(app, documents):\n raise ValueError(\"something bad 
happened\")\n yield\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\"}])\n\n stream = testapp.invoke(\"test\")\n\n with pytest.raises(ValueError, match=r\"^something bad happened$\"):\n next(stream)\n\n with pytest.raises(StopIteration):\n next(stream)", "def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception as e:\n pass\n\n return False", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def __ne__(self, *args):\n return _SALOMERuntime.SALOMERuntime_PySwigIterator___ne__(self, *args)", "def __ne__(self, *args):\n return _ida_hexrays.qlist_cinsn_t_iterator___ne__(self, *args)", "def is_iterable(x: Any) -> bool:\r\n return isinstance(x, collections.abc.Iterable) and not isinstance(x, (str, bytes))", "def __ne__(self, *args):\n return _ida_hexrays.udcall_map_iterator_t___ne__(self, *args)", "def __ne__(self, *args):\n return _ida_hexrays.user_unions_iterator_t___ne__(self, *args)", "def __ne__(self, *args):\n return _ida_hexrays.user_cmts_iterator_t___ne__(self, *args)", "def get_next(self):\n raise NotImplementedError(\"Iterator.get_next()\")", "def test_invoke_error():\n\n with pywren.invokers.LocalInvoker(\"/tmp/task\") as iv:\n\n wrenexec = pywren.local_executor(iv)\n\n with pywrenext.iterwren.IterExec(wrenexec) as IE:\n\n iter_futures = IE.map(except_func, 10, [2])\n print(\"mapped\")\n pywrenext.iterwren.wait_exec(IE)\n assert iter_futures[0].current_iter == 2", "def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return", "def __iter__(self):\n raise Exception(\"Don't iterate this! Did you pass this to intersect without putting it in a list?\")", "def test_invalid(self):\n a = np.ones((10, 10))\n ai = np.ones((10, 2), dtype=np.intp)\n\n # sanity check\n take_along_axis(a, ai, axis=1)\n\n # not enough indices\n assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)\n # bool arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)\n # float arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)\n # invalid axis\n assert_raises(AxisError, take_along_axis, a, ai, axis=10)", "def is_iterable(element):\n return isinstance(element, (set, list, tuple))", "def is_iterable(x):\n if isinstance(x, six.string_types):\n return False\n return hasattr(x, '__iter__')", "def yield_in_except_throw_instance():\n try:\n raise ValueError\n except ValueError as exc:\n assert sys.exc_info()[1] is exc, sys.exc_info()\n yield\n assert sys.exc_info()[1] is exc, sys.exc_info()", "def flow_to_iter(flow):\n if ((sys.version_info.major == 3 and hasattr(flow, \"__next__\"))\n or (sys.version_info.major == 2 and hasattr(flow, \"next\"))):\n return flow\n else:\n return iter(flow)", "def _is_good_iterable(obj):\n return _is_iterable(obj) and _has_str_elems(obj)" ]
[ "0.76426315", "0.73116994", "0.7197692", "0.69156325", "0.6845044", "0.6789451", "0.63753885", "0.6361283", "0.62555766", "0.6095181", "0.6085253", "0.59735376", "0.5929287", "0.5857033", "0.58470154", "0.58267", "0.57885355", "0.57856303", "0.57807213", "0.576038", "0.5737208", "0.57153285", "0.5705049", "0.56767434", "0.5663816", "0.5636932", "0.5633784", "0.5604108", "0.5601878", "0.5542627", "0.552298", "0.55098665", "0.5493738", "0.54704064", "0.54444253", "0.54270947", "0.5417239", "0.53816146", "0.53631306", "0.5345524", "0.5342955", "0.53246075", "0.5313499", "0.5311701", "0.53026325", "0.5295344", "0.52783024", "0.5270594", "0.5270594", "0.5270594", "0.5270594", "0.5270594", "0.5270161", "0.526127", "0.52602816", "0.52543366", "0.5231081", "0.5223035", "0.5217481", "0.52048624", "0.5196329", "0.51939225", "0.51858515", "0.51851636", "0.51851636", "0.51815283", "0.51814955", "0.5166615", "0.5164163", "0.51614624", "0.51451236", "0.5141071", "0.5133578", "0.5129939", "0.5128298", "0.51192236", "0.51142544", "0.510601", "0.510601", "0.5085783", "0.5085174", "0.5076046", "0.5072182", "0.5069567", "0.5067351", "0.50640064", "0.50591713", "0.5057657", "0.5057508", "0.5055407", "0.50477326", "0.5043054", "0.50428593", "0.5041838", "0.5041029", "0.50401276", "0.5040106", "0.5037976", "0.5028907", "0.5026386" ]
0.7515847
1
If a listlist_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output.
def test_listlist_op_1(): @ops.listlist_op def f(x): return [4, 5, 6] result = f([1, 2, 3]) # Passing in a list, as expected assert(isinstance(result, list)), f"{result}" assert(result == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def get_list_dep() -> Callable:\n args 
= []\n body = [\" r = {}\"]\n # Apply list ops as annotations\n for list_op in self.list_ops:\n args += [f\"{list_op.name}: Optional[List[str]] = Query(None)\"]\n body += [\n f\" if {list_op.name} is not None:\",\n f' r[\"{list_op.name}\"] = {list_op.name}',\n ]\n code = [f\"def inner({', '.join(args)}) -> dict:\"] + body + [\" return r\"]\n r = {\"Optional\": typing.Optional, \"List\": typing.List, \"Query\": Query}\n exec(\"\\n\".join(code), {}, r)\n return r[\"inner\"]", "def test_returns_list(self):\n metrics = ('input', 'output')\n\n @callback_return(*metrics)\n def returns_list():\n return [2, 1, 3]\n\n r = returns_list()\n self.assertEqual(len(metrics), len(r.keys()), 'Extra return values should be dropped.')\n self.assertEqual(2, r['input'])\n self.assertEqual(1, r['output'])\n self.assertNotIn('extra', r)", "def decorator(arg):\n return lambda: list(arg)", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def ListMonad(*elements: List[T]) -> _List[T]: # pylint: disable=invalid-name\n\n return _List(list(elements), None)", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def lists_equal_length(func):\n # Define the wrapper function.\n def wrapper(self, *args, **kwargs):\n\n # Collect all `list` objects from `args`.\n lists_args = [arg for arg in args if isinstance(arg, list)]\n # Collecgt all `list` object from `kwargs`.\n lists_kwargs = [arg for arg in kwargs.values() if isinstance(arg, list)]\n # Concatenate the lists of `list` objects.\n lists = lists_args + lists_kwargs\n\n # Check whether all the `list` objects have the same length.\n do_have_same_length = len(set(map(len, lists))) == 1\n\n # Raise an `InvalidArgumentsError` exception if there's a length\n # mismatch.\n if not do_have_same_length:\n msg_fmt = \"The argument lists must have the same length.\"\n raise InvalidArgumentsError(msg_fmt)\n\n # Simply execute the decorated method with the provided arguments\n # and return the result.\n return func(self, *args, **kwargs)\n\n return wrapper", "def cast_to_list(position):\n\n\[email protected]\n\tdef wrapper(function, instance, args, kwargs):\n\t\tif not isinstance(args[position], list):\n\t\t\targs = list(args)\n\t\t\targs[position] = [args[position]]\n\t\t\targs = tuple(args)\n\n\t\treturn function(*args, **kwargs)\n\n\treturn wrapper", "def list_wrap(spec):\n if not isinstance(spec, list):\n spec = [spec]\n return spec", "def __noop_list(self, *args, **kwargs):\n return []", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n output = input[0]\n if not isinstance(output, list):\n output = [output]\n else:\n output = list(input)\n return output", "def _convert_to_list(self, input_argument):\n if type(input_argument) is not list:\n input_argument = 
[input_argument]\n return input_argument", "def list_response(wrapped):\n\n @wraps(wrapped)\n def decorated(request, *args, **kwargs):\n number_of_items = None\n offset = None\n if 'numberofitems' in request['args']:\n if request['args']['numberofitems'].isnumeric():\n number_of_items = int(request['args']['numberofitems'])\n else:\n raise UserException(ERROR_NUMERIC_REQUIRED % 'numberOfItems')\n if 'offset' in request['args']:\n if request['args']['offset'].isnumeric():\n offset = int(request['args']['offset'])\n else:\n raise UserException(ERROR_NUMERIC_REQUIRED % 'offset')\n\n if number_of_items is not None and offset is not None:\n return wrapped(request, number_of_items, offset)\n elif number_of_items is not None:\n return wrapped(request, number_of_items)\n elif offset is not None:\n return wrapped(request, offset=offset)\n else:\n return wrapped(request)\n\n return decorated", "def give_me_a_list():\n my_list=[1,2,3,4]\n return my_list\n pass", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def aslist(something):\n return something if isinstance(something, list) else [something]", "def operationListReturn(self, a, b, c, operation):\n assert len(a) == len(b) == len(c), 'Length mismatch'\n for i in range(len(a)):\n if isinstance(a[i], list): self.operationListReturn(a[i], b[i], c[i], operation)\n else: c[i] = operation(a[i],b[i])\n return c", "def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func", "def response_list(input_list):\n if isinstance(input_list, dict):\n input_list = [input_list]\n\n return input_list", "def list() -> List:\n pass", "def safelist(listable):\n if type(listable) == str:\n return [listable]\n else:\n return listable.tolist()", "def handleList(self, _): # pylint: disable=invalid-name", "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def makelist(input):\n if isinstance(input, list) or isinstance(input, np.array):\n output = input\n else:\n output = [input]\n return output", "def test_pos_operate_with_list_operations_different_datatypes(self, list, result, bin, expected):\n key = (\"test\", \"demo\", \"list_key\")\n\n key, _, bins = self.as_connection.operate(key, list)\n\n assert bins == result\n\n key, _, bins = self.as_connection.get(key)\n\n assert bins[bin] == expected", "def wrapped_func(ret_val, *args, **kwargs):\n val = func(*args, **kwargs)\n ret_val.append(val)", "def is_list(value):\n return isinstance(value, list) or None", "def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))", "def _to_list( self, input ):\n import numpy\n listtypes = (list, tuple, numpy.ndarray)\n if input == None:\n return None\n elif type(input) in listtypes:\n return list(input)\n else:\n return [input]", "def operates_on_task_list(fn):\n @dnzo_login_required\n def task_wrapper(self, dnzo_user, task_list_name, *args):\n from tasks_data.task_lists import get_task_list\n task_list = get_task_list(dnzo_user, 
task_list_name)\n if not task_list or task_list.deleted:\n self.not_found()\n else:\n fn(self, dnzo_user, task_list, *args)\n return task_wrapper", "def to_list(x):\n if isinstance(x, list):\n return x\n return [x]", "def listify(supposed_lst:object=None):\n\tif (supposed_lst is not None):\n\t\tif (not isinstance(supposed_lst, list)):\n\t\t\tsupposed_lst = [supposed_lst]\n\t\t# If it was already a list, check it for emptiness and `None`.\n\t\telif (isinstance(supposed_lst, list)):\n\t\t\tif (not supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t\t\tif (None in supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t# Allow `is None` to pass through because we need it to trigger null conditions.\n\treturn supposed_lst", "def as_list_or_tuple(\n use_list: bool, use_tuple: bool, outputs: Union[V, Sequence[V]]\n) -> Union[V, List[V], Tuple[V, ...]]:\n if use_list and use_tuple:\n raise ValueError(\"Both flags cannot be simultaneously True\")\n\n if use_list or use_tuple:\n if isinstance(outputs, Sequence):\n if use_list:\n return list(outputs)\n else:\n return tuple(outputs)\n else:\n if use_list:\n return [outputs]\n else:\n return (outputs,)\n else:\n if isinstance(outputs, Sequence):\n if len(outputs) != 1:\n raise ValueError(\"Wrong arguments; expected a one element list\")\n return outputs[0]\n else:\n return outputs", "def test_pos_operate_with_list_addition_operations(self, list, result, bin, expected):\n key = (\"test\", \"demo\", \"list_key\")\n\n key, _, bins = self.as_connection.operate(key, list)\n\n assert bins == result\n\n key, _, bins = self.as_connection.get(key)\n\n assert bins[bin] == expected", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def listify(obj):\n if obj is None:\n # When the object is None, an empty list will be returned\n return []\n elif isinstance(obj, list):\n # When the object is already a list, that list will be returned\n return obj\n\n # When a single object is passed to the method, a list with the\n # object as single item will be returned\n return [obj]", "def maplist(f, xs):\n return list(map(f, xs))", "def svn_client_invoke_list_func(svn_client_list_func_t__obj, void_baton, char_path, svn_dirent_t_dirent, svn_lock_t_lock, char_abs_path, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def __call__(self, X, Y=None, eval_gradient=False):\n return [f(X, Y=Y, eval_gradient=eval_gradient) for f in self.list_func]", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def _is_list(val):\n\n return isinstance(val, list)", "def _as_list(value):\n if not isinstance(value, list):\n value = [value]\n return value", "def apply_function_to_nested_list(func, l):\n from 
itertools import chain\n result = func(list(chain(*l)))\n csum = np.cumsum(map(len, l))\n new_l = [result[(0 if i == 0 else csum[i-1]):csum[i]] for i in range(len(l))]\n return new_l", "def test_pos_operate_with_list_trim_val_with_negative_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_TRIM, \"bin\": \"int_bin\", \"index\": 1, \"val\": -9}]\n\n (key, meta, bins) = self.as_connection.operate(key, list)\n\n (key, meta, bins) = self.as_connection.get(key)\n\n assert bins[\"int_bin\"] == [2, 3, 4]", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result", "def test_return_negative_numbers_from_lst():\n assert return_negative_numbers_from_lst([-1, 0, 1, -23, 4]) == [-1, -23]\n assert return_negative_numbers_from_lst([0]) == []\n assert return_negative_numbers_from_lst([2, 3, 17]) == []\n assert return_negative_numbers_from_lst([-2, -3, -17]) == [-2, -3, -17]", "def listify(x):\n\n if isinstance(x, list):\n return x\n elif isinstance(x, tuple):\n return list(x)\n else:\n return [x]", "def _operate_recursive(\n function: Callable[..., V], iterables: RecursiveIterable[V], result: RecursiveList[V]\n) -> RecursiveList[V]:\n for items in zip(*iterables): # type: ignore\n if any(isinstance(item, Iterable) for item in items): # pylint: disable=W1116\n sub_result = [] # type: ignore\n _operate_recursive(function, items, sub_result)\n else:\n sub_result = function(*items) # type: ignore\n result.append(sub_result)\n return result", "def operate_recursive(function: Callable[..., V], *iterables: RecursiveIterable[V]) -> RecursiveList[V]:\n return _operate_recursive(function, iterables, [])", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def zzX_value(l, f):\n if type(f) is not list:\n return zzX_const(l, f)\n else:\n if not l:\n return f\n else:\n return [zzX_value(l-1, f)]", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def _is_list(item):\n return isinstance(item, list)", "def _set_listonly(self, value):\n if not value and self.__listonly:\n self.__listonly = False\n self.clear_preprocessed()", "def is_list(value):\n return isinstance(value, list)", "def test_pos_operate_with_list_remove_operations(self, list, bin, expected):\n key = (\"test\", \"demo\", \"list_key\")\n\n self.as_connection.operate(key, list)\n\n key, _, bins = self.as_connection.get(key)\n\n assert bins[bin] == expected", "def __call__(self, items: List[Item]) -> List[Item]:", "def to_list():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n l = []\n try:\n while True:\n l.append((yield))\n except GeneratorExit:\n target.send(l) \n target.close()\n\n return _dagpype_internal_fn_act", "def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:\n rv = self.visit(node, *args, **kwargs)\n\n if not isinstance(rv, list):\n return [rv]\n\n return rv", "def list_ref(s_list, i):\n require_type(isa(s_list,List), 'parameters of list-ref must be a list')\n return s_list[i]", "def convert_old_style_list(list_):\n if not isinstance(list_, 
(tuple, list)) or len(list_) != 2:\n return list_, False\n first_item, second_item = list_\n if second_item == []:\n return [first_item], True\n try:\n # see if second item is iterable\n iter(second_item)\n except TypeError:\n return list_, False\n old_style_list = True\n new_second_item = []\n for sublist in second_item:\n item, old_style_list = convert_old_style_list(sublist)\n if not old_style_list:\n break\n new_second_item.extend(item)\n if old_style_list:\n second_item = new_second_item\n return [first_item, second_item], old_style_list", "def test_ip_lists_get_command_for_success(mock_client, ip_lists_success, monkeypatch):\n monkeypatch.setattr(\n illumio.pce.PolicyComputeEngine._PCEObjectAPI,\n \"get\",\n lambda *a, **k: [IPList.from_json(ip_list) for ip_list in ip_lists_success],\n )\n resp = ip_lists_get_command(\n mock_client,\n {\n \"max_results\": \"1\",\n \"fqdn\": \"app\",\n \"ip_address\": \"127.0.0.1\",\n \"name\": \"a\",\n \"description\": \"a\",\n },\n )\n\n assert resp.raw_response == remove_empty_elements(ip_lists_success)", "def tolist(x):\n return x if isinstance(x, list) else [x]", "def make_list( elements ):\n if isinstance(elements, (list, tuple)):\n return elements\n else:\n return [elements]", "def add_to_list(the_list, value):\n return the_list", "def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):", "def _wrapper(func, args):\n return func(*args)", "def reverse_list_mutate(int_list: Optional[List]) -> None:\r\n if isinstance(int_list, list):\r\n if len(int_list) == 0:\r\n int_list = int_list\r\n elif len(int_list) > 0:\r\n mod_list = int_list[::-1]\r\n for i in range(len(mod_list)):\r\n int_list[i] = mod_list[i]\r\n else:\r\n raise ValueError", "def convert_to_list(item):\n return item if item is None or isinstance(item, list) else [item]", "def listify(obj):\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]", "def empty_list(*args):\n return []", "def apply(L, f):\n\n result = []\n for i in L:\n result.append(f(i))\n\n return result", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator" ]
[ "0.7166146", "0.71624446", "0.7014189", "0.6786135", "0.6684333", "0.6671589", "0.6536824", "0.6533272", "0.6519165", "0.64344203", "0.6429467", "0.64098346", "0.6329758", "0.6261592", "0.6214731", "0.61999166", "0.6112252", "0.6028815", "0.6003266", "0.5961093", "0.5949775", "0.5941206", "0.588483", "0.58796406", "0.5859028", "0.5828931", "0.58254296", "0.57948834", "0.5788565", "0.57205945", "0.5702837", "0.56976765", "0.56212145", "0.5614894", "0.5609092", "0.56061375", "0.56040305", "0.55758643", "0.5561176", "0.5534481", "0.5501894", "0.5496825", "0.5484833", "0.5482509", "0.5471159", "0.5471159", "0.54697305", "0.5468929", "0.54507905", "0.54482955", "0.54481864", "0.542075", "0.5403652", "0.5382406", "0.5360071", "0.53593284", "0.53485006", "0.5339183", "0.53294307", "0.5327028", "0.53056115", "0.5304071", "0.5301213", "0.5292395", "0.5292395", "0.5290304", "0.5280675", "0.5275581", "0.5265299", "0.5263499", "0.5262425", "0.52513605", "0.52497894", "0.52471375", "0.5222374", "0.5210396", "0.5194949", "0.51869446", "0.51753443", "0.51689327", "0.5157968", "0.515064", "0.5134745", "0.5133842", "0.51254135", "0.51238126", "0.5120003", "0.5119932", "0.51156515", "0.5114111", "0.51117116", "0.51018286", "0.5101178", "0.50944", "0.5086148", "0.5083284", "0.5082061", "0.50677276", "0.5062743", "0.50621325" ]
0.77922696
0
If a listlist_op is given something besides a list as input, raise a ValueError.
def test_listlist_op_2(): @ops.listlist_op def f(x): return [4, 5, 6] with pytest.raises(ValueError): f(iter([1, 2, 3])) # Passing in an iterator instead of an list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def _is_list(val):\n\n return isinstance(val, list)", "def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)", "def test_llist_no_parameter_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.add()\n\n assert \"Required argument 'value' (pos 1) not found\" in typeError.value", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n 
for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def _is_list(item):\n return isinstance(item, list)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def test_neg_operate_list_operation_bin_notlist(self):\n key = (\"test\", \"demo\", 1)\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"age\", \"index\": 2, \"val\": 9}]\n\n try:\n (key, _, _) = self.as_connection.operate(key, list)\n\n except e.BinIncompatibleType as exception:\n assert exception.code == 12", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def is_list(value):\n return isinstance(value, list)", "def is_list(value):\n return isinstance(value, list) or None", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def value_error(var, _list):\n\n #if not any(r):\n if len(_list) == 2:\n divisor = \" or \"\n elif len(_list) > 2:\n divisor = \", \"\n\n print(_list)\n print(len(_list))\n raise ValueError(\"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n _list)), var_type=var))", "def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def test_creation_list():\n with pytest.raises(ValueError) as __:\n value = list()\n __ = param.Integer(value=value)", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def test_validate_positive_integer_list():\n with pytest.raises(ValueError):\n validate_positive_integer_list(0.5, 1)\n\n with pytest.raises(ValueError):\n 
validate_positive_integer_list([0.5, 0, 5], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([1], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(0, 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(-1, 2)\n\n assert validate_positive_integer_list(1, 2) == [1, 1]", "def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)", "def _require_listlike(level, arr, arrname: str):\n if level is not None and not is_list_like(level):\n if not is_list_like(arr):\n raise TypeError(f\"{arrname} must be list-like\")\n if len(arr) > 0 and is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list-like\")\n level = [level]\n arr = [arr]\n elif level is None or is_list_like(level):\n if not is_list_like(arr) or not is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list of lists-like\")\n return level, arr", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def test_graph_with_list_fail():\n try:\n node_list = [\"slippery list\"]\n node_list.append(Node({'A':['B','C']}))\n node_list.append(Node({'B':['C','D']}))\n node_list.append(Node({'C':['D']}))\n node_list.append(Node({'D':['C']}))\n g = Graph(node_list)\n except Exception as e:\n assert str(e) == 'invalid node provided!'", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False", "def isList(memoryManager, paramsList):\n if isEmptyList(paramsList):\n return [1.0]\n A = paramsList[0]\n if validateList(A):\n return [0.0] if len(A) <= 1 else [1.0]\n return [0.0]", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def _check_multilabel_list(obj: Any):\n if not isinstance(obj, list):\n raise TypeError(f\"obj must be a list, got '{type(obj)}'\")\n\n if len(obj) > 0:\n if not isinstance(obj[0], list):\n raise TypeError(f\"obj must contain lists, got '{type(obj[0])}'\")\n\n if len(obj[0]) > 0:\n if not isinstance(obj[0][0], str):\n raise TypeError(\n f\"obj must contain lists of strings, got lists of '{type(obj[0][0])}'\"\n )", "def __relational_restriction_incorrect_list_vs_list(self):\n strTestName = 'List lower than a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'List reference parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('lParameter1', 'List parameter')\n RxCSObject.paramType('lParameter1', list)\n RxCSObject.paramL('lParameter1', 'lRefParameter1')\n\n RxCSObject.lRefParameter1 = [5, 10, 15, 20]\n RxCSObject.lParameter1 = [4, 11, 16, 21]\n\n self.__parametersCheck_error(RxCSObject, RelationalError, strTestName)", "def test_pos_operate_with_list_insert_index_negative(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"int_bin\", \"index\": -2, \"val\": 9}]\n\n (key, meta, bins) = self.as_connection.operate(key, list)\n\n (key, meta, bins) = self.as_connection.get(key)\n\n assert bins[\"int_bin\"] == [1, 2, 9, 3, 4]", "def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))", "def is_list(s_list):\n return isa(s_list, List)", "def test_list_increment_with_missing_bin(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def is_list_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=list)", "def is_list(self) -> bool:\n return False", "def __relational_restriction_incorrect_list_vs_number(self):\n strTestName = 'List lower than a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('lParameter1', 'List parameter')\n RxCSObject.paramType('lParameter1', list)\n RxCSObject.paramL('lParameter1', 1)\n\n RxCSObject.lParameter1 = [3, 8, 9, 11, 3, 5, 7, 101]\n\n self.__parametersCheck_error(RxCSObject, RelationalError, strTestName)", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def test_add_lists():\n #Test adding integers\n assert [2, 2] == add_lists ([0, 1], [2, 1])\n \n #Test adding floats\n assert [1.5, 2.0] == add_lists([1.0, 1.5], [0.5, 0.5])\n \n #Test adding strings\n assert ['red apple', 'yellow banana'] == add_lists(['red ', 'yellow '], ['apple', 'banana'])\n \n #Test adding negative numbers\n assert [-4, -9] == add_lists([5, -4], [-9, -5])\n \n #Test the function returns a list\n assert isinstance(add_lists([0, 1], [2, 1]), list)", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def __NDim_restriction_incorrect_list_parameter(self):\n\n 
strTestName = 'The number of dimensions in a list lower or equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list parameter\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramNDimLE('parameter1', 'iRefParameter1', mul=0.5, add=-1)\n\n RxCSObject.iRefParameter1 = 2\n RxCSObject.parameter1 = [4, 2, 11, -1, -4]\n\n self.__parametersCheck_error(RxCSObject, NDimError, strTestName)", "def is_list ( self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )", "def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))", "def _list4_validator(_: object, attrib: 'attrs.Attribute[List[Vec]]', value: object) -> None:\n if not isinstance(value, list):\n raise TypeError(attrib.name + ' should be a list!')\n if len(value) != 4:\n raise ValueError(attrib.name + ' must have 4 values!')", "def _check_args(self, args):\n if not isinstance(args, list) or not len(args) >= 2:\n raise FunctionArgumentException(\"Argument of attribute getter \"\n \"function '%s' must be a list of \"\n \"indeces; got: '%s'\" % (\n self.name,\n args\n ))\n\n if not is_homogeneous(args, (str, int)):\n raise FunctionArgumentException(\n \"'%s': argument must be a list of strings; got: '%s'\" %\n (self.name, args)\n )", "def test_fromlist(self):\n\n self.assertRaises(TypeError, self.hw, [])", "def validate(self, list_value):\n errors = DataDefinition.validate(self, list_value)\n if errors:\n return errors\n\n for index, value in enumerate(list_value):\n errors = self.element_type.validate(value)\n if errors:\n msg = message_factory.get_message(\n 'vapi.data.list.invalid.entry',\n str(value), index)\n return [msg] + errors\n\n return None", "def is_list_of_list(self) -> bool:\n return bool(AnnotationWrapper.list_of_list_re.match(self.data))", "def test_missing_generic_args(self):\n import System\n #TODO specify clearly which exception is appropriate here\n self.assertRaises(Exception, System.Collections.Generic.List)", "def test_pos_operate_with_list_operations_different_datatypes(self, list, result, bin, expected):\n key = (\"test\", \"demo\", \"list_key\")\n\n key, _, bins = self.as_connection.operate(key, list)\n\n assert bins == result\n\n key, _, bins = self.as_connection.get(key)\n\n assert bins[bin] == expected", "def __relational_restriction_correct_list_vs_list(self):\n strTestName = 'List higher than a list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'List reference parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('lParameter1', 'List parameter')\n RxCSObject.paramType('lParameter1', list)\n RxCSObject.paramH('lParameter1', 'lRefParameter1')\n\n RxCSObject.lRefParameter1 = [5, 10, 15, 20]\n RxCSObject.lParameter1 = [6, 11, 16, 21]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def _validate_nested_list_type(self, name, obj, nested_level, *args):\n if nested_level <= 1:\n self._validate_list_type(name, obj, *args)\n else:\n if obj is None:\n return\n if not isinstance(obj, list):\n raise TypeError(self.__class__.__name__ + '.' 
+ name + ' contains value of type ' +\n type(obj).__name__ + ' where a list is expected')\n for sub_obj in obj:\n self._validate_nested_list_type(name, sub_obj, nested_level - 1, *args)", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def test_raises_typeerror_if_rows_not_list(self):\n def result():\n return num_islands([{}, \"test\", 123])\n\n self.assertRaises(TypeError, result)", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def test_llist_get_element_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.get()", "def isList(self, item):\n\t retval = False\n\t if type(item) in (ListType, TupleType) :\n\t retval = True", "def listify(supposed_lst:object=None):\n\tif (supposed_lst is not None):\n\t\tif (not isinstance(supposed_lst, list)):\n\t\t\tsupposed_lst = [supposed_lst]\n\t\t# If it was already a list, check it for emptiness and `None`.\n\t\telif (isinstance(supposed_lst, list)):\n\t\t\tif (not supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t\t\tif (None in supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t# Allow `is None` to pass through because we need it to trigger null conditions.\n\treturn supposed_lst", "def __relational_restriction_incorrect_list_vs_parameter(self):\n strTestName = 'List lower than a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('lParameter1', 'List parameter')\n RxCSObject.paramType('lParameter1', list)\n RxCSObject.paramL('lParameter1', 'iRefParameter1', add=4)\n\n RxCSObject.iRefParameter1 = 0\n RxCSObject.lParameter1 = [3, 1, -9, 12, 2, 3, 0, -101]\n\n self.__parametersCheck_error(RxCSObject, RelationalError, strTestName)", "def test_pos_operate_with_list_get_range_val_out_of_bounds(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_GET_RANGE, \"bin\": \"int_bin\", \"index\": 2, \"val\": 9}]\n\n (key, meta, bins) = self.as_connection.operate(key, list)\n\n assert bins == {\"int_bin\": [3, 4]}", "def enterList(CustomMessage=\"Please enter a list: \", CustomErrorMessage=\"The input is not a valid list, please try again...\", ExplicitType=type):\r\n \r\n isList = False\r\n while isList == False:\r\n try:\r\n ls = []\r\n # we map 'ls' into 'ldict' to be used for exec()\r\n ldict = locals()\r\n\r\n print(CustomMessage)\r\n if ExplicitType != type:\r\n print(\" Please note that every element inside the list needs to be of type '\", TypeclassToString(ExplicitType), \"'\", sep=\"\")\r\n\r\n # Using exec(), we can convert the user's string input to a list.\r\n # Note the use of ldict as an argument\r\n # Remark: This method is potentially dangerous, as any code can be executed with the proper syntax. 
Consider limiting the length of the input.\r\n exec(\"ls = list(\"+input()+\")\", globals(), ldict)\r\n # Value assignments inside exec() will map onto ldict, which we need to transfer back to the original variables (ls)\r\n ls = ldict[\"ls\"]\r\n\r\n isList = True\r\n \r\n if ExplicitType == type:\r\n break\r\n\r\n # Convert to specified ExplicitType. A conversion error means that the user input is invalid.\r\n for i in range(0, len(ls)):\r\n ldict = locals()\r\n\r\n # Type constructors' names for built-in types are the same as the type's name.\r\n # e.g. constructor for type int is int())\r\n exec(\"ls[i] = \" + TypeclassToString(ExplicitType) + \"(ls[i])\", globals(), ldict)\r\n # We transfer 'ls's value from ldict back to the original list.\r\n ls = ldict[\"ls\"]\r\n except Exception:\r\n # If isList == True, but an exception is neverthless thrown, this means that an item inside the list does not have the correct type.\r\n # This is why we need to reset isList to false every time an exception is thrown.\r\n isList = False\r\n print(CustomErrorMessage)\r\n return ls", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def _validate_command(self):\n if not isinstance(self.command, list):\n raise securesystemslib.exceptions.FormatError(\n \"Invalid Link: field `command` must be of type list, got: {}\"\n .format(type(self.command)))", "def test_likelihoods_unequal_list_lengths(self):\r\n self.assertRaises(ValueError, likelihoods, [1, 2], [1])", "def test_badxvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, [1, 2], 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')", "def validate_X(X: List[str]):\n _check_string_list(X)", "def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False", "def __size_restriction_incorrect_list_list(self):\n\n strTestName = 'List size higher or equal to the size of other list (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List 1D parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizHE('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def validate_list(types,val,allowed,tname):\n if not len(types): return TYPE_MISMATCH\n if type(val) not in TYPES[tname]: raise Exception('unknown type')\n for v in val:\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True", "def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args", "def _convert_to_list(self, input_argument):\n if type(input_argument) is not list:\n input_argument = [input_argument]\n return input_argument", "def isList(x):\n \n return ( type(x) == list ) # True if the type of x is a list", "def test_pos_operate_with_list_trim_val_with_negative_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_TRIM, \"bin\": \"int_bin\", \"index\": 1, \"val\": -9}]\n\n (key, meta, bins) = self.as_connection.operate(key, list)\n\n (key, meta, bins) = self.as_connection.get(key)\n\n assert bins[\"int_bin\"] == [2, 3, 4]", "def _validate_node(self, node):\n if not isinstance(node, self._Node):\n raise TypeError('Invalid object type!')\n if node._container != self:\n raise ValueError('Node does not belong to this list!')\n if node._index < 0 or node._index >= self._size:\n raise ValueError('Invalid node!')", "def test_return_negative_numbers_from_lst():\n assert return_negative_numbers_from_lst([-1, 0, 1, -23, 4]) == [-1, -23]\n assert return_negative_numbers_from_lst([0]) == []\n assert return_negative_numbers_from_lst([2, 3, 17]) == []\n assert return_negative_numbers_from_lst([-2, -3, -17]) == [-2, -3, -17]", "def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()", "def test_badyvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, [1, 2], 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def checkInput(Matrix,List):\r\n \r\n if type(Matrix) != list or type(List) != list:\r\n \r\n raise RuntimeError('malformed')\r\n for k in Matrix:\r\n if type(k) != list:\r\n \r\n raise RuntimeError('malformed')\r\n if len(k) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n for j in k:\r\n if type(j) != int and type(j) != float:\r\n \r\n raise RuntimeError('malformed')\r\n if j > 30:\r\n \r\n raise RuntimeError('malformed')\r\n for p in List:\r\n if type(p) != str:\r\n \r\n raise RuntimeError('malformed')\r\n\r\n if len(Matrix) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n return", "def __NDim_restriction_incorrect_list_number(self):\n\n strTestName = 'The number of dimensions in a list higher than a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramNDimH('parameter1', 1)\n\n RxCSObject.parameter1 = [4, 2, 11, -1, -4]\n\n self.__parametersCheck_error(RxCSObject, NDimError, strTestName)", "def testSetWithListFails(self):\n def setSat():\n 
self.node.sat = [-1.1]\n\n self.assertRaises(\n TypeError,\n setSat\n )", "def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # return is_sequence(value) # use from pandas.core.common import is_sequence", "def test_validate_bad_data(self, value):\n opt = scheme.ListOption('test-opt')\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)" ]
[ "0.6896146", "0.6843723", "0.6687192", "0.66674805", "0.6584861", "0.650589", "0.64047855", "0.6385669", "0.6318132", "0.62996775", "0.6293289", "0.62533414", "0.6251247", "0.62425464", "0.62239265", "0.62199765", "0.6166311", "0.61608076", "0.6119108", "0.6056942", "0.60092473", "0.5972202", "0.5963308", "0.59515876", "0.5946925", "0.5931063", "0.59263426", "0.5862697", "0.58481324", "0.58423495", "0.58380336", "0.5837136", "0.5804837", "0.5804072", "0.58019114", "0.57808", "0.5779393", "0.57477206", "0.57192737", "0.570066", "0.56972235", "0.56706864", "0.5669838", "0.56686944", "0.5667576", "0.56241775", "0.5621871", "0.5620142", "0.5615865", "0.55993634", "0.5591057", "0.5590167", "0.558915", "0.5554184", "0.55498016", "0.5549214", "0.5547118", "0.5527633", "0.5524695", "0.5519854", "0.5492884", "0.54522914", "0.54521567", "0.5440568", "0.5440013", "0.54382807", "0.54272103", "0.5422695", "0.54136264", "0.5412571", "0.5390738", "0.53856295", "0.53856295", "0.5384872", "0.53828055", "0.53710616", "0.5367866", "0.53638715", "0.5363116", "0.53620327", "0.53607506", "0.5351066", "0.53460896", "0.5345879", "0.53401375", "0.533765", "0.5336313", "0.5323878", "0.53223836", "0.5320299", "0.53184503", "0.5303804", "0.529866", "0.5290657", "0.5282094", "0.52778625", "0.52736914", "0.52649844", "0.52619034", "0.52568114" ]
0.71428686
0
If a listlist_op returns something besides a list as output, raise a ValueError.
def test_listlist_op_3():

    @ops.listlist_op
    def f(x):
        return iter([4, 5, 6])  # Returning an iterator instead of an list

    with pytest.raises(ValueError):
        result = f([1, 2, 3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def _is_list(val):\n\n return isinstance(val, list)", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def is_list(value):\n return isinstance(value, list) or None", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def _is_list(item):\n return isinstance(item, list)", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def is_list(value):\n return isinstance(value, list)", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", 
\"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def aslist(something):\n return something if isinstance(something, list) else [something]", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def is_list(self) -> bool:\n return False", "def isList(memoryManager, paramsList):\n if isEmptyList(paramsList):\n return [1.0]\n A = paramsList[0]\n if validateList(A):\n return [0.0] if len(A) <= 1 else [1.0]\n return [0.0]", "def test_creation_list():\n with pytest.raises(ValueError) as __:\n value = list()\n __ = param.Integer(value=value)", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n output = input[0]\n if not isinstance(output, list):\n output = [output]\n else:\n output = list(input)\n return output", "def safelist(listable):\n if type(listable) == str:\n return [listable]\n else:\n return listable.tolist()", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def _to_list( self, input ):\n import numpy\n listtypes = (list, tuple, numpy.ndarray)\n if input == None:\n return None\n elif type(input) in listtypes:\n return list(input)\n else:\n return [input]", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def _as_list(value):\n if not isinstance(value, list):\n value = [value]\n return value", "def test_llist_no_parameter_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n 
TestLList.llist_integer.add()\n\n assert \"Required argument 'value' (pos 1) not found\" in typeError.value", "def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False", "def is_list(s_list):\n return isa(s_list, List)", "def _convert_to_list(self, input_argument):\n if type(input_argument) is not list:\n input_argument = [input_argument]\n return input_argument", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))", "def value_error(var, _list):\n\n #if not any(r):\n if len(_list) == 2:\n divisor = \" or \"\n elif len(_list) > 2:\n divisor = \", \"\n\n print(_list)\n print(len(_list))\n raise ValueError(\"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n _list)), var_type=var))", "def is_list ( self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )", "def is_list_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=list)", "def test_raises_typeerror_if_rows_not_list(self):\n def result():\n return num_islands([{}, \"test\", 123])\n\n self.assertRaises(TypeError, result)", "def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # return is_sequence(value) # use from pandas.core.common import is_sequence", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def convert_old_style_list(list_):\n if not isinstance(list_, (tuple, list)) or len(list_) != 2:\n return list_, False\n first_item, second_item = list_\n if second_item == []:\n return [first_item], True\n try:\n # see if second item is iterable\n iter(second_item)\n except TypeError:\n return list_, False\n old_style_list = True\n new_second_item = []\n for sublist in second_item:\n item, old_style_list = convert_old_style_list(sublist)\n if not old_style_list:\n break\n new_second_item.extend(item)\n if old_style_list:\n second_item = new_second_item\n return [first_item, second_item], old_style_list", "def test_neg_operate_list_operation_bin_notlist(self):\n key = (\"test\", \"demo\", 1)\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"age\", \"index\": 2, \"val\": 9}]\n\n try:\n (key, _, _) = self.as_connection.operate(key, list)\n\n except e.BinIncompatibleType as exception:\n assert exception.code == 12", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n 
assert(list(result) == [4, 5, 6])", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def to_list(x):\n if isinstance(x, list):\n return x\n return [x]", "def ensure_list(obj, allow_tuple=True):\n if isinstance(obj, list):\n return obj\n\n elif allow_tuple and isinstance(obj, tuple):\n return obj\n elif not allow_tuple and isinstance(obj, tuple):\n return list(obj)\n else:\n return [obj]", "def validate(self, list_value):\n errors = DataDefinition.validate(self, list_value)\n if errors:\n return errors\n\n for index, value in enumerate(list_value):\n errors = self.element_type.validate(value)\n if errors:\n msg = message_factory.get_message(\n 'vapi.data.list.invalid.entry',\n str(value), index)\n return [msg] + errors\n\n return None", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()", "def isList(x):\n \n return ( type(x) == list ) # True if the type of x is a list", "def listify(supposed_lst:object=None):\n\tif (supposed_lst is not None):\n\t\tif (not isinstance(supposed_lst, list)):\n\t\t\tsupposed_lst = [supposed_lst]\n\t\t# If it was already a list, check it for emptiness and `None`.\n\t\telif (isinstance(supposed_lst, list)):\n\t\t\tif (not supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t\t\tif (None in supposed_lst):\n\t\t\t\traise ValueError(dedent(\n\t\t\t\t\tf\"Yikes - The list you provided contained `None` as an element.\" \\\n\t\t\t\t\tf\"{supposed_lst}\"\n\t\t\t\t))\n\t# Allow `is None` to pass through because we need it to trigger null conditions.\n\treturn supposed_lst", "def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))", "def _require_listlike(level, arr, arrname: str):\n if level is not None and not is_list_like(level):\n if not is_list_like(arr):\n raise TypeError(f\"{arrname} must be list-like\")\n if len(arr) > 0 and is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list-like\")\n level = [level]\n arr = [arr]\n elif level is None or is_list_like(level):\n if not is_list_like(arr) or not is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list of lists-like\")\n return level, arr", "def _check_multilabel_list(obj: Any):\n if not isinstance(obj, list):\n raise TypeError(f\"obj must be a list, got '{type(obj)}'\")\n\n if len(obj) > 0:\n if not isinstance(obj[0], list):\n raise TypeError(f\"obj must contain lists, got '{type(obj[0])}'\")\n\n if len(obj[0]) > 0:\n if not isinstance(obj[0][0], str):\n raise TypeError(\n f\"obj must contain lists of strings, got lists of '{type(obj[0][0])}'\"\n )", "def is_list_of_list(self) -> bool:\n 
return bool(AnnotationWrapper.list_of_list_re.match(self.data))", "def check_for_list(check):", "def makelist(input):\n if isinstance(input, list) or isinstance(input, np.array):\n output = input\n else:\n output = [input]\n return output", "def ensure_list(value: Any) -> List[Any]:\n\n if isinstance(value, (Mapping, str)): # do not unpack dictionaries\n return [value]\n elif isinstance(value, Iterable):\n return list(value)\n else:\n return [value]", "def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False", "def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)", "def _to_list(value: Union[Dict[str, Any], List, Tuple, int], name=None, list_length=None):\n if not isinstance(value, (list, tuple)):\n if list_length is not None:\n value = [value] * list_length\n else:\n value = [value]\n if list_length is not None and len(value) != list_length:\n name = '' if name is None else name\n raise ValueError(\"hparams '%s' must be a list of length %d\" % (name, list_length))\n return value", "def tolist(x):\n return x if isinstance(x, list) else [x]", "def target_list_option(s):\n return _convert(s, (list, tuple))", "def test_add_lists():\n #Test adding integers\n assert [2, 2] == add_lists ([0, 1], [2, 1])\n \n #Test adding floats\n assert [1.5, 2.0] == add_lists([1.0, 1.5], [0.5, 0.5])\n \n #Test adding strings\n assert ['red apple', 'yellow banana'] == add_lists(['red ', 'yellow '], ['apple', 'banana'])\n \n #Test adding negative numbers\n assert [-4, -9] == add_lists([5, -4], [-9, -5])\n \n #Test the function returns a list\n assert isinstance(add_lists([0, 1], [2, 1]), list)", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def _list4_validator(_: object, attrib: 'attrs.Attribute[List[Vec]]', value: object) -> None:\n if not isinstance(value, list):\n raise TypeError(attrib.name + ' should be a list!')\n if len(value) != 4:\n raise ValueError(attrib.name + ' must have 4 values!')", "def test_pos_operate_with_list_operations_different_datatypes(self, list, result, bin, expected):\n key = (\"test\", \"demo\", \"list_key\")\n\n key, _, bins = self.as_connection.operate(key, list)\n\n assert bins == result\n\n key, _, bins = self.as_connection.get(key)\n\n assert bins[bin] == expected", "def convert_to_list(item):\n return item if item is None or isinstance(item, list) else [item]", "def test_return_negative_numbers_from_lst():\n assert return_negative_numbers_from_lst([-1, 0, 1, -23, 4]) == [-1, -23]\n assert return_negative_numbers_from_lst([0]) == []\n assert return_negative_numbers_from_lst([2, 3, 17]) == []\n assert return_negative_numbers_from_lst([-2, -3, -17]) == [-2, -3, -17]", "def is_list(obj):\n return type(obj) is list", "def test_llist_get_element_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.get()", "def _coerce_list(self, thing):\n\t\tif isinstance(thing, basestring):\n\t\t\treturn list((thing,))\n\n\t\treturn list(thing)", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with 
pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")", "def test_fromlist(self):\n\n self.assertRaises(TypeError, self.hw, [])", "def ensure_list(thing):\r\n\r\n if isinstance(thing, str_types):\r\n return [thing]\r\n return thing", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_list_format(self) -> None:\n r = self.perform_request('list', False)\n self.assert_json_schema(r.json(), self.get_list_schema())", "def test_graph_with_list_fail():\n try:\n node_list = [\"slippery list\"]\n node_list.append(Node({'A':['B','C']}))\n node_list.append(Node({'B':['C','D']}))\n node_list.append(Node({'C':['D']}))\n node_list.append(Node({'D':['C']}))\n g = Graph(node_list)\n except Exception as e:\n assert str(e) == 'invalid node provided!'", "def test_validate_positive_integer_list():\n with pytest.raises(ValueError):\n validate_positive_integer_list(0.5, 1)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([0.5, 0, 5], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([1], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(0, 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(-1, 2)\n\n assert validate_positive_integer_list(1, 2) == [1, 1]", "def is_list(self) -> bool:\n if self.is_list_of_list: # pylint: disable=R1705\n return False\n else:\n return bool(AnnotationWrapper.list_field_re.match(self.data))", "def isList(self, item):\n\t retval = False\n\t if type(item) in (ListType, TupleType) :\n\t retval = True" ]
[ "0.73349303", "0.71511084", "0.6736014", "0.66537863", "0.6652018", "0.6627136", "0.65927994", "0.6493474", "0.6485327", "0.6485104", "0.64288867", "0.6416885", "0.6406958", "0.63908684", "0.63792086", "0.6378879", "0.63132125", "0.6302996", "0.6289423", "0.62221825", "0.6194371", "0.617668", "0.61411506", "0.60991126", "0.60606086", "0.6052341", "0.6048131", "0.60342896", "0.60317266", "0.6009316", "0.60050154", "0.6001704", "0.5999022", "0.5990069", "0.5988323", "0.5981723", "0.59626913", "0.59626913", "0.5959347", "0.5958653", "0.592055", "0.5919887", "0.5913274", "0.59049803", "0.58843", "0.58704513", "0.58606875", "0.58564556", "0.58508915", "0.5850245", "0.5840581", "0.5827812", "0.58197534", "0.58197534", "0.5809191", "0.5808668", "0.58008164", "0.5799061", "0.57898545", "0.5785462", "0.57587665", "0.57582", "0.5743373", "0.5739756", "0.5735244", "0.5734761", "0.57339257", "0.56933665", "0.5670991", "0.56593955", "0.5651986", "0.5650658", "0.5649251", "0.56473", "0.56363165", "0.5633368", "0.5623076", "0.5619855", "0.5614955", "0.56129026", "0.56058097", "0.5599653", "0.5589116", "0.5586633", "0.5585945", "0.55708116", "0.5568165", "0.55624783", "0.5558112", "0.55519766", "0.55503714", "0.55459857", "0.5539198", "0.55306697", "0.55220073", "0.55175215", "0.55154526", "0.5513971", "0.55131394", "0.5502443" ]
0.72641724
1
If a listiter_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output.
def test_listiter_op_1():

    @ops.listiter_op
    def f(x):
        return iter([4, 5, 6])

    result = f([1, 2, 3])  # Passing in a list, as expected

    assert(isinstance(result, collections.abc.Iterator)), f"{result}"
    assert(list(result) == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func", "def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper", "def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)", "def _operate_recursive(\n function: Callable[..., V], iterables: RecursiveIterable[V], result: RecursiveList[V]\n) -> RecursiveList[V]:\n for 
items in zip(*iterables): # type: ignore\n if any(isinstance(item, Iterable) for item in items): # pylint: disable=W1116\n sub_result = [] # type: ignore\n _operate_recursive(function, items, sub_result)\n else:\n sub_result = function(*items) # type: ignore\n result.append(sub_result)\n return result", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def decorator(arg):\n return lambda: list(arg)", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def operate_recursive(function: Callable[..., V], *iterables: RecursiveIterable[V]) -> RecursiveList[V]:\n return _operate_recursive(function, iterables, [])", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def wrapped_func(ret_val, *args, **kwargs):\n val = func(*args, **kwargs)\n ret_val.append(val)", "def safe_iterator(i):\n return i or []", "def flatmap(iterable, function_to_list):\n for element in iterable:\n list_block = function_to_list(element)\n for result_value in list_block:\n yield result_value", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def visit_ListComp(self, node):\n try:\n (generator,) = node.generators\n except ValueError:\n raise NotImplementedError(\"Only single loop comprehensions are allowed\")\n\n names = find_names(generator.target)\n argslist = [ast.arg(arg=name.id, annotation=None) for name in names]\n if len(names) <= 1:\n signature = ast.arguments(\n args=argslist,\n vararg=None,\n kwonlyargs=[],\n kw_defaults=[],\n kwarg=None,\n defaults=[],\n )\n else:\n signature = ast.List(elts=argslist, ctx=ast.Load())\n\n array = generator.iter\n lam_sig = functools.partial(ast.Lambda, args=signature)\n\n filters = generator.ifs\n if filters:\n filt = ast.BoolOp(op=ast.And(), values=filters)\n # array.filter\n method = ast.Attribute(value=array, attr=\"filter\", ctx=ast.Load())\n # array.filter(func)\n array = ast.Call(func=method, args=[lam_sig(body=filt)], keywords=[])\n\n method = ast.Attribute(value=array, attr=\"map\", ctx=ast.Load())\n mapped = ast.Call(func=method, args=[lam_sig(body=node.elt)], keywords=[])\n result = self.visit(mapped)\n return result", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def test_returns_list(self):\n metrics = ('input', 'output')\n\n @callback_return(*metrics)\n def returns_list():\n return [2, 1, 3]\n\n r = returns_list()\n self.assertEqual(len(metrics), len(r.keys()), 'Extra return values should be dropped.')\n self.assertEqual(2, r['input'])\n 
self.assertEqual(1, r['output'])\n self.assertNotIn('extra', r)", "def _wrapper(func, args):\n return func(*args)", "def runner(func, iterable, arguments, local=False):\n if local:\n return [func(i, *arguments) for i in iterable]\n else:\n if iterable:\n return group(func.s(i, *arguments) for i in iterable)().get()\n else:\n # group()() returns None if group is called with no arguments,\n # leading to an AttributeError with get().\n return []", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def apply(L, f):\n\n result = []\n for i in L:\n result.append(f(i))\n\n return result", "def list_response(wrapped):\n\n @wraps(wrapped)\n def decorated(request, *args, **kwargs):\n number_of_items = None\n offset = None\n if 'numberofitems' in request['args']:\n if request['args']['numberofitems'].isnumeric():\n number_of_items = int(request['args']['numberofitems'])\n else:\n raise UserException(ERROR_NUMERIC_REQUIRED % 'numberOfItems')\n if 'offset' in request['args']:\n if request['args']['offset'].isnumeric():\n offset = int(request['args']['offset'])\n else:\n raise UserException(ERROR_NUMERIC_REQUIRED % 'offset')\n\n if number_of_items is not None and offset is not None:\n return wrapped(request, number_of_items, offset)\n elif number_of_items is not None:\n return wrapped(request, number_of_items)\n elif offset is not None:\n return wrapped(request, offset=offset)\n else:\n return wrapped(request)\n\n return decorated", "def operationListReturn(self, a, b, c, operation):\n assert len(a) == len(b) == len(c), 'Length mismatch'\n for i in range(len(a)):\n if isinstance(a[i], list): self.operationListReturn(a[i], b[i], c[i], operation)\n else: c[i] = operation(a[i],b[i])\n return c", "def __call__(self, X, Y=None, eval_gradient=False):\n return [f(X, Y=Y, eval_gradient=eval_gradient) for f in self.list_func]", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)", "def test_iter_method(self):\n ref = mock.Mock()\n ref.side_effect = [{'rows': [1,2,3]}, {'rows': []}]\n rslt = Result(ref)\n collection = [x for x in rslt]\n self.assertEqual(collection, [1,2,3])\n\n run_iter = lambda x: [y for y in x]\n\n rslt = Result(ref, skip=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)\n\n rslt = Result(ref, limit=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)", "def map(self, callable, iterable):\n iterable = executor.get_actual_value(iterable)\n return super(Executor, self).map(callable, iterable)", "def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))", "def apply_function_to_nested_list(func, l):\n from itertools import chain\n result = func(list(chain(*l)))\n csum = np.cumsum(map(len, l))\n new_l = [result[(0 if i == 0 else csum[i-1]):csum[i]] for i in range(len(l))]\n return new_l", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], 
functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def lists_equal_length(func):\n # Define the wrapper function.\n def wrapper(self, *args, **kwargs):\n\n # Collect all `list` objects from `args`.\n lists_args = [arg for arg in args if isinstance(arg, list)]\n # Collecgt all `list` object from `kwargs`.\n lists_kwargs = [arg for arg in kwargs.values() if isinstance(arg, list)]\n # Concatenate the lists of `list` objects.\n lists = lists_args + lists_kwargs\n\n # Check whether all the `list` objects have the same length.\n do_have_same_length = len(set(map(len, lists))) == 1\n\n # Raise an `InvalidArgumentsError` exception if there's a length\n # mismatch.\n if not do_have_same_length:\n msg_fmt = \"The argument lists must have the same length.\"\n raise InvalidArgumentsError(msg_fmt)\n\n # Simply execute the decorated method with the provided arguments\n # and return the result.\n return func(self, *args, **kwargs)\n\n return wrapper", "def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result", "def maplist(f, xs):\n return list(map(f, xs))", "def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst", "def __call__(self, items: List[Item]) -> List[Item]:", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def __call__(self, func, *args, **kwargs):\n\n @wraps(func) # To keep its own namespace\n def wrapper(*args, **kwargs):\n gener = self.__iter__()\n return func(gener, *args, **kwargs)\n return wrapper", "def get_list_dep() -> Callable:\n args = []\n body = [\" r = {}\"]\n # Apply list ops as annotations\n for list_op in self.list_ops:\n args += [f\"{list_op.name}: Optional[List[str]] = Query(None)\"]\n body += [\n f\" if {list_op.name} is not None:\",\n f' r[\"{list_op.name}\"] = {list_op.name}',\n ]\n code = [f\"def inner({', '.join(args)}) -> dict:\"] + body + [\" return r\"]\n r = {\"Optional\": typing.Optional, \"List\": typing.List, \"Query\": Query}\n exec(\"\\n\".join(code), {}, r)\n return r[\"inner\"]", "def ListMonad(*elements: List[T]) -> _List[T]: # pylint: disable=invalid-name\n\n return _List(list(elements), None)", "def __noop_list(self, *args, **kwargs):\n return []", "def batch(_func):\n def batch_wrap(\n _lst, num_threads=25, suppress_err_msg=False, raise_exception=False\n ):\n def worker():\n while True:\n item = q.get()\n try:\n _func(*item)\n except Exception as err:\n if not suppress_err_msg:\n log.error('Error: {}'.format(err))\n if raise_exception:\n raise Exception(err)\n q.task_done()\n\n q = queue.Queue()\n\n for _i in range(num_threads):\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n\n for _item in _lst:\n if not isinstance(_item, tuple):\n q.put((_item,))\n else:\n q.put(_item)\n\n q.join() # Wait for all operations to complete\n\n return batch_wrap", "def make_iterable(arg):\n return arg if is_iterable(arg) else (arg,)", "def cast_to_list(position):\n\n\[email protected]\n\tdef wrapper(function, instance, args, kwargs):\n\t\tif not isinstance(args[position], list):\n\t\t\targs = 
list(args)\n\t\t\targs[position] = [args[position]]\n\t\t\targs = tuple(args)\n\n\t\treturn function(*args, **kwargs)\n\n\treturn wrapper", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def unaryFunctionGenerator(op, operationName):\n def unaryFunction(memoryManager, paramsList):\n def unaryOperation(a):\n if a is None:\n return None\n if type(a) is not float:\n raise Exception(\"Cannot {} nested list\".format(operationName))\n return op(a)\n\n handleEmpty(paramsList, operationName)\n A = paramsList[0]\n \n if type(A) == float:\n return unaryOperation(A)\n\n lengthA = len(A)\n\n result = []\n for i in range(lengthA):\n result.append(unaryOperation(A[i]))\n return result\n\n return unaryFunction", "def convert_old_style_list(list_):\n if not isinstance(list_, (tuple, list)) or len(list_) != 2:\n return list_, False\n first_item, second_item = list_\n if second_item == []:\n return [first_item], True\n try:\n # see if second item is iterable\n iter(second_item)\n except TypeError:\n return list_, False\n old_style_list = True\n new_second_item = []\n for sublist in second_item:\n item, old_style_list = convert_old_style_list(sublist)\n if not old_style_list:\n break\n new_second_item.extend(item)\n if old_style_list:\n second_item = new_second_item\n return [first_item, second_item], old_style_list", "def yield_wrapped_ops(\n self,\n fn: Union[\n Callable,\n Tuple[Union[str, Collection[str]], Union[Callable, Collection[Callable]]],\n ],\n exclude=(),\n domain: Union[str, int, Collection] = None,\n ) -> Iterable[FnOp]:\n if isinstance(fn, tuple):\n name_path, fn_path = fn\n else:\n name_path, fn_path = (), fn\n\n fun_path = cast(Tuple[Callable, ...], astuple(fn_path, None))\n fun = fun_path[-1]\n\n if isinstance(fun, Operation):\n ## pass-through operations\n yield fun\n return\n\n def param_to_modifier(name: str, param: inspect.Parameter) -> str:\n return (\n optional(name)\n # is optional?\n if param.default is not inspect._empty # type: ignore\n else keyword(name)\n if param.kind == Parameter.KEYWORD_ONLY\n else name\n )\n\n given_name_path = astuple(name_path, None)\n\n decors_by_name = get_autograph_decors(fun, {}, domain or self.domain)\n\n for decor_name, decors in decors_by_name.items() or ((None, {}),):\n if given_name_path and not decor_name:\n name_path = decor_path = given_name_path\n else: # Name in decors was \"default\"(None).\n name_path = decor_path = astuple(\n (decor_name if decor_name else func_name(fun, fqdn=1)).split(\".\"),\n None,\n )\n assert decor_path, locals()\n\n if given_name_path:\n # Overlay `decor_path` over `named_path`, right-aligned.\n name_path = tuple(*name_path[: -len(decor_path)], *decor_path)\n\n fn_name = str(name_path[-1])\n if fn_name in exclude:\n continue\n overrides = self._from_overrides(decor_path)\n\n # TODO: support an extra overrides source, in ``wrap_funcs()``.\n op_data = (\n ChainMap(overrides, decors)\n if (overrides and decors)\n else overrides\n if overrides\n else decors\n )\n if op_data:\n log.debug(\"Autograph overrides for %r: %s\", name_path, op_data)\n\n op_props = \"needs provides renames, inp_sideffects out_sideffects\".split()\n needs, provides, override_renames, inp_sideffects, out_sideffects = (\n op_data.get(a, UNSET) for a in op_props\n )\n\n if needs is UNSET:\n needs = [...]\n needs = aslist(needs, \"needs\")\n if ... 
in needs:\n sig = inspect.signature(fun)\n fun_needs = [\n param_to_modifier(name, param)\n for name, param in sig.parameters.items()\n if name != \"self\" and param.kind is not Parameter.VAR_KEYWORD\n ]\n ## Insert object as 1st need for object-methods.\n #\n if len(fun_path) > 1:\n clazz = fun_path[-2]\n # TODO: respect autograph decorator for object-names.\n class_name = name_path[-2] if len(name_path) > 1 else clazz.__name__\n if is_regular_class(class_name, clazz):\n log.debug(\"Object-method %s.%s\", class_name, fn_name)\n fun_needs.insert(0, camel_2_snake_case(class_name))\n\n needs = [\n fneed if n is ... else n\n for n, fneed in itt.zip_longest(needs, fun_needs, fillvalue=...)\n ]\n\n if provides is UNSET:\n if is_regular_class(fn_name, fun):\n ## Convert class-name into object variable.\n provides = camel_2_snake_case(fn_name)\n elif self.out_patterns:\n provides = self._deduce_provides_from_fn_name(fn_name) or UNSET\n if provides is UNSET:\n provides = ()\n provides = aslist(provides, \"provides\")\n\n needs, provides = self._apply_renames(\n (override_renames, self.renames), (needs, provides)\n )\n\n if inp_sideffects is not UNSET:\n needs.extend(\n (\n i\n if is_sfx(i)\n else sfxed(*i)\n if isinstance(i, tuple)\n else token(i)\n )\n for i in aslist(inp_sideffects, \"inp_sideffects\")\n )\n\n if out_sideffects is not UNSET:\n provides.extend(\n (\n i\n if is_sfx(i)\n else sfxed(*i)\n if isinstance(i, tuple)\n else token(i)\n )\n for i in aslist(out_sideffects, \"out_sideffects\")\n )\n\n if self.full_path_names:\n fn_name = self._join_path_names(*name_path)\n\n op_kws = self._collect_rest_op_args(decors)\n\n yield FnOp(fn=fun, name=fn_name, needs=needs, provides=provides, **op_kws)", "def to_list():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n l = []\n try:\n while True:\n l.append((yield))\n except GeneratorExit:\n target.send(l) \n target.close()\n\n return _dagpype_internal_fn_act", "def sequence(f, lst: list) -> list:\n ret = []\n for ele in lst:\n ret.append(f(ele))\n return ret", "def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list", "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def handleList(self, _): # pylint: disable=invalid-name", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])", "def zzX_value(l, f):\n if type(f) is not list:\n return zzX_const(l, f)\n else:\n if not l:\n return f\n else:\n return [zzX_value(l-1, f)]", "def give_me_a_list():\n 
my_list=[1,2,3,4]\n return my_list\n pass", "def map(function, iterable):\n\n return [function(x) for x in iterable]", "def _shifter(iterable, offset):\n if not offset:\n return iterable\n return [i + offset for i in iterable]", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def list_wrap(spec):\n if not isinstance(spec, list):\n spec = [spec]\n return spec", "def wrap_generator(generator, wrapper_function):\n for item in generator:\n yield wrapper_function(item)", "def threadsafe(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def pipeline(func):\n @wraps(func)\n def process(img_or_iterable, *args, **kwargs):\n if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):\n _len = len(img_or_iterable)\n s = SliceableIterable(img_or_iterable, range(_len), _len)\n s._proc_func = lambda image: func(image, *args, **kwargs)\n return s\n else:\n # Fall back on normal behavior of func, interpreting input\n # as a single image.\n return func(img_or_iterable)\n\n if process.__doc__ is None:\n process.__doc__ = ''\n process.__doc__ = (\"This function has been made pims-aware. When passed\\n\"\n \"a pims reader or SliceableIterable, it will return a \\n\"\n \"new SliceableIterable of the results. When passed \\n\"\n \"other objects, its behavior is \"\n \"unchanged.\\n\\n\") + process.__doc__\n return process", "def n_ary(func):\n def wrapper(x, *args):\n return x if not args else func(x, wrapper(*args))\n return wrapper", "def mapr(\n f: Callable[[Any], Any],\n collection: Sequence[Any]) -> List[Any]:\n if len(collection) == 0:\n return []\n return mapr(f, collection[:-1]) + [f(collection[-1])]", "def simple_map(f, l):\n # Again, my first take is a list comprehension.\n return [ f(item) for item in l ]", "def notify_wrap(self, func, *args, **kw):\n val = func(self, *args,**kw)\n if not self._observable_frozen:\n self.notify('list', None, self)\n return val", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def doit(self, **hints):\n return L(*self.args, **hints)", "def for_each(f: Callable[[A], Maybe[B]], iterable: Iterable[A]\n ) -> Maybe[Iterable[B]]:\n return cast(Maybe[Iterable[B]], map_m_(Just, f, iterable))", "def operates_on_task_list(fn):\n @dnzo_login_required\n def task_wrapper(self, dnzo_user, task_list_name, *args):\n from tasks_data.task_lists import get_task_list\n task_list = get_task_list(dnzo_user, task_list_name)\n if not task_list or task_list.deleted:\n self.not_found()\n else:\n fn(self, dnzo_user, task_list, *args)\n return task_wrapper", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def flatten(list_):\n for elem in list_:\n if type(elem) != list:\n yield elem\n else:\n yield from flatten(elem)", "def __call__(self, X, Y=None, eval_gradient=False):\n list_pfunc = self._get_one_param('list_func')\n if(Y is None):\n Y =X\n for f in reversed(list_pfunc):\n X = f(X, Y=Y, 
eval_gradient=False)\n return X", "def interleave(inter, f, seq):\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)", "def _apply_func(data, func, num_rows, base_row_index=0, increment=False):\n row = list(data[base_row_index])\n curr_index = base_row_index\n for _ in range(num_rows):\n data.append(func(row))\n if increment:\n curr_index += 1\n row = list(data[curr_index])\n return data" ]
[ "0.7756567", "0.75442225", "0.75318354", "0.75228345", "0.747424", "0.74187565", "0.7351864", "0.71246946", "0.6990763", "0.6881843", "0.68182045", "0.66678816", "0.65725154", "0.65193516", "0.62121695", "0.6203566", "0.59676445", "0.5919752", "0.5807262", "0.5753957", "0.56545615", "0.5622955", "0.5607902", "0.559432", "0.5534809", "0.5485539", "0.5474979", "0.54562104", "0.54493505", "0.54087555", "0.5404381", "0.53848344", "0.53774154", "0.53758824", "0.5355555", "0.534703", "0.53395176", "0.53370285", "0.5324449", "0.5300343", "0.5293169", "0.52699184", "0.5237985", "0.5237985", "0.5234971", "0.5222926", "0.5221228", "0.52064", "0.5167139", "0.5165403", "0.5146539", "0.5144075", "0.51075435", "0.5106911", "0.5097315", "0.5089935", "0.5089935", "0.50879043", "0.50846857", "0.50821644", "0.50798273", "0.5059936", "0.50521225", "0.5049984", "0.5036306", "0.503097", "0.5030199", "0.50282145", "0.5024216", "0.50206125", "0.5020057", "0.5019792", "0.5011805", "0.5008851", "0.49927968", "0.4979532", "0.49747074", "0.4971633", "0.4967204", "0.4956344", "0.49558166", "0.49466977", "0.49444136", "0.4942528", "0.49316227", "0.4930581", "0.49274546", "0.49239898", "0.49091408", "0.49074432", "0.49059132", "0.48857316", "0.48808005", "0.48659128", "0.4860104", "0.4859311", "0.4853824", "0.4853136", "0.48412186", "0.48383832" ]
0.76254886
1
If a listiter_op is given something besides a list as input, raise a ValueError.
def test_listiter_op_2():

    @ops.listiter_op
    def f(x):
        return iter([4, 5, 6])

    with pytest.raises(ValueError):
        f(iter([1, 2, 3]))  # Passing in an iterator instead of a list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def _is_list(val):\n\n return isinstance(val, list)", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def 
test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)", "def _is_list(item):\n return isinstance(item, list)", "def test_list_increment_with_missing_bin(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)", "def _check_args(self, args):\n if not isinstance(args, list) or not len(args) >= 2:\n raise FunctionArgumentException(\"Argument of attribute getter \"\n \"function '%s' must be a list of \"\n \"indeces; got: '%s'\" % (\n self.name,\n args\n ))\n\n if not is_homogeneous(args, (str, int)):\n raise FunctionArgumentException(\n \"'%s': argument must be a list of strings; got: '%s'\" %\n (self.name, args)\n )", "def test_neg_operate_list_operation_bin_notlist(self):\n key = (\"test\", \"demo\", 1)\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"age\", \"index\": 2, \"val\": 9}]\n\n try:\n (key, _, _) = self.as_connection.operate(key, list)\n\n except e.BinIncompatibleType as exception:\n assert exception.code == 12", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def test_llist_no_parameter_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.add()\n\n assert \"Required argument 'value' (pos 1) not found\" in typeError.value", "def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))", "def test_raises_typeerror_if_rows_not_list(self):\n def result():\n return num_islands([{}, \"test\", 123])\n\n self.assertRaises(TypeError, result)", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def test_validate_positive_integer_list():\n with pytest.raises(ValueError):\n validate_positive_integer_list(0.5, 1)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([0.5, 0, 5], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([1], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(0, 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(-1, 2)\n\n assert validate_positive_integer_list(1, 2) == [1, 1]", "def test_creation_list():\n with pytest.raises(ValueError) as __:\n value = list()\n __ = param.Integer(value=value)", "def value_error(var, _list):\n\n #if not any(r):\n if len(_list) == 2:\n divisor = \" or \"\n elif len(_list) > 2:\n divisor = \", \"\n\n print(_list)\n print(len(_list))\n raise ValueError(\"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n _list)), var_type=var))", "def test_missing_generic_args(self):\n import System\n #TODO specify clearly which exception is appropriate here\n self.assertRaises(Exception, System.Collections.Generic.List)", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n 
syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def convert_old_style_list(list_):\n if not isinstance(list_, (tuple, list)) or len(list_) != 2:\n return list_, False\n first_item, second_item = list_\n if second_item == []:\n return [first_item], True\n try:\n # see if second item is iterable\n iter(second_item)\n except TypeError:\n return list_, False\n old_style_list = True\n new_second_item = []\n for sublist in second_item:\n item, old_style_list = convert_old_style_list(sublist)\n if not old_style_list:\n break\n new_second_item.extend(item)\n if old_style_list:\n second_item = new_second_item\n return [first_item, second_item], old_style_list", "def test_sum_list_string_should_raise_exception(self):\n\n with self.assertRaises(TypeError):\n sum(['banana', 'apple'])", "def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # return is_sequence(value) # use from pandas.core.common import is_sequence", "def test_graph_with_list_fail():\n try:\n node_list = [\"slippery list\"]\n node_list.append(Node({'A':['B','C']}))\n node_list.append(Node({'B':['C','D']}))\n node_list.append(Node({'C':['D']}))\n node_list.append(Node({'D':['C']}))\n g = Graph(node_list)\n except Exception as e:\n assert str(e) == 'invalid node provided!'", "def is_list(value):\n return isinstance(value, list) or None", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def test_badxvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, [1, 2], 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')", "def test_list_no_even(self):\n argument = [1, 3, 5, 7]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def test_list_size_one_no_even(self):\n argument = [1]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def is_list(value):\n return isinstance(value, list)", "def test_list_no_even_same(self):\n argument = [1, 3, 3, 7]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def _raise_on_error(data: Union[list, dict]) -> None:\n if isinstance(data, list) and data:\n data = data[0]\n\n if isinstance(data, dict) and \"error\" in data:\n raise_error(data[\"error\"])", "def test_badyvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, [1, 2], 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def test_fromlist(self):\n\n self.assertRaises(TypeError, 
self.hw, [])", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def test_pos_operate_with_list_get_range_val_out_of_bounds(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_GET_RANGE, \"bin\": \"int_bin\", \"index\": 2, \"val\": 9}]\n\n (key, meta, bins) = self.as_connection.operate(key, list)\n\n assert bins == {\"int_bin\": [3, 4]}", "def _is_list(arg):\n if isinstance(arg, dict):\n return False\n if isinstance(arg, str): # Python 3-only, as str has __iter__\n return False\n return (\n not _has_method(arg, \"strip\")\n and _has_method(arg, \"__getitem__\")\n or _has_method(arg, \"__iter__\")\n )", "def _list_assert(actual_list, expected_list):\n for actual, expected in itertools.izip_longest(actual_list, expected_list):\n _value_assert(None, actual, expected)", "def test_likelihoods_unequal_list_lengths(self):\r\n self.assertRaises(ValueError, likelihoods, [1, 2], [1])", "def test_validate_bad_data(self, value):\n opt = scheme.ListOption('test-opt')\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)", "def _validate_node(self, node):\n if not isinstance(node, self._Node):\n raise TypeError('Invalid object type!')\n if node._container != self:\n raise ValueError('Node does not belong to this list!')\n if node._index < 0 or node._index >= self._size:\n raise ValueError('Invalid node!')", "def test_pos_operate_with_list_insert_index_negative(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"int_bin\", \"index\": -2, \"val\": 9}]\n\n (key, meta, bins) = self.as_connection.operate(key, list)\n\n (key, meta, bins) = self.as_connection.get(key)\n\n assert bins[\"int_bin\"] == [1, 2, 9, 3, 4]", "def testSetWithListFails(self):\n def setSat():\n self.node.sat = [-1.1]\n\n self.assertRaises(\n TypeError,\n setSat\n )", "def test_make_np_iterable_type_error():\n with pytest.raises(TypeError):\n _ = uc._make_np_iterable(\"s\")", "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def __init__(self, inputlist):\n\n # Verify if the inputlist is a iterable but the first element isn't\n # it means is a single dimensional iterable\n try:\n self.n = len(inputlist)\n except TypeError:\n raise(VetorError, \"Parametro de entrada nao possui tamanho\")\n\n if hasattr(inputlist[0], \"__iter__\"):\n raise(VetorError, \"Elementos nao sao escalares\")\n\n self.elem = inputlist", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def isIterable(obj):\n return isinstance(obj, ListType)", "def py_raise(*xs):\n raise NotImplemented", "def _check_iterable(self):\n if self.theoretical_size is None:\n raise 
TypeError(\"This `fixture_ref` has not yet been initialized, so it cannot be unpacked/iterated upon. \"\n \"This is not supposed to happen when a `fixture_ref` is used correctly, i.e. as an item in\"\n \" the `argvalues` of a `@parametrize` decorator. Please check the documentation for \"\n \"details.\")\n if self.theoretical_size == 1:\n raise TypeError(\"This fixture_ref does not represent a tuple of arguments, it is not iterable\")", "def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def test_raises_typeerror_if_not_list(self):\n def result(): return find_rotation_point(\"test\")\n self.assertRaises(TypeError, result)", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def _check_multilabel_list(obj: Any):\n if not isinstance(obj, list):\n raise TypeError(f\"obj must be a list, got '{type(obj)}'\")\n\n if len(obj) > 0:\n if not isinstance(obj[0], list):\n raise TypeError(f\"obj must contain lists, got '{type(obj[0])}'\")\n\n if len(obj[0]) > 0:\n if not isinstance(obj[0][0], str):\n raise TypeError(\n f\"obj must contain lists of strings, got lists of '{type(obj[0][0])}'\"\n )", "def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False", "def test_posteriors_unequal_lists(self):\r\n self.assertRaises(ValueError, posteriors, [1, 2, 3], [1])", "def test_RestrictingNodeTransformer__visit_NotIn_List():\n assert restricted_eval('2 not in [1, 2, 3]') is False", "def test_list_int(self):\n result = get_avg([\"Hello\", \"World\"])\n self.assertEqual(result, TypeError)", "def _is_iterable_non_string(arg):\n return (hasattr(arg, \"__iter__\") or hasattr(arg, \"__getattr__\")) and not isinstance(arg, str)", "def is_tuple_or_list(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def validate_list(validators, data):\n if type(data) is not list:\n return False\n n_validators = len(validators)\n if n_validators == 0:\n return len(data) == 0\n elif n_validators == 1:\n validator = validators[0]\n return all(imap(lambda item: validate_common(validator, item), data))\n elif n_validators > 1:\n raise NotImplementedError(\"You cannot specify more than one validator for list at the moment.\")" ]
[ "0.76405627", "0.7451966", "0.7407579", "0.73711157", "0.7272287", "0.6842472", "0.67784035", "0.6713179", "0.65655285", "0.6331", "0.62677497", "0.62505025", "0.6126729", "0.6060513", "0.60524344", "0.60413057", "0.5994478", "0.5923151", "0.5910163", "0.5815476", "0.5803793", "0.5797612", "0.5776292", "0.57018596", "0.5693806", "0.5682503", "0.56680715", "0.56610525", "0.561739", "0.56114024", "0.5598629", "0.5567752", "0.5564626", "0.5564626", "0.55601054", "0.5481916", "0.5464232", "0.5461882", "0.5429303", "0.542803", "0.5424926", "0.5421738", "0.54163873", "0.53936744", "0.53885174", "0.5366669", "0.5352702", "0.5345363", "0.5298219", "0.5288001", "0.5258687", "0.5258687", "0.52388704", "0.5236412", "0.522635", "0.5217773", "0.5195342", "0.5189135", "0.5174227", "0.516104", "0.5147199", "0.5144029", "0.5120755", "0.51179636", "0.5111082", "0.5100567", "0.5098248", "0.50967467", "0.50967336", "0.50871253", "0.5085981", "0.5073339", "0.50681967", "0.506346", "0.50618005", "0.5060244", "0.50449944", "0.5039085", "0.5017113", "0.50160855", "0.5012246", "0.5001891", "0.49983767", "0.49983767", "0.4988698", "0.49853337", "0.49729759", "0.49727446", "0.49684522", "0.49638757", "0.4956922", "0.49555635", "0.49536076", "0.4953372", "0.49522126", "0.49492744", "0.4947529", "0.49431366", "0.4939051", "0.49375218" ]
0.75528175
1
If a listiter_op returns something besides an iterator as output, raise a ValueError.
def test_listiter_op_3():

    @ops.listiter_op
    def f(x):
        return [4, 5, 6]  # Returning a list instead of an iterator

    with pytest.raises(ValueError):
        result = f([1, 2, 3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def safe_iterator(i):\n return i or []", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_iter(\n self, start: Result[int, int], exp: t.Tuple[int, ...]\n ) -> None:\n assert tuple(start.iter()) == exp", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def test_raises_typeerror_if_rows_not_list(self):\n def result():\n return num_islands([{}, \"test\", 123])\n\n self.assertRaises(TypeError, result)", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def test_enforce_iterable():\n formatter = TabularOutputFormatter()\n loremipsum = (\n \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod\".split(\n \" \"\n )\n )\n\n for format_name in formatter.supported_formats:\n formatter.format_name = format_name\n try:\n formatted = next(formatter.format_output(zip(loremipsum), [\"lorem\"]))\n except TypeError:\n assert False, \"{0} doesn't return iterable\".format(format_name)", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def testIterWithException(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.__iter__()\n\t\tc.setReturn(1)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ti = x.__iter__()\n\t\tself.failUnless(i.next() == 1)\n\t\tself.failUnlessRaises(Exception, i.next)", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n 
\"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_collect(\n self, iterable: t.Iterable[Result[int, str]], exp: Result[int, str]\n ) -> None:\n assert Result.collect(iterable) == exp", "def test_make_np_iterable_type_error():\n with pytest.raises(TypeError):\n _ = uc._make_np_iterable(\"s\")", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def EOF_or_raise(f):\n try:\n f.next()\n except StopIteration:\n return\n else:\n raise Exception(str(f))", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def __iter__(self):\n return NotImplemented", "def _iterator_unknown_size(self) -> Iterator[int]:\n raise NotImplementedError", "def test_list_no_even_same(self):\n argument = [1, 3, 3, 7]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def test_collect_short_circuits(self) -> None:\n until_err: t.List[Result[int, str]] = [Ok(1), Ok(2), Err(\"no\")]\n\n def _iterable() -> t.Iterable[Result[int, str]]:\n yield from until_err\n # If we continue iterating after the err, we will raise a\n # runtime Error.\n assert False, \"Result.collect() did not short circuit on err!\"\n\n assert Result.collect(_iterable()) == Err(\"no\")", "def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def test_list_increment_with_missing_bin(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def is_iterable(obj):\n return isinstance(obj, (list, tuple, types.GeneratorType)) or \\\n (not isinstance(obj, (int, str, dict)) and\n bool(getattr(obj, \"next\", False)))", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def test_Validator_iter_errors_two_arguments(self):\n\n validator = validators.Draft7Validator({})\n with self.assertWarns(DeprecationWarning) as w:\n error, = validator.iter_errors(\"foo\", {\"type\": \"number\"})\n\n self.assertEqual(error.validator, \"type\")\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Passing a schema to Validator.iter_errors is deprecated 
\",\n ),\n )", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def test_generator_manual() -> None:\n reversed_int: List[int] = []\n\n generator = reverse([1, 2, 3])\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n\n with pytest.raises(StopIteration):\n next(generator)\n\n assert reversed_int == [3, 2, 1]", "def next(self, in_op):\n raise NotImplementedError", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def is_iterable(value):\n # noinspection PyUnresolvedReferences\n return hasattr(value, '__iter__') and hasattr(value, '__getitem__')", "def test_list_size_one_no_even(self):\n argument = [1]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def test_list_no_even(self):\n argument = [1, 3, 5, 7]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def py_raise(*xs):\n raise NotImplemented", "def test_is_iterable(self):\r\n msg_list = messages.MessageList()\r\n\r\n # Adds 3 Message objects to the list.\r\n msg_list.push(messages.StringMessage(\"a\"))\r\n msg_list.push(messages.StringMessage(\"b\"))\r\n msg_list.push(messages.StringMessage(\"c\"))\r\n\r\n self.assertEqual([\"ab\", \"bb\", \"cb\"], [x.msg + \"b\" for x in msg_list])", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def test_foreach_in_non_first_position_raises_error(self):\n with pytest.raises(AssertionError) as exc_info:\n list(parser.generate_commands(yaml.load(\"\"\"\n - something\n - foreach: [A,B]\n \"\"\")))\n assert (\"'foreach' may only be specified at the beginning of a sequence\" in\n str(exc_info.value))", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def __iter__(self):\n raise NotImplementedError(\"__iter__\")", "def _next_exhausted(self):\n\n raise StopIteration() from None", "def test_iter_method(self):\n ref = mock.Mock()\n ref.side_effect = [{'rows': [1,2,3]}, {'rows': []}]\n rslt = Result(ref)\n collection = [x for x in rslt]\n self.assertEqual(collection, [1,2,3])\n\n run_iter = lambda x: [y for y in x]\n\n rslt = Result(ref, skip=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)\n\n rslt = Result(ref, limit=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)", "def test_lined_list_create_with_non_iterable():\n from linked_list import Linked_List\n new_linked_list = Linked_List(-100)\n 
assert new_linked_list.head.value == -100", "def test_iter_files_negative():\n with pytest.raises(ValueError):\n x = list(iter_files(\"wrong_path\", ignore_errors=False))\n assert len(x) > 0\n\n with pytest.raises(RuntimeError):\n x = list(iter_files(\"http://foobar.baz.nonexistent\", ignore_errors=False))\n assert len(x) > 0\n\n with pytest.raises(RuntimeError):\n x = list(iter_files(\"http://google.com/X\", ignore_errors=False))\n assert len(x) > 0", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)\n # test that a none list argument raises a ValueError\n with self.assertRaises(ValueError):\n max_list_iter(34)\n #test a max found at end of list\n self.assertEqual(max_list_iter([0, 1, 2, 5, 3, 7]), 7)\n #test a max found at start of list\n self.assertEqual(max_list_iter([12, 0, 3, 4, 4]), 12)\n #test a max found in middle of the list\n self.assertEqual(max_list_iter([0, 1, 3, 12, 4, 4]), 12)\n #test an empty list returns None\n self.assertEqual(max_list_iter([]), None)", "def test_creation_list():\n with pytest.raises(ValueError) as __:\n value = list()\n __ = param.Integer(value=value)", "def test_llist_no_parameter_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.add()\n\n assert \"Required argument 'value' (pos 1) not found\" in typeError.value", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # return is_sequence(value) # use from pandas.core.common import is_sequence", "def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception as e:\n pass\n\n return False", "def test_initializing_deque_with_non_iterable_raises_error():\n from deque import Deque\n with pytest.raises(TypeError):\n new_deque = Deque(interable=123456)", "def test_imap_strange():\n assert list(imap(None, 'abc')) == [('a',), ('b',), ('c',)]", "def test_posteriors_unequal_lists(self):\r\n self.assertRaises(ValueError, posteriors, [1, 2, 3], [1])", "def _raise_if(predicate, *args):\n if predicate:\n raise InvalidChunk(*args)", "def __iter__(self):\n return iter(())", "def testIterator(self):\n # Use the iterator to convert storage table to a list.\n iter_rows = [r for r in self._table]\n self.assertSameElements(self._fake_rows, iter_rows)", "def testExplicitGeneratorConvenienceFunctionExceptionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.generator(x.g(8, 9), [10], Exception(\"bogus\"))\n\t\tc.replay()\n\t\tg = x.g(8, 9)\n\t\tself.failUnless(g.next() == 10)\n\t\tself.failUnlessRaises(Exception, g.next)", "def zip_strict(*iterables) -> Iterator[Tuple[Any, ...]]:\n for values in itertools.zip_longest(*iterables, fillvalue=_NO_VALUE):\n if any(value is _NO_VALUE for value in values):\n msg = f'all iterables must have the same length'\n raise ValueError(msg)\n yield values", "def test_invoke_error():\n\n with pywren.invokers.LocalInvoker(\"/tmp/task\") as iv:\n\n wrenexec = pywren.local_executor(iv)\n\n with pywrenext.iterwren.IterExec(wrenexec) as IE:\n\n iter_futures 
= IE.map(except_func, 10, [2])\n print(\"mapped\")\n pywrenext.iterwren.wait_exec(IE)\n assert iter_futures[0].current_iter == 2", "def test_stop_iteration_in_generators_yield_from(\n assert_errors,\n parse_ast_tree,\n code,\n statement,\n exception,\n default_options,\n):\n tree = parse_ast_tree(code.format(statement, exception))\n\n visitor = FunctionDefinitionVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [StopIterationInsideGeneratorViolation])", "def is_iterable(obj):\n try:\n itr = iter(obj)\n del itr\n return True\n except:\n return False", "def _iter_return(tup, fxn, invalid_error):\n\n try:\n val = fxn(*tup)\n except (IndexError, ValueError):\n if invalid_error:\n # Raise the exception if invalid_error indicates\n raise\n else:\n # Otherwise, just return a 'None' value\n return None\n ## end if\n else:\n # Good value; just generate it\n return val\n ## end try", "def get_only(seq: Iterable[T]) -> T:\n it = iter(seq)\n try:\n first_element = it.__next__()\n # we use the sentinel approach rather than the usual (evil) Python \"attempt can catch the\n # exception\" approach to avoid raising zillions of spurious exceptions on the expected\n # code path, which makes debugging a pain\n sentinel = object()\n second_element = next(it, sentinel)\n if second_element is sentinel:\n return first_element\n else:\n got_msg: str\n if isinstance(seq, Sized):\n got_msg = str_list_limited(seq, limit=10)\n else:\n got_msg = f\"{first_element!r}, {second_element!r}, and possibly more.\"\n raise ValueError(f\"Expected one item in sequence but got {got_msg}\")\n except StopIteration:\n raise ValueError(\"Expected one item in sequence but got none\")", "def convert_old_style_list(list_):\n if not isinstance(list_, (tuple, list)) or len(list_) != 2:\n return list_, False\n first_item, second_item = list_\n if second_item == []:\n return [first_item], True\n try:\n # see if second item is iterable\n iter(second_item)\n except TypeError:\n return list_, False\n old_style_list = True\n new_second_item = []\n for sublist in second_item:\n item, old_style_list = convert_old_style_list(sublist)\n if not old_style_list:\n break\n new_second_item.extend(item)\n if old_style_list:\n second_item = new_second_item\n return [first_item, second_item], old_style_list", "def test_max_list_iter_empty(self):\n tlist = []\n self.assertEqual(max_list_iter(tlist), None) #Tests output for an empty list", "def test_llist_get_element_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.get()" ]
[ "0.800803", "0.7905361", "0.77610767", "0.77083707", "0.77032125", "0.75699127", "0.7554352", "0.70976245", "0.70190036", "0.67172563", "0.6027455", "0.58875996", "0.5732145", "0.57207274", "0.57120436", "0.57120436", "0.5675813", "0.5638414", "0.5630779", "0.5580102", "0.55597013", "0.5530454", "0.55101407", "0.546797", "0.54459465", "0.5433452", "0.54111403", "0.5401874", "0.5352246", "0.5348218", "0.534085", "0.5335252", "0.5328757", "0.5326088", "0.5320112", "0.529609", "0.5287348", "0.52802086", "0.52791286", "0.5266933", "0.5261733", "0.5258054", "0.5253755", "0.52381885", "0.52148974", "0.520684", "0.51939124", "0.51882106", "0.5173691", "0.5172963", "0.51709306", "0.51607364", "0.51414645", "0.5135525", "0.5128399", "0.5126536", "0.51161975", "0.5108428", "0.51075655", "0.50931895", "0.50912243", "0.5086296", "0.50859874", "0.5073033", "0.5072405", "0.5068129", "0.505482", "0.5047196", "0.50450873", "0.50390315", "0.5036935", "0.5023412", "0.5004836", "0.5001656", "0.49770874", "0.4969064", "0.49636677", "0.4956055", "0.4945599", "0.49414876", "0.49397776", "0.49396896", "0.49331084", "0.49261275", "0.4925572", "0.49195114", "0.49179536", "0.4912367", "0.49120885", "0.49059662", "0.49046963", "0.49025124", "0.49010903", "0.48907095", "0.4889218", "0.48888996", "0.4887419", "0.4886383", "0.48831016", "0.48761037" ]
0.77557844
3
If an iterlist_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output.
def test_iterlist_op_1():

    @ops.iterlist_op
    def f(x):
        return [4, 5, 6]

    result = f(iter([1, 2, 3]))  # Passing in an iterator, as expected

    assert(isinstance(result, list)), f"{result}"
    assert(result == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def safe_iterator(i):\n return i or []", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func", "def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def _operate_recursive(\n function: Callable[..., V], iterables: RecursiveIterable[V], result: RecursiveList[V]\n) -> RecursiveList[V]:\n for items in zip(*iterables): # type: ignore\n if any(isinstance(item, Iterable) for item in items): # 
pylint: disable=W1116\n sub_result = [] # type: ignore\n _operate_recursive(function, items, sub_result)\n else:\n sub_result = function(*items) # type: ignore\n result.append(sub_result)\n return result", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def wrap_generator(generator, wrapper_function):\n for item in generator:\n yield wrapper_function(item)", "def test_iter_method(self):\n ref = mock.Mock()\n ref.side_effect = [{'rows': [1,2,3]}, {'rows': []}]\n rslt = Result(ref)\n collection = [x for x in rslt]\n self.assertEqual(collection, [1,2,3])\n\n run_iter = lambda x: [y for y in x]\n\n rslt = Result(ref, skip=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)\n\n rslt = Result(ref, limit=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)", "def cotakewhile(function, iterator):\n results = []\n\n def checkTake(shouldTake, item):\n if shouldTake == True:\n results.append(item)\n return item\n\n def dotake(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkTake, item)\n return d\n\n def dostop(takeResult):\n return takeResult is None\n\n cfc = _CoFunCaller(resultCollector=dotake, stopFunction=dostop)\n return cfc.coiterate(iterator).addCallback(lambda _: results)", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def _wrapper(func, args):\n return func(*args)", "def __call__(self, func, *args, **kwargs):\n\n @wraps(func) # To keep its own namespace\n def wrapper(*args, **kwargs):\n gener = self.__iter__()\n return func(gener, *args, **kwargs)\n return wrapper", "def operate_recursive(function: Callable[..., V], *iterables: RecursiveIterable[V]) -> RecursiveList[V]:\n return _operate_recursive(function, iterables, [])", "def yield_wrapped_ops(\n self,\n fn: Union[\n Callable,\n Tuple[Union[str, Collection[str]], Union[Callable, Collection[Callable]]],\n ],\n exclude=(),\n domain: Union[str, int, Collection] = None,\n ) -> Iterable[FnOp]:\n if isinstance(fn, tuple):\n name_path, fn_path = fn\n else:\n name_path, fn_path = (), fn\n\n fun_path = cast(Tuple[Callable, ...], astuple(fn_path, None))\n fun = fun_path[-1]\n\n if isinstance(fun, Operation):\n ## pass-through operations\n yield fun\n return\n\n def param_to_modifier(name: str, param: inspect.Parameter) -> str:\n return (\n optional(name)\n # is optional?\n if param.default is not inspect._empty # type: ignore\n else keyword(name)\n if param.kind == Parameter.KEYWORD_ONLY\n else name\n )\n\n given_name_path = astuple(name_path, None)\n\n decors_by_name = get_autograph_decors(fun, {}, domain or self.domain)\n\n for decor_name, decors in decors_by_name.items() or ((None, {}),):\n if given_name_path and not decor_name:\n name_path = decor_path = given_name_path\n else: # Name in decors was \"default\"(None).\n name_path = decor_path = astuple(\n (decor_name if decor_name else func_name(fun, fqdn=1)).split(\".\"),\n None,\n )\n assert decor_path, locals()\n\n if given_name_path:\n # Overlay `decor_path` over `named_path`, right-aligned.\n name_path = tuple(*name_path[: -len(decor_path)], *decor_path)\n\n fn_name = str(name_path[-1])\n if fn_name in exclude:\n continue\n overrides = self._from_overrides(decor_path)\n\n # TODO: support an extra 
overrides source, in ``wrap_funcs()``.\n op_data = (\n ChainMap(overrides, decors)\n if (overrides and decors)\n else overrides\n if overrides\n else decors\n )\n if op_data:\n log.debug(\"Autograph overrides for %r: %s\", name_path, op_data)\n\n op_props = \"needs provides renames, inp_sideffects out_sideffects\".split()\n needs, provides, override_renames, inp_sideffects, out_sideffects = (\n op_data.get(a, UNSET) for a in op_props\n )\n\n if needs is UNSET:\n needs = [...]\n needs = aslist(needs, \"needs\")\n if ... in needs:\n sig = inspect.signature(fun)\n fun_needs = [\n param_to_modifier(name, param)\n for name, param in sig.parameters.items()\n if name != \"self\" and param.kind is not Parameter.VAR_KEYWORD\n ]\n ## Insert object as 1st need for object-methods.\n #\n if len(fun_path) > 1:\n clazz = fun_path[-2]\n # TODO: respect autograph decorator for object-names.\n class_name = name_path[-2] if len(name_path) > 1 else clazz.__name__\n if is_regular_class(class_name, clazz):\n log.debug(\"Object-method %s.%s\", class_name, fn_name)\n fun_needs.insert(0, camel_2_snake_case(class_name))\n\n needs = [\n fneed if n is ... else n\n for n, fneed in itt.zip_longest(needs, fun_needs, fillvalue=...)\n ]\n\n if provides is UNSET:\n if is_regular_class(fn_name, fun):\n ## Convert class-name into object variable.\n provides = camel_2_snake_case(fn_name)\n elif self.out_patterns:\n provides = self._deduce_provides_from_fn_name(fn_name) or UNSET\n if provides is UNSET:\n provides = ()\n provides = aslist(provides, \"provides\")\n\n needs, provides = self._apply_renames(\n (override_renames, self.renames), (needs, provides)\n )\n\n if inp_sideffects is not UNSET:\n needs.extend(\n (\n i\n if is_sfx(i)\n else sfxed(*i)\n if isinstance(i, tuple)\n else token(i)\n )\n for i in aslist(inp_sideffects, \"inp_sideffects\")\n )\n\n if out_sideffects is not UNSET:\n provides.extend(\n (\n i\n if is_sfx(i)\n else sfxed(*i)\n if isinstance(i, tuple)\n else token(i)\n )\n for i in aslist(out_sideffects, \"out_sideffects\")\n )\n\n if self.full_path_names:\n fn_name = self._join_path_names(*name_path)\n\n op_kws = self._collect_rest_op_args(decors)\n\n yield FnOp(fn=fun, name=fn_name, needs=needs, provides=provides, **op_kws)", "def pipeline(func):\n @wraps(func)\n def process(img_or_iterable, *args, **kwargs):\n if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):\n _len = len(img_or_iterable)\n s = SliceableIterable(img_or_iterable, range(_len), _len)\n s._proc_func = lambda image: func(image, *args, **kwargs)\n return s\n else:\n # Fall back on normal behavior of func, interpreting input\n # as a single image.\n return func(img_or_iterable)\n\n if process.__doc__ is None:\n process.__doc__ = ''\n process.__doc__ = (\"This function has been made pims-aware. When passed\\n\"\n \"a pims reader or SliceableIterable, it will return a \\n\"\n \"new SliceableIterable of the results. 
When passed \\n\"\n \"other objects, its behavior is \"\n \"unchanged.\\n\\n\") + process.__doc__\n return process", "def threadsafe(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def __iter__(self):\n return iter(())", "def runner(func, iterable, arguments, local=False):\n if local:\n return [func(i, *arguments) for i in iterable]\n else:\n if iterable:\n return group(func.s(i, *arguments) for i in iterable)().get()\n else:\n # group()() returns None if group is called with no arguments,\n # leading to an AttributeError with get().\n return []", "def scanl(func, start, itr):\n if not callable(func):\n raise TypeError(\"First argument to scanl must be callable\")\n itr = iter(itr)\n\n return _scanl(func, start, itr)", "def _iterator_codegen(resty):\n\n def codegen(context, builder, sig, args):\n [d] = args\n [td] = sig.args\n iterhelper = context.make_helper(builder, resty)\n iterhelper.parent = d\n iterhelper.state = iterhelper.state.type(None)\n return impl_ret_borrowed(\n context,\n builder,\n resty,\n iterhelper._getvalue(),\n )\n\n return codegen", "def test_invoke_error_map():\n\n with pywren.invokers.LocalInvoker(\"/tmp/task\") as iv:\n\n wrenexec = pywren.local_executor(iv)\n\n with pywrenext.iterwren.IterExec(wrenexec) as IE:\n\n iter_futures = IE.map(except_func, 10, [12, 3, 5, 20])\n print(\"mapped\")\n pywrenext.iterwren.wait_exec(IE)\n all_final_iters = [f.current_iter for f in iter_futures]\n print(all_final_iters)\n assert all_final_iters == [10, 3, 5, 10]", "def generator_wrapper(iterable):\n\n num_items = len(iterable)\n for idx in range(num_items):\n yield iterable[idx]", "def next(self, in_op):\n raise NotImplementedError", "def interleave(inter, f, seq):\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)", "def map(self, callable, iterable):\n iterable = executor.get_actual_value(iterable)\n return super(Executor, self).map(callable, iterable)", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def test_invoke_error():\n\n with pywren.invokers.LocalInvoker(\"/tmp/task\") as iv:\n\n wrenexec = pywren.local_executor(iv)\n\n with pywrenext.iterwren.IterExec(wrenexec) as IE:\n\n iter_futures = IE.map(except_func, 10, [2])\n print(\"mapped\")\n pywrenext.iterwren.wait_exec(IE)\n assert iter_futures[0].current_iter == 2", "def __call__(self, iterable):\n if self._ordered:\n imap = self._distrubtor.imap\n else:\n imap = self._distrubtor.imap_unordered\n\n for result in imap(iterable):\n yield result", "def flatmap(iterable, function_to_list):\n for element in iterable:\n list_block = function_to_list(element)\n for result_value in list_block:\n yield result_value", "def for_each(f: Callable[[A], Maybe[B]], iterable: Iterable[A]\n ) -> Maybe[Iterable[B]]:\n return cast(Maybe[Iterable[B]], map_m_(Just, f, iterable))", "def __iter__(self):\r\n return self._iterate()", "def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list", "def on_operation(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None", "def comap(function, iterator):\n results = []\n cfc = _CoFunCaller(function, 
resultCollector=results.append)\n d = cfc.coiterate(iterator)\n d.addCallback(lambda _: results)\n return d", "def mkiter(item):\n # FIXME: don't really need to construct a list\n if item is None:\n return iter(())\n elif isIterable(item):\n return iter(item)\n else:\n return iter([item])", "def mapg(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n for x in C:\n yield f(x)", "def wrapped_func(ret_val, *args, **kwargs):\n val = func(*args, **kwargs)\n ret_val.append(val)", "async def anext(iterator):\n return await iterator.__anext__()", "async def anext(iterator):\n return await iterator.__anext__()", "def flatmap2(func, *iterable) -> Iterator:\n return map(func, chain(*chain(*chain(*iterable))))", "def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return", "def test_unwrap_or_else(\n self, start: Result[int, int], fn: t.Callable[[int], int], exp: int\n ) -> None:\n assert start.unwrap_or_else(fn) == exp", "def with_iter(contextmanager):\n with contextmanager as iterable:\n for item in iterable:\n yield item", "def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)", "def next(space, w_iterator, w_default=None):\n try:\n return space.next(w_iterator)\n except OperationError as e:\n if w_default is not None and e.match(space, space.w_StopIteration):\n return w_default\n raise", "def foldl(func, start, itr):\n return _foldl(func, start, iter(itr))", "def wrapit(fn):\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print(\"Error in XSLT extension: %s\" % e)\n raise\n return inside", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def flatmap(func, *iterable) -> Iterator:\n return map(func, chain(*chain(*iterable)))", "def map(iterable, function):\n for x in iterable:\n yield function(x)", "def wrap_generator(func):\n\n async def _wrapped(*a, **k):\n r, ret = None, []\n gen = func(*a, **k)\n while True:\n try:\n item = gen.send(r)\n except StopIteration:\n break\n if inspect.isawaitable(item):\n r = await item\n else:\n r = item\n ret.append(r)\n\n if len(ret) == 1:\n return ret.pop()\n return ret\n\n return _wrapped", "def iter_except(function, exception):\n try:\n while True:\n yield function()\n except exception:\n return", "def threadsafe_generator(f):\n\n def g(*a, **kw):\n return ThreadsafeIter(f(*a, **kw))\n\n return g", "def map(function, iterable):\n\n return [function(x) for x in iterable]", "def chained_operation(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except DistributedOperationException as e:\n operation = f\"{function.__module__}.{function.__name__}\"\n raise DistributedOperationException(\n f\"Error found while calling `{operation}`. 
Please see the earlier error for more details.\"\n ) from e\n\n return wrapper", "def cofilter(function, iterator):\n results = []\n\n def checkFilter(notfiltered, item):\n if notfiltered == True:\n results.append(item)\n\n def dofilter(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkFilter, item)\n return d\n\n d = _CoFunCaller(resultCollector=dofilter).coiterate(iterator)\n d.addCallback(lambda _: results)\n return d", "def threadsafe_generator(f):\n\tdef g(*a, **kw):\n\t\treturn threadsafe_iter(f(*a, **kw))\n\treturn g", "def testIterWithException(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.__iter__()\n\t\tc.setReturn(1)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ti = x.__iter__()\n\t\tself.failUnless(i.next() == 1)\n\t\tself.failUnlessRaises(Exception, i.next)", "def wrapper(*args, **kwargs):\n return func(*args, **kwargs)", "def wrapper(*args, **kwargs):\n return func(*args, **kwargs)", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def consumer(func):\n\n from functools import wraps\n\n @wraps(func)\n def wrapper(*args,**kw):\n gen = func(*args, **kw)\n gen.next()\n return gen\n return wrapper", "def wrapped_f(*args):\n input_docs = func(*args)\n output_doc_cnt = 0\n # split input_docs into chunks of size self.batch_size\n for batchiter in iter_n(input_docs, int(self.batch_size / len(self.input_types))):\n output_docs = self.key_lookup_batch(batchiter)\n for odoc in output_docs:\n # print debug information if the original id is the in the debug list\n if \"dt_debug\" in odoc:\n if isinstance(self.debug, list) and odoc[\"dt_debug\"][\"orig_id\"] in self.debug:\n self.logger.debug(\"DataTransform Debug doc['dt_debug']: {}\".format(odoc[\"dt_debug\"]))\n output_doc_cnt += 1\n yield odoc\n self.logger.info(\"wrapped_f Num. 
output_docs: {}\".format(output_doc_cnt))\n self.logger.info(\"DataTransform.histogram: {}\".format(self.histogram))", "def http_error_tolerant_generator(f):\n @wraps(f)\n def inner(*args, **kwargs):\n try:\n for i in f(*args, **kwargs):\n yield i\n except HttpError as exc:\n logging.exception(\"Unhandled HttpError: %s\" % exc)\n raise StopIteration\n return inner", "def getIter(object):\n iterator = None\n try:\n iterator = iter(object)\n except TypeError:\n pass\n return iterator", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def PeekIterable(iterable):\n try:\n head_element = iterable.next()\n new_iterable = itertools.chain([head_element], iterable)\n return head_element, new_iterable\n except StopIteration:\n return None, iterable", "def make_iterable(arg):\n return arg if is_iterable(arg) else (arg,)", "def __iter__(self):\n return iter(self.wrappers)", "def _shifter(iterable, offset):\n if not offset:\n return iterable\n return [i + offset for i in iterable]", "def wrap_call(*args, return_idx=0):\n return args[return_idx]", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n\n return g", "def visit_ListComp(self, node):\n try:\n (generator,) = node.generators\n except ValueError:\n raise NotImplementedError(\"Only single loop comprehensions are allowed\")\n\n names = find_names(generator.target)\n argslist = [ast.arg(arg=name.id, annotation=None) for name in names]\n if len(names) <= 1:\n signature = ast.arguments(\n args=argslist,\n vararg=None,\n kwonlyargs=[],\n kw_defaults=[],\n kwarg=None,\n defaults=[],\n )\n else:\n signature = ast.List(elts=argslist, ctx=ast.Load())\n\n array = generator.iter\n lam_sig = functools.partial(ast.Lambda, args=signature)\n\n filters = generator.ifs\n if filters:\n filt = ast.BoolOp(op=ast.And(), values=filters)\n # array.filter\n method = ast.Attribute(value=array, attr=\"filter\", ctx=ast.Load())\n # array.filter(func)\n array = ast.Call(func=method, args=[lam_sig(body=filt)], keywords=[])\n\n method = ast.Attribute(value=array, attr=\"map\", ctx=ast.Load())\n mapped = ast.Call(func=method, args=[lam_sig(body=node.elt)], keywords=[])\n result = self.visit(mapped)\n return result", "def __iter__(self):\n return self.ListIterator(self.first)", "def imap_c(func):\n return functools.partial(imap, func)", "def _call_or_ret(self, item, *args):\n if callable(item):\n return item(*args)\n return item" ]
[ "0.76466507", "0.76008874", "0.7570353", "0.7508546", "0.74234086", "0.7396815", "0.7382202", "0.73407215", "0.7291801", "0.72297966", "0.6824224", "0.666192", "0.6128793", "0.60040385", "0.5923186", "0.58794075", "0.58676404", "0.57020146", "0.56718487", "0.5595165", "0.5567609", "0.5557475", "0.55147606", "0.5493095", "0.5481294", "0.54799074", "0.5437992", "0.5430538", "0.5417626", "0.53754437", "0.5374055", "0.5325826", "0.53027433", "0.52974933", "0.52934116", "0.52850735", "0.5265419", "0.5258489", "0.52580535", "0.52571523", "0.52566254", "0.52507293", "0.5229665", "0.5224327", "0.5190938", "0.51908785", "0.51891106", "0.51830447", "0.51743865", "0.5165853", "0.51656103", "0.5153795", "0.5144202", "0.5107201", "0.50870043", "0.50870043", "0.50802433", "0.50736254", "0.50732636", "0.5062559", "0.5058222", "0.50578076", "0.50416744", "0.5037482", "0.5015727", "0.5009421", "0.5006608", "0.500496", "0.4997559", "0.49945012", "0.49887952", "0.49626926", "0.49614212", "0.4960751", "0.49591509", "0.49481028", "0.49481028", "0.4931525", "0.4926924", "0.4921052", "0.4920642", "0.49194565", "0.49160406", "0.49048814", "0.49041018", "0.48946986", "0.48923767", "0.48873106", "0.48852935", "0.48852935", "0.48852935", "0.48852935", "0.48852935", "0.48852935", "0.48852935", "0.48839286", "0.48828185", "0.48791268", "0.48790443", "0.48761386" ]
0.76173055
1
If an iterlist_op is given something besides an iterator as input, raise a ValueError.
def test_iterlist_op_2():

    @ops.iterlist_op
    def f(x):
        return [4, 5, 6]

    with pytest.raises(ValueError):
        f([1, 2, 3])  # Passing in a list instead of an iterator
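The `ops` module under test is not included in this dump, so the decorator below is only a hypothetical sketch of the contract the iterlist_op test rows describe: `iterlist_op` is assumed to take exactly one positional argument, require that argument to be an iterator, and require the wrapped function to return a list.

import collections.abc
import functools

def iterlist_op(f):
    # Hypothetical sketch, not the library's actual implementation.
    @functools.wraps(f)
    def wrapper(arg):
        # Reject non-iterator inputs such as plain lists.
        if not isinstance(arg, collections.abc.Iterator):
            raise ValueError(f"iterlist_op expected an iterator, got {type(arg)}")
        result = f(arg)
        # The wrapped function must hand back a list, not an iterator.
        if not isinstance(result, list):
            raise ValueError(f"iterlist_op expected a list result, got {type(result)}")
        return result
    return wrapper

Under that sketch, passing a list raises before `f` runs, and returning an iterator raises afterwards, which matches this row and the related test_iterlist_op_3 row further down.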
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def safe_iterator(i):\n return i or []", "def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def 
_ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_make_np_iterable_type_error():\n with pytest.raises(TypeError):\n _ = uc._make_np_iterable(\"s\")", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def test_raises_typeerror_if_rows_not_list(self):\n def result():\n return num_islands([{}, \"test\", 123])\n\n self.assertRaises(TypeError, result)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def _raise_if(predicate, *args):\n if predicate:\n raise InvalidChunk(*args)", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def _is_iterable_non_string(arg):\n return (hasattr(arg, \"__iter__\") or hasattr(arg, \"__getattr__\")) and not isinstance(arg, str)", "def is_iterable(obj):\n return isinstance(obj, (list, tuple, types.GeneratorType)) or \\\n (not isinstance(obj, (int, str, dict)) and\n bool(getattr(obj, \"next\", False)))", "def is_iterable(arg):\n return (\n isinstance(arg, collections.Iterable)\n and not isinstance(arg, str)\n )", "def test_missing_generic_args(self):\n import System\n #TODO specify clearly which exception is appropriate here\n self.assertRaises(Exception, System.Collections.Generic.List)", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def test_iter(\n self, start: Result[int, int], exp: t.Tuple[int, ...]\n ) -> None:\n assert tuple(start.iter()) == exp", "def test_list_increment_with_missing_bin(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def 
test_list_no_even_same(self):\n argument = [1, 3, 3, 7]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def test_list_size_one_no_even(self):\n argument = [1]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def next(self, in_op):\n raise NotImplementedError", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def test_initializing_deque_with_non_iterable_raises_error():\n from deque import Deque\n with pytest.raises(TypeError):\n new_deque = Deque(interable=123456)", "def test_list_no_even(self):\n argument = [1, 3, 5, 7]\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def py_raise(*xs):\n raise NotImplemented", "def _check_iterable(self):\n if self.theoretical_size is None:\n raise TypeError(\"This `fixture_ref` has not yet been initialized, so it cannot be unpacked/iterated upon. \"\n \"This is not supposed to happen when a `fixture_ref` is used correctly, i.e. as an item in\"\n \" the `argvalues` of a `@parametrize` decorator. Please check the documentation for \"\n \"details.\")\n if self.theoretical_size == 1:\n raise TypeError(\"This fixture_ref does not represent a tuple of arguments, it is not iterable\")", "def is_iterable(element):\n return isinstance(element, (set, list, tuple))", "def test_Validator_iter_errors_two_arguments(self):\n\n validator = validators.Draft7Validator({})\n with self.assertWarns(DeprecationWarning) as w:\n error, = validator.iter_errors(\"foo\", {\"type\": \"number\"})\n\n self.assertEqual(error.validator, \"type\")\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Passing a schema to Validator.iter_errors is deprecated \",\n ),\n )", "def test_llist_no_parameter_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.add()\n\n assert \"Required argument 'value' (pos 1) not found\" in typeError.value", "def is_iterable(value):\n # noinspection PyUnresolvedReferences\n return hasattr(value, '__iter__') and hasattr(value, '__getitem__')", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def test_invalid_op_inputs_with_wrong_types(self, data, description):\n with self.assertRaises(TypeError, msg=description):\n tfx.get_op(data, tf.Graph())", "def _check_args(self, args):\n if not isinstance(args, list) or not len(args) >= 2:\n raise FunctionArgumentException(\"Argument of attribute getter \"\n \"function '%s' must be a list of \"\n \"indeces; got: '%s'\" % (\n self.name,\n args\n ))\n\n if not is_homogeneous(args, (str, int)):\n raise FunctionArgumentException(\n \"'%s': argument must be a list of strings; got: '%s'\" %\n (self.name, args)\n )", "def test_iterable_len_doesnt_match_input_size(self):\n num_args = 10\n for iter_len in [5, 10, 20]:\n expected_args_sum = min(iter_len, num_args)\n\n # Test for normal list (range is considered a normal list as it implements __len__ and such)\n with self.subTest(iter_len=iter_len, input='list'):\n chunks = list(chunk_tasks(range(num_args), iterable_len=iter_len, n_splits=1))\n total_args = sum(map(len, chunks))\n self.assertEqual(total_args, expected_args_sum)\n self.assertEqual(list(range(expected_args_sum)), list(chain.from_iterable(chunks)))\n\n # Test for an actual generator (range does not really behave like one)\n with self.subTest(iter_len=iter_len, input='generator/iterator'):\n chunks = 
list(chunk_tasks(iter(range(num_args)), iterable_len=iter_len, n_splits=1))\n total_args = sum(map(len, chunks))\n self.assertEqual(total_args, expected_args_sum)\n self.assertEqual(list(range(expected_args_sum)), list(chain.from_iterable(chunks)))", "def test_foreach_in_non_first_position_raises_error(self):\n with pytest.raises(AssertionError) as exc_info:\n list(parser.generate_commands(yaml.load(\"\"\"\n - something\n - foreach: [A,B]\n \"\"\")))\n assert (\"'foreach' may only be specified at the beginning of a sequence\" in\n str(exc_info.value))", "def test_sum_list_string_should_raise_exception(self):\n\n with self.assertRaises(TypeError):\n sum(['banana', 'apple'])", "def test_iterator_input():\n empty_iterator = iter(())\n transactions = empty_iterator\n itemsets, rules = apriori(transactions, 0.2, 0.2)\n assert itemsets == {} and rules == []\n\n transactions = [(1, 2), (1, 2), (1, 3), (1, 4), (1, 3)]\n transactions_iter = iter(transactions)\n itemsets1, rules1 = apriori(transactions_iter, 0.2, 1)\n itemsets2, rules2 = apriori(transactions, 0.2, 1)\n assert len(rules1) == len(rules2)\n for i in range(len(rules1)):\n assert rules1[i] == rules2[i]", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def _validate_new_input(inp):\n if type(inp) == list:\n for item in inp:\n _validate_new_input(item)\n elif callable(inp):\n assert getattr(inp, _ATTRIBUTE_EXCEPTION_SAFE, False), (\n f\"New function argument '{inp}' passed to original function is not exception-safe.\"\n \" Please decorate the function with `exception_safe_function` or \"\n \"`pickalable_exception_safe_function`\"\n )\n else:\n assert hasattr(inp, \"__class__\") and type(inp.__class__) in [\n ExceptionSafeClass,\n ExceptionSafeAbstractClass,\n ], (\n f\"Invalid new input '{inp}'. 
New args / kwargs introduced to `original` function \"\n \"calls by patched code must either be functions decorated with \"\n \"`exception_safe_function_for_class`, instances of classes with the \"\n \"`ExceptionSafeClass` or `ExceptionSafeAbstractClass` metaclass safe or lists of \"\n \"such exception safe functions / classes.\"\n )", "def test_RestrictingNodeTransformer__visit_NotIn_List():\n assert restricted_eval('2 not in [1, 2, 3]') is False", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_validate_positive_integer_list():\n with pytest.raises(ValueError):\n validate_positive_integer_list(0.5, 1)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([0.5, 0, 5], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list([1], 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(0, 2)\n\n with pytest.raises(ValueError):\n validate_positive_integer_list(-1, 2)\n\n assert validate_positive_integer_list(1, 2) == [1, 1]", "def test_neg_operate_list_operation_bin_notlist(self):\n key = (\"test\", \"demo\", 1)\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"age\", \"index\": 2, \"val\": 9}]\n\n try:\n (key, _, _) = self.as_connection.operate(key, list)\n\n except e.BinIncompatibleType as exception:\n assert exception.code == 12", "def EOF_or_raise(f):\n try:\n f.next()\n except StopIteration:\n return\n else:\n raise Exception(str(f))", "def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception as e:\n pass\n\n return False", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def is_iterable(x: Any) -> bool:\r\n return isinstance(x, collections.abc.Iterable) and not isinstance(x, (str, bytes))", "def test_ordered_cmp_not_a_function(cls):\n with pytest.raises(ValueError) as err:\n orderedstructs.SkipList(object, 14)\n assert err.value.args[0] == \\\n 'Argument \"cmp_func\" to __init__ must be a callable object not an \"int\" object.'", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def test_badargs(self):\n self.assertRaises(TypeError, isint, [])\n self.assertRaises(TypeError, isint, {})\n self.assertRaises(TypeError, isint, None)\n return", "def _to_int(maybe_iter):\n if not isinstance(maybe_iter, str) and isinstance(maybe_iter, abc.Iterable):\n return tuple([_to_int(a) for a in maybe_iter])\n try:\n return int(maybe_iter)\n except ValueError:\n return maybe_iter", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_filter_args_error_msg():\r\n nose.tools.assert_raises(ValueError, filter_args, f, [])", "def test_collect(\n self, iterable: t.Iterable[Result[int, str]], exp: Result[int, str]\n ) -> None:\n assert Result.collect(iterable) == exp", "def test_likelihoods_unequal_list_lengths(self):\r\n self.assertRaises(ValueError, likelihoods, [1, 2], [1])", "def test_badxvaluewithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, [1, 2], 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')", "def test_graph_with_list_fail():\n try:\n node_list = [\"slippery 
list\"]\n node_list.append(Node({'A':['B','C']}))\n node_list.append(Node({'B':['C','D']}))\n node_list.append(Node({'C':['D']}))\n node_list.append(Node({'D':['C']}))\n g = Graph(node_list)\n except Exception as e:\n assert str(e) == 'invalid node provided!'", "def _is_good_iterable(obj):\n return _is_iterable(obj) and _has_str_elems(obj)", "def unpack_iterator_input(iterator):\n try:\n next_element = iterator.get_next()\n except errors.OutOfRangeError:\n raise RuntimeError('Your dataset iterator ran out of data; '\n 'Make sure that your dataset can generate '\n 'required number of samples.')\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n 'Please provide model inputs as a list or tuple of 2 or 3 '\n 'elements: (input, target) or (input, target, sample_weights) '\n 'Received %s' % next_element)\n if len(next_element) == 2:\n x, y = next_element\n weights = None\n else:\n x, y, weights = next_element\n else:\n x = next_element\n y = None\n weights = None\n return x, y, weights", "def test_defined_in_iter():\n\n @type_checked\n def _run_test(thing:[(int, str, str)]):\n for group in thing:\n assert isinstance(group[0], int)\n assert isinstance(group[1], str)\n assert isinstance(group[2], str)\n assert len(thing) == 4\n\n _run_test(thing=[\n (12.3, None, False),\n (\"12.1\", True, 1),\n (False, 10, 12.1),\n (True, 14.9, None),\n ])", "def test_enforce_iterable():\n formatter = TabularOutputFormatter()\n loremipsum = (\n \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod\".split(\n \" \"\n )\n )\n\n for format_name in formatter.supported_formats:\n formatter.format_name = format_name\n try:\n formatted = next(formatter.format_output(zip(loremipsum), [\"lorem\"]))\n except TypeError:\n assert False, \"{0} doesn't return iterable\".format(format_name)", "def test_invalid(self):\n a = np.ones((10, 10))\n ai = np.ones((10, 2), dtype=np.intp)\n\n # sanity check\n take_along_axis(a, ai, axis=1)\n\n # not enough indices\n assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)\n # bool arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)\n # float arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)\n # invalid axis\n assert_raises(AxisError, take_along_axis, a, ai, axis=10)", "def make_iterable(arg):\n return arg if is_iterable(arg) else (arg,)", "def _validate_node(self, node):\n if not isinstance(node, self._Node):\n raise TypeError('Invalid object type!')\n if node._container != self:\n raise ValueError('Node does not belong to this list!')\n if node._index < 0 or node._index >= self._size:\n raise ValueError('Invalid node!')" ]
[ "0.78835404", "0.7705365", "0.7613191", "0.7498913", "0.7423901", "0.74038625", "0.73198646", "0.67817676", "0.6730599", "0.6505109", "0.5887717", "0.5857515", "0.58052135", "0.5753452", "0.5708452", "0.5689192", "0.56823623", "0.5681861", "0.56544685", "0.56081665", "0.5602864", "0.55661017", "0.5566092", "0.55545145", "0.5540586", "0.54962283", "0.54371804", "0.54371804", "0.53806293", "0.53567666", "0.5340147", "0.5330103", "0.5328899", "0.5314975", "0.5301957", "0.5292369", "0.5283503", "0.5281148", "0.5263967", "0.5259464", "0.5246498", "0.5228478", "0.522201", "0.5221708", "0.5214029", "0.5204889", "0.5201878", "0.51973444", "0.51941663", "0.51924187", "0.5172599", "0.51670235", "0.515416", "0.5152562", "0.5149235", "0.5144218", "0.5142574", "0.5122349", "0.51167566", "0.5115059", "0.511447", "0.50933844", "0.5072678", "0.5068403", "0.5055603", "0.505499", "0.5051888", "0.5049248", "0.5043317", "0.5031473", "0.5015253", "0.49991313", "0.4991088", "0.4990985", "0.4957774", "0.49459124", "0.49381775", "0.49315405", "0.493039", "0.49163392", "0.49125043", "0.4906175", "0.49047077", "0.49043614", "0.4896966", "0.48952454", "0.4891933", "0.48833045", "0.48824307", "0.48799497", "0.48615092", "0.4857329", "0.48513564", "0.4850468", "0.48497477", "0.484852", "0.4847668", "0.4844849", "0.48416921", "0.48405156" ]
0.7661832
2
If an iterlist_op returns something besides a list as output, raise a ValueError.
def test_iterlist_op_3():

    @ops.iterlist_op
    def f(x):
        return iter([4, 5, 6])  # Returning an iterator instead of a list

    with pytest.raises(ValueError):
        result = f(iter([1, 2, 3]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return 
iterable\n else:\n return list(iterable)", "def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)", "def _is_list(val):\n\n return isinstance(val, list)", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_raises_typeerror_if_rows_not_list(self):\n def result():\n return num_islands([{}, \"test\", 123])\n\n self.assertRaises(TypeError, result)", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def _is_list(item):\n return isinstance(item, list)", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def is_list(value):\n return isinstance(value, list) or None", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def _ensure_iterable(x):\n if isinstance(x[0], Iterable):\n if len(x) > 1:\n raise TypeError(\"Either Iterable or variable argument list expected\")\n return x[0]\n else:\n return x", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def convert_old_style_list(list_):\n if not isinstance(list_, (tuple, list)) or len(list_) != 2:\n return list_, False\n first_item, second_item = list_\n if second_item == []:\n return [first_item], True\n try:\n # see if second item is iterable\n iter(second_item)\n except TypeError:\n return list_, False\n old_style_list = True\n new_second_item = []\n for sublist in second_item:\n item, old_style_list = convert_old_style_list(sublist)\n if not old_style_list:\n break\n new_second_item.extend(item)\n if old_style_list:\n second_item = new_second_item\n return [first_item, second_item], old_style_list", "def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # 
return is_sequence(value) # use from pandas.core.common import is_sequence", "def is_list(value):\n return isinstance(value, list)", "def safe_iterator(i):\n return i or []", "def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def test_max_list_iter(self):\n tlist = None\n with self.assertRaises(ValueError): # used to check for exception\n max_list_iter(tlist)", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))", "def test_creation_list():\n with pytest.raises(ValueError) as __:\n value = list()\n __ = param.Integer(value=value)", "def test_enforce_iterable():\n formatter = TabularOutputFormatter()\n loremipsum = (\n \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod\".split(\n \" \"\n )\n )\n\n for format_name in formatter.supported_formats:\n formatter.format_name = format_name\n try:\n formatted = next(formatter.format_output(zip(loremipsum), [\"lorem\"]))\n except TypeError:\n assert False, \"{0} doesn't return iterable\".format(format_name)", "def aslist(something):\n return something if isinstance(something, list) else [something]", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def isIterable(obj):\n return isinstance(obj, ListType)", "def make_iterable(arg):\n return arg if is_iterable(arg) else (arg,)", "def test_collect(\n self, iterable: t.Iterable[Result[int, str]], exp: Result[int, str]\n ) -> None:\n assert Result.collect(iterable) == exp", "def test_make_np_iterable_type_error():\n with pytest.raises(TypeError):\n _ = uc._make_np_iterable(\"s\")", "def _to_list( self, input ):\n import numpy\n listtypes = (list, tuple, numpy.ndarray)\n if input == None:\n return None\n elif type(input) in listtypes:\n return list(input)\n else:\n return [input]", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def test_make_np_iterable_list(val):\n val_rec = uc._make_np_iterable(val)\n\n assert isinstance(val_rec, np.ndarray)\n assert len(val_rec) == len(val)", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def iterable(arg):\n return isinstance(arg, collections.Iterable) and not isinstance(arg, six.string_types)", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def 
as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def is_list(self) -> bool:\n return False", "def isnondet(r):\n return isinstance(r, list) # BAD", "def is_iterable(value):\n # noinspection PyUnresolvedReferences\n return hasattr(value, '__iter__') and hasattr(value, '__getitem__')", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def _as_list(value):\n if not isinstance(value, list):\n value = [value]\n return value", "def is_iterable(element):\n return isinstance(element, (set, list, tuple))", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def test_is_iterable(self):\r\n msg_list = messages.MessageList()\r\n\r\n # Adds 3 Message objects to the list.\r\n msg_list.push(messages.StringMessage(\"a\"))\r\n msg_list.push(messages.StringMessage(\"b\"))\r\n msg_list.push(messages.StringMessage(\"c\"))\r\n\r\n self.assertEqual([\"ab\", \"bb\", \"cb\"], [x.msg + \"b\" for x in msg_list])", "def test_neg_operate_list_operation_bin_notlist(self):\n key = (\"test\", \"demo\", 1)\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"age\", \"index\": 2, \"val\": 9}]\n\n try:\n (key, _, _) = self.as_connection.operate(key, list)\n\n except e.BinIncompatibleType as exception:\n assert exception.code == 12", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def test_list_increment_with_missing_bin(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def test_wrong_input_on_creation(self):\r\n\r\n self.assertRaises(TypeError, TypedListType, None)", "def is_list ( 
self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )", "def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False", "def is_nonstring_iterable(x):\n if isinstance(x, primitive_iterable):\n return False\n return isinstance(x, collections.Iterable)", "def value_error(var, _list):\n\n #if not any(r):\n if len(_list) == 2:\n divisor = \" or \"\n elif len(_list) > 2:\n divisor = \", \"\n\n print(_list)\n print(len(_list))\n raise ValueError(\"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n _list)), var_type=var))", "def output_is_valid(output):\n\n is_correct = type(output) is list\n for member in output:\n is_correct *= type(member) is list\n for item in member:\n is_correct *= type(item) is tuple and len(item) == 2\n\n return bool(is_correct)", "def is_list_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=list)", "def eval_list(self, value):\n\n okay = True\n count = 0\n for v in value.elts:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def _data_sanity_checks(self, explore_iterable):\n data_list = []\n\n for val in explore_iterable:\n\n if not self.f_supports(val):\n raise TypeError(\n \"%s is of not supported type %s.\" % (repr(val), str(type(val)))\n )\n\n if not self._values_of_same_type(val, self._default):\n raise TypeError(\n \"Data of `%s` is not of the same type as the original entry value, \"\n \"new type is %s vs old type %s.\"\n % (self.v_full_name, str(type(val)), str(type(self._default)))\n )\n\n data_list.append(val)\n\n if len(data_list) == 0:\n raise ValueError(\"Cannot explore an empty list!\")\n\n return data_list", "def safelist(listable):\n if type(listable) == str:\n return [listable]\n else:\n return listable.tolist()", "def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))", "def is_iterable(arg):\n return (\n isinstance(arg, collections.Iterable)\n and not isinstance(arg, str)\n )", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n output = input[0]\n if not isinstance(output, list):\n output = [output]\n else:\n output = list(input)\n return output", "def _list_assert(actual_list, expected_list):\n for actual, expected in itertools.izip_longest(actual_list, expected_list):\n _value_assert(None, actual, expected)", "def test_raises_typeerror_if_not_list(self):\n def result(): return find_rotation_point(\"test\")\n self.assertRaises(TypeError, result)", "def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))" ]
[ "0.77509636", "0.76935655", "0.76132554", "0.7517712", "0.7484861", "0.7235555", "0.70362914", "0.7030836", "0.6869668", "0.66613793", "0.6354757", "0.630842", "0.6297937", "0.61403257", "0.60431194", "0.6029395", "0.6019079", "0.6019079", "0.59903765", "0.5965533", "0.59531224", "0.5932811", "0.59256685", "0.59196365", "0.5908537", "0.59047323", "0.5896841", "0.58962613", "0.58743244", "0.5842213", "0.58404267", "0.5809849", "0.5808365", "0.5808079", "0.5764337", "0.5764337", "0.5755813", "0.5709706", "0.57062256", "0.5695532", "0.56640977", "0.5617957", "0.5603097", "0.5603097", "0.55986804", "0.55985785", "0.55696726", "0.5535148", "0.5515099", "0.55098563", "0.54947937", "0.5491005", "0.54863185", "0.54786557", "0.5477798", "0.54643315", "0.5463907", "0.5460647", "0.5455579", "0.5452346", "0.544969", "0.54353", "0.5427315", "0.54188174", "0.5414796", "0.5400754", "0.5388075", "0.53842306", "0.53578615", "0.535717", "0.5348136", "0.53475523", "0.534565", "0.53285104", "0.5321059", "0.5313423", "0.5306183", "0.5303109", "0.5301796", "0.52937776", "0.5293271", "0.52887404", "0.52866983", "0.5284959", "0.52736884", "0.5259184", "0.5256486", "0.52464247", "0.524243", "0.5236327", "0.5232863", "0.5230922", "0.52285177", "0.52234036", "0.5221205", "0.5218178", "0.5206671", "0.52044964", "0.5203094", "0.5200796" ]
0.75992316
3
Constant evaluation should ignore the existing fitness function and set the fitness of all individuals to the same value.
def test_const_evaluate():
    pop = test_population
    pop = ops.const_evaluate(pop, value=123456789.0)

    for ind in pop:
        assert(pytest.approx(123456789.0) == ind.fitness)
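Neither `ops.const_evaluate` nor `test_population` is defined in this dump; assuming individuals expose a writable `fitness` attribute, the operator's contract amounts to a sketch like:

def const_evaluate(population, value):
    # Hypothetical sketch: ignore any fitness function and assign a constant fitness.
    for ind in population:
        ind.fitness = value
    return population

This is only an illustration of the behaviour the query sentence and the assertion describe, not the library's real signature.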
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _recompute_fitness(self):\n for cur in self.population:\n if cur['fitness'] is None:\n cur['fitness'] = self.op.fitness(cur['individual'])", "def fitness(self):\n pass", "def evaluate(self, fitness):\n self.fitness = fitness(self.phenotype)", "def fitness_sharing(self):\n for gene in self.population:\n gene.fitness = gene.raw_fitness\n return\n def dist(gene1, gene2):\n \"\"\"Return distence between two gene\"\"\"\n return abs(len(gene1.goal) - len(gene2.goal))\n for gene in self.population:\n raw_fitnesses = [e.raw_fitness for e in self.population if dist(e, gene) <= 5]\n gene.fitness = sum(raw_fitnesses) / len(raw_fitnesses)", "def fitness(self):\n # TO BE DECIDED\n return 1", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def fitness(self,*val):\n if len(val): self._fitness = val[0]\n return self._fitness", "def evaluate_fitness(individuals, grammar, fitness_function):\n for ind in individuals:\n ind.phenotype, ind.used_codons = grammar.generate(ind.genome)\n if ind.phenotype != None:\n if not hasattr(fitness_function, \"COEVOLUTION\") or \\\n not fitness_function.COEVOLUTION:\n ind.evaluate(fitness_function)\n if hasattr(fitness_function, \"COEVOLUTION\") and fitness_function.COEVOLUTION:\n fitness_function.__call__(individuals)", "def evaluate_fitness(self):\r\n fitness = 0.0\r\n # TO-DO: Write your fitness evaluation code here:\r\n \r\n if self.graph is not None:\r\n try:\r\n fitness = 1.0 / algorithms.sdr_widgerson(\r\n self.graph, self.values[0], self.values[1]\r\n )\r\n except RuntimeError:\r\n fitness = 1 / (2 ** 63)\r\n else:\r\n raise RuntimeError(\"Particle graph has not been set!\")\r\n \r\n # END TO-DO\r\n self.current_fitness = fitness\r\n \r\n # Check if we've got a better result\r\n if fitness > self.best_fitness:\r\n # Update the best performance accordingly\r\n self.best_fitness = fitness\r\n self.personal_best = self.values[:]\r\n self.best_coloring = copy.deepcopy(self.graph)\r\n \r\n self.sync = True", "def fitness(individual):\n different_pos = 0\n return different_pos", "def _calculate_fitness(self):\n pass", "def evaluate_fitness_against_random(self):\n #self.normalize() # Normalize before evaluating\n for i in tqdm(range(self.population_size)):\n self.individual.load_chromosome(self.population[i])\n self.fitness[i] = evaluate_agent(self.individual, self.evaluations_per_chromosome * 4) / (self.evaluations_per_chromosome * 4)\n print(self.fitness)", "def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator", "def interactive_evaluate_fitness(individuals, grammar, callback):\n evaluate_fitness(individuals, grammar, lambda x: 0.0)\n fitness_values = callback()\n for i, individual in enumerate(individuals):\n if individual.phenotype != None:\n individual.fitness = fitness_values[i]", "def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # while the termination criteria is not satisfied, makes another generation\n while not self.termination_criteria.satisfied(self.generation, time.time()-start_time, self.population):\n self.generation += 1\n #print str(self.generation)\n next_generation = []\n\n if self.elitism:\n # Keeps the 10% best individuals\n best_individuals = 
heapq.nsmallest(int(0.1*self.population_size), self.population, lambda individual: individual.get_fitness())\n next_generation += copy.deepcopy(best_individuals)\n\n # select genetic operation probabilistically\n # this is a roulette wheel selection\n operations = numpy.random.choice(['reproduction', 'crossover', 'mutation'], size=self.population_size, p=[self.reproduction, self.crossover, self.mutation]).tolist()\n individuals = numpy.random.choice(self.population, p=self.normalized_fitness, size=2*self.population_size, replace=True).tolist()\n\n while len(next_generation) < self.population_size:\n operation = operations.pop()\n individual = individuals.pop()\n individual.get_fitness() # enforce fitness calculation\n\n if operation == 'reproduction':\n next_generation.append(individual)\n elif operation == 'crossover':\n individual2 = individuals.pop()\n individual2.get_fitness() # enforce fitness calculation\n individual1, individual2 = individual.crossover(individual2)\n next_generation.append(individual1)\n next_generation.append(individual2)\n elif operation == 'mutation':\n individual1 = individual.mutate()\n next_generation.append(individual1)\n\n self.population = next_generation\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n\n mean = numpy.mean(self.population_fitness)\n std = numpy.std(self.population_fitness)\n min = self.population_fitness.min()\n\n info_mean = pandas.DataFrame([[self.generation, mean, min, std]], columns=[\"generation\", \"mean\", \"min\", \"std\"])\n self.generation_info = self.generation_info.append(info_mean, ignore_index=True)", "def setFitness(self, fitness):\n self._fitness = fitness", "def c_test_fitness_function(self, function):\r\n return 1", "def c_test_fitness_function(self, function):\r\n return 1", "def test_update_fitness():\n herb1 = Herbivore(10)\n original_fitness = herb1.fitness\n herb1.feeding(10)\n nt.assert_not_equal(herb1.fitness, original_fitness)", "def merit(problem, eval_func):\n def fitness(indiv):\n quali = eval_func(phenotype(indiv))\n return quali\n return fitness", "def evaulate_fitness_against_pop(self):\n #self.normalize() # Normalize before evaluating\n\n self.fitness = [0] * self.population_size # Reset fitness\n evaluations = [0] * self.population_size\n agents = [self.individual_type(), self.individual_type(), self.individual_type(), self.individual_type()]\n for _ in tqdm(range(self.population_size * self.evaluations_per_chromosome)):\n sample = [random.randint(0,self.population_size - 1) for _ in range(4)] # sample random chromosomes\n for i in range(len(sample)):\n evaluations[sample[i]] += 1\n agents[i].load_chromosome(self.population[sample[i]]) # Load chromosome of random sample [i] into agent[i]\n win_index = evaluate_agents(agents)\n self.fitness[sample[win_index]] += 1 # increment fitness of winner -> amount of wins is obtained\n for i, fitness in enumerate(self.fitness):\n self.fitness[i] = fitness / evaluations[i] # Divide fitness with amount of times chromosome have been evaluated -> win rate is obtained.", "def test_fitness():\n herb1 = Herbivore(0)\n herb2 = Herbivore(80)\n nt.assert_not_equal(herb1.fitness, 
herb2.fitness)\n herb3 = Herbivore(20, 0)\n herb4 = Herbivore(20, 80)\n nt.assert_not_equal(herb3.fitness, herb4.fitness)", "def _evaluate_fitness(self, population: Population):\n for n, individual in enumerate(population.individuals):\n\n # Dataset extraction using individual features\n X_data = self._create_dataset(individual, self._X)\n\n # Get scores for each fitness strategy (each objective)\n scores = [fitness_func.eval_fitness(X=X_data, y=self._y, num_feats=len(population.features))\n for fitness_func in self.fitness]\n\n # If the number of features is an objective\n if self.optimize_features:\n scores.append(self.features_function(individual=individual,\n total_feats=len(self._population.features)))\n\n # Create a solution\n individual.fitness = Solution(scores)\n\n return population", "def calculate_fitness(self, **kwargs):\n self.__fitness = self.fitness_function.calculate(self.__genes, **kwargs)\n self.num_fitness_eval += 1\n return self.__fitness", "def fitness(self):\n params = np.array([self['p{}'.format(i)] for i in range(n_pars)])\n \n return func(params)", "def adapt(self, fitness: np.ndarray):\n max_val = np.amax(fitness, axis=0)\n min_val = np.amin(fitness, axis=0)\n self.values = np.multiply(\n self.initial_values,\n np.tile(np.subtract(max_val, min_val), (self.number_of_vectors, 1)),\n )\n self.normalize()", "def mutation(self, ind):\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)", "def test_mutate(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n\n self.assertFalse(ga.generations[-1].new)\n\n for i in range(10):\n ga.mutate()\n\n self.assertTrue(ga.generations[-1].new)", "def evaluate(self, state):\n\n fitness = np.sum(state)\n self.num_evals += 1\n #print(self.num_evals)\n return fitness", "def regularization(fitness):\n if all(val == 0 for val in fitness):\n return np.array([1/i for i in range(fitness.shape[0])])\n return fitness/sum(fitness)", "def calculate_population_fitness(self):\n for individual in tqdm(self.current_generation):\n individual.fitness = self.fitness_function(\n individual.genes, self.seed_data)\n log.info(f'Current best validation accuracy: {max([x.fitness for x in self.current_generation])}')", "def calcIndividualFitness(self, indiv):\n\t\tt = time()\n\t\tself.simulator.simulate(indiv)\n\t\tself.evaluator.evaluate(indiv)\n\t\tt = time() - t\n\t\tself.simulatorTime += t", "def calculate_fitness(self):\n fitness = (self.matrix * self.weight_matrix).sum()\n self.fitness = fitness\n return fitness", "def fitness_function(neural_net):\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness", "def calculate_fitness_value(self):\n sequence = ''.join(self.genes)\n if sequence in seq_to_fitness:\n self.fitness_value = seq_to_fitness[sequence]\n else:\n self.fitness_value = polly_stats.get_amount_of_bad_regions(\n self.genes, self.environment)", "def simplify_modifications(self, simulationProblem, objFunction, fitness):\n constraintsOrig = self.constraints.copy()\n for k in constraintsOrig.keys():\n del self.constraints[k]\n try:\n res = simulationProblem.simulate(self)\n newFitness = objFunction.get_fitness(res)\n except 
Exception:\n newFitness = -1.0\n if round(fitness, 12) != round(newFitness, 12):\n self.constraints[k] = constraintsOrig[k]", "def adjust_fitness_scores(self):\n\n for species in self.species:\n species.adjust_fitness()", "def update_fitness_for_population(self):\n def wrapper(func, *args, **kwargs):\n \"\"\"func wrapper\"\"\"\n return func, args, kwargs\n\n with Pool(processes=4) as pool:\n for gene in self.population:\n func, args, kargs = wrapper(gene.update_fitness_for_proof,\n self.proof, self.limit_hyp,\n self.limit_goal)\n pool.apply_async(func(*args, **kargs))", "def __calculate_fitness(self):\n \"\"\"\n Equation:\n f(fenotype) = cos(fenotype) * fenotype + 2\n \"\"\"\n self.fitness = math.cos(self.fenotype) * self.fenotype + 2", "def run(self):\n self._fitness = self._fitness_func(self._bitstring, *self._args, **self._kwargs)", "def get_fitness(self):\n if self.fitness == 0:\n self.fitness = 1 / self.get_cost()\n return self.fitness", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def fitness(individual, target_sum, target_mult):\r\n sum = abs(reduce(add, [i + 1 for i, pipe in enumerate(individual) if pipe == 0], 0) - 36)\r\n mult = abs(reduce(mul, [i + 1 for i, pipe in enumerate(individual) if pipe == 1], 0) - 360)\r\n return abs(target_sum - sum) + abs(target_mult - mult)", "def calculate_fitness_test(self, **kwargs):\n if self.genes_test is None:\n raise ValueError(\"Genes test is not set!\")\n\n self.__fitness_test = self.fitness_function.calculate(self.__genes_test, **kwargs)\n self.num_fitness_eval += 1", "def updateFitness(self):\r\n for candidate in self.candidates:\r\n candidate.updateFitness()\r\n return", "def update(self, globalBest: list):\n try:\n vNext: list = []\n xNext: list = []\n\n for i in range(self.dimension):\n r1: float = random.uniform(0, 1)\n r2: float = random.uniform(0, 1)\n\n vNext.append(\n self.inertia * self.v[i]\n + self.aCognitive * (self.bestPosition[i] - self.x[i]) * r1\n + self.aSocial * (globalBest[i] - self.x[i]) * r2\n )\n xNext.append(self.x[i] + vNext[i])\n\n self.x: list = xNext\n self.v: list = vNext\n\n if self.dataset is not None:\n currentFitness: float = self.func(*self.x, self.dataset)\n else:\n currentFitness: float = self.func(*self.x)\n\n if currentFitness <= self.bestValue:\n self.bestValue: float = currentFitness\n self.bestPosition: list = self.x\n\n # DEBUG\n self.coordinatesX.append(self.bestPosition[0])\n self.coordinatesY.append(self.bestPosition[1])\n self.coordinatesZ.append(self.bestValue)\n\n except IndexError:\n print(\n \"WARN: Dimensions of global best must match amount of parameters to be optimized.\"\n )\n raise IndexError", "def __init__(self, fitness_function,\n rel_noise=lambda dim: 1.1 * np.random.randn() / dim,\n abs_noise=lambda dim: 1.1 * np.random.randn()):\n Function.__init__(self, fitness_function)\n self.rel_noise = rel_noise\n self.abs_noise = abs_noise", "def run(self, fitness_function):\r\n return run_helper(depth, fitness_function, 0, 0, 0) # fixme\r", "def mutatePopulation(self, population):\n\t\tfor i in range(int(math.ceil(self.selectionRate * len(population)))):\n\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population[:self.populationSize]", "def mutateAndTest(org):\n 
try:\n c = deepcopy(org) #for editing every value of multidimensional array(our image), without affecting original\n c.mutate()\n i1 = c.drawImage()\n i2 = globalTarget\n return (fitness(i1,i2),c)\n except KeyboardInterrupt:\n pass", "def adjust_fitness(self):\n # see genetics.cpp:2668 \"Can change the fitness of the organisms in the\n # species to be higher for very new species (to protect them)\"\n # NOTE I don't believe this is found in the paper\n # Looks like they used a 1 for this param anyway, so it didn't do\n # anything\n\n cur_max = self.get_champion().fitness\n if cur_max > self.max_fitness_ever:\n self.max_fitness_ever = cur_max\n self.gen_last_improved = self.pop.gen_num\n\n for g in self.genomes:\n g.adj_fitness = g.fitness/len(self)\n\n # genetics.cpp:2699 Kill species that haven't progressed for a long\n # time by dividing fitness of all individuals in spec by 100. Weird way\n # to do it.\n if ((self.pop.gen_num - self.gen_last_improved) >\n self.pop.species_dropoff_age):\n for g in self.genomes:\n g.adj_fitness *= .01", "def test_update_fitness_carn():\n carn1 = Carnivore(30)\n carn2 = Carnivore(30)\n carn1.feeding([Herbivore(age=90) for _ in range(50)])\n nt.assert_not_equal(carn1.fitness, carn2.fitness)", "def fitness(self, tree):\n key = ast.dump(tree)\n if key in self.fitness_cache:\n return self.fitness_cache[key]\n\n # Save defs\n original_defs = {}\n for name in self.toplevel_defs(tree):\n if name in self.globals:\n original_defs[name] = self.globals[name]\n else:\n warnings.warn(f\"Couldn't find definition of {repr(name)}\")\n\n assert original_defs, f\"Couldn't find any definition\"\n\n if self.log >= 3:\n print(\"Repair candidate:\")\n print_content(astor.to_source(tree), '.py')\n print()\n\n # Create new definition\n try:\n code = compile(tree, '<Repairer>', 'exec')\n except ValueError: # Compilation error\n code = None\n\n if code is None:\n if self.log >= 3:\n print(f\"Fitness = 0.0 (compilation error)\")\n\n fitness = 0.0\n return fitness\n\n # Execute new code, defining new functions in `self.globals`\n exec(code, self.globals)\n\n # Set new definitions in the namespace (`__globals__`)\n # of the function we will be calling.\n function = self.debugger.function()\n for name in original_defs:\n function.__globals__[name] = self.globals[name]\n\n fitness = self.run_tests(validate=False)\n\n # Restore definitions\n for name in original_defs:\n function.__globals__[name] = original_defs[name]\n self.globals[name] = original_defs[name]\n\n if self.log >= 3:\n print(f\"Fitness = {fitness}\")\n\n self.fitness_cache[key] = fitness\n return fitness", "def get_fitness(self):\n hard_conflicts = self.get_conflicts()\n soft_conflicts = self.get_soft_conflicts()\n hard_fitness = 1 / hard_conflicts if hard_conflicts != 0 else math.inf\n soft_fitness = 1 / soft_conflicts if soft_conflicts != 0 else math.inf\n return [hard_fitness, soft_fitness]", "def calc_fitness_by_gen(self):\r\n f_sum = 0\r\n # first loop gives us the sum of the fitness\r\n for c, _ in self.temp_hist_by_gen.items():\r\n f_sum += c.fitness()\r\n # now we calc the chances by fitness of each one\r\n for c, _ in self.temp_hist_by_gen.items():\r\n self.temp_hist_by_gen[c] = c.fitness() / f_sum", "def self_mutate(self) -> 'Individual':\n self.mutator_cls.mutate_inplace(self.chromosome)\n self.fitness.cache_clear()\n return self", "def fitness(self) -> float:\n return self._fitness", "def test__evolve(self):\n f0 = 3 * np.random.rand(10, 5)\n ga = population.Evolver(f0, eval_one_max)\n\n new = 0\n ngen = 10000\n 
for i in range(ngen):\n ga._evolve()\n #print ga.generations[-1].individuals[-1]\n #print ga.generations[-1].fitness[-1]\n\n self.assertEqual(len(ga.generations), ngen + 1)", "def __simulate_generation(self):\n global seq_to_fitness\n\n # 1. calculate fitness value of each chromosome.\n threads = []\n for chromosome in self.chromosomes:\n t = threading.Thread(target=chromosome.calculate_fitness_value())\n t.start()\n threads.append(t)\n\n for thread in threads:\n thread.join()\n\n for chromosome in self.chromosomes:\n key = ''.join(chromosome.genes)\n if key not in seq_to_fitness:\n seq_to_fitness[key] = chromosome.fitness_value\n\n # 2. sort the chromosomes by its fitness value and reverse the list,\n # because the chromosome with the lowest fitness value is the best.\n self.chromosomes.sort(key=lambda c: c.fitness_value)\n self.chromosomes = self.chromosomes[::-1]\n\n # 3. divide the chromosome into two halves and delete the weakest\n # chromosome.\n index_half = len(self.chromosomes) // 2\n lower_half = self.chromosomes[:index_half]\n upper_half = self.chromosomes[index_half:]\n\n # 4. delete four more weak chromosomes.\n del lower_half[0]\n random.shuffle(lower_half)\n\n for i in range(0, 3):\n lower_half.pop()\n\n # 5. crossover: fill the four vacancies in the population with new\n # chromosomes. The genes of the new chromosomes are mixtures of the\n # genes of two randomly chosen strong chromosomes.\n c1 = random.choice(upper_half)\n c2 = random.choice(upper_half)\n new_chromosomes = [\n Chromosome(c1.genes[:self.chromosome_size // 2]\n + c2.genes[self.chromosome_size // 2:],\n self.environment),\n Chromosome(c1.genes[self.chromosome_size // 2:]\n + c2.genes[:self.chromosome_size // 2],\n self.environment),\n Chromosome(c2.genes[:self.chromosome_size // 2]\n + c1.genes[self.chromosome_size // 2:],\n self.environment),\n Chromosome(c2.genes[self.chromosome_size // 2:]\n + c1.genes[:self.chromosome_size // 2],\n self.environment)]\n\n # 6. Get the fittest chromosome of this generation and perform\n # mutations on the remaining chromosomes.\n # The mutation probability for the upper half is 5 percent and\n # the mutation probability for the lower half is 10 percent.\n self.fittest_chromosome = upper_half.pop()\n self.__mutate(lower_half, 10)\n self.__mutate(upper_half, 5)\n\n # 7. 
Rejoin all chromosomes.\n upper_half.append(self.fittest_chromosome)\n self.chromosomes = lower_half + upper_half + new_chromosomes\n self.generation += 1", "def _fitness(individual, X, y):\n yhat = individual.evaluate(X)\n return ((y - yhat) ** 2).sum()", "def computeFitnessList(self,fitnessFunction):\r\n self.fitness_list = []\r\n cmlsum = 0\r\n for individual in self.population_list:\r\n cmlsum = cmlsum + fitnessFunction(individual, self.board_size, self.pos_bits_size)\r\n self.fitness_list.append(cmlsum)", "def step(individuals, grammar, replacement, selection, fitness_function, best_ever):\n #Select parents\n parents = selection(individuals)\n #Crossover parents and add to the new population\n new_pop = []\n while len(new_pop) < GENERATION_SIZE:\n new_pop.extend(onepoint_crossover(*random.sample(parents, 2)))\n #Mutate the new population\n new_pop = list(map(int_flip_mutation, new_pop))\n #Evaluate the fitness of the new population\n evaluate_fitness(new_pop, grammar, fitness_function)\n #Replace the sorted individuals with the new populations\n individuals = replacement(new_pop, individuals)\n best_ever = max(best_ever, max(individuals))\n return individuals, best_ever", "def policy_evaluation_on_grid_world() -> ValueFunction:\n return get_policy_evaluation(grid_world, 0.9999, 0.0001)", "def _fitness_model__(self, solution=None, minmax=0):\n return self.objective_func(solution) if minmax == 0 else 1.0 / (self.objective_func(solution) + self.EPSILON)", "def freeze_evaluator(self):\n self._evaluator_overrides = self._get_evaluators()", "def default_fitness(maximise):\n if maximise:\n return -100000.0\n else:\n return 100000.0", "def setFitness(self, fit):\n self.fitness = fit", "def mutate(offspring, individuals, params, *args):\n\n prob_mut = params.get(\"prob_mutation\", 0.3)\n prob_stand = 1 / 3 * prob_mut\n prob_point = 1 / 3 * prob_mut\n prob_mono = prob_mut - prob_stand - prob_point\n prob_replace = prob_mut\n r = np.random.rand()\n\n for ind in offspring:\n if r <= prob_stand:\n # Standard mutation\n #\n # This picks a random subtree anywhere within the tree\n rand_node = choice(ind.nodes[1:])\n tree = ind.grow_tree(method=\"grow\", depth=rand_node.depth, ind=rand_node)\n rand_node.value = tree.value\n rand_node.roots = tree.roots\n\n # This picks a whole subtree at depth=1 under the linear node\n # rand_subtree = np.random.randint(len(ind.roots))\n # del ind.roots[rand_subtree]\n # ind.grow_tree(method=\"grow\", ind=ind)\n\n ind.nodes = ind.get_sub_nodes()\n\n elif r <= prob_point + prob_stand:\n # Small mutation\n for node in ind.nodes[1:]:\n if np.random.rand() < prob_replace and callable(node.value):\n value = choice(node.function_set)\n while node.value.__code__.co_argcount != value.__code__.co_argcount:\n value = choice(node.function_set)\n node.value = value\n elif np.random.rand() < prob_replace:\n node.value = choice(node.terminal_set)\n ind.nodes = ind.get_sub_nodes()\n\n elif r <= prob_mono + prob_point + prob_stand:\n # Mono parental\n swap_nodes = sample(ind.nodes[1:], 2)\n tmp_value = swap_nodes[0].value\n tmp_roots = swap_nodes[0].roots\n swap_nodes[0].value = swap_nodes[1].value\n swap_nodes[0].roots = swap_nodes[1].roots\n swap_nodes[1].value = tmp_value\n swap_nodes[1].roots = tmp_roots\n ind.nodes = ind.get_sub_nodes()\n\n else:\n pass", "def eval_fitness(genomes, config):\n for _, genome in genomes:\n cppn = neat.nn.FeedForwardNetwork.create(genome, config)\n network = ESNetwork(SUBSTRATE, cppn, DYNAMIC_PARAMS)\n net = network.create_phenotype_network()\n\n 
sum_square_error = 0.0\n\n for xor_inputs, xor_expected in zip(XOR_INPUTS, XOR_OUTPUTS):\n new_xor_input = xor_inputs + (1.0,)\n net.reset()\n\n for _ in range(network.activations):\n xor_output = net.activate(new_xor_input)\n\n sum_square_error += ((xor_output[0] - xor_expected[0])**2.0)/4.0\n\n genome.fitness = 1 - sum_square_error", "def force_evaluator_update(self):\n if self._evaluator_overrides is not None:\n self._evaluator_overrides = self._create_evaluators()", "def __calculated_initial_fitness(self) -> None:\n\n logger.info('Start calculating initial fitness')\n\n __calculated_values = (self._commands_holder_train.execute_commands(\n self.__initial_parameters))\n\n self.__initial_fitness = (self._evaluate_fitness(\n __calculated_values, self._training_dataset))\n\n logger.info('Initial fitness calculation finishes')", "def run(self, iterations):\n # print(f'Before:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome before: {self.best_genome.array}, fitness={self.best_genome.fitness} ')\n\n mutator = Rand1MutationOperator(self.population, self.bounds, 0.2)\n mixer = ExponentialCrossoverOperator(self.minfun)\n replacer = ElitistReplacementOperator()\n\n for _ in range(iterations):\n candidate_population = Population(None, None, 0)\n for target in self.population.collection:\n # List with genomes who will be the donors\n mutant = mutator.apply(target)\n # Genome modified by replacing a few random positions\n candidate_genome = mixer.apply(target, mutant)\n\n candidate_population.add(candidate_genome)\n\n # Targets are replaced by candidates from the population if candidate has less fitness than target\n self.population = replacer.apply(self.population, candidate_population)\n\n # print(f'After:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome after: {self.best_genome.array}, fitness={self.best_genome.fitness} ')", "def test_fitness_carn():\n carn1 = Carnivore(0)\n carn2 = Carnivore(80)\n nt.assert_not_equal(carn1.fitness, carn2.fitness)\n carn3 = Carnivore(20, 0)\n carn4 = Carnivore(20, 80)\n nt.assert_not_equal(carn3.fitness, carn4.fitness)", "def stochastic_universal_selection(self, fitness, num_parents):\n\n fitness_sum = numpy.sum(fitness)\n if fitness_sum == 0:\n self.logger.error(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n raise ZeroDivisionError(\"Cannot proceed because the sum of fitness values is zero. 
Cannot divide by zero.\")\n probs = fitness / fitness_sum\n probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.\n probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.\n\n curr = 0.0\n\n # Calculating the probabilities of the solutions to form a roulette wheel.\n for _ in range(probs.shape[0]):\n min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]\n probs_start[min_probs_idx] = curr\n curr = curr + probs[min_probs_idx]\n probs_end[min_probs_idx] = curr\n probs[min_probs_idx] = 99999999999\n\n pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.\n first_pointer = numpy.random.uniform(low=0.0, \n high=pointers_distance, \n size=1)[0] # Location of the first pointer.\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_pointer = first_pointer + parent_num*pointers_distance\n for idx in range(probs.shape[0]):\n if (rand_pointer >= probs_start[idx] and rand_pointer < probs_end[idx]):\n parents[parent_num, :] = self.population[idx, :].copy()\n parents_indices.append(idx)\n break\n\n return parents, numpy.array(parents_indices)", "def evaluationFunction(individual, modelOmega, mean):\n logValue = float('Inf')\n genomeModel = models.model.newModel(modelOmega[0].definitions)\n genomeModel.bins = list(individual)\n modelLambda = models.model.newModel(modelOmega[0].definitions)\n modelLambda.bins = calcNumberBins(genomeModel.bins, mean)\n for i in range(len(modelOmega)):\n tempValue = calcLogLikelihood(modelLambda, modelOmega[i])\n calcLogLikelihood.cache_clear()\n if tempValue < logValue:\n logValue = tempValue\n return logValue,", "def apply_test(self):\n\n if self.__genes_test is None or self.__fitness_test is None:\n raise ValueError(\"Test values should not be None.\")\n\n self.genes = self.__genes_test\n self.__fitness = self.__fitness_test\n\n self.__genes_test = None\n self.__fitness_test = None", "def evaluateAll(population: list):\n worst = 0\n best = sys.maxsize\n sum = 0\n probabilites = []\n for i in range(len(population)):\n eval = population[i][1]\n if eval > worst:\n worst = eval\n if eval < best:\n best = eval\n for j in range(len(population)):\n fitness = updateFitness(population[j], worst)\n sum += fitness\n for k in range(len(population)):\n prob = updateProb(population[k], sum)\n probabilites.append(prob)\n\n print(\"worst chromosome makespan:\", worst, \"best chromosome makespan:\",best,file=out_file)\n return probabilites", "def init_population(self):\n for idx in xrange(0, self.population_size):\n individual = self.individual_factory.create()\n self.population.append(individual)\n\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n\n # In order to roulette wheel selection work with negative values, \n # we sum all fitness values to the absolute value of the most negative plus one\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = 
float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n #print self.population_fitness.min()\n #print self.population_fitness\n #print self.normalized_fitness", "def eaSimple(population, toolbox, cxpb, mutpb, ngen, stats=None,\n halloffame=None, verbose=__debug__):\n # logbook = tools.Logbook()\n # logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in population if not ind.fitness.valid]\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n if halloffame is not None:\n halloffame.update(population)\n\n # Begin the generational process\n gen = 1\n found_best = False\n\n # Run the algorithm until a \"convergence\"\n while gen <= ngen and not found_best:\n # Select the next generation individuals\n offspring = toolbox.select(population, len(population))\n\n # # Vary the pool of individuals\n offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n found_best = fit[0] == 1\n\n # Update the hall of fame with the generated individuals\n if halloffame is not None:\n halloffame.update(offspring)\n\n # Replace the current population by the offspring\n population[:] = offspring\n\n gen += 1\n\n return population,", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def __init__(self, fitness_function=None):\n Function.initialize(self, fitness_function)", "def get_individual_fitness(individual):\r\n fitness = 0\r\n # A COMPLETER\r\n \r\n #Si distance avec le point objectif diminue, alors fitness augmente ?\r\n \r\n return fitness", "def fitness_function(self, population: List[Network]) -> List[Union[float, int]]:\n # The seed changes\n self.last_used_seed += 1\n\n # Snakes are re-generated\n snakes = []\n for n in population:\n snakes.append(Snake(11, Experiment.ExperimentAI(n)))\n\n # Metrics are calculated\n scores, times = self.snake_game.simulate(snakes, self.last_used_seed)\n\n # The fitnesses are calculated\n fitnesses = []\n for i in range(len(scores)):\n f = scores[i]*(1.0 + 1.0/float(times[i]))\n fitnesses.append(f)\n\n return fitnesses", "def GausianMutator(individual, sigma, indpb):\n for idx, gene in enumerate(individual):\n if rand.random() > indpb:\n dtype = gene.type\n if dtype == bool:\n gene.value(not gene.value())\n continue\n\n min_value, max_value = gene.min, gene.max\n\n if not gene.is_interval:\n sigma_v = sigma * (min_value - max_value)\n if dtype == int and sigma_v < 0.5:\n sigma_v = 0.5\n result = math.inf\n i = 0\n while not min_value <= result <= max_value:\n result = rand.gauss(gene.value(), sigma_v)\n if dtype == int:\n result = dif.floor(result)\n\n if i > 10000:\n raise ValueError(\"tried to mutate trading attribute over 10 000 times\")\n i += 1\n\n gene.value(result)\n\n else:\n # finding center for new range\n rng_srt, rng_end, rng_ctr = gene.range_start(), gene.range_end(), gene.range_center()\n min_rng = gene.min_range\n min_rad = min_rng / 2\n rng = rng_end - rng_srt\n rng_rad = rng / 2\n min_rng_ctr, max_rng_ctr = min_value + (min_rng / 2), max_value - (min_rng / 2)\n sigma_c = sigma * (max_rng_ctr - 
min_rng_ctr)\n if dtype == int and sigma_c < 0.5: # to make int variables with small range be able to mutate\n sigma_c = 0.5\n\n if dtype == int and (rng_srt % 1 != 0 or rng_end % 1 != 0):\n raise ValueError(\"int attribute has floating point range\\n\" + gene)\n\n counter = 0\n new_rng_ctr = math.inf\n while new_rng_ctr > max_rng_ctr or new_rng_ctr < min_rng_ctr:\n new_rng_ctr = rand.gauss(rng_ctr, sigma_c)\n if dtype == int:\n new_rng_ctr = dif.floor_to_05(new_rng_ctr)\n if counter >= 10000:\n print(\"min_rng_ctr =\", min_rng_ctr, \"max_rng_ctr =\", max_rng_ctr, rng_ctr, sigma_c)\n raise ValueError(\"tried to generate new range center over 10000 times\")\n counter += 1\n\n max_rad = min(new_rng_ctr - min_value, max_value - new_rng_ctr)\n sigma_r = sigma * (max_rad - (min_rng / 2))\n if dtype == int and sigma_r < 0.5:\n sigma_r = 0.5\n mu = min(rng_rad, max_rad)\n\n new_rng_rad = math.inf\n counter = 0\n while new_rng_rad < min_rad or new_rng_rad > max_rad:\n new_rng_rad = rand.gauss(mu, sigma_r)\n if dtype == int and new_rng_ctr % 1 == 0.5:\n new_rng_rad = dif.floor_to_05(new_rng_rad)\n if new_rng_rad % 0.5 != 0:\n new_rng_rad = math.inf\n elif dtype == int and new_rng_ctr % 1 == 0:\n new_rng_rad = dif.floor(new_rng_rad)\n\n if (counter >= 100):\n print(new_rng_ctr, min_rad, min_value, max_value, sigma_r, sigma)\n raise ValueError(\"tried to generate new range radius over 100 times\")\n counter += 1\n\n gene._range_center = new_rng_ctr\n gene.radius(new_rng_rad)\n return []", "def sample_fitness(individual):\n\n return individual.dataframe.sample(frac=0.1, random_state=0).mean().mean()", "def fitness(self, individual: Individual): # TODO: convert to fitness struct \n j_indiv = self.__convert_individual_p_to_j(individual)\n return self.__convert_fitness_j_to_p(self.way.fitnessGateway(j_indiv))", "def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100", "def calc_sum_fitness(self):\n fitness: float = 0\n for c in self.characters:\n fitness += c.fitness\n self.sum_fitness = round(fitness, 3)", "def calc_fitness(variant_fit, orig_fit, generations, count):\n\n Ne = 1000\n #Determines the value that controls variation within the simulation\n if count <= 0.1 * generations:\n beta = 1e-2\n elif count <= 0.9 * generations:\n #y = mx + b: linearly increases\n slope = (1.1 - 1e-2) / (0.8 * generations)\n beta = (slope * (count - (0.1 * generations))) + 1e-2\n else:\n slope = (1.3 - 1.1) / (0.1 * generations)\n beta = (slope * (count - (0.9 * generations))) + 1.1\n thresholds = 0\n\n #Fitness values are calculated based on the new and current sum of squared values\n xi = calc_x(orig_fit, beta, thresholds)\n xj = calc_x(variant_fit, beta, thresholds)\n\n #Fitness values are compared to determine if a mutation should be accepted\n if xj >= xi:\n return 1.0\n #Deleterious mutations are accepted exponentially\n else:\n exponent = -2 * float(Ne) * (xi - xj)\n return safe_calc(exponent)", "def fitness(self, hVals):\n fitness = 0\n\n board_state = self.board.deep_copy()\n pieces_state = self.pieces.deep_copy()\n\n ai = opponent_AI(self.board, self.pieces)\n\n # tie/ 0 
score\n board1 = [['♜', '♞', '♝', '♛', '♚', '♝', '♞', '♜'],\n ['♟', '♟', '♟', '♟', '♟', '♟', '♟', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n ['♙', '♙', '♙', '♙', '♙', '♙', '♙', '♙'],\n ['♖', '♘', '♗', '♕', '♔', '♗', '♘', '♖']]\n\n # mild white advantage\n board2 = [['♜', '♞', '♝', '♛', '♚', '♝', '♞', '♜'],\n ['♟', '♟', '♟', '♟', '♟', '♟', '♟', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, '♙', None, None, None],\n [None, None, None, None, None, None, None, None],\n ['♙', '♙', '♙', '♙', None, '♙', '♙', '♙'],\n ['♖', '♘', '♗', '♕', '♔', '♗', '♘', '♖']]\n\n # white advantage\n board3 = [[None, None, None, None, None, '♜', '♚', None],\n [None, None, None, None, None, '♟', '♟', '♟'],\n [None, None, '♟', None, '♟', None, None, None],\n [None, '♟', '♙', None, None, None, None, None],\n [None, '♙', None, None, None, None, None, None],\n [None, None, None, None, None, '♘', None, '♙'],\n [None, None, None, None, '♗', '♙', '♙', None],\n [None, None, None, None, None, None, '♔', None]]\n # black advantage\n board4 = [[None, None, None, '♜', None, None, '♚', None],\n [None, None, '♜', None, None, '♟', None, None],\n [None, None, None, None, '♟', None, '♟', None],\n [None, '♟', None, None, '♙', None, None, '♟'],\n [None, '♙', None, '♙', None, None, None, None],\n [None, None, None, None, None, None, None, '♙'],\n [None, None, None, None, None, '♙', '♙', None],\n [None, None, None, None, '♕', None, '♔', None]]\n\n # white advantage\n board5 = [[None, None, None, None, None, None, '♚', None],\n ['♟', None, None, None, '♙', None, None, '♟'],\n [None, '♟', None, None, None, '♕', None, None],\n [None, None, None, '♟', None, None, None, '♔'],\n [None, None, '♟', '♙', None, None, '♙', None],\n [None, '♞', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, '♙'],\n [None, None, None, None, '♛', None, None, None]]\n\n # strong black advantage\n board6 = [[None, '♛', None, None, None, '♗', '♚', None],\n [None, None, None, None, None, '♟', None, '♟'],\n [None, None, '♟', None, None, None, '♟', None],\n [None, '♟', None, '♝', None, None, None, None],\n [None, None, None, None, '♞', None, None, None],\n [None, None, None, None, None, '♘', None, '♙'],\n ['♜', None, None, None, None, None, '♙', '♔'],\n [None, None, None, None, None, None, None, None]]\n\n # even game\n board7 = [['♜', None, '♝', '♛', '♚', '♝', None, '♜'],\n ['♟', '♟', '♟', None, None, '♟', '♟', '♟'],\n [None, None, '♞', '♟', None, '♞', None, None],\n [None, None, None, None, '♟', None, None, None],\n [None, None, None, None, '♙', None, None, None],\n [None, None, '♘', '♙', None, '♘', None, None],\n ['♙', '♙', '♙', None, None, '♙', '♙', '♙'],\n ['♖', None, '♗', '♕', '♔', '♗', None, '♖']]\n\n # B Queen\n board9 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♛', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # B Rook\n board10 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, 
None, None, None, None],\n [None, None, None, '♜', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # B Bishop\n board11 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♝' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # B Knight\n board12 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♞' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n # B Pawn\n board13 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♟' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Queen\n board15 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♕', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Rook\n board16 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♖', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Bishop\n board17 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♗' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Knight\n board18 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♘' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n # W Pawn\n board19 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♙' , None, None, 
None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n board_state.squares = board1\n\n score1 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # encourages heuristic to evaluate black and white pieces equivalently and opposite to each other\n if not (-24000 < score1 < 24000):\n fitness += 3\n\n if not (-12000 < score1 < 12000):\n fitness += 3\n\n if not (-6000 < score1 < 6000):\n fitness += 2\n\n if not (-5000 < score1 < 5000):\n fitness += 2\n\n if not (-4000 < score1 < 4000):\n fitness += 2\n\n if not (-3000 < score1 < 3000):\n fitness += 2\n\n if not (-2000 < score1 < 2000):\n fitness += 1\n\n if not (-1000 < score1 < 1000):\n fitness += 1\n\n if not (-500 < score1 < 500):\n fitness += 1\n\n if not (-400 < score1 < 400):\n fitness += 1\n\n if not (-300 < score1 < 300):\n fitness += 1\n\n if not (-250 < score1 < 250):\n fitness += 1\n\n if not (-200 < score1 < 200):\n fitness += 1\n\n# # If the heuristic needs to be very specific\n# if not (-150 < score1 < 150):\n# fitness += 1\n#\n# if not (-100 < score1 < 100):\n# fitness += 1\n#\n# if not (-75 < score1 < 75):\n# fitness += 1\n#\n# if not (-50 < score1 < 50):\n# fitness += 1\n\n board_state.squares = board2\n\n score2 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score2 > score1:\n fitness += 1\n\n board_state.squares = board3\n\n score3 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score3 > -200:\n fitness += 1\n\n board_state.squares = board4\n\n score4 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score4 < 300:\n fitness += 1\n\n board_state.squares = board5\n\n score5 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score5 > -200:\n fitness += 1\n\n if score3 > score2:\n fitness += 1\n\n if score5 > score2:\n fitness += 1\n\n board_state.squares = board6\n\n score6 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score6 < 500:\n fitness += 1\n\n if score6 < score4:\n fitness += 1\n\n board_state.squares = board7\n\n score7 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # encourages heuristic to evaluate black and white pieces equivalently and opposite to each other\n if not (-24000 < score7 < 24000):\n fitness += 3\n\n if not (-12000 < score7 < 12000):\n fitness += 3\n\n if not (-6000 < score7 < 6000):\n fitness += 2\n\n if not (-5000 < score7 < 5000):\n fitness += 2\n\n if not (-4000 < score7 < 4000):\n fitness += 2\n\n if not (-3000 < score7 < 3000):\n fitness += 2\n\n if not (-2000 < score7 < 2000):\n fitness += 1\n\n if not (-1000 < score7 < 1000):\n fitness += 1\n\n if not (-500 < score7 < 500):\n fitness += 1\n\n if not (-400 < score7 < 400):\n fitness += 1\n\n if not (-300 < score7 < 300):\n fitness += 1\n\n if not (-250 < score7 < 250):\n fitness += 1\n\n if not (-200 < score7 < 200):\n fitness += 1\n\n# if not (-150 < score7 < 150):\n# fitness += 1\n#\n# if not (-100 < score7 < 100):\n# fitness += 1\n#\n# if not (-75 < score7 < 75):\n# fitness += 1\n#\n# if not (-50 < score7 < 50):\n# fitness += 1\n\n board_state.squares = board9\n score9 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board10\n score10 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board11\n score11 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board12\n score12 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n 
board_state.squares = board13\n score13 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # Optimizes Black piece values relative to board impact\n if not (score9 > score10 > score11 > score13 > 0):\n fitness += 1\n if not (score9 > score10 > score12 > score13 > 0):\n fitness += 1\n\n board_state.squares = board15\n score15 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board16\n score16 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board17\n score17 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board18\n score18 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board19\n score19 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # Optimizes White piece values relative to board impact\n if not (0 > score19 > score18 > score16 > score15):\n fitness += 1\n\n if not (0 > score19 > score17 > score16 > score15):\n fitness += 1\n\n if not ((score15) < (score18 + score17) < score16):\n fitness += 1\n\n if not ((score9) > (score11 + score12) > score10):\n fitness += 1\n\n # For troubleshooting\n print(fitness, \": \", hVals)\n\n return fitness", "def calcFitness (self) :\n fitnessArray = [[8, 4, 2, 1],\n [16, 8, 4, 2],\n [32, 16, 8, 4],\n [64, 32, 16, 8]]\n # fitnessArray = [[160, 80, 5, 4],\n # [320, 40, 4, 3],\n # [640, 20, 3, 2],\n # [1280, 10, 2, 1]]\n fitness = 0\n for k in range(4) :\n for i in range (4) :\n fitness += self.grid[k,i] * fitnessArray[k][i]\n return (fitness / 100)", "def update(self, function_values, es, bounds=None):\r\n if bounds is None:\r\n bounds = self.bounds\r\n if bounds is None or (bounds[0] is None and bounds[1] is None): # no bounds ==> no penalty\r\n return self # len(function_values) * [0.0] # case without voilations\r\n\r\n N = es.N\r\n ### prepare\r\n # compute varis = sigma**2 * C_ii\r\n varis = es.sigma**2 * array(N * [es.C] if np.isscalar(es.C) else ( # scalar case\r\n es.C if np.isscalar(es.C[0]) else # diagonal matrix case\r\n [es.C[i][i] for i in xrange(N)])) # full matrix case\r\n\r\n # dmean = (es.mean - es.gp.into_bounds(es.mean)) / varis**0.5\r\n dmean = (es.mean - es.gp.geno(es.gp.into_bounds(es.gp.pheno(es.mean)))) / varis**0.5\r\n\r\n ### Store/update a history of delta fitness value\r\n fvals = sorted(function_values)\r\n l = 1 + len(fvals)\r\n val = fvals[3*l // 4] - fvals[l // 4] # exact interquartile range apart interpolation\r\n val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration\r\n # insert val in history\r\n if np.isfinite(val) and val > 0:\r\n self.hist.insert(0, val)\r\n elif val == inf and len(self.hist) > 1:\r\n self.hist.insert(0, max(self.hist))\r\n else:\r\n pass # ignore 0 or nan values\r\n if len(self.hist) > 20 + (3*N) / es.popsize:\r\n self.hist.pop()\r\n\r\n ### prepare\r\n dfit = np.median(self.hist) # median interquartile range\r\n damp = min(1, es.sp.mueff/10./N)\r\n\r\n ### set/update weights\r\n # Throw initialization error\r\n if len(self.hist) == 0:\r\n raise _Error('wrongful initialization, no feasible solution sampled. ' +\r\n 'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +\r\n 'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). 
')\r\n # initialize weights\r\n if (dmean.any() and (not self.weights_initialized or es.countiter == 2)): # TODO\r\n self.gamma = array(N * [2*dfit])\r\n self.weights_initialized = True\r\n # update weights gamma\r\n if self.weights_initialized:\r\n edist = array(abs(dmean) - 3 * max(1, N**0.5/es.sp.mueff))\r\n if 1 < 3: # this is better, around a factor of two\r\n # increase single weights possibly with a faster rate than they can decrease\r\n # value unit of edst is std dev, 3==random walk of 9 steps\r\n self.gamma *= exp((edist>0) * np.tanh(edist/3) / 2.)**damp\r\n # decrease all weights up to the same level to avoid single extremely small weights\r\n # use a constant factor for pseudo-keeping invariance\r\n self.gamma[self.gamma > 5 * dfit] *= exp(-1./3)**damp\r\n # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)\r\n elif 1 < 3 and (edist>0).any(): # previous method\r\n # CAVE: min was max in TEC 2009\r\n self.gamma[edist>0] *= 1.1**min(1, es.sp.mueff/10./N)\r\n # max fails on cigtab(N=12,bounds=[0.1,None]):\r\n # self.gamma[edist>0] *= 1.1**max(1, es.sp.mueff/10./N) # this was a bug!?\r\n # self.gamma *= exp((edist>0) * np.tanh(edist))**min(1, es.sp.mueff/10./N)\r\n else: # alternative version, but not better\r\n solutions = es.pop # this has not been checked\r\n r = self.feasible_ratio(solutions) # has to be the averaged over N iterations\r\n self.gamma *= exp(np.max([N*[0], 0.3 - r], axis=0))**min(1, es.sp.mueff/10/N)\r\n es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]\r\n ### return penalty\r\n # es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]\r\n return self # bound penalty values\r", "def evaluator(self, candidates, args):\r\n fitness = []\r\n if self._use_ants:\r\n for candidate in candidates:\r\n total = 0\r\n for c in candidate:\r\n total += c.value\r\n fitness.append(total)\r\n else:\r\n for candidate in candidates:\r\n total_value = 0\r\n total_weight = 0\r\n for c, i in zip(candidate, self.items):\r\n total_weight += c * i[0]\r\n total_value += c * i[1]\r\n if total_weight > self.capacity:\r\n fitness.append(self.capacity - total_weight)\r\n else:\r\n fitness.append(total_value)\r\n return fitness", "def get_nonAltruist_fitness(self):\n return self.nonAltruist_fitness", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "def _iterate(self, maxGen):\n # Automatic handling between Generator/Coroutine/Normal modes of operation\n while (((type(maxGen) == int) and (maxGen > 0)) or (maxGen == None)):\n\n # Initializing the fitness vectors\n if (self._iterationNum == 0):\n for i in range(self.nPopulation):\n self._extFitness = (yield self._population[i,:])\n if (self._extFitness is None): break\n self._fitness[i] = self.m * self._extFitness\n self._iterationNum += 1\n else:\n for i in range(self.nPopulation):\n self._extFitness = (yield self._trialPopulation[i,:])\n if (self._extFitness is None): break\n self._trialFitness[i] = self.m * self._extFitness\n self._iterationNum += 1\n\n # Check if the optimizer is used in coroutine mode\n if (self._extFitness is not None):\n\n if (self._iterationNum > 1):\n mask = self._trialFitness < self._fitness\n self._population[mask, :] = 
self._trialPopulation[mask, :]\n self._fitness[mask] = self._trialFitness[mask]\n\n self._minIndex = np.argmin(self._fitness)\n\n for j in range(self.nPopulation):\n\n rnds = (random(3) * self.nPopulation).astype(int);\n while rnds[0] in [j]:\n rnds[0] = int(random() * self.nPopulation)\n while rnds[1] in [j, rnds[0]]:\n rnds[1] = int(random() * self.nPopulation)\n while rnds[2] in [j, rnds[0], rnds[1]]:\n rnds[2] = int(random() * self.nPopulation)\n\n v = self._population[rnds[0], :] + self.F * (self._population[rnds[1], :] - self._population[rnds[2], :]);\n u = np.zeros_like(v)\n randb = random(self.nDimension);\n for index, value in enumerate(randb):\n if value <= self.CR:\n u[index] = v[index]\n else:\n u[index] = self._population[j, index]\n\n rnbr = int(random() * self.nDimension)\n u[rnbr] = v[rnbr]\n\n # Applying constraints on the population vector\n for index, val in enumerate(u):\n if (val < self.bounds[index][0]):\n u[index] = self.bounds[index][0]\n elif (val > self.bounds[index][1]):\n u[index] = self.bounds[index][1]\n\n self._trialPopulation[j, :] = u;\n\n # Check if the optimizer is used in normal mode\n elif (maxGen != None):\n\n maxGen -= 1\n if (self._iterationNum == 1):\n for i in range(self.nPopulation):\n self._fitness[i] = self.m * self.func(self._population[i, :])\n\n for j in range(self.nPopulation):\n\n rnds = (random(3) * self.nPopulation).astype(int);\n while rnds[0] in [j]:\n rnds[0] = int(random() * self.nPopulation)\n while rnds[1] in [j, rnds[0]]:\n rnds[1] = int(random() * self.nPopulation)\n while rnds[2] in [j, rnds[0], rnds[1]]:\n rnds[2] = int(random() * self.nPopulation)\n\n v = self._population[rnds[0], :] + self.F * (self._population[rnds[1], :] - self._population[rnds[2], :]);\n u = np.zeros_like(v)\n randb = random(self.nDimension);\n for index, value in enumerate(randb):\n if value <= self.CR:\n u[index] = v[index]\n else:\n u[index] = self._population[j, index]\n\n rnbr = int(random() * self.nDimension)\n u[rnbr] = v[rnbr]\n\n # Applying constraints on the population vector\n for index, val in enumerate(u):\n if (val < self.bounds[index][0]):\n u[index] = self.bounds[index][0]\n elif (val > self.bounds[index][1]):\n u[index] = self.bounds[index][1]\n\n self._trialPopulation[j, :] = u;\n\n for i in range(self.nPopulation):\n self._trialFitness[i] = self.m * self.func(self._trialPopulation[i, :])\n\n mask = self._trialFitness < self._fitness\n self._population[mask, :] = self._trialPopulation[mask, :]\n self._fitness[mask] = self._trialFitness[mask]\n\n self._minIndex = np.argmin(self._fitness)", "def update_fitness(self, treatment):\n self.fitness = max(0, min(1, 1 - self.c - np.dot(self.alpha, treatment)))\n return self.fitness", "def multiplication_test():\r\n\r\n def fitness_function(neural_net):\r\n \"\"\"Calculate the fitness of a neural_net.\"\"\"\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness\r\n\r\n gen_size = 50\r\n net_size = (2, 1)\r\n genetic_algorithm = GeneticAlgorithm(gen_size, net_size, mutation_rate=0.3, mutation_chance=0.5)\r\n\r\n highest_so_far = 0\r\n while True:\r\n # Testing creatures\r\n for neural_net in genetic_algorithm.population:\r\n neural_net.fitness = fitness_function(neural_net)\r\n\r\n # Sorting creatures\r\n genetic_algorithm.calculate_stats()\r\n\r\n print(\"Gen\", genetic_algorithm.current_generation, \":\")\r\n print(\"Max fitness\", 
genetic_algorithm.stats.max_fitness)\r\n print(\"Mean fitness\", genetic_algorithm.stats.mean_fitness)\r\n highest_so_far = max(genetic_algorithm.stats.max_fitness, highest_so_far)\r\n print(\"Highest so far\", highest_so_far)\r\n\r\n\r\n # Starting next generation\r\n if genetic_algorithm.stats.max_fitness < 24.9 and genetic_algorithm.current_generation < 1000:\r\n genetic_algorithm.next_generation()\r\n else:\r\n break\r\n\r\n\r\n quit()\r\n\r\n\r\n for net in genetic_algorithm.sorted_population:\r\n print(net.fitness)\r\n best_neural_net = genetic_algorithm.sorted_population[0]\r\n print(\"Weights:\")\r\n print(best_neural_net.layers[0].weights[0])\r\n while True:\r\n print()\r\n in_a = input(\"Give net first number: \")\r\n in_b = input(\"Give net second number: \")\r\n answer = best_neural_net.calculate([np.log(float(in_a)), np.log(float(in_b))])[0]\r\n print(\"Net's answer:\", np.exp(answer))", "def evolute(self, ngen, x0=None, verbose=True):\r\n self.history = {'local_fitness':[], 'global_fitness':[], 'a': [], 'A': []}\r\n self.best_fitness=float(\"inf\") \r\n self.verbose=verbose\r\n self.Positions = np.zeros((self.nwhales, self.dim))\r\n if x0:\r\n assert len(x0) == self.nwhales, '--error: the length of x0 ({}) MUST equal the number of whales in the group ({})'.format(len(x0), self.nwhales)\r\n for i in range(self.nwhales):\r\n self.Positions[i,:] = x0[i]\r\n else:\r\n #self.Positions=self.init_sample(self.bounds) #TODO, update later for mixed-integer optimisation\r\n # Initialize the positions of whales\r\n \r\n for i in range(self.dim):\r\n self.Positions[:, i] = (np.random.uniform(0, 1, self.nwhales) * (self.ub[i] - self.lb[i]) + self.lb[i])\r\n \r\n fitness0=self.eval_whales()\r\n \r\n self.best_position, self.best_fitness = self.select(self.Positions, fitness0)\r\n \r\n for k in range(0, ngen):\r\n \r\n # a is annealed from 2 to 0\r\n self.a = self.a0 - k * ((self.a0) / (ngen))\r\n # fac is annealed from -1 to -2 to estimate l\r\n self.fac = -1 + k * ((-1) / ngen)\r\n #-----------------------------\r\n # Update Whale Positions\r\n #-----------------------------\r\n self.UpdateWhales()\r\n \r\n #----------------------\r\n # Evaluate New Whales\r\n #----------------------\r\n fitness=self.eval_whales()\r\n \r\n for i, fits in enumerate(fitness):\r\n #save the best of the best!!!\r\n if fits < self.best_fitness:\r\n self.best_fitness=fits\r\n self.best_position=self.Positions[i, :].copy()\r\n \r\n #--mir\r\n if self.mode=='max':\r\n self.fitness_best_correct=-self.best_fitness\r\n self.local_fitness=-np.min(fitness)\r\n else:\r\n self.fitness_best_correct=self.best_fitness\r\n self.local_fitness=np.min(fitness)\r\n\r\n self.history['local_fitness'].append(self.local_fitness)\r\n self.history['global_fitness'].append(self.fitness_best_correct)\r\n self.history['a'].append(self.a)\r\n self.history['A'].append(self.A)\r\n \r\n # Print statistics\r\n if self.verbose and i % self.nwhales:\r\n print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\r\n print('WOA step {}/{}, nwhales={}, Ncores={}'.format((k+1)*self.nwhales, ngen*self.nwhales, self.nwhales, self.ncores))\r\n print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\r\n print('Best Whale Fitness:', np.round(self.fitness_best_correct,6))\r\n print('Best Whale Position:', np.round(self.best_position,6))\r\n print('a:', np.round(self.a,3))\r\n print('A:', np.round(self.A,3))\r\n 
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\r\n\r\n if self.verbose:\r\n print('------------------------ WOA Summary --------------------------')\r\n print('Best fitness (y) found:', self.fitness_best_correct)\r\n print('Best individual (x) found:', self.best_position)\r\n print('--------------------------------------------------------------') \r\n \r\n return self.best_position, self.fitness_best_correct, self.history", "def adjust_negative_fitness_scores(self):\n\n # Find the lowest fitness value.\n min_fitness = self.genomes[0].fitness\n for genome in self.genomes:\n if genome.fitness < min_fitness:\n min_fitness = genome.fitness\n\n if min_fitness > 0:\n return\n\n for genome in self.genomes:\n genome.fitness += abs(min_fitness)" ]
[ "0.74931896", "0.7079468", "0.69477665", "0.6825639", "0.67214495", "0.6705714", "0.6655073", "0.6554352", "0.6519272", "0.6481715", "0.6447769", "0.62911665", "0.61991894", "0.6193287", "0.6174193", "0.6151497", "0.6150842", "0.6150842", "0.610781", "0.60786945", "0.607092", "0.60208887", "0.5992473", "0.5970455", "0.59389764", "0.5927531", "0.5904772", "0.58538055", "0.5851372", "0.5839621", "0.5825952", "0.5821092", "0.5819108", "0.5818257", "0.5816065", "0.5812478", "0.5806263", "0.57824683", "0.5776029", "0.57751477", "0.57720876", "0.5762048", "0.576203", "0.57528496", "0.572794", "0.57205755", "0.5704687", "0.5702648", "0.56997097", "0.5695071", "0.5685124", "0.5685036", "0.5674849", "0.5673024", "0.56717134", "0.56700885", "0.5649596", "0.5649021", "0.56308115", "0.5628438", "0.5622917", "0.5619971", "0.5590673", "0.5585018", "0.55631846", "0.5538879", "0.55356", "0.55333126", "0.5524757", "0.5508351", "0.54989445", "0.5489389", "0.54887956", "0.54707986", "0.54676306", "0.5449937", "0.54421127", "0.5437307", "0.54240865", "0.5421831", "0.5413383", "0.540837", "0.5406347", "0.5405841", "0.54037434", "0.540114", "0.5393911", "0.53888994", "0.537733", "0.5376167", "0.53735906", "0.5372129", "0.53710407", "0.5365276", "0.5362879", "0.53623646", "0.53559047", "0.5355802", "0.53553635", "0.5353223" ]
0.6333001
11
If a pool of size 3 is used, the first 3 individuals in the input iterator should be collected into a list.
def test_pool():
    pop = iter([ 'a', 'b', 'c', 'd', 'e' ])
    pop = ops.pool(pop, size=3)
    assert(len(pop) == 3)
    assert(pop == [ 'a', 'b', 'c' ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazy_groups_of(iterator: Iterator[A], group_size: int) -> Iterator[List[A]]:\n return iter(lambda: list(islice(iterator, 0, group_size)), [])", "def construct(self, x):\n results = []\n x = self.pool0(x)\n results.append(x)\n x = self.pool1(x)\n results.append(x)\n x = self.pool2(x)\n results.append(x)\n x = self.pool3(x)\n results.append(x)\n return results", "def n_wise(x: List[Any], size: Optional[int] = 2) -> Iterable:\n\n iterator = iter(x)\n\n return iter(lambda: tuple(islice(iterator, size)), ())", "def chunk(it, size):\n it = iter(it)\n return iter(lambda: list(islice(it, size)), [])", "def chunker( it, size ):\n \n # Variables\n it = iter( it )\n \n # Selecting a bunch of jobs\n while True:\n p = tuple( itertools.islice( it, size ) )\n if not p:\n break\n yield p", "def _split_iterators(iterator, n=None):\n #if n is None:\n # item, iterator = cytoolz.peek(iterator)\n # n = len(item)\n iterators = itertools.tee(iterator, n)\n #iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators))\n # Above does not work?!\n\n out = list()\n out.append(s[0] for s in iterators[0])\n out.append(s[1] for s in iterators[1])\n out.append(s[2] for s in iterators[2])\n iterators = out\n return iterators", "def chunk(it, size):\n\tit = iter(it)\n\treturn iter(lambda: tuple(islice(it, size)), ())", "def batch(size, iterable):\r\n return list(xbatch(size, iterable))", "def take(num, iterable):\n return list(islice(iterable, num))", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def chunk(iter_list, size):\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())", "def take(n, iterable):\n return list(islice(iterable, n))", "def take(n, iterable):\n return list(islice(iterable, n))", "def grouped(iterable, n):\n # https://gist.github.com/yoyonel/fb8c9d6fb06871db527492f5144b2e7b\n iterable = iter(iterable)\n return iter(lambda: list(itertools.islice(iterable, n)), [])", "def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def _elements(self):\n return list(islice(self.generate(), None))", "def better_grouper(inputs, n):\n iters = [iter(inputs)] * n\n return zip(*iters)", "def batch(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield list(chain([batchiter.next()], batchiter))", "def chunks(sequence: Iterable[T], chunk_size: int = 2) -> Iterable[List[T]]:\n lsequence = list(sequence)\n while lsequence:\n size = min(len(lsequence), chunk_size)\n yield lsequence[:size]\n lsequence = lsequence[size:]", "def iter_chunks(sequence, chunk_size) :\n res = []\n for item in sequence :\n res.append(item)\n if len(res) >= chunk_size :\n yield res\n res = []\n if res : yield res", "def batch(iterable, k=3):\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]", "def chunks(iterable: Iterable, size: int) -> Iterable:\n it = iter(iterable)\n item = list(itertools.islice(it, size))\n while item:\n yield item\n item = list(itertools.islice(it, size))", "def split_chunk_iter(chunk, sizes, neighbors, rng=None):\n assert len(chunk) > len(sizes), 
f\"{len(chunk)} !> {len(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # start by drawing three random items\n splits = [[c] for c in rng.sample(list(chunk), len(sizes))]\n unused = set(chunk) - set(sum(splits, []))\n max_iters = max(sizes) * len(sizes) # worst case\n for j in range(max_iters):\n i = j % len(sizes)\n size = sizes[i]\n split = splits[i]\n if len(split) == size:\n continue\n # get all of the neighbors of the split\n candidates = set()\n for c in split:\n candidates |= neighbors[c]\n # filter to unused cubes\n candidates = candidates & unused\n if not candidates:\n return None\n # Pick a candidate at random and add it\n choice = rng.choice(list(candidates))\n split.append(choice)\n unused.remove(choice)\n return splits", "def split(iterator, criterion):\n a = []\n b = []\n for x in iterator:\n if criterion(x):\n a.append(x)\n else:\n b.append(x)\n\n return a, b", "def iterator_peek(iterator: Iterator[T], count: int) -> tuple[list[T], Iterator[T]]:\n\n ret = []\n for _ in range(count):\n try:\n ret.append(next(iterator))\n except StopIteration:\n break\n\n return ret, chain(ret, iterator)", "def take(iterable, n):\n return list(itertools.islice(iterable, n))", "def iter_batch(iterable, size) -> Iterable:\n source_iter = iter(iterable)\n while source_iter:\n b = list(islice(source_iter, size))\n if len(b) == 0:\n break\n yield b", "def items():\n for i in self._iter_restrict(zeros, ones):\n yield self.pcdata[i]", "def chunks(l):\n for i in range(0, len(l), concurrent):\n yield l[i:i + concurrent]", "def list_generator(size: int) -> list:\n libros = size * [None]\n\n return libros", "def chunk(iterable, n):\n iterable = [e for e in iterable]\n avg_length = int(math.ceil(len(iterable) / n))\n return [iterable[i * avg_length:(i + 1) * avg_length] for i in range(n)]", "def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n inputs_iter = iter(inputs)\n while True:\n try:\n chunk_data = []\n for _ in range(chunk_size):\n processed_data = next(inputs_iter)\n chunk_data.append(processed_data)\n yield chunk_data\n except StopIteration:\n if chunk_data:\n yield chunk_data\n break", "def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1", "def getPoolData(self):\r\n # type: () -> (list[Data.Data])\r\n output = []\r\n # start from the beginning of the pool area\r\n ea = self.func_ea + self.getSize(withPool=False)\r\n while ea < self.getSize(withPool=True):\r\n # create and append the data item\r\n data = Data.Data(ea)\r\n output.append(data)\r\n # advance ea to the next item\r\n ea += data.getSize()\r\n return output", "def primeset(source: Iterable[int]) -> Iterator[int]:\n for i in source:\n if prime(i):\n yield i", "def iterator(x, shuffle_=False, batch_size=1):\n if shuffle_:\n random.shuffle(x)\n # x.sort(key=lambda y: y['adjacency'].shape[1], reverse=True)\n new = [x[i:i + batch_size] for i in range(0, len(x), batch_size)]\n return new", "def chunks(data: List[Any], num: int) -> Generator[List[Any], None, None]:\n for i in range(0, len(data), num):\n yield data[i : i + num]", "def test_iter(self):\n for n, pool in enumerate([self.pool0, self.pool1, self.pool2]):\n assert list(iter(pool)) in (\n list('ab'[:n]),\n list('ba'[:n]),\n )", "def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]:\n # make sure we can deal with iterables like lists too\n sourceiter = iter(iterable)\n # call islice until it returns an empty tuple\n return iter(lambda: 
tuple(islice(sourceiter, size)), ())", "def grouper(n, iterable):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, n))\n if not chunk:\n return\n yield chunk", "def getChunks(inp_list, chunk_size):\n return [inp_list[x:x + chunk_size] for x in range(0, len(inp_list), chunk_size)]", "def results(self):\n page = []\n\n for i, item in enumerate(super(VideoCarouselTile, self).results()):\n page.append(item)\n if (i + 1) % 3 == 0:\n yield page\n page = []\n if page:\n yield page", "def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk", "def next_n(iterator, N):\n try:\n items = []\n for _ in range(N):\n items.append(next(iterator))\n return items\n except StopIteration:\n if items:\n return items\n return None", "def group(seq, size):\n if not hasattr(seq, 'next'):\n seq = iter(seq)\n while True:\n yield [seq.next() for i in xrange(size)]", "def section_4_7():\n import itertools\n\n def test1():\n def count(n):\n while True:\n yield n\n n += 1\n\n c = count(0)\n for x in itertools.islice(c, 10, 20):\n print(x)\n\n test1()", "def test_create_chunks():\n items = list(range(0, 100))\n size = 3\n\n chunks = create_chunks(items, size)\n\n current = next(chunks)\n assert len(current) == size\n assert current == [0, 1, 2]\n\n current = next(chunks)\n assert current == [3, 4, 5]", "def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]", "def get_batch(iterator, batch_size):\n while True:\n center_batch = np.zeros(batch_size, dtype = np.uint32)\n target_batch = np.zeros((batch_size, 1), dtype = np.uint32)\n for index in range(batch_size):\n center_batch[index], target_batch[index] = next(iterator)\n\n yield center_batch, target_batch", "def getSample(iterator, k):\n # fill the reservoir to start\n result = [next(iterator) for _ in range(k)]\n\n n = k\n\n for item in iterator:\n n += 1\n s = random.randint(0, n)\n if s < k:\n result[s] = item\n\n return result", "def test_iter(self, feed: typing.Type[feedmod.Provider], reference: str):\n conf10 = self.Conf(reference, 10, 'conf10')\n conf1000 = self.Conf(reference, 1000, 'conf1000')\n instant = feed(identity='instant')\n pool = feedmod.Pool(conf10, instant, conf1000)\n assert tuple(f.identity for f in pool) == ('instant', 'conf1000', 'conf10')", "def batches(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield chain([next(batchiter)], batchiter)", "def itergroup(iterable, size: int):\n group = []\n for item in iterable:\n group.append(item)\n if len(group) == size:\n yield group\n group = []\n if group:\n yield group", "def chunks(item_list, n_items):\n for i in range(0, len(item_list), n_items):\n yield item_list[i : i + n_items]", "def compute_pool(in_size):\n return (in_size - 2) // 2 + 1", "def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data", "def iter_triple_ids(self) -> Iterable[List[int]]:\n raise NotImplementedError", "def __iter__(self):\n n = self.head\n for _ in range(len(self)):\n if n == self.capacity:\n n = 0\n yield self.lst[n]\n n += 1", "def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]", "def grouper(n, iterable):\n\tit = iter(iterable)\n\twhile True:\n\t\tchunk = tuple(itertools.islice(it, n))\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk", "def chunker(results, n):\n\n def grouper(iterable, n, 
fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))", "def prepare_batch(self, iterator):\n elements = []\n\n for label, album_ids in iterator:\n for album_id in album_ids:\n image_path = os.path.join(self.image_folder, album_id)\n # If path doesn't exist, continue\n if not os.path.exists(image_path):\n continue\n images = [os.path.join(image_path, img_name)\n for img_name in sorted(os.listdir(image_path))]\n # If no photo available, continue\n if len(images) == 0:\n continue\n\n elements.append((label, images))\n\n random.shuffle(elements)\n\n return sorted(elements, key=lambda p: len(p[1]), reverse=True)", "def iterfetch(cursor, batchsize=1000):\n\t# type: (Cursor, int) -> Iterator[Any]\n\n\twhile True:\n\t\tresults = cursor.fetchmany(batchsize)\n\t\tif not results:\n\t\t\tbreak\n\t\tfor result in results:\n\t\t\tyield result", "def _extract_batch(self, data, batch_size):\n\n batch_size = batch_size or BATCH_SIZE\n\n batch = []\n try:\n for i in range(batch_size):\n batch.append(data.next())\n except StopIteration:\n pass\n\n return batch", "def chunks(alist, n):\n for i in range(0, len(alist), n):\n yield alist[i:i + n]", "def test_chunked():\n examples = list(range(10))\n assert list(chunked(iter(examples), 0)) == examples\n assert list(chunked(iter(examples), 1)) == [[i] for i in examples]\n assert list(chunked(iter(examples), 2)) == [[0,1], [2,3], [4,5], [6,7], [8,9]]\n assert list(chunked(iter(examples), 3)) == [[0,1,2], [3,4,5], [6,7,8], [9]]\n assert list(chunked(iter(examples), 4)) == [[0,1,2,3], [4,5,6,7], [8,9]]\n assert list(chunked(iter(examples), 5)) == [[0,1,2,3,4], [5,6,7,8,9]]\n assert list(chunked(iter(examples), 6)) == [[0,1,2,3,4,5], [6,7,8,9]]\n assert list(chunked(iter(examples), 7)) == [[0,1,2,3,4,5,6], [7,8,9]]\n assert list(chunked(iter(examples), 8)) == [[0,1,2,3,4,5,6,7], [8,9]]\n assert list(chunked(iter(examples), 9)) == [[0,1,2,3,4,5,6,7,8], [9]]\n assert list(chunked(iter(examples), 10)) == [examples]\n assert list(chunked(iter(examples), 11)) == [examples]", "def pick(iterable):\n for element in iterable:\n yield element\n while True:\n yield element", "def fetchsome(cursor, arraySize=5000):\n while True:\n results = cursor.fetchmany(arraySize)\n if not results:\n break\n for result in results:\n yield result", "def _stream_split_iterators(stream, n=None):\n iterators = list(map(Stream, _split_iterators(iter(stream.samples()), n)))\n return iterators", "def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]", "def _split_in_chunks(lst: Sequence[Any], chunksize: int) -> Iterator[Sequence[Any]]:\n for i in range(0, len(lst), chunksize):\n yield lst[i:i + chunksize]", "def _islice_batched(it: Iterator[np.ndarray], n: int) -> Iterator[np.ndarray]:\n while n > 0:\n arr: np.ndarray = next(it)\n k = arr.shape[0]\n yield arr[:n, :]\n n -= k", "def next(self) -> List[object]:\n ...", "def __call__(self, in_images):\n ret_images = []\n for image in in_images:\n if self.curr_elements < self.max_elements:\n self.images.append(image)\n ret_images.append(image)\n self.curr_elements+=1\n else:\n if np.random.uniform() > 0.5:\n idx = np.random.randint(0, self.curr_elements)\n temp = copy.deepcopy(self.images[idx])\n self.images[idx] = image\n ret_images.append(temp)\n else:\n ret_images.append(image)\n return ret_images", "def compute_combinations(items: List[Union[List[Any], Tuple]], n: 
int) -> List[List[Any]]:\n return [chunks[i:i + n] for chunks in items for i in range(len(chunks) - (n - 1))]", "def grouper(iterable, n):\n args = [iter(iterable)] * n\n return zip(*args)", "def _complete_windows(it: Iterator[_T], window_size: int) -> Iterator[Tuple[_T, ...]]:\n win = deque(islice(it, window_size), window_size)\n if len(win) < window_size:\n return\n # cache method access for slight speed boost\n append = win.append\n yield tuple(win)\n for e in it:\n append(e)\n yield tuple(win)", "def window(iterable, stride=3):\n for i in range(len(iterable) - stride + 1):\n yield iterable[i: i + stride]", "def chunk(iterable_obj, part_size):\n # Apparently,this was faster than all other methods when I tested\n # Container for new items\n temp_lst = []\n\n # Cycle through list of items with part_size step\n for i in range(0, len(iterable_obj), part_size):\n # append part_size list to temp list\n temp_lst.append(iterable_obj[i:i + part_size])\n\n return temp_lst", "def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n num_batches = len(self.coords_batcher)\n if worker_info is None:\n # In single-processing mode\n start, end = 0, num_batches\n else:\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n shard_size = int(np.ceil(num_batches / num_workers))\n start = shard_size * worker_id\n end = min(start + shard_size, num_batches)\n return (self.get_batch(i) for i in range(start, end))", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def batched(\n iterable: Iterable[_T],\n batch_size: int,\n container_factory: 'Callable[[Iterator[_T]], Collection[_T]]' = tuple\n) -> 'Iterator[Collection[_T]]':\n\n iterator = iter(iterable)\n while True:\n batch = container_factory(islice(iterator, batch_size))\n if len(batch) == 0:\n return\n\n yield batch", "def slice_list(input, size):\n input_size = len(input)\n slice_size = input_size // size\n remain = input_size % size\n result = []\n iterator = iter(input)\n for i in range(size):\n result.append([])\n for j in range(slice_size):\n result[i].append(next(iterator))\n if remain:\n result[i].append(next(iterator))\n remain -= 1\n return result", "def __next__(self):\n # Stop iteration once data source has been exhausted\n empty = False\n while not empty:\n try:\n if self.opt.num_buckets > 1:\n label_batch, enc_input_batch, dec_input_batch = self.bucketed_next()\n else:\n label_batch, enc_input_batch, dec_input_batch = self.unbucketed_next()\n if len(enc_input_batch) > 0:\n empty = True\n except IndexError:\n raise StopIteration\n # Apply padding to the obtained batch\n if self.opt.pad:\n label_batch = self.apply_padding(label_batch)\n enc_input_batch = self.apply_padding(enc_input_batch)\n dec_input_batch = self.apply_padding(dec_input_batch)\n # Convert batch lists to numpy arrays\n label_array = np.array(label_batch, dtype=np.int32)\n enc_input_array = np.array(enc_input_batch, dtype=np.int32)\n dec_input_array = np.array(dec_input_batch, dtype=np.int32)\n return label_array, enc_input_array, dec_input_array", "def chunkify(list,size):\n for i in range (0, len(list), size):\n yield list[i:i+size]", "def chunks(lst, size):\n for i in range(0, len(lst), size):\n yield lst[i:i + size]", "def chunks(iterator, size):\n for index in range(0, len(iterator), size):\n yield iterator[index:index + size]", "def chunks(iterable: Iterable, n: int = 1000, cls: Type = list) -> Generator:\n\n it = iter(iterable)\n while True:\n chunked = itertools.islice(it, n)\n try:\n first_element = next(chunked)\n 
except StopIteration:\n return\n yield cls(itertools.chain((first_element,), chunked))", "def batch(iterable, n):\n iterable = [e for e in iterable]\n size = len(iterable)\n return [iterable[i:i + n] for i in range(0, size, n)]", "def chunks(self, lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]", "def chunk_list(iterable: Iterable[T], size: int) -> Iterable[List[T]]:\n\n iterable = iter(iterable)\n\n item = list(itertools.islice(iterable, size))\n while item:\n yield item\n item = list(itertools.islice(iterable, size))", "def chunks(items, chunk_size):\r\n items = list(items)\r\n return (items[i:i + chunk_size] for i in xrange(0, len(items), chunk_size))", "def __iter__(self) -> Union[Iterator[int], Iterator[Tuple[int, Any]]]:\n self.size = self._data._dataset_size\n if (not self._data._fully_cached or\n self._data._should_call_prefetch_source):\n self._data._start_iteration()\n # First epoch of lazy loading, calling prefetch, and returning\n # indices and examples.\n iterator = self._iterator_unknown_size()\n else:\n # Non-lazy loading, or when dataset has been fully iterated.\n assert self.size is not None\n iterator = self._iterator_given_size(self.size)\n\n if self._data._should_call_prefetch_processed:\n # Processing routine is performed in main process. Yield\n # processed examples instead.\n map_fn = lambda idx: (idx, self._data._processed_cache[idx])\n elif self._data._should_yield_raw_example:\n # Return indices and examples for any epoch in this case.\n map_fn = lambda idx: (idx, self._data._source[idx])\n else:\n map_fn = None # type: ignore\n if map_fn is not None:\n return map(map_fn, iterator)\n\n return iterator", "def slice_and_run(single_iterator: permutations):\n step = 10000000\n start = 0\n stop = start + step\n # I use next_it bool to make sure to create one more slice with no end limit when slices are finished\n next_it = False\n while True:\n if next_it is False:\n cake_slice = islice(single_iterator, start, stop)\n else:\n cake_slice = islice(single_iterator, start, None)\n if args.cores is None:\n with Pool() as pool:\n data = pool.map(printer, cake_slice)\n else:\n with Pool(int(args.cores)) as pool:\n data = pool.map(printer, cake_slice)\n start += step\n stop += step\n if next_it is True:\n break\n if not data:\n next_it = True", "def chunks(_class, l, n):\n\t\t# CITE: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python\n\t for i in xrange(0, len(l), n):\n\t yield l[i:i+n]", "def chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i : i + n]", "def train_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TRAIN_FILES, 60000))", "def __iter__(self):\n return iter(self.cliques)" ]
[ "0.640328", "0.6058334", "0.58099174", "0.5747291", "0.57104117", "0.5681089", "0.56514186", "0.5616282", "0.56008047", "0.55808794", "0.5538672", "0.5503756", "0.5503756", "0.54849136", "0.5432563", "0.54290795", "0.54290795", "0.54163194", "0.54163116", "0.5399655", "0.53910416", "0.53711814", "0.53680724", "0.53597134", "0.53490454", "0.53470373", "0.5339381", "0.533715", "0.5325946", "0.52909666", "0.5290292", "0.52892774", "0.5287136", "0.52860373", "0.52710193", "0.5267592", "0.5263528", "0.5261027", "0.5258367", "0.5249227", "0.5245116", "0.52252316", "0.5212325", "0.5211912", "0.52017915", "0.51928467", "0.51908225", "0.5180234", "0.5178486", "0.5174676", "0.516865", "0.5163071", "0.51624274", "0.5148395", "0.51461035", "0.5139883", "0.5134869", "0.5130427", "0.51249564", "0.51244843", "0.5121708", "0.5119943", "0.511857", "0.5112146", "0.510977", "0.5107765", "0.510288", "0.5101043", "0.50998443", "0.50929433", "0.50925213", "0.508132", "0.5074376", "0.50717294", "0.5071145", "0.5070972", "0.5061234", "0.5055865", "0.5049309", "0.50473595", "0.5044695", "0.5040521", "0.5040079", "0.5037513", "0.50356406", "0.5032615", "0.5032041", "0.5031934", "0.5029187", "0.5028442", "0.50279063", "0.50273645", "0.50133544", "0.5008622", "0.50034356", "0.5001306", "0.5000564", "0.49932152", "0.4992121", "0.49913678" ]
0.7016338
0
Compute the color of a given pixel.
def start_up(t, coord, ii, n_pixels, value): global position x, y, z = coord # print(coord) # print("position") # print(int(position)) if (ii == 0): r = value[int(position)] g = value[int(position)] b = value[int(position)] elif (ii == 1 or ii == 29 or ii == 28): r = value[int(position)] * .7 g = value[int(position)] * .5 b = value[int(position)] * .5 else: r = 0 g = 0 b = 0 position += .01 if (position > 499): position = 0 # padXData = touchOSC.padXData # padYData = int(touchOSC.padYData * .65) # print padYData # print touchOSC.padYData # r,g,b = colorOSC # if x == padXData and z == padYData: # r,g,b = draw.circle(padXData,padYData, x, z,colorOSC) # draw.circle(5,5, x, z) return (r, g, b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)", "def getPixelColour(self, item, pixel):\n return item.get_at(pixel)", "def brightness(pixel):\n red = pixel[0]\n green = pixel[1]\n blue = pixel[2]\n return (21*red + 72*green + 7*blue) // 100", "def fakingColors(pixel):\n\n return (\n pixel[0] * 0.598 - 0.1957 * pixel[1] - 0.038 * pixel[2],\n pixel[1] * 1.174 - pixel[0] * 0.1994 - pixel[2] * 0.076,\n pixel[2] * 0.228 - pixel[0] * 0.1495 - pixel[1] * 0.2935\n )", "def get_red(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_red()", "def getPixel(self,x,y):\n return color_to_rgb(self._image.get(x, y))", "def GetPixel(*args, **kwargs):\n return _gdi_.Colour_GetPixel(*args, **kwargs)", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def getPixelColor(self, n):\n self._logger.debug(\"getPixelColor\")", "def find_reddest_pixel(img):\n # HINTS/ADVICE-------------\n # Use a nested for loop here.\n #\n # BE CAREFUL DOING ARITHMETIC WITH UNSIGNED INTEGERS: \n # >>> a = np.array([2], dtype='uint8')\n # >>> b = np.array([3], dtype='uint8')\n # >>> a - b\n # array([255], dtype=uint8)\n #\n # Reminder:\n # numpy arrays have a \"shape\" attribute that stores the layout:\n # img.shape[0] - rows\n # img.shape[1] - columns\n # img.shape[2] - color channels\n\n max_redness = 0\n max_x = 0\n max_y = 0\n \n img = np.array(img, dtype = 'int32')\n for r in range(img.shape[0]):\n for c in range(img.shape[1]):\n red = img[r, c, 2]\n green = img[r, c, 1]\n blue = img[r, c, 0] \n redness = (red - green) + (red - blue)\n\n if redness > max_redness:\n max_redness = redness\n max_x = c\n max_y = r\n \n return (max_x, max_y)", "def fl_get_pixel(colr):\n _fl_get_pixel = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_pixel\",\\\n cty.c_ulong, [xfdata.FL_COLOR],\\\n \"\"\"long unsigned int fl_get_pixel(FL_COLOR col)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.keep_elem_refs(colr, ul_colr)\n retval = _fl_get_pixel(ul_colr)\n return retval", "def getPixel(self, px, py):\n if not self.inBounds(px,py):\n return IColor()\n idx = py*self.w + px\n return self.data[idx]", "def get_color(self, coord):\n return self.board[coord[0], coord[1]]", "def get_red(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3]", "def get_pixel(self, frame: int, x: int, y: int) -> Color:\n return self.get_frame(frame).clone()[x, y]", "def get_pixel_color(self, x, y):\n raise NotImplementedError # remove when we fix it. 
:)\n\n # do the Window import here because we don't want to import it at the\n # top or else we won't be able to set window properties\n from kivy.core.window import Window\n\n # convert the passed x/y to the actual x/y of the Window since it's\n # possible for the mpf-mc display size to be different than the Window\n # size\n x *= Window.width / Window.children[0].width\n y *= Window.height / Window.children[0].height\n\n return glReadPixels(x, y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE)", "def get_rgb(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_rgb()", "def get_color_of_point(point: Tuple, rgb_im: Image, width: int, height: int) -> Tuple:\n x = int(point[0])\n y = int(point[1])\n new_point = (x, y)\n try:\n return rgb_im.getpixel(new_point)\n except:\n new_point = list(new_point)\n if new_point[0] == width:\n new_point[0] -= 1\n if new_point[1] == height:\n new_point[1] -= 1\n new_point = tuple(new_point)\n return rgb_im.getpixel(new_point)", "def get_color(self, point):\n \n d = point - self._origin\n dist = int(d.dot(d) ** 0.5) % 2\n if dist == 0:\n return self.c1.dup()\n else:\n return self.c2.dup()", "def get_green(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_green()", "def _calcColor(self, colorTuple):\n return milight.color_from_rgb(*colorTuple)", "def getPixelColor(self, n):\n\t\treturn self.leds[n]", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 2\n lobyte, hibyte = framebuf.buf[index : index + 2]\n r = hibyte & 0xF8\n g = ((hibyte & 0x07) << 5) | ((lobyte & 0xE0) >> 5)\n b = (lobyte & 0x1F) << 3\n return (r << 16) | (g << 8) | b", "def pixel(self, x, y):\n \n # Pixel data is unsigned char (8bit unsigned integer),\n # and there are for (blue,green,red,alpha)\n data_format = \"BBBB\"\n \n # Calculate offset, based on\n # http://www.markj.net/iphone-uiimage-pixel-color/\n offset = 4 * ((self.width*int(round(y))) + int(round(x)))\n \n # Unpack data from string into Python'y integers\n b, g, r, a = struct.unpack_from(data_format, self._data, offset=offset)\n \n # Return BGRA as RGBA\n return (r, g, b, a)", "def retrieveColor(image):\n w, h, dim = image.shape\n ret = np.zeros((w, h, dim), dtype=np.uint8)\n for i in range(w):\n for j in range(h):\n ret[i][j] = fakingColors(image[i][j])\n return np.clip(ret, 0, 255)", "def color_pixels(self, image, color):\r\n\r\n image[self.ally, self.allx] = color\r\n return image", "def GetPixel(*args, **kwargs):\n return _gdi_.DC_GetPixel(*args, **kwargs)", "def get_color(self, point):\n return self._color.dup()", "def get_pixel_colour(x, y, w, h, steps):\n global screen\n rgb_average = [0, 0, 0]\n # loops trough the rectangle while checking pixel rgb values\n for current_x in range(0, w, steps):\n for current_y in range(0, h, steps):\n pixel_rgb = screen.getpixel((x + current_x, y + current_y))\n rgb_average[0] += pixel_rgb[0]\n rgb_average[1] += pixel_rgb[1]\n rgb_average[2] += pixel_rgb[2]\n # normalizes color value to represent the complete rectangle as one single value\n rgb_average[0] = round(rgb_average[0] / (w * h) * steps * steps)\n rgb_average[1] = round(rgb_average[1] / (w * h) * steps * steps)\n rgb_average[2] = round(rgb_average[2] / (w * h) * steps * steps)\n return rgb_average", "def intensity( rgb ):\n return int( (rgb[0] + rgb[1] + rgb[2])/3 )", "def getPixel (self, x, y):\r\n return self.image [y][x]", "def _get_color(self, c, x, max_num):\n\n ratio = 5*(float(x)/max_num)\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio -= i\n r = (1 - 
ratio) * self._colors[i][c] + ratio*self._colors[j][c]\n return int(255*r)", "def GetPixel(*args, **kwargs):\n return _gdi_.Palette_GetPixel(*args, **kwargs)", "def pix_to_rvals(cube, pixel):\n return cube.rvals[pixel[1], pixel[0]] * cube.dist", "def get_colour(self, x, y):\n if x >= self.width or y >= self.height:\n return (0, 0, 0)\n\n return self.env_img.get_at((int(x), int(y))).normalize()[0:3]", "def grayscale(img):\n for pixel in img:\n x, y, col = pixel\n r, g, b = col\n \n r = (r + g + b)/3\n r = g = b\n \n new_color = create_color(r, g, b)\n set_color(img, x, y, new_color)", "def pixelvalue(self, *args, **kwargs):\n return _image.image_pixelvalue(self, *args, **kwargs)", "def get_pixel(framebuf, x, y):\n index = (y >> 3) * framebuf.stride + x\n offset = y & 0x07\n return (framebuf.buf[index] >> offset) & 0x01", "def pixel_at(self, x, y):\n return self.arr[x, y, 1] == 255", "def get_pixel(self, i, j):\n # Inside image bounds?\n if i > self.width or j > self.height:\n print(\"Pixel out of bounds\")\n return None\n\n # Get Pixel\n pixel = self.image.getpixel((i, j))\n return pixel", "def get_green(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 1]", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) // 8\n offset = 7 - x & 0x07\n return (framebuf.buf[index] >> offset) & 0x01", "def findpixel(img, col):\n for x in range(img.width):\n for y in range(img.height):\n if img[y][x][0] == col[0] and img[y][x][1] == col[1] and img[y][x][2] == col[2]:\n return cvPoint(x, y)", "def get_blue(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_blue()", "def pixelConverter(pixels):\n intensity = int((sum(pixels[:3]) / 3) * (pixels[-1] / 100))\n return intensity", "def get_pixel(image, i, j):\n if i >= image.shape[0] or j >= image.shape[1]:\n return 1, None\n return 0, image[i : i + 1, j : j + 1]", "def GetRGB(self, *args):\n return _XCAFDoc.XCAFDoc_Color_GetRGB(self, *args)", "def _rgb(x, y, z):\n rgb = np.array([x, y, z]).T\n rgb -= rgb.min(0)\n rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero\n return rgb", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) >> 2\n pixel = framebuf.buf[index]\n\n shift = (x & 0b11) << 1\n return (pixel >> shift) & 0b11", "def get_pixel(self, x, y):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n return self.get_led( y * 16 + x)\n else:\n return self.get_led((y-8) * 16 + (x+8))", "def pixel(self, x: int, y: int, color: int):\n if (\n (x < self.size[0] and y < self.size[1]) and (x >= 0 and y >= 0)\n ):\n index, offset = self.position(x, y)\n self.image[index] = (\n self.image[index] & ~(0x01 << offset)\n ) | (\n (color != 0) << offset\n )\n else:\n return", "def color(self):\n if self._simplecell:\n self.fetch()\n return self._color", "def getpixel(self, col, row):\n try:\n return self.vram[row][col]\n except IndexError:\n return None", "def get_pixel(self, x,y):\n\t\tstructval=self.__gdal__.ReadRaster(px,py,1,1,buf_type=G.GDT_UInt16) #Assumes 16 bit int aka 'short'\n\t\treturn struct.unpack('h' , structval)[0]", "def detect_colour(field):\n # create list of BGR tuples and count them\n pixels = Counter(map(tuple, np.reshape(field, (-1, 3)).tolist()))\n # filter out the colours which just have a few occurrences\n pixels = dict(filter(lambda pixel: pixel[1] > 100, dict(pixels).items()))\n # and merge the nearby colours\n pixels = merge_colours(pixels)\n\n # the background color should be the one with the most pixels 
present\n return Counter(pixels).most_common(1)[0][0]", "def get_colors(self, image: np.ndarray, coordinates: np.ndarray) -> np.ndarray:\r\n x = coordinates.squeeze(1)\r\n return np.flip(image[x[:, 1], x[:, 0]].astype(np.float64) / 255.0, axis=1)", "def green_channel(input_image):\n return input_image[:, :, 1]", "def get_color(self, _pos):\n return self.__framebuffer[_pos]", "def rgb_color(self):\n return self._color", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 3\n return (\n (framebuf.buf[index] << 16)\n | (framebuf.buf[index + 1] << 8)\n | framebuf.buf[index + 2]\n )", "def get_pixel(image, x, y):\n x = in_bound(image[\"height\"], x)\n y = in_bound(image[\"width\"], y)\n \n return image['pixels'][ x * image[\"width\"] + y]", "def getPixel(self):\r\n return self.__buffer[y][x]", "def color_in_rgb(self):\n return self._color_rgb", "def _color(self, x, factor):\r\n factor = (factor/MAX_LEVEL) * 1.8 + .1\r\n degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(x))\r\n x = tfa.image.blend(degenerate, tf.cast(x, tf.float32), factor)\r\n return tf.saturate_cast(x, tf.uint8)", "def _decode_pixel(pixel):\n\tr = (pixel & 0b0111110000000000) >> 7\n\tg = (pixel & 0b0000001111100000) >> 2\n\tb = (pixel & 0b0000000000011111) << 3\n\n\treturn \"%c%c%c\" % (\n\t\t(r | (r >> 5)),\n\t\t(g | (g >> 5)),\n\t\t(b | (b >> 5)),\n\t)", "def find_reddest_pixel_fast(img): \n img = np.array(img, dtype = 'int32')\n location = cv2.minMaxLoc((img[:, :, 2] - img[:, :, 1]) + (img[:, :, 2] - img[:, :, 0]))[3]\n return location", "def get_color(self):\r\n return self._color", "def int2color(x):\n # r = int(1000 * x % 255)\n # g = int(10000 * x % 255)\n # b = int(100000 * x % 255)\n x = 0 if x == 0 else int(1/x)\n b = x & 0xff\n g = (x >> 8) & 0xff\n r = (x >> 16) & 0xff\n return [r, g, b]", "def _get_color_brightness(self, color):\n d0, _, _ = self._get_color_dominance_indices(color)\n return color[d0]/MAX", "def get_color(self):\r\n return self.__color", "def get_color(self):\n return COLOR_DICT[self.element]", "def pixel( self, x, y, c = '#ffffff' ):\n self.raster.put( c, ( x, y ) )", "def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))", "def meanColor(self):\n return self.image[self.x, self.y]", "def get_color(self):\n return self._color", "def get_color(self):\n return self._color", "def get_color(self):\n return self.color", "def rgb_2_scalar_idx(r, g, b):\n return 256 ** 2 * r + 256 * g + b", "def GetColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_GetColor(self, *args)", "def FindColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_FindColor(self, *args)", "def int2color_tuple(x):\n red_val = int(1000 * x % 255)\n green_val = int(10000 * x % 255)\n blue_val = int(100000 * x % 255)\n return red_val, green_val, blue_val", "def find_approximate(pixel):\n #don't change the alphas\n if(pixel[3] <= 0):\n return pixel\n\n #current method: compute the sum and see which is less diff\n pallette_pixel_sum = {}\n for color in _COMPILED_PALETTE.keys():\n r_sum = math.fabs(_COMPILED_PALETTE[color][0] - pixel[0] ** _MAGNITUDE)\n g_sum = math.fabs(_COMPILED_PALETTE[color][1] - pixel[1] ** _MAGNITUDE)\n b_sum = math.fabs(_COMPILED_PALETTE[color][2] - pixel[2] ** _MAGNITUDE)\n\n i_sum = r_sum + g_sum + b_sum\n pallette_pixel_sum[i_sum] = (color[0], color[1], color[2], pixel[3])\n\n npixel = pallette_pixel_sum[min(pallette_pixel_sum.keys())]\n return npixel", "def getRGBFromXYAndBrightness(self, x, y, bri=1):\n # The xy to color conversion is almost 
the same, but in reverse order.\n # Check if the xy value is within the color gamut of the lamp.\n # If not continue with step 2, otherwise step 3.\n # We do this to calculate the most accurate color the given light can actually do.\n xyPoint = XYPoint(x, y)\n\n if not self.checkPointInLampsReach(xyPoint):\n # Calculate the closest point on the color gamut triangle\n # and use that as xy value See step 6 of color to xy.\n xyPoint = self.getClosestPointToPoint(xyPoint)\n\n # Calculate XYZ values Convert using the following formulas:\n Y = bri\n X = (Y / xyPoint.y) * xyPoint.x\n Z = (Y / xyPoint.y) * (1 - xyPoint.x - xyPoint.y)\n\n # Convert to RGB using Wide RGB D65 conversion\n r = X * 1.612 - Y * 0.203 - Z * 0.302\n g = -X * 0.509 + Y * 1.412 + Z * 0.066\n b = X * 0.026 - Y * 0.072 + Z * 0.962\n\n # Apply reverse gamma correction\n r, g, b = map(\n lambda x: (12.92 * x) if (x <= 0.0031308) else ((1.0 + 0.055) * pow(x, (1.0 / 2.4)) - 0.055),\n [r, g, b]\n )\n\n # Bring all negative components to zero\n r, g, b = map(lambda x: max(0, x), [r, g, b])\n\n # If one component is greater than 1, weight components by that value.\n max_component = max(r, g, b)\n if max_component > 1:\n r, g, b = map(lambda x: x / max_component, [r, g, b])\n\n r, g, b = map(lambda x: int(x * 255), [r, g, b])\n\n # Convert the RGB values to your color object The rgb values from the above formulas are between 0.0 and 1.0.\n return (r, g, b)", "def av_color(file):\n\ttry:\n\t\timage = Image.open(file)\n\t\tw, h = image.size\n\t\tpixels = image.getcolors(w * h)\n\t\tmost_frequent_pixel = pixels[0]\n\t\tfor count, colour in pixels:\n\t\t\tif count > most_frequent_pixel[0]:\n\t\t\t\tmost_frequent_pixel = (count, colour)\n\t\tdbg = int('0x%02x%02x%02x' % most_frequent_pixel[1], 16)\n\t\tprint(dbg)\n\t\treturn dbg\n\texcept Exception as e:\n\t\tprint('[!Error!] 
in AV COLOR')\n\t\tprint(e)\n\t\treturn 0xB46BCF", "def GetPixelPoint(*args, **kwargs):\n return _gdi_.DC_GetPixelPoint(*args, **kwargs)", "def _to_color(indx, base):\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127", "def get_pixel(self, x, y):\n assert self.valid_coordinates(x, y)\n return self.pixels[self.pixel_offset(x, y)]", "def pixel(self, x, y, color=None):\n if self.rotation == 1:\n x, y = y, x\n x = self.width - x - 1\n if self.rotation == 2:\n x = self.width - x - 1\n y = self.height - y - 1\n if self.rotation == 3:\n x, y = y, x\n y = self.height - y - 1\n\n if x < 0 or x >= self.width or y < 0 or y >= self.height:\n return None\n if color is None:\n return self.format.get_pixel(self, x, y)\n self.format.set_pixel(self, x, y, color)\n return None", "def colorDistance(self, color = (0, 0, 0)):\n return spsd.cdist(self.meanColor(), [color])[:,0]", "def getPixel(self,x,y) :\n # check the bounds to make sure we are in the correct area\n if x<0 or x>self.m_width :\n print \"error x out of bounds\\n\"\n return\n if y<0 or y>self.m_height :\n print \"error y our of bounds\\n\"\n return\n # now calculate the index into the 1D array of data\n index=(y*self.m_width*4)+x*4\n # grab the pixels\n red = self.getUcharArrayItem(self.charPixelPtr,index)\n green = self.getUcharArrayItem(self.charPixelPtr,index+1)\n blue = self.getUcharArrayItem(self.charPixelPtr,index+2)\n alpha=self.getUcharArrayItem(self.charPixelPtr,index+3)\n return (red,green,blue,alpha)", "def get_pixel_value(img, x, y):\n shape = tf.shape(x)\n batch_size = shape[0]\n height = shape[1]\n width = shape[2]\n\n batch_idx = tf.range(0, batch_size)\n batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1))\n b = tf.tile(batch_idx, (1, height, width))\n\n indices = tf.stack([b, y, x], 3)\n\n return tf.gather_nd(img, indices)", "def get_pixel_value(img, x, y):\n shape = tf.shape(x)\n batch_size = shape[0]\n height = shape[1]\n width = shape[2]\n\n batch_idx = tf.range(0, batch_size)\n batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1))\n b = tf.tile(batch_idx, (1, height, width))\n\n indices = tf.stack([b, y, x], 3)\n\n return tf.gather_nd(img, indices)", "def get_pixel_value(img, x, y):\n shape = tf.shape(x)\n batch_size = shape[0]\n height = shape[1]\n width = shape[2]\n\n batch_idx = tf.range(0, batch_size)\n batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1))\n b = tf.tile(batch_idx, (1, height, width))\n\n indices = tf.stack([b, y, x], 3)\n\n return tf.gather_nd(img, indices)", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def color(self):\n return self._rgba", "def color(self):\n return self._rgba", "def get_color(self):\n\n return self.color", "def one_color(image,color=[0,0,255]):\r\n output = image.copy()\r\n for line in range(len(image)):\r\n for column in range(len(image[0])):\r\n distance = calc_distance(color,image[line][column])\r\n if distance <=150:\r\n output[line][column]=[255,255,255]\r\n else:\r\n output[line][column]=[0,0,0]\r\n return output", "def getJuliaPixelColor(z, gradient):\n c = complex(-1.0, 0.0)\n for i in range(len(gradient)):\n z = z * z + c # Get z1, z2, ...\n if abs(z) > 2:\n z = 2.0\n return gradient[i] # The sequence is unbounded\n return gradient[len(gradient) - 1] # Indicate a bounded sequence" ]
[ "0.700929", "0.69985324", "0.6966529", "0.68848133", "0.6798888", "0.6789337", "0.6751443", "0.67474806", "0.6740431", "0.65576667", "0.6495866", "0.6494163", "0.6480991", "0.64658093", "0.64402866", "0.64176357", "0.64166456", "0.64017946", "0.63647753", "0.63250977", "0.6310689", "0.6270842", "0.62226725", "0.6216369", "0.61961997", "0.61781293", "0.61484206", "0.6118787", "0.6113793", "0.6095519", "0.60549325", "0.60544604", "0.60523486", "0.6052284", "0.60348225", "0.60124296", "0.59987944", "0.59977055", "0.59798914", "0.5978551", "0.5972292", "0.5960971", "0.59606457", "0.59540594", "0.5936636", "0.5924817", "0.5876801", "0.5870216", "0.58695227", "0.58639467", "0.58599484", "0.58597773", "0.58541024", "0.5848786", "0.58407843", "0.5822044", "0.58213776", "0.58159596", "0.581193", "0.5811918", "0.5810402", "0.5808623", "0.5799937", "0.5768357", "0.576023", "0.57601327", "0.5734668", "0.57286805", "0.5725468", "0.57211876", "0.5720131", "0.5713046", "0.56784153", "0.5678244", "0.5656257", "0.5656257", "0.5647984", "0.56240314", "0.5611193", "0.56074405", "0.56047285", "0.55943173", "0.559199", "0.5587993", "0.55783767", "0.5575375", "0.55734205", "0.5566923", "0.55596435", "0.5558596", "0.5539905", "0.5539905", "0.5539905", "0.5537534", "0.5537534", "0.5537534", "0.55358255", "0.55358255", "0.5531663", "0.5528587", "0.551295" ]
0.0
-1
Converts a wave to a vector of prosodic features. offset (in ms) determines where the signal will be sampled. window_len is ignored.
def wav_to_prosodic(path, sr=16000, offset=10): sound = parselmouth.Sound(path) pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling intensity = sound.to_intensity() features = [] max_time = sound.get_total_duration() for time in np.arange(0, max_time, 0.001): f0 = pitch.get_value_at_time(time) f0_nan = 0 if np.isnan(f0): f0 = 0 f0_nan = 1 int_db = intensity.get_value(time) if np.isnan(int_db): int_db = 0 features.append([f0, f0_nan, int_db]) array_feats = np.array(features).T print("SHAPE OF THE FEATURES:", array_feats.shape) assert(not np.any(np.isnan(array_feats))) return array_feats, max_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr", "def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]", "def wav_to_features(sample_rate, clip_duration_ms, window_size_ms,\n window_stride_ms, feature_bin_count, quantize, preprocess,\n input_wav, output_c_file):\n\n # Start a new TensorFlow session.\n sess = tf.compat.v1.InteractiveSession()\n\n model_settings = models.prepare_model_settings(\n 0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms,\n feature_bin_count, preprocess)\n audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,\n model_settings, None)\n\n results = audio_processor.get_features_for_wav(input_wav, model_settings,\n sess)\n features = results[0]\n\n variable_base = os.path.splitext(os.path.basename(input_wav).lower())[0]\n\n # Save a C source file containing the feature data as an array.\n with gfile.GFile(output_c_file, 'w') as f:\n f.write('/* File automatically created by\\n')\n f.write(' * tensorflow/examples/speech_commands/wav_to_features.py \\\\\\n')\n f.write(' * --sample_rate=%d \\\\\\n' % sample_rate)\n f.write(' * --clip_duration_ms=%d \\\\\\n' % clip_duration_ms)\n f.write(' * --window_size_ms=%d \\\\\\n' % window_size_ms)\n f.write(' * --window_stride_ms=%d \\\\\\n' % window_stride_ms)\n f.write(' * 
--feature_bin_count=%d \\\\\\n' % feature_bin_count)\n if quantize:\n f.write(' * --quantize=1 \\\\\\n')\n f.write(' * --preprocess=\"%s\" \\\\\\n' % preprocess)\n f.write(' * --input_wav=\"%s\" \\\\\\n' % input_wav)\n f.write(' * --output_c_file=\"%s\" \\\\\\n' % output_c_file)\n f.write(' */\\n\\n')\n f.write('const int g_%s_width = %d;\\n' %\n (variable_base, model_settings['fingerprint_width']))\n f.write('const int g_%s_height = %d;\\n' %\n (variable_base, model_settings['spectrogram_length']))\n if quantize:\n features_min, features_max = input_data.get_features_range(model_settings)\n f.write('const unsigned char g_%s_data[] = {' % variable_base)\n i = 0\n for value in features.flatten():\n quantized_value = int(\n round(\n (255 * (value - features_min)) / (features_max - features_min)))\n if quantized_value < 0:\n quantized_value = 0\n if quantized_value > 255:\n quantized_value = 255\n if i == 0:\n f.write('\\n ')\n f.write('%d, ' % (quantized_value))\n i = (i + 1) % 10\n else:\n f.write('const float g_%s_data[] = {\\n' % variable_base)\n i = 0\n for value in features.flatten():\n if i == 0:\n f.write('\\n ')\n f.write('%f, ' % value)\n i = (i + 1) % 10\n f.write('\\n};\\n')", "def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n #print(type(slice_sig),' ',slice_sig.shape,'begin:',start_idx,'end_idx:',end_idx)\n slices.append(slice_sig)\n\n if(len(slices)*window_size<len(wav)):\n slice_sig = np.zeros((window_size,))\n temp = wav[len(slices)*window_size:]\n slice_sig[:len(temp)] = temp\n slices.append(slice_sig)\n #print(type(slice_sig), ' ', slice_sig.shape,'begin:',0,'end_idx:',len(temp))\n\n return slices", "def extract_features(wavfile, feature, sampling_rate=16000):\n\n raw_signal, sr = librosa.core.load(wavfile,\n sampling_rate,\n mono=True,\n dtype='float'\n )\n\n\n if feature == 'MFCC':\n feat_seq = librosa.feature.mfcc(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mfcc=13,\n fmin=75,\n fmax=5999\n )\n # Numerical Stability\n #feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n\n elif feature == 'FBANK':\n feat_seq = librosa.feature.melspectrogram(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mels=13,\n fmin=75,\n fmax=5999\n )\n\n # Numerical Stability\n feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n # 20 * log | convert to Me-Scale\n feat_seq = 20*np.log10(feat_seq)\n\n # z-norm: feature normalization\n feat_norm = preprocessing.scale(feat_seq, axis=1)\n\n return feat_norm", "def wand_features(data, signals=EMG_SIGNALS, frame_len=EMG_FRAME_LEN,\n frame_shift=EMG_SHIFT_LEN, k=10):\n\n # samples is n_signals x n_timesteps\n samples = np.array(data[signals].T)\n phones = compute_subphones(data[\"phone\"])\n\n n_signals, n_timesteps = samples.shape[0], samples.shape[1]\n\n # Create the 17-point weighted moving average filter shown in Figure 4.2.\n ramp_filter = np.linspace(0,0.1,num=9)\n ma_filter = np.concatenate((ramp_filter[:-1], ramp_filter[::-1]))\n assert len(ma_filter) == 17\n \n n_frames = int(n_timesteps / frame_shift)\n n_feats = 5\n features = np.zeros((n_signals, n_feats, n_frames))\n frame_phones = []\n\n for i in range(n_signals):\n # Mean normalize\n x = samples[i] - np.mean(samples[i])\n\n # Apply moving average filter to compute low frequency signal w\n w = 
np.convolve(x, ma_filter, mode=\"same\")\n\n # Compute high frequency signal p\n p = x - w\n\n # Compute rectified signal r\n r = abs(p)\n\n # Ignore any frames that are incomplete (i.e. if n_timesteps is 2500 but \n # n_frames is 416 and frame_shift is 6, count up to 416*6 = 2496 rather\n # than 2500 timesteps, so we don't end up with a unit in the features that\n # is made up of an incomplete set of samples)\n for frame_id, t in enumerate(range(0, n_frames*frame_shift, frame_shift)):\n w_frame = w[t:t+frame_len]\n p_frame = p[t:t+frame_len]\n r_frame = r[t:t+frame_len]\n M_w = np.mean(w_frame) # Frame-based mean of w\n P_w = np.mean(w_frame * w_frame) # Frame-based power of w\n P_r = np.mean(r_frame * r_frame) # Frame-based power of r\n M_r = np.mean(r_frame) # Frame-based mean of r\n\n # Zero-crossing rate of p\n z_p = len(np.where(np.diff(np.signbit(p_frame)))[0]) / len(p_frame)\n\n features[i, :, frame_id] = np.array([M_w, P_w, P_r, z_p, M_r])\n mode_phone = mode(phones[t:t+frame_len])\n frame_phones.append(mode_phone)\n\n features = np.reshape(features, [-1, n_frames])\n\n features, labels = stack_context(features, k=k, labels=frame_phones)\n\n return features, labels", "def train_sample_windowize(field, delta=1, n=20):\n padded = np.pad(field, delta, mode='constant', constant_values=-1)\n X = np.zeros((n * n, (1 + delta * 2) ** 2))\n for i in range(n):\n for j in range(n):\n X[i * n + j] = padded[i:i + 2 * delta + 1, j:j + 2 * delta + 1].ravel()\n return X", "def encode_window(self, X, window, batch_size=50, window_batch_size=10000):\n features = numpy.empty((\n numpy.shape(X)[0], self.out_channels,\n numpy.shape(X)[2] - window + 1\n ))\n masking = numpy.empty((\n min(window_batch_size, numpy.shape(X)[2] - window + 1),\n numpy.shape(X)[1], window\n ))\n for b in range(numpy.shape(X)[0]):\n for i in range(math.ceil(\n (numpy.shape(X)[2] - window + 1) / window_batch_size)\n ):\n for j in range(\n i * window_batch_size,\n min(\n (i + 1) * window_batch_size,\n numpy.shape(X)[2] - window + 1\n )\n ):\n j0 = j - i * window_batch_size\n masking[j0, :, :] = X[b, :, j: j + window]\n features[\n b, :, i * window_batch_size: (i + 1) * window_batch_size\n ] = numpy.swapaxes(\n self.encode(masking[:j0 + 1], batch_size=batch_size), 0, 1\n )\n return features", "def polyfit_window(x, window_length=5, deg=1, deriv=0, delta=1, pos=None):\n if not pos:\n pos = int(window_length/2)+1\n num_samples = len(x)\n idx = np.arange(window_length)\n x_out = np.zeros(num_samples)\n\n x_padded = np.concatenate([np.zeros(window_length-1), x])\n\n for frame_start in np.arange(num_samples):\n x_frame = x_padded[idx + frame_start]\n p = np.polyfit(idx*delta, x_frame, deg=deg)\n p = np.polyder(p, m=deriv)\n x_out[frame_start] = np.polyval(p, idx[pos]*delta)\n\n return x_out", "def get_data_rescaled(self, wave):\n m = (self.max_threshold - self.min_threshold)/(np.max(wave) - np.min(wave))\n b = self.min_threshold - m * np.min(wave)\n wave = m * wave + b\n return np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])", "def collect_features(self, wav_path, label_path):\n n_fft = 512\n window_length = 20\n\n sound, fs = librosa.core.load(wav_path, sr=16000)\n\n if fs != 16000:\n print(wav_path)\n\n # Preemphasis\n preemp_sound = np.append(sound[0], sound[1:] - 0.97 * sound[:-1])\n\n # STFT\n spect = librosa.core.stft(preemp_sound,\n n_fft=n_fft,\n win_length=window_length * int(fs / 1000),\n hop_length=window_length * int(fs / 2000),\n window=scipy.signal.hamming,\n center=True)\n\n spect = 
np.log10(np.transpose(abs(spect[:, 1:]) ** 2) + 1e-16)\n\n return spect", "def get_data(self, wave):\n data = np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])\n self.min_threshold = np.min(data)\n self.max_threshold = np.max(data)\n return data", "def predict_proba(self, window: np.array):\n \n data = np.transpose(np.array(window))[self.data_channels]\n print('data shape in wrapped:', data.shape)\n proba = self.clf.predict_proba(data)\n return proba[0][1] # proba = [[prob_left, prob_right]]", "def wave_get_pulses():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSP, 0, 0))", "def window_data(X, window_length):\n return X[int(len(X)/2-window_length/2):int(len(X)/2+window_length/2)]", "def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0", "def wav_to_intensity(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def calc_window(shape):\n \"\"\"Compute fourier-space window function. Like the other fourier-based\n functions in this module, equi-spaced pixels are assumed. 
Since the\n window function is separable, it is returned as an x and y part,\n such that window = wy[:,None]*wx[None,:].\"\"\"\n wy = np.sinc(np.fft.fftfreq(shape[-2]))\n wx = np.sinc(np.fft.fftfreq(shape[-1]))\n return wy, wx", "def poly_features(frames, sample_rate, *, kwargs={}):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.poly_features(\n y=frame,\n sr=sample_rate,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)", "def welch(pro, fs, nfft, window, overlap, axis, detrend, scaling):\n\n # build the welch generating function\n genfunc = partial(_spectra_estimatives, pro, fs, nfft, window, overlap, \n axis, detrend, scaling, func=periodogram)\n\n # obtain the positive freqs.\n freqs = np.fft.rfftfreq(nfft, 1/fs)\n\n # num. segments that fit into pro samples of len nfft with % overlap\n nsegs = int((pro.shape[axis] - nfft) // (nfft * (1-overlap)) + 1)\n shape = list(pro.shape)\n shape[axis] = nsegs\n\n # return producer from welch gen func with each yielded \n result = producer(genfunc, chunksize=len(freqs), axis=axis, shape=shape)\n return freqs, result", "def fram_wave(waveform: np.array, hop_length: int = 160, fft_window_size: int = 400, center: bool = True):\n frames = []\n for i in range(0, waveform.shape[0] + 1, hop_length):\n if center:\n half_window = (fft_window_size - 1) // 2 + 1\n start = i - half_window if i > half_window else 0\n end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]\n frame = waveform[start:end]\n if start == 0:\n padd_width = (-i + half_window, 0)\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n elif end == waveform.shape[0]:\n padd_width = (0, (i - waveform.shape[0] + half_window))\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n else:\n frame = waveform[i : i + fft_window_size]\n frame_width = frame.shape[0]\n if frame_width < waveform.shape[0]:\n frame = np.lib.pad(\n frame, pad_width=(0, fft_window_size - frame_width), mode=\"constant\", constant_values=0\n )\n frames.append(frame)\n\n frames = np.stack(frames, 0)\n return frames", "def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = 
(self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)", "def smooth(inpt, window_len=10, window_type='flat'):\n if not (window_len % 2 == 0):\n window_len += 1\n print('Window length supplied is odd - using next highest integer: {}.'.format(window_len))\n\n if window_len <= 3:\n print('Error in data smoothing - please select a larger window length')\n return\n\n # window_type = 'hanning'\n if not window_type in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n print('Error - Invalid window_type')\n return\n\n # Generate two arguments to pass into numpy.convolve()\n # s is the input signal doctored with reflections of the input at the beginning and end\n # this serves to remove noise in the smoothing method\n # w is the window matrix based on pre-defined window functions or unit matrix for flat window\n\n s = np.r_[inpt[window_len -1:0:-1], inpt, inpt[-1:-window_len:-1]]\n # w = eval('np.'+window_type+'(window_len)')\n if window_type == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window_type + '(window_len)')\n\n # create smoothed data via numpy.convolve using the normalized input window matrix\n otpt = np.convolve(w / w.sum(), s, mode='valid')\n\n # format otpt to be same size as inpt and return\n return otpt[int(window_len / 2 -1):-int(window_len / 2)]", "def freq_window(self, startwindow, stopwindow, window=\"hann\"):\n n = self.times.size\n fwindow = _freq_window(self.fs, n, startwindow, stopwindow, window=window)\n new_response = self.from_freq(self.fs, self.in_freq * fwindow)\n\n return new_response", "def track_energy(wave, win_len, hop_len, win):\n\n wave = np.lib.pad(\n wave, pad_width=(win_len-hop_len, 0), mode='constant', constant_values=0\n )\n\n # post padding\n wave = librosa.util.fix_length(\n wave, int(win_len * np.ceil(len(wave) / win_len))\n )\n\n # cut into frames\n wavmat = librosa.util.frame(wave, frame_length=win_len, hop_length=hop_len)\n\n # Envelope follower\n wavmat = hwr(wavmat) ** 0.5 # half-wave rectification + compression\n\n return np.mean((wavmat.T * win), axis=1)", "def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features", "def extract_window_data(df, window_len=30, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_min_max(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)\n #return window_data", "def extract_window_data(df, window_len=10, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_zero_base(tmp)\n window_data.append(tmp.values)\n return 
np.array(window_data)", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def framing(signal, frame_length, frame_step, window_func=lambda x: np.ones((x,))):\n signal_length = len(signal)\n num_frames = 1 + (signal_length - frame_length) // frame_step\n\n frames = np.zeros((num_frames, frame_length))\n for index in range(num_frames):\n frames[index] = np.asarray(signal[index * frame_step: index * frame_step + frame_length],\n dtype='float32') * window_func(frame_length)\n return frames", "def window_data(data: np.ndarray):\n\n w_len = 128\n stride = w_len // 2\n\n no_offset_windows = np.split(data, 10)\n offset_windows = np.split(data[stride:-stride], 9)\n windows = [0] * 19\n windows[::2] = no_offset_windows\n windows[1::2] = offset_windows\n windows = np.array(windows, dtype=np.float32)\n\n return windows", "def generate_profile(npoints, length, alpha, window, h = 1., seed=None):\n npoints = int(npoints)\n window = int(window)\n length = float(length)\n alpha = float(alpha)\n h = float(h)\n if not seed is None:\n seed = int(seed)\n \n prng = np.random.RandomState(seed)\n phase = 2.*np.pi*prng.rand(npoints)\n k = 2.*np.pi*np.fft.fftfreq(npoints,length/float(npoints-1))\n amp = np.zeros(npoints)\n nflt = npoints//window\n if npoints%2 == 0:\n nfreq = npoints//2+1\n else:\n nfreq = (npoints-1)//2+1\n amp[1:] = (alpha*(2.*np.pi/np.abs(k[1:]))**(0.5*(1.+2.*h))*np.sqrt(np.pi/length)/2.*float(npoints))\n amp[nflt+1:-nflt] = 0.\n f = amp*np.exp(np.complex(0., 1.)*phase)\n fund = np.fft.fft(prng.choice([-1., 1.])*alpha*length*np.sin(np.linspace(0., length, npoints)*np.pi/length))\n f = np.real(np.fft.ifft(f+fund))\n return f-f[0]-(f[-1]-f[0])/length*np.linspace(0., length, npoints)", "def feats_array_4_window(window: np.ndarray):\n\n outvec = np.zeros((len(funclist), window.shape[1]))\n\n for i in range(len(funclist)):\n for j in range(window.shape[1]):\n outvec[i, j] = funclist[i](window[:, j])\n\n outvec = outvec.reshape(-1)\n\n return outvec", "def preprocess_data(num_mfcc_coeffs, num_filters, window_len, window_step, max_num_frames):\n inputs = [] \n labels = [] \n \n SOURCE_DIR = '../data/cmu_arctic/scottish-english-male-awb/wav/' \n TARGET_DIR = '../data/cmu_arctic/us-english-male-bdl/wav/'\n index = 0\n for source_fname, target_fname in zip(os.listdir(SOURCE_DIR), os.listdir(TARGET_DIR)):\n if index >= 20:\n break\n index += 1\n\n if source_fname == '.DS_Store' or target_fname == '.DS_Store':\n continue\n\n (source_sample_rate, 
source_wav_data) = wav.read(SOURCE_DIR + source_fname) \n (target_sample_rate, target_wav_data) = wav.read(TARGET_DIR + target_fname)\n\n source_mfcc_features = np.array(mfcc(source_wav_data, samplerate=source_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n target_mfcc_features = np.array(mfcc(target_wav_data, samplerate=target_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n\n # align with FastDTW\n source_mfcc_features, target_mfcc_features = get_dtw_series(source_mfcc_features, target_mfcc_features)\n\n # pad MFCC feature matrices (rows) to max_num_frames\n source_padded_frames = pad_sequence(source_mfcc_features, max_num_frames)\n target_padded_frames = pad_sequence(target_mfcc_features, max_num_frames)\n\n inputs.append(source_padded_frames) \n labels.append(target_padded_frames) \n\n return inputs, labels", "def moving_window_pts(data, tvec, wn, deg=2, drop_deg=False):\n\n deg_orig = deg\n posx, posz = data.T\n npts = len(posx)\n spos = np.zeros((npts, 2))\n svel = np.zeros((npts, 2))\n sacc = np.zeros((npts, 2))\n\n for i in range(npts):\n start, stop, at_end = window_bounds(i, npts, wn)\n if at_end and drop_deg:\n deg = deg_orig - 1\n else:\n deg = deg_orig\n\n t = tvec[start:stop]\n x = posx[start:stop]\n z = posz[start:stop]\n\n pfpx = np.polyfit(t, x, deg)\n pfpz = np.polyfit(t, z, deg)\n pfvx = np.polyder(pfpx, m=1)\n pfvz = np.polyder(pfpz, m=1)\n pfax = np.polyder(pfpx, m=2)\n pfaz = np.polyder(pfpz, m=2)\n\n tval = tvec[i]\n spos[i] = np.polyval(pfpx, tval), np.polyval(pfpz, tval)\n svel[i] = np.polyval(pfvx, tval), np.polyval(pfvz, tval)\n sacc[i] = np.polyval(pfax, tval), np.polyval(pfaz, tval)\n\n return spos, svel, sacc", "def wmeanfilt(vec, wid=3, w=None):\n # 2012-04-28 06:09 IJMC: Created\n\n filt = 0*vec\n if wid<1:\n wid = 1\n \n wid = int(wid)\n\n n = len(vec)\n\n for ii in range(n):\n i0 = np.max([0, ii -wid/2])\n i1 = np.min([n-1, ii + wid/2])\n filt[ii] = wmean(vec[i0:i1+1], w[i0:i1+1])\n #print ii, i0, i1\n\n return filt", "def frame_generator(wav_data, timestamp_offset):\n n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)\n offset = 0\n timestamp = timestamp_offset\n duration = (float(n) / sample_rate) / 2.0\n while offset + n < len(wav_data):\n yield Frame(wav_data[offset:offset + n], timestamp, duration)\n timestamp += duration\n offset += n", "def get_window_data(symbol_signals_df, window_size, feature_col_number, target_col_number):\n X = []\n y = []\n for i in range(len(symbol_signals_df) - window_size):\n features = symbol_signals_df.iloc[i : (i + window_size), feature_col_number]\n \n #print(features)\n \n target = symbol_signals_df.iloc[(i + window_size), target_col_number]\n \n \n X.append(features)\n y.append(target)\n \n return np.array(X), np.array(y).reshape(-1, 1)", "def returnSGFilteredData(x, window_length, polyorder, deriv):\n return ss.savgol_filter(x.flatten(),\n window_length=window_length,\n polyorder=polyorder,\n deriv=deriv)", "def impulse_data(sample_rate=512,psd_segment_length=60):\n epoch = 1153742417.0\n ts_data = numpy.zeros(sample_rate * psd_segment_length)\n ts_data = types.TimeSeries(ts_data, delta_t=1.0/sample_rate, epoch=epoch)\n return ts_data", "def fft(self):\n fft_start_time = time.time()\n self.wave_x = range(START, START + N)\n self.wave_y = self.data[START:START + N]\n self.spec_x = np.fft.rfftfreq(N, d=1.0/RATE)\n windowed_signal = self.data[START:START + N] * WINDOW\n spec_y_raw = np.fft.rfft(windowed_signal)\n 
self.spec_y = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in spec_y_raw]", "def get_trimmed_features(words, num_recordings, base_path=\"\", energy_threshold=0.001):\n\n features_by_word = []\n for i in range(len(words)):\n indexes = []\n feature_array = []\n for j in range(1, num_recordings[i] + 1):\n # Determine the path\n path = base_path + words[i] + str(j) + \".wav\"\n (rate, data) = get_sig(path)\n # features is all the audio features for a given file\n features = get_st_features(data, rate)[0]\n # features[1] is total frame energies\n # energy threshold of 0.001 is arbitrary\n indexes.append(relevant_indexes(features[1], energy_threshold))\n # Add features for this specific audio file to the feature array for this word\n feature_array.append(features)\n # Finds the minimum index of all start indexes\n min_index = sorted(indexes, key=lambda x: x[0])[0][0]\n # Finds the max index of all end indexes\n max_index = sorted(indexes, key=lambda x: x[1])[::-1][0][1]\n # Debug print statements commented out\n # print(\"min, max index for word\", words[i])\n # print(min_index, max_index)\n # Only take the frames between min index and max index for each sample word\n # Note: Potential for a bug; if maxIndex is outside the length of its frame array\n # To fix, need to pad the shorter recordings with extra data\n features_by_word.append([x[0:34, min_index:max_index].transpose() for x in feature_array])\n # print(numpy.shape(features_by_word[i]))\n # features_by_word is an array of len(words) cells\n # Each cell has num_recordings[i] elements corresponding to the number of recordings of each word words[i]\n # Each recording has the same number of frames for a given word, as determined by minIndex and maxIndex\n # for a given word.\n # Finally, each frame contains the 34 features from that frame's raw data samples\n return features_by_word", "def smooth_pseudo_wvd(\n signal,\n sampling_rate=1000,\n freq_length=None,\n time_length=None,\n segment_step=1,\n nfreqbin=None,\n window_method=\"hamming\",\n):\n\n # Define parameters\n N = len(signal)\n # sample_spacing = 1 / sampling_rate\n if nfreqbin is None:\n nfreqbin = 300\n\n # Zero-padded signal to length 2N\n signal_padded = np.append(signal, np.zeros_like(signal))\n\n # DFT\n signal_fft = np.fft.fft(signal_padded)\n signal_fft[1 : N - 1] = signal_fft[1 : N - 1] * 2\n signal_fft[N:] = 0\n\n # Inverse FFT\n signal_ifft = np.fft.ifft(signal_fft)\n signal_ifft[N:] = 0\n\n # Make analytic signal\n signal = scipy.signal.hilbert(signal_detrend(signal_ifft))\n\n # Create smoothing windows in time and frequency\n if freq_length is None:\n freq_length = np.floor(N / 4.0)\n # Plus one if window length is not odd\n if freq_length % 2 == 0:\n freq_length += 1\n elif len(freq_length) % 2 == 0:\n raise ValueError(\"The length of frequency smoothing window must be odd.\")\n\n if time_length is None:\n time_length = np.floor(N / 10.0)\n # Plus one if window length is not odd\n if time_length % 2 == 0:\n time_length += 1\n elif len(time_length) % 2 == 0:\n raise ValueError(\"The length of time smoothing window must be odd.\")\n\n if window_method == \"hamming\":\n freq_window = scipy.signal.hamming(int(freq_length)) # normalize by max\n time_window = scipy.signal.hamming(int(time_length)) # normalize by max\n elif window_method == \"gaussian\":\n std_freq = freq_length / (6 * np.sqrt(2 * np.log(2)))\n freq_window = scipy.signal.gaussian(freq_length, std_freq)\n freq_window /= max(freq_window)\n std_time = time_length / (6 * np.sqrt(2 * np.log(2)))\n time_window = 
scipy.signal.gaussian(time_length, std_time)\n time_window /= max(time_window)\n # to add warning if method is not one of the supported methods\n\n # Mid-point index of windows\n midpt_freq = (len(freq_window) - 1) // 2\n midpt_time = (len(time_window) - 1) // 2\n\n # Create arrays\n time_array = np.arange(start=0, stop=N, step=segment_step, dtype=int) / sampling_rate\n # frequency_array = np.fft.fftfreq(nfreqbin, sample_spacing)[0:nfreqbin / 2]\n frequency_array = 0.5 * np.arange(nfreqbin, dtype=float) / N\n pwvd = np.zeros((nfreqbin, len(time_array)), dtype=complex)\n\n # Calculate pwvd\n for i, t in enumerate(time_array):\n # time shift\n tau_max = np.min(\n [t + midpt_time - 1, N - t + midpt_time, np.round(N / 2.0) - 1, midpt_freq]\n )\n # time-lag list\n tau = np.arange(\n start=-np.min([midpt_time, N - t]), stop=np.min([midpt_time, t - 1]) + 1, dtype=\"int\"\n )\n time_pts = (midpt_time + tau).astype(int)\n g2 = time_window[time_pts]\n g2 = g2 / np.sum(g2)\n signal_pts = (t - tau - 1).astype(int)\n # zero frequency\n pwvd[0, i] = np.sum(g2 * signal[signal_pts] * np.conjugate(signal[signal_pts]))\n # other frequencies\n for m in range(int(tau_max)):\n tau = np.arange(\n start=-np.min([midpt_time, N - t - m]),\n stop=np.min([midpt_time, t - m - 1]) + 1,\n dtype=\"int\",\n )\n time_pts = (midpt_time + tau).astype(int)\n g2 = time_window[time_pts]\n g2 = g2 / np.sum(g2)\n signal_pt1 = (t + m - tau - 1).astype(int)\n signal_pt2 = (t - m - tau - 1).astype(int)\n # compute positive half\n rmm = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))\n pwvd[m + 1, i] = freq_window[midpt_freq + m + 1] * rmm\n # compute negative half\n rmm = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))\n pwvd[nfreqbin - m - 1, i] = freq_window[midpt_freq - m + 1] * rmm\n\n m = np.round(N / 2.0)\n\n if t <= N - m and t >= m + 1 and m <= midpt_freq:\n tau = np.arange(\n start=-np.min([midpt_time, N - t - m]),\n stop=np.min([midpt_time, t - 1 - m]) + 1,\n dtype=\"int\",\n )\n time_pts = (midpt_time + tau + 1).astype(int)\n g2 = time_window[time_pts]\n g2 = g2 / np.sum(g2)\n signal_pt1 = (t + m - tau).astype(int)\n signal_pt2 = (t - m - tau).astype(int)\n x = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))\n x *= freq_window[midpt_freq + m + 1]\n y = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))\n y *= freq_window[midpt_freq - m + 1]\n pwvd[m, i] = 0.5 * (x + y)\n\n pwvd = np.real(np.fft.fft(pwvd, axis=0))\n\n # Visualization\n\n return frequency_array, time_array, pwvd", "def smooth(x, window_len=11, window='hanning'):\n\n# if x.ndim != 1:\n# raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n# if x.size < window_len:\n# raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len<3:\n return x\n\n# if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n# raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]\n# print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' 
+ window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y", "def extract_features(\n fp, sample_rate, window_length, hop_length, n_mel, new_img_size, low_cut, high_cut\n):\n y, sr = librosa.load(fp, sr=args.sample_rate)\n y_filtered = butter_bandpass_filter(y, low_cut, high_cut, sr)\n melspectrogram_db = compute_melspectrogram_with_fixed_size(\n y_filtered, sample_rate, window_length, hop_length, n_mel, new_img_size\n )\n return melspectrogram_db", "def convert_wave_to_units(self, wave):\n return [self.convert_point_to_units(i) for i in wave]", "def _get_strided(waveform, window_size, window_shift, snip_edges):\n assert waveform.dim() == 1\n num_samples = waveform.size(0)\n strides = (window_shift * waveform.stride(0), waveform.stride(0))\n\n if snip_edges:\n if num_samples < window_size:\n return torch.empty((0, 0))\n else:\n m = 1 + (num_samples - window_size) // window_shift\n else:\n reversed_waveform = torch.flip(waveform, [0])\n m = (num_samples + (window_shift // 2)) // window_shift\n pad = window_size // 2 - window_shift // 2\n pad_right = reversed_waveform\n if pad > 0:\n # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'\n # but we want [2, 1, 0, 0, 1, 2]\n pad_left = reversed_waveform[-pad:]\n waveform = torch.cat((pad_left, waveform, pad_right), dim=0)\n else:\n # pad is negative so we want to trim the waveform at the front\n waveform = torch.cat((waveform[-pad:], pad_right), dim=0)\n\n sizes = (m, window_size)\n return waveform.as_strided(sizes, strides)", "def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute \"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of 
the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits", "def get_wavelength(start_wave, wave_per_pixel, size):\n\n return np.array([start_wave + i*wave_per_pixel for i in range(size)])", "def _feature_window_function(window_type, window_size, blackman_coeff):\n if window_type == HANNING:\n return torch.hann_window(window_size, periodic=False)\n elif window_type == HAMMING:\n return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46)\n elif window_type == POVEY:\n # like hanning but goes to zero at edges\n return torch.hann_window(window_size, periodic=False).pow(0.85)\n elif window_type == RECTANGULAR:\n return torch.ones(window_size, dtype=torch.get_default_dtype())\n elif window_type == BLACKMAN:\n a = 2 * math.pi / (window_size - 1)\n window_function = torch.arange(window_size, dtype=torch.get_default_dtype())\n # can't use torch.blackman_window as they use different coefficients\n return blackman_coeff - 0.5 * torch.cos(a * window_function) + \\\n (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)\n else:\n raise Exception('Invalid window type ' + window_type)", "def smooth(x, window_len=11, window=\"hanning\"):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in [\"flat\", \"hanning\", \"hamming\", \"bartlett\", \"blackman\"]:\n raise ValueError(\n \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n )\n\n s = np.r_[x[window_len - 1 : 0 : -1], x, x[-2 : -window_len - 1 : -1]]\n # print(len(s))\n if window == \"flat\": # moving average\n w = np.ones(window_len, \"d\")\n else:\n w = eval(\"np.\" + window + \"(window_len)\")\n\n y = np.convolve(w / w.sum(), s, mode=\"valid\")\n return y[(window_len // 2 - 1) : -(window_len // 2 + 1)]", "def wavedec(data: torch.Tensor,\n wavelet: pywt.Wavelet,\n level: int = None,\n mode: str = \"reflect\") 
-> list:\n if len(data.shape) == 1:\n # assume time series\n data = data.unsqueeze(0).unsqueeze(0)\n elif len(data.shape) == 2:\n # assume batched time series\n data = data.unsqueeze(1)\n\n dec_lo, dec_hi, _, _ = get_filter_tensors(\n wavelet, flip=True, device=data.device, dtype=data.dtype)\n filt_len = dec_lo.shape[-1]\n # dec_lo = torch.tensor(dec_lo[::-1]).unsqueeze(0)\n # dec_hi = torch.tensor(dec_hi[::-1]).unsqueeze(0)\n filt = torch.stack([dec_lo, dec_hi], 0)\n\n if level is None:\n level = pywt.dwt_max_level(data.shape[-1], filt_len)\n\n result_lst = []\n res_lo = data\n for s in range(level):\n res_lo = fwt_pad(res_lo, wavelet, level=s, mode=mode)\n res = torch.nn.functional.conv1d(res_lo, filt, stride=2)\n res_lo, res_hi = torch.split(res, 1, 1)\n result_lst.append(res_hi.squeeze(1))\n result_lst.append(res_lo.squeeze(1))\n return result_lst[::-1]", "def wavedec2(data, wavelet, level: int = None, mode: str = \"reflect\") -> list:\n dec_lo, dec_hi, _, _ = get_filter_tensors(\n wavelet, flip=True, device=data.device, dtype=data.dtype)\n dec_filt = construct_2d_filt(lo=dec_lo, hi=dec_hi)\n\n if level is None:\n level = pywt.dwtn_max_level([data.shape[-1], data.shape[-2]], wavelet)\n\n result_lst = []\n res_ll = data\n for s in range(level):\n res_ll = fwt_pad2d(res_ll, wavelet, level=s, mode=mode)\n res = torch.nn.functional.conv2d(res_ll, dec_filt, stride=2)\n res_ll, res_lh, res_hl, res_hh = torch.split(res, 1, 1)\n result_lst.append((res_lh, res_hl, res_hh))\n result_lst.append(res_ll)\n return result_lst[::-1]", "def _get_window_start(self, waveforms):", "def get_1d_features(waveforms):\n durations = []\n PTratio= []\n repolarizationslope= []\n recoveryslope = []\n for i in range(len(waveforms)): \n waveform=waveforms[i,:] \n durations.append(get_waveform_duration(waveform))\n PTratio.append(get_waveform_PTratio(waveform))\n repolarizationslope.append(get_waveform_repolarizationslope(waveform))\n recoveryslope.append(get_waveform_recoveryslope(waveform))\n return np.array(durations), np.array(PTratio), np.array(repolarizationslope), np.array(recoveryslope)", "def smooth_data(rawsong, samp_freq, freq_cutoffs=None, smooth_win=2):\n\n if freq_cutoffs is None:\n # then don't do bandpass_filtfilt\n filtsong = rawsong\n else:\n filtsong = bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs)\n\n squared_song = np.power(filtsong, 2)\n len = np.round(samp_freq * smooth_win / 1000).astype(int)\n h = np.ones((len,)) / len\n smooth = np.convolve(squared_song, h)\n offset = round((smooth.shape[-1] - filtsong.shape[-1]) / 2)\n smooth = smooth[offset:filtsong.shape[-1] + offset]\n return smooth", "def smooth_data(rawsong, samp_freq, freq_cutoffs=None, smooth_win=2):\n\n if freq_cutoffs is None:\n # then don't do bandpass_filtfilt\n filtsong = rawsong\n else:\n filtsong = bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs)\n\n squared_song = np.power(filtsong, 2)\n len = np.round(samp_freq * smooth_win / 1000).astype(int)\n h = np.ones((len,)) / len\n smooth = np.convolve(squared_song, h)\n offset = round((smooth.shape[-1] - filtsong.shape[-1]) / 2)\n smooth = smooth[offset:filtsong.shape[-1] + offset]\n return smooth", "def extract_wavelet(self, freq, num_cyc=3, mode=\"complex\", ignore_sessions=False):\n wav = wavelet(freq, sampling_freq=self.sampling_freq, num_cyc=num_cyc)\n if self.sessions is None or ignore_sessions:\n convolved = self.__class__(\n pd.DataFrame(\n {x: convolve(y, wav, mode=\"same\") for x, y in self.iteritems()}\n ),\n sampling_freq=self.sampling_freq,\n )\n else:\n 
convolved = self.__class__(sampling_freq=self.sampling_freq)\n for k, v in self.itersessions():\n session = self.__class__(\n pd.DataFrame(\n {x: convolve(y, wav, mode=\"same\") for x, y in v.iteritems()}\n ),\n sampling_freq=self.sampling_freq,\n )\n convolved = convolved.append(session, session_id=k)\n if mode == \"complex\":\n convolved = convolved\n elif mode == \"filtered\":\n convolved = np.real(convolved)\n elif mode == \"phase\":\n convolved = np.angle(convolved)\n elif mode == \"magnitude\":\n convolved = np.abs(convolved)\n elif mode == \"power\":\n convolved = np.abs(convolved) ** 2\n else:\n raise ValueError(\n \"Mode must be ['complex','filtered','phase',\" \"'magnitude','power']\"\n )\n convolved = self.__class__(\n convolved,\n sampling_freq=self.sampling_freq,\n features=self.features,\n sessions=self.sessions,\n )\n convolved.columns = (\n \"f\" + \"%s\" % round(freq, 2) + \"_\" + mode + \"_\" + self.columns\n )\n return convolved", "def calc_psf(filterName, oversample=4, offset_r=0, offset_theta=0, instrument='nircam', fov=5):\n # TODO complete instrument selection list\n if instrument.lower() == 'nircam':\n instr = webbpsf.NIRCam()\n instr.filter = filterName\n instr.options['source_offset_r'] = offset_r\n instr.options['source_offset_theta'] = offset_theta\n PSF = instr.calc_psf(oversample=oversample, fov_arcsec=fov)\n # retern the oversampled data\n return PSF[0].data", "def get_st_features(signal, rate, window_step=0.025, window_length=0.05):\n\n sample_step = int(rate*window_step)\n sample_length = int(rate*window_length)\n\n (features, feature_names) = audioFeatureExtraction.stFeatureExtraction(signal, rate, sample_length, sample_step)\n\n return features, feature_names", "def plot_wavelength_slice(self, offset, **kwargs):\n cumul_cube_lengths = np.cumsum(np.array([c.shape[self.common_axis]\n for c in self.data]))\n sequence_index, cube_index = cu._convert_cube_like_index_to_sequence_indices(\n offset, cumul_cube_lengths)\n plot = self[sequence_index].plot_wavelength_slice(cube_index, **kwargs)\n return plot", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n #return y\n return y[(window_len/2):-(window_len/2)]", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='same')\n return y", "def smooth(x,window_len=11,window='hanning'):\r\n\r\n if 
window_len<3:\r\n return x\r\n\r\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\r\n #print(len(s))\r\n if window == 'flat': #moving average\r\n w=np.ones(window_len,'d')\r\n else:\r\n w=eval('np.'+window+'(window_len)')\r\n\r\n y=np.convolve(w/w.sum(),s,mode='valid')\r\n return y[0:256]", "def smooth(x, window_len=11, window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y", "def wave_samples(self):\n return self._quantized_subsamples", "def smooth(x,window_len=11,window='hanning'): \n \n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n \n\n if window_len<3:\n return x\n \n \n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n \n\n s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n \n y=np.convolve(w/w.sum(),s,mode='valid')\n return y", "def window(data, f_interval=None, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- window')\n \n # Avoid overwritting data:\n data0 = data.copy()\n\n f_range = round(f_interval[0]+(f_interval[1]-f_interval[0])/2)\n picon = 2*np.pi*f_range*data[:,0]\n fsin = np.sin(picon)\n fcos = np.cos(picon)\n\n # Sinusoidal\n data0[:,1] = fsin\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]\n Psin = Pf_power[:,1]\n\n # Co-sinusoidal\n data0[:,1] = fcos\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]\n Pcos = Pf_power[:,1]\n\n # Output:\n P = 1./2*(Pcos+Psin)\n Pf_window = np.vstack([f, P]).T\n return Pf_window", "def sample(wave, factor):\n ys = np.zeros(len(wave))\n ys[::factor] = np.real(wave.ys[::factor])\n return Wave(ys, framerate=wave.framerate)", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise(ValueError, \"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise(ValueError, \"Input vector needs to be 
bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise (ValueError,\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n if len(y) is not len(x):\n y = y[window_len/2-1:-(window_len/2)]\n return y", "def smooth(x, window_len=11, window='hanning'):\n window_len = min(window_len, len(x) - 1)\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y", "def apply_window(audio):\n\treturn audio * numpy.hanning(len(audio))", "def unpack(self, pos, formatSpecifier, length):\n start = pos + self.posWAVEDESC\n x = np.frombuffer(self.data[start:start + length], self.endianness + formatSpecifier, count=1)[0]\n return x", "def smooth(x,window_len=11,window='hanning'):\n\n\t# if x.ndim != 1:\n\t# raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n\t# if x.size < window_len:\n\t# raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\tassert x.ndim==1\n\tassert x.size==window_len\n\n\tif window_len<3:\n\t\treturn x\n\n\tflag = (window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman'])\n\tassert flag==1\n\n\ts=numpy.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n\tif window == 'flat': #moving average\n\t\tw=numpy.ones(window_len,'d')\n\telse:\n\t\tw=eval('numpy.'+window+'(window_len)')\n\n\ty=numpy.convolve(w/w.sum(),s,mode='valid')\n\treturn y", "def waverec(coeffs: list, wavelet: pywt.Wavelet) -> torch.Tensor:\n _, _, rec_lo, rec_hi = get_filter_tensors(\n wavelet, flip=False, device=coeffs[-1].device,\n dtype=coeffs[-1].dtype\n )\n filt_len = rec_lo.shape[-1]\n filt = torch.stack([rec_lo, rec_hi], 0)\n\n res_lo = coeffs[0]\n for c_pos, res_hi in enumerate(coeffs[1:]):\n res_lo = torch.stack([res_lo, res_hi], 1)\n res_lo = torch.nn.functional.conv_transpose1d(\n res_lo, filt, stride=2).squeeze(1)\n\n # remove the padding\n padl = (2 * filt_len - 3) // 2\n padr = (2 * filt_len - 3) // 2\n if c_pos < len(coeffs) - 2:\n pred_len = res_lo.shape[-1] - (padl + padr)\n nex_len = coeffs[c_pos + 2].shape[-1]\n if nex_len != pred_len:\n padr += 1\n pred_len = res_lo.shape[-1] - (padl + padr)\n assert (\n nex_len == pred_len\n ), \"padding error, please open an issue on github \"\n if padl > 0:\n res_lo = res_lo[..., padl:]\n if padr > 0:\n res_lo = res_lo[..., :-padr]\n return res_lo", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise 
ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n else:\n w=eval('numpy.'+window+'(window_len)')\n\n y=numpy.convolve(w/w.sum(),s,mode='same')\n return y[window_len-1:-window_len+1]", "def gtgram(wave,fs,window_time, hop_time,channels,f_min,f_max):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gt.gtgram_strides(fs,window_time, hop_time, xe.shape[1])\n y = np.zeros((channels, ncols))\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n return y", "def smooth(x,window_len=11,window='bartlett',data = None):\n\n x = array(x)\n\n # use externally derieved window evaluation\n if data is not None:\n window_len = len(data)\n window = 'extern'\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n raise ValueError(\"window must not be shorter than 3\")\n\n if window_len%2 is 0:\n raise ValueError(\"window_len should be odd\")\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman','triang','extern']:\n raise ValueError(\"Window is none of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman','triang','extern'\")\n\n \n s=r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n\n elif window == 'triang':\n w = triang(window_len)\n\n elif window == 'extern':\n w = data\n \n else:\n w=eval(window+'(window_len)')\n\n y=convolve(w/w.sum(),s,mode='valid')\n \n return y[int((window_len-1)/2):len(y)-int((window_len-1)/2)]", "def smooth( x, window_len = 5, window = 'hanning' ):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len<3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y", "def smooth_avg(arr, winlen):\n\n window = np.ones(winlen) / (1.0 * winlen)\n return fftconvolve(arr, window, 'same')", "def _get_pulse_shaping_waveform(self):\n self.pulse_shaping_list = []\n # Make the rise time be 3.3333% if the dot time.\n rise_time_in_msec = 0.03333333333333 * self.dot_time_in_msec\n # Limit the rise time to 2 milliseconds.\n if rise_time_in_msec > 0.002:\n rise_time_in_msec = 0.002\n rising_falling_count = int(rise_time_in_msec * self.sample_rate)\n step = math.pi / rising_falling_count\n # The first value is zero, so skip that value.\n # The last value is 1.0, so skip that value too.\n for i in range(1, rising_falling_count - 1):\n gain = 0.5 * (1.0 - math.cos(step * i))\n self.pulse_shaping_list.append(gain)", "def set_fake_regular_offsets(self, win_wd, win_gap=0):\n sample_onset = int((win_wd + win_gap)*self.sampling_rate)\n self.onset_samples = range(0, len(self.audio), sample_onset)\n # excluding windows that are too close to the beginning\n self.onset_samples = [x for x in self.onset_samples if x > self.beginning_buffer]\n self.onset_times = 
[x/self.sampling_rate for x in self.onset_samples]", "def custom_sound(type_of, attack, decay, cutoff, coef, time, freq):\n dzw = np.zeros(time*44100)\n l=0\n for i in type_of:\n if i==\"sin\":\n dzw+= coef[l]*sin_custom(freq,time,attack[l],decay[l])\n if i==\"sq\":\n dzw+= coef[l]*sq_custom(freq,time,attack[l],decay[l])\n if i==\"saw\":\n dzw+= coef[l]*saw_custom(freq,time,attack[l],decay[l])\n l+=1 \n dzw[(1-cutoff)*time*44100 -1:]==0\n dzw = np.repeat(dzw,2).reshape(len(dzw),2)\n dzw = dzw/np.amax(dzw)\n return(dzw)", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def windowed_means(out_features, param):\n sampling_fq = param.t_max * 1000 + 1\n temp_wnd = np.linspace(param.min_latency, param.max_latency, param.steps + 1)\n intervals = np.zeros((param.steps, 2))\n for i in range(0, temp_wnd.shape[0] - 1):\n intervals[i, 0] = temp_wnd[i]\n intervals[i, 1] = temp_wnd[i + 1]\n intervals = intervals - param.t_min\n output_features = []\n for i in range(out_features.shape[0]):\n feature = []\n for j in range(out_features.shape[1]):\n time_course = out_features[i][j]\n for k in range(intervals.shape[0]):\n borders = intervals[k] * sampling_fq\n 
feature.append(np.average(time_course[int(borders[0] - 1):int(borders[1] - 1)]))\n output_features.append(feature)\n out = preprocessing.scale(np.array(output_features), axis=1)\n return out", "def offsetpolygon(polyx, offset):\n polyy = []\n # need three points at a time\n for counter in range(0, len(polyx) - 3):\n # get first offset intercept\n pt = getpt(polyx[counter],\n polyx[counter + 1],\n polyx[counter + 2],\n offset)\n # append new point to polyy\n polyy.append(pt)\n # last three points\n pt = getpt(polyx[-3], polyx[-2], polyx[-1], offset)\n polyy.append(pt)\n pt = getpt(polyx[-2], polyx[-1], polyx[0], offset)\n polyy.append(pt)\n pt = getpt(polyx[-1], polyx[0], polyx[1], offset)\n polyy.append(pt)\n return polyy", "def translate(self, offset):\n return BSplineFunc(self.kvs, self.coeffs + offset)", "def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart", "def smooth(data, window_len=10, window='hanning', keep_original=False):\n # TODO: add comnparison\n window_len += (window_len + 1) % 2\n s = np.r_['-1', data[:, window_len - 1:0:-1], data, data[:, -2:-window_len - 1:-1]]\n\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n # y=np.convolve(w/w.sum(),s,mode='valid')\n surplus_data = int((window_len - 1) * 0.5)\n return np.apply_along_axis(lambda m: np.convolve(m, w / w.sum(), mode='valid'), axis=1, arr=s)[:,\n surplus_data:-surplus_data]", "def clip_motif_time_series(freq_preprocessed_data, all_offsets, all_bin_widths, motif_start_times, motif_length: int):\n # [Freq]->(Instances, Frequency, Channels, Time-Steps, Bin Width)\n # Only need to get the times around the first syllable\n\n motif_events_series = []\n for pred_data, offset, bin_width in zip(freq_preprocessed_data, all_offsets, all_bin_widths):\n # Grab the Neural Activity Centered on Each event\n set_window = (offset - bin_width, offset + motif_length)\n chunk_events = fet.get_event_related_nd_chunk(chunk_data=pred_data, chunk_indices=motif_start_times, fs=1000,\n window=set_window) # clip the data at the start times\n\n corrected_chunk_events = []\n for chunk in chunk_events:\n corrected_chunk_events.append(np.squeeze(chunk))\n\n chunk_events = fet.event_shape_correction(chunk_events=corrected_chunk_events,\n original_dim=2) # Reformat to be array-like\n\n chunk_events_series = get_time_series(data=chunk_events, bin_width=bin_width) # clip samples based on bin_width\n\n motif_events_series.append(np.squeeze(chunk_events_series)) # Remove Single axis and append to list\n\n return motif_events_series", "def smooth(x, window_len=11, window='hanning', mode='same'):\n import numpy\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = numpy.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = numpy.ones(window_len, 'd')\n else:\n w = eval('numpy.' 
+ window + '(window_len)')\n\n y = numpy.convolve(w / w.sum(), s, mode=mode)\n if mode == 'same':\n return y[np.int_(window_len) - 1:-np.int_(window_len) + 1]\n else:\n return y[np.int_(window_len / 2 - 1):-np.int_(window_len / 2)]", "def _choose_x_slice(self, offset):\n arr = None\n axis = 0\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n unit = self.axes_wcs.wcs.cunit[-1]\n delta = self.axes_wcs.wcs.cdelt[-1] * unit\n wloffset = offset.to(unit) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr", "def windowfunction(time, freq):\n \n Ntime = len(time)\n Nfreq = len(freq)\n winkernel = np.empty_like(freq)\n\n for i in range(Nfreq):\n winkernel[i] = np.sum(np.cos(2.0*pi*freq[i]*time))**2 \\\n + np.sum(np.sin(2.0*pi*freq[i]*time))**2\n\n # Normalise such that winkernel(nu = 0.0) = 1.0 \n\n return winkernel/Ntime**2", "def stft(self, wav: np.ndarray) -> np.ndarray:\n return librosa.stft(\n y=wav,\n n_fft=self.filter_length,\n hop_length=self.hop_length,\n win_length=self.win_length,\n pad_mode=\"reflect\",\n )", "def wave_create():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCRE, 0, 0))", "def smooth(x, window_len=3, window='hanning'):\n s = np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]\n w = getattr(np, window)(window_len)\n y = np.convolve(w/w.sum(), s, mode='same') \n return y[window_len-1:-window_len+1]", "def sliding_window_offsets(data, window_size=500, shift_size=1):\n offsets = np.asarray(_sliding_window_chunkoffsets(data, window_size, shift_size))\n return offsets", "def smooth(x,window_len=10,window='hanning'):\n #\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n #\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n #\n if window_len<3:\n return x\n #\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n #\n s=r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n else:\n w=eval(window+'(window_len)')\n #\n y=convolve(w/w.sum(),s,mode='same')\n return y[window_len-1:-window_len+1]" ]
[ "0.5480287", "0.5404212", "0.5282063", "0.50624555", "0.50224143", "0.50093454", "0.4989605", "0.49353826", "0.486388", "0.48465458", "0.4828349", "0.47970074", "0.47777593", "0.47630838", "0.4727671", "0.47226426", "0.4674579", "0.4674468", "0.46724492", "0.46629953", "0.46431035", "0.46397656", "0.4619603", "0.46153444", "0.46127573", "0.46108994", "0.4610209", "0.4599091", "0.45964658", "0.4592125", "0.4576537", "0.45560563", "0.45554113", "0.45541048", "0.4533384", "0.45265856", "0.4526115", "0.45230916", "0.45223346", "0.45154032", "0.45129085", "0.45035967", "0.44643843", "0.44591504", "0.4455275", "0.4452231", "0.44474596", "0.44389173", "0.4418278", "0.4417141", "0.44135022", "0.44124642", "0.44077078", "0.44069234", "0.44039533", "0.4402546", "0.44023544", "0.44023544", "0.43917248", "0.43908498", "0.4390303", "0.4389377", "0.43865475", "0.437857", "0.43768227", "0.43744794", "0.43736354", "0.43730223", "0.43721965", "0.4371912", "0.43702486", "0.43681985", "0.43666983", "0.43656346", "0.43531796", "0.4349404", "0.43473935", "0.43456173", "0.4341445", "0.4335077", "0.4331977", "0.43302113", "0.43301696", "0.43262756", "0.43236187", "0.43209228", "0.43204486", "0.4320423", "0.4319292", "0.4319014", "0.43185088", "0.43145046", "0.43131462", "0.43106845", "0.4309533", "0.4305724", "0.43044427", "0.4303345", "0.4303214", "0.4291944" ]
0.6025671
0
Converts a wave to a vector of prosodic features. offset (in ms) determines where the signal will be sampled. window_len is ignored.
def wav_to_intensity(path, sr=16000, offset=10): sound = parselmouth.Sound(path) intensity = sound.to_intensity() features = [] max_time = sound.get_total_duration() for time in np.arange(0, max_time, 0.001): int_db = intensity.get_value(time) if np.isnan(int_db): int_db = 0 features.append([int_db]) array_feats = np.array(features).T print("SHAPE OF THE FEATURES:", array_feats.shape) assert(not np.any(np.isnan(array_feats))) return array_feats, max_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wav_to_prosodic(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n f0 = pitch.get_value_at_time(time)\n f0_nan = 0\n if np.isnan(f0):\n f0 = 0\n f0_nan = 1\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([f0, f0_nan, int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr", "def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. 
I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]", "def wav_to_features(sample_rate, clip_duration_ms, window_size_ms,\n window_stride_ms, feature_bin_count, quantize, preprocess,\n input_wav, output_c_file):\n\n # Start a new TensorFlow session.\n sess = tf.compat.v1.InteractiveSession()\n\n model_settings = models.prepare_model_settings(\n 0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms,\n feature_bin_count, preprocess)\n audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,\n model_settings, None)\n\n results = audio_processor.get_features_for_wav(input_wav, model_settings,\n sess)\n features = results[0]\n\n variable_base = os.path.splitext(os.path.basename(input_wav).lower())[0]\n\n # Save a C source file containing the feature data as an array.\n with gfile.GFile(output_c_file, 'w') as f:\n f.write('/* File automatically created by\\n')\n f.write(' * tensorflow/examples/speech_commands/wav_to_features.py \\\\\\n')\n f.write(' * --sample_rate=%d \\\\\\n' % sample_rate)\n f.write(' * --clip_duration_ms=%d \\\\\\n' % clip_duration_ms)\n f.write(' * --window_size_ms=%d \\\\\\n' % window_size_ms)\n f.write(' * --window_stride_ms=%d \\\\\\n' % window_stride_ms)\n f.write(' * --feature_bin_count=%d \\\\\\n' % feature_bin_count)\n if quantize:\n f.write(' * --quantize=1 \\\\\\n')\n f.write(' * --preprocess=\"%s\" \\\\\\n' % preprocess)\n f.write(' * --input_wav=\"%s\" \\\\\\n' % input_wav)\n f.write(' * --output_c_file=\"%s\" \\\\\\n' % output_c_file)\n f.write(' */\\n\\n')\n f.write('const int g_%s_width = %d;\\n' %\n (variable_base, model_settings['fingerprint_width']))\n f.write('const int g_%s_height = %d;\\n' %\n (variable_base, model_settings['spectrogram_length']))\n if quantize:\n features_min, features_max = input_data.get_features_range(model_settings)\n f.write('const unsigned char g_%s_data[] = {' % variable_base)\n i = 0\n for value in features.flatten():\n quantized_value = int(\n round(\n (255 * (value - features_min)) / (features_max - features_min)))\n if quantized_value < 0:\n quantized_value = 0\n if quantized_value > 255:\n quantized_value = 255\n if i == 0:\n f.write('\\n ')\n f.write('%d, ' % (quantized_value))\n i = (i + 1) % 10\n else:\n f.write('const float g_%s_data[] = {\\n' % variable_base)\n i = 0\n for value in features.flatten():\n if i == 0:\n f.write('\\n ')\n f.write('%f, ' % value)\n i = (i + 1) % 10\n f.write('\\n};\\n')", "def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n #print(type(slice_sig),' ',slice_sig.shape,'begin:',start_idx,'end_idx:',end_idx)\n slices.append(slice_sig)\n\n if(len(slices)*window_size<len(wav)):\n 
slice_sig = np.zeros((window_size,))\n temp = wav[len(slices)*window_size:]\n slice_sig[:len(temp)] = temp\n slices.append(slice_sig)\n #print(type(slice_sig), ' ', slice_sig.shape,'begin:',0,'end_idx:',len(temp))\n\n return slices", "def extract_features(wavfile, feature, sampling_rate=16000):\n\n raw_signal, sr = librosa.core.load(wavfile,\n sampling_rate,\n mono=True,\n dtype='float'\n )\n\n\n if feature == 'MFCC':\n feat_seq = librosa.feature.mfcc(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mfcc=13,\n fmin=75,\n fmax=5999\n )\n # Numerical Stability\n #feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n\n elif feature == 'FBANK':\n feat_seq = librosa.feature.melspectrogram(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mels=13,\n fmin=75,\n fmax=5999\n )\n\n # Numerical Stability\n feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n # 20 * log | convert to Me-Scale\n feat_seq = 20*np.log10(feat_seq)\n\n # z-norm: feature normalization\n feat_norm = preprocessing.scale(feat_seq, axis=1)\n\n return feat_norm", "def wand_features(data, signals=EMG_SIGNALS, frame_len=EMG_FRAME_LEN,\n frame_shift=EMG_SHIFT_LEN, k=10):\n\n # samples is n_signals x n_timesteps\n samples = np.array(data[signals].T)\n phones = compute_subphones(data[\"phone\"])\n\n n_signals, n_timesteps = samples.shape[0], samples.shape[1]\n\n # Create the 17-point weighted moving average filter shown in Figure 4.2.\n ramp_filter = np.linspace(0,0.1,num=9)\n ma_filter = np.concatenate((ramp_filter[:-1], ramp_filter[::-1]))\n assert len(ma_filter) == 17\n \n n_frames = int(n_timesteps / frame_shift)\n n_feats = 5\n features = np.zeros((n_signals, n_feats, n_frames))\n frame_phones = []\n\n for i in range(n_signals):\n # Mean normalize\n x = samples[i] - np.mean(samples[i])\n\n # Apply moving average filter to compute low frequency signal w\n w = np.convolve(x, ma_filter, mode=\"same\")\n\n # Compute high frequency signal p\n p = x - w\n\n # Compute rectified signal r\n r = abs(p)\n\n # Ignore any frames that are incomplete (i.e. 
if n_timesteps is 2500 but \n # n_frames is 416 and frame_shift is 6, count up to 416*6 = 2496 rather\n # than 2500 timesteps, so we don't end up with a unit in the features that\n # is made up of an incomplete set of samples)\n for frame_id, t in enumerate(range(0, n_frames*frame_shift, frame_shift)):\n w_frame = w[t:t+frame_len]\n p_frame = p[t:t+frame_len]\n r_frame = r[t:t+frame_len]\n M_w = np.mean(w_frame) # Frame-based mean of w\n P_w = np.mean(w_frame * w_frame) # Frame-based power of w\n P_r = np.mean(r_frame * r_frame) # Frame-based power of r\n M_r = np.mean(r_frame) # Frame-based mean of r\n\n # Zero-crossing rate of p\n z_p = len(np.where(np.diff(np.signbit(p_frame)))[0]) / len(p_frame)\n\n features[i, :, frame_id] = np.array([M_w, P_w, P_r, z_p, M_r])\n mode_phone = mode(phones[t:t+frame_len])\n frame_phones.append(mode_phone)\n\n features = np.reshape(features, [-1, n_frames])\n\n features, labels = stack_context(features, k=k, labels=frame_phones)\n\n return features, labels", "def train_sample_windowize(field, delta=1, n=20):\n padded = np.pad(field, delta, mode='constant', constant_values=-1)\n X = np.zeros((n * n, (1 + delta * 2) ** 2))\n for i in range(n):\n for j in range(n):\n X[i * n + j] = padded[i:i + 2 * delta + 1, j:j + 2 * delta + 1].ravel()\n return X", "def encode_window(self, X, window, batch_size=50, window_batch_size=10000):\n features = numpy.empty((\n numpy.shape(X)[0], self.out_channels,\n numpy.shape(X)[2] - window + 1\n ))\n masking = numpy.empty((\n min(window_batch_size, numpy.shape(X)[2] - window + 1),\n numpy.shape(X)[1], window\n ))\n for b in range(numpy.shape(X)[0]):\n for i in range(math.ceil(\n (numpy.shape(X)[2] - window + 1) / window_batch_size)\n ):\n for j in range(\n i * window_batch_size,\n min(\n (i + 1) * window_batch_size,\n numpy.shape(X)[2] - window + 1\n )\n ):\n j0 = j - i * window_batch_size\n masking[j0, :, :] = X[b, :, j: j + window]\n features[\n b, :, i * window_batch_size: (i + 1) * window_batch_size\n ] = numpy.swapaxes(\n self.encode(masking[:j0 + 1], batch_size=batch_size), 0, 1\n )\n return features", "def polyfit_window(x, window_length=5, deg=1, deriv=0, delta=1, pos=None):\n if not pos:\n pos = int(window_length/2)+1\n num_samples = len(x)\n idx = np.arange(window_length)\n x_out = np.zeros(num_samples)\n\n x_padded = np.concatenate([np.zeros(window_length-1), x])\n\n for frame_start in np.arange(num_samples):\n x_frame = x_padded[idx + frame_start]\n p = np.polyfit(idx*delta, x_frame, deg=deg)\n p = np.polyder(p, m=deriv)\n x_out[frame_start] = np.polyval(p, idx[pos]*delta)\n\n return x_out", "def get_data_rescaled(self, wave):\n m = (self.max_threshold - self.min_threshold)/(np.max(wave) - np.min(wave))\n b = self.min_threshold - m * np.min(wave)\n wave = m * wave + b\n return np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])", "def collect_features(self, wav_path, label_path):\n n_fft = 512\n window_length = 20\n\n sound, fs = librosa.core.load(wav_path, sr=16000)\n\n if fs != 16000:\n print(wav_path)\n\n # Preemphasis\n preemp_sound = np.append(sound[0], sound[1:] - 0.97 * sound[:-1])\n\n # STFT\n spect = librosa.core.stft(preemp_sound,\n n_fft=n_fft,\n win_length=window_length * int(fs / 1000),\n hop_length=window_length * int(fs / 2000),\n window=scipy.signal.hamming,\n center=True)\n\n spect = np.log10(np.transpose(abs(spect[:, 1:]) ** 2) + 1e-16)\n\n return spect", "def get_data(self, wave):\n data = np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])\n 
self.min_threshold = np.min(data)\n self.max_threshold = np.max(data)\n return data", "def predict_proba(self, window: np.array):\n \n data = np.transpose(np.array(window))[self.data_channels]\n print('data shape in wrapped:', data.shape)\n proba = self.clf.predict_proba(data)\n return proba[0][1] # proba = [[prob_left, prob_right]]", "def wave_get_pulses():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSP, 0, 0))", "def window_data(X, window_length):\n return X[int(len(X)/2-window_length/2):int(len(X)/2+window_length/2)]", "def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0", "def calc_window(shape):\n \"\"\"Compute fourier-space window function. Like the other fourier-based\n functions in this module, equi-spaced pixels are assumed. Since the\n window function is separable, it is returned as an x and y part,\n such that window = wy[:,None]*wx[None,:].\"\"\"\n wy = np.sinc(np.fft.fftfreq(shape[-2]))\n wx = np.sinc(np.fft.fftfreq(shape[-1]))\n return wy, wx", "def poly_features(frames, sample_rate, *, kwargs={}):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.poly_features(\n y=frame,\n sr=sample_rate,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)", "def welch(pro, fs, nfft, window, overlap, axis, detrend, scaling):\n\n # build the welch generating function\n genfunc = partial(_spectra_estimatives, pro, fs, nfft, window, overlap, \n axis, detrend, scaling, func=periodogram)\n\n # obtain the positive freqs.\n freqs = np.fft.rfftfreq(nfft, 1/fs)\n\n # num. 
segments that fit into pro samples of len nfft with % overlap\n nsegs = int((pro.shape[axis] - nfft) // (nfft * (1-overlap)) + 1)\n shape = list(pro.shape)\n shape[axis] = nsegs\n\n # return producer from welch gen func with each yielded \n result = producer(genfunc, chunksize=len(freqs), axis=axis, shape=shape)\n return freqs, result", "def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = (self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)", "def fram_wave(waveform: np.array, hop_length: int = 160, fft_window_size: int = 400, center: bool = True):\n frames = []\n for i in range(0, waveform.shape[0] + 1, hop_length):\n if center:\n half_window = (fft_window_size - 1) // 2 + 1\n start = i - half_window if i > half_window else 0\n end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]\n frame = waveform[start:end]\n if start == 0:\n padd_width = (-i + half_window, 0)\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n elif end == waveform.shape[0]:\n padd_width = (0, (i - waveform.shape[0] + half_window))\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n else:\n frame = waveform[i : i + fft_window_size]\n frame_width = frame.shape[0]\n if frame_width < waveform.shape[0]:\n frame = np.lib.pad(\n frame, pad_width=(0, fft_window_size - frame_width), mode=\"constant\", constant_values=0\n )\n frames.append(frame)\n\n frames = np.stack(frames, 0)\n return frames", "def smooth(inpt, window_len=10, window_type='flat'):\n if not (window_len % 2 == 0):\n window_len += 1\n print('Window length supplied is odd - using next highest integer: {}.'.format(window_len))\n\n if window_len <= 3:\n print('Error in data 
smoothing - please select a larger window length')\n return\n\n # window_type = 'hanning'\n if not window_type in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n print('Error - Invalid window_type')\n return\n\n # Generate two arguments to pass into numpy.convolve()\n # s is the input signal doctored with reflections of the input at the beginning and end\n # this serves to remove noise in the smoothing method\n # w is the window matrix based on pre-defined window functions or unit matrix for flat window\n\n s = np.r_[inpt[window_len -1:0:-1], inpt, inpt[-1:-window_len:-1]]\n # w = eval('np.'+window_type+'(window_len)')\n if window_type == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window_type + '(window_len)')\n\n # create smoothed data via numpy.convolve using the normalized input window matrix\n otpt = np.convolve(w / w.sum(), s, mode='valid')\n\n # format otpt to be same size as inpt and return\n return otpt[int(window_len / 2 -1):-int(window_len / 2)]", "def freq_window(self, startwindow, stopwindow, window=\"hann\"):\n n = self.times.size\n fwindow = _freq_window(self.fs, n, startwindow, stopwindow, window=window)\n new_response = self.from_freq(self.fs, self.in_freq * fwindow)\n\n return new_response", "def track_energy(wave, win_len, hop_len, win):\n\n wave = np.lib.pad(\n wave, pad_width=(win_len-hop_len, 0), mode='constant', constant_values=0\n )\n\n # post padding\n wave = librosa.util.fix_length(\n wave, int(win_len * np.ceil(len(wave) / win_len))\n )\n\n # cut into frames\n wavmat = librosa.util.frame(wave, frame_length=win_len, hop_length=hop_len)\n\n # Envelope follower\n wavmat = hwr(wavmat) ** 0.5 # half-wave rectification + compression\n\n return np.mean((wavmat.T * win), axis=1)", "def extract_window_data(df, window_len=30, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_min_max(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)\n #return window_data", "def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features", "def extract_window_data(df, window_len=10, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_zero_base(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = 
\"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def framing(signal, frame_length, frame_step, window_func=lambda x: np.ones((x,))):\n signal_length = len(signal)\n num_frames = 1 + (signal_length - frame_length) // frame_step\n\n frames = np.zeros((num_frames, frame_length))\n for index in range(num_frames):\n frames[index] = np.asarray(signal[index * frame_step: index * frame_step + frame_length],\n dtype='float32') * window_func(frame_length)\n return frames", "def generate_profile(npoints, length, alpha, window, h = 1., seed=None):\n npoints = int(npoints)\n window = int(window)\n length = float(length)\n alpha = float(alpha)\n h = float(h)\n if not seed is None:\n seed = int(seed)\n \n prng = np.random.RandomState(seed)\n phase = 2.*np.pi*prng.rand(npoints)\n k = 2.*np.pi*np.fft.fftfreq(npoints,length/float(npoints-1))\n amp = np.zeros(npoints)\n nflt = npoints//window\n if npoints%2 == 0:\n nfreq = npoints//2+1\n else:\n nfreq = (npoints-1)//2+1\n amp[1:] = (alpha*(2.*np.pi/np.abs(k[1:]))**(0.5*(1.+2.*h))*np.sqrt(np.pi/length)/2.*float(npoints))\n amp[nflt+1:-nflt] = 0.\n f = amp*np.exp(np.complex(0., 1.)*phase)\n fund = np.fft.fft(prng.choice([-1., 1.])*alpha*length*np.sin(np.linspace(0., length, npoints)*np.pi/length))\n f = np.real(np.fft.ifft(f+fund))\n return f-f[0]-(f[-1]-f[0])/length*np.linspace(0., length, npoints)", "def window_data(data: np.ndarray):\n\n w_len = 128\n stride = w_len // 2\n\n no_offset_windows = np.split(data, 10)\n offset_windows = np.split(data[stride:-stride], 9)\n windows = [0] * 19\n windows[::2] = no_offset_windows\n windows[1::2] = offset_windows\n windows = np.array(windows, dtype=np.float32)\n\n return windows", "def feats_array_4_window(window: np.ndarray):\n\n outvec = np.zeros((len(funclist), window.shape[1]))\n\n for i in range(len(funclist)):\n for j in range(window.shape[1]):\n outvec[i, j] = funclist[i](window[:, j])\n\n outvec = outvec.reshape(-1)\n\n return outvec", "def preprocess_data(num_mfcc_coeffs, num_filters, window_len, window_step, max_num_frames):\n inputs = [] \n labels = [] \n \n SOURCE_DIR = '../data/cmu_arctic/scottish-english-male-awb/wav/' \n TARGET_DIR = '../data/cmu_arctic/us-english-male-bdl/wav/'\n index = 0\n for source_fname, target_fname in zip(os.listdir(SOURCE_DIR), os.listdir(TARGET_DIR)):\n if index >= 20:\n break\n index += 1\n\n if source_fname == '.DS_Store' or target_fname == '.DS_Store':\n continue\n\n (source_sample_rate, source_wav_data) = wav.read(SOURCE_DIR + source_fname) \n (target_sample_rate, target_wav_data) = wav.read(TARGET_DIR + target_fname)\n\n source_mfcc_features = np.array(mfcc(source_wav_data, samplerate=source_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n target_mfcc_features = np.array(mfcc(target_wav_data, samplerate=target_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n\n # align with FastDTW\n source_mfcc_features, target_mfcc_features = get_dtw_series(source_mfcc_features, target_mfcc_features)\n\n # pad MFCC feature matrices (rows) to max_num_frames\n source_padded_frames = pad_sequence(source_mfcc_features, max_num_frames)\n target_padded_frames 
= pad_sequence(target_mfcc_features, max_num_frames)\n\n inputs.append(source_padded_frames) \n labels.append(target_padded_frames) \n\n return inputs, labels", "def moving_window_pts(data, tvec, wn, deg=2, drop_deg=False):\n\n deg_orig = deg\n posx, posz = data.T\n npts = len(posx)\n spos = np.zeros((npts, 2))\n svel = np.zeros((npts, 2))\n sacc = np.zeros((npts, 2))\n\n for i in range(npts):\n start, stop, at_end = window_bounds(i, npts, wn)\n if at_end and drop_deg:\n deg = deg_orig - 1\n else:\n deg = deg_orig\n\n t = tvec[start:stop]\n x = posx[start:stop]\n z = posz[start:stop]\n\n pfpx = np.polyfit(t, x, deg)\n pfpz = np.polyfit(t, z, deg)\n pfvx = np.polyder(pfpx, m=1)\n pfvz = np.polyder(pfpz, m=1)\n pfax = np.polyder(pfpx, m=2)\n pfaz = np.polyder(pfpz, m=2)\n\n tval = tvec[i]\n spos[i] = np.polyval(pfpx, tval), np.polyval(pfpz, tval)\n svel[i] = np.polyval(pfvx, tval), np.polyval(pfvz, tval)\n sacc[i] = np.polyval(pfax, tval), np.polyval(pfaz, tval)\n\n return spos, svel, sacc", "def wmeanfilt(vec, wid=3, w=None):\n # 2012-04-28 06:09 IJMC: Created\n\n filt = 0*vec\n if wid<1:\n wid = 1\n \n wid = int(wid)\n\n n = len(vec)\n\n for ii in range(n):\n i0 = np.max([0, ii -wid/2])\n i1 = np.min([n-1, ii + wid/2])\n filt[ii] = wmean(vec[i0:i1+1], w[i0:i1+1])\n #print ii, i0, i1\n\n return filt", "def frame_generator(wav_data, timestamp_offset):\n n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)\n offset = 0\n timestamp = timestamp_offset\n duration = (float(n) / sample_rate) / 2.0\n while offset + n < len(wav_data):\n yield Frame(wav_data[offset:offset + n], timestamp, duration)\n timestamp += duration\n offset += n", "def get_window_data(symbol_signals_df, window_size, feature_col_number, target_col_number):\n X = []\n y = []\n for i in range(len(symbol_signals_df) - window_size):\n features = symbol_signals_df.iloc[i : (i + window_size), feature_col_number]\n \n #print(features)\n \n target = symbol_signals_df.iloc[(i + window_size), target_col_number]\n \n \n X.append(features)\n y.append(target)\n \n return np.array(X), np.array(y).reshape(-1, 1)", "def returnSGFilteredData(x, window_length, polyorder, deriv):\n return ss.savgol_filter(x.flatten(),\n window_length=window_length,\n polyorder=polyorder,\n deriv=deriv)", "def impulse_data(sample_rate=512,psd_segment_length=60):\n epoch = 1153742417.0\n ts_data = numpy.zeros(sample_rate * psd_segment_length)\n ts_data = types.TimeSeries(ts_data, delta_t=1.0/sample_rate, epoch=epoch)\n return ts_data", "def fft(self):\n fft_start_time = time.time()\n self.wave_x = range(START, START + N)\n self.wave_y = self.data[START:START + N]\n self.spec_x = np.fft.rfftfreq(N, d=1.0/RATE)\n windowed_signal = self.data[START:START + N] * WINDOW\n spec_y_raw = np.fft.rfft(windowed_signal)\n self.spec_y = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in spec_y_raw]", "def get_trimmed_features(words, num_recordings, base_path=\"\", energy_threshold=0.001):\n\n features_by_word = []\n for i in range(len(words)):\n indexes = []\n feature_array = []\n for j in range(1, num_recordings[i] + 1):\n # Determine the path\n path = base_path + words[i] + str(j) + \".wav\"\n (rate, data) = get_sig(path)\n # features is all the audio features for a given file\n features = get_st_features(data, rate)[0]\n # features[1] is total frame energies\n # energy threshold of 0.001 is arbitrary\n indexes.append(relevant_indexes(features[1], energy_threshold))\n # Add features for this specific audio file to the feature array for this word\n feature_array.append(features)\n 
# Finds the minimum index of all start indexes\n min_index = sorted(indexes, key=lambda x: x[0])[0][0]\n # Finds the max index of all end indexes\n max_index = sorted(indexes, key=lambda x: x[1])[::-1][0][1]\n # Debug print statements commented out\n # print(\"min, max index for word\", words[i])\n # print(min_index, max_index)\n # Only take the frames between min index and max index for each sample word\n # Note: Potential for a bug; if maxIndex is outside the length of its frame array\n # To fix, need to pad the shorter recordings with extra data\n features_by_word.append([x[0:34, min_index:max_index].transpose() for x in feature_array])\n # print(numpy.shape(features_by_word[i]))\n # features_by_word is an array of len(words) cells\n # Each cell has num_recordings[i] elements corresponding to the number of recordings of each word words[i]\n # Each recording has the same number of frames for a given word, as determined by minIndex and maxIndex\n # for a given word.\n # Finally, each frame contains the 34 features from that frame's raw data samples\n return features_by_word", "def smooth_pseudo_wvd(\n signal,\n sampling_rate=1000,\n freq_length=None,\n time_length=None,\n segment_step=1,\n nfreqbin=None,\n window_method=\"hamming\",\n):\n\n # Define parameters\n N = len(signal)\n # sample_spacing = 1 / sampling_rate\n if nfreqbin is None:\n nfreqbin = 300\n\n # Zero-padded signal to length 2N\n signal_padded = np.append(signal, np.zeros_like(signal))\n\n # DFT\n signal_fft = np.fft.fft(signal_padded)\n signal_fft[1 : N - 1] = signal_fft[1 : N - 1] * 2\n signal_fft[N:] = 0\n\n # Inverse FFT\n signal_ifft = np.fft.ifft(signal_fft)\n signal_ifft[N:] = 0\n\n # Make analytic signal\n signal = scipy.signal.hilbert(signal_detrend(signal_ifft))\n\n # Create smoothing windows in time and frequency\n if freq_length is None:\n freq_length = np.floor(N / 4.0)\n # Plus one if window length is not odd\n if freq_length % 2 == 0:\n freq_length += 1\n elif len(freq_length) % 2 == 0:\n raise ValueError(\"The length of frequency smoothing window must be odd.\")\n\n if time_length is None:\n time_length = np.floor(N / 10.0)\n # Plus one if window length is not odd\n if time_length % 2 == 0:\n time_length += 1\n elif len(time_length) % 2 == 0:\n raise ValueError(\"The length of time smoothing window must be odd.\")\n\n if window_method == \"hamming\":\n freq_window = scipy.signal.hamming(int(freq_length)) # normalize by max\n time_window = scipy.signal.hamming(int(time_length)) # normalize by max\n elif window_method == \"gaussian\":\n std_freq = freq_length / (6 * np.sqrt(2 * np.log(2)))\n freq_window = scipy.signal.gaussian(freq_length, std_freq)\n freq_window /= max(freq_window)\n std_time = time_length / (6 * np.sqrt(2 * np.log(2)))\n time_window = scipy.signal.gaussian(time_length, std_time)\n time_window /= max(time_window)\n # to add warning if method is not one of the supported methods\n\n # Mid-point index of windows\n midpt_freq = (len(freq_window) - 1) // 2\n midpt_time = (len(time_window) - 1) // 2\n\n # Create arrays\n time_array = np.arange(start=0, stop=N, step=segment_step, dtype=int) / sampling_rate\n # frequency_array = np.fft.fftfreq(nfreqbin, sample_spacing)[0:nfreqbin / 2]\n frequency_array = 0.5 * np.arange(nfreqbin, dtype=float) / N\n pwvd = np.zeros((nfreqbin, len(time_array)), dtype=complex)\n\n # Calculate pwvd\n for i, t in enumerate(time_array):\n # time shift\n tau_max = np.min(\n [t + midpt_time - 1, N - t + midpt_time, np.round(N / 2.0) - 1, midpt_freq]\n )\n # time-lag list\n 
tau = np.arange(\n start=-np.min([midpt_time, N - t]), stop=np.min([midpt_time, t - 1]) + 1, dtype=\"int\"\n )\n time_pts = (midpt_time + tau).astype(int)\n g2 = time_window[time_pts]\n g2 = g2 / np.sum(g2)\n signal_pts = (t - tau - 1).astype(int)\n # zero frequency\n pwvd[0, i] = np.sum(g2 * signal[signal_pts] * np.conjugate(signal[signal_pts]))\n # other frequencies\n for m in range(int(tau_max)):\n tau = np.arange(\n start=-np.min([midpt_time, N - t - m]),\n stop=np.min([midpt_time, t - m - 1]) + 1,\n dtype=\"int\",\n )\n time_pts = (midpt_time + tau).astype(int)\n g2 = time_window[time_pts]\n g2 = g2 / np.sum(g2)\n signal_pt1 = (t + m - tau - 1).astype(int)\n signal_pt2 = (t - m - tau - 1).astype(int)\n # compute positive half\n rmm = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))\n pwvd[m + 1, i] = freq_window[midpt_freq + m + 1] * rmm\n # compute negative half\n rmm = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))\n pwvd[nfreqbin - m - 1, i] = freq_window[midpt_freq - m + 1] * rmm\n\n m = np.round(N / 2.0)\n\n if t <= N - m and t >= m + 1 and m <= midpt_freq:\n tau = np.arange(\n start=-np.min([midpt_time, N - t - m]),\n stop=np.min([midpt_time, t - 1 - m]) + 1,\n dtype=\"int\",\n )\n time_pts = (midpt_time + tau + 1).astype(int)\n g2 = time_window[time_pts]\n g2 = g2 / np.sum(g2)\n signal_pt1 = (t + m - tau).astype(int)\n signal_pt2 = (t - m - tau).astype(int)\n x = np.sum(g2 * signal[signal_pt1] * np.conjugate(signal[signal_pt2]))\n x *= freq_window[midpt_freq + m + 1]\n y = np.sum(g2 * signal[signal_pt2] * np.conjugate(signal[signal_pt1]))\n y *= freq_window[midpt_freq - m + 1]\n pwvd[m, i] = 0.5 * (x + y)\n\n pwvd = np.real(np.fft.fft(pwvd, axis=0))\n\n # Visualization\n\n return frequency_array, time_array, pwvd", "def smooth(x, window_len=11, window='hanning'):\n\n# if x.ndim != 1:\n# raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n# if x.size < window_len:\n# raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len<3:\n return x\n\n# if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n# raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]\n# print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' 
+ window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y", "def extract_features(\n fp, sample_rate, window_length, hop_length, n_mel, new_img_size, low_cut, high_cut\n):\n y, sr = librosa.load(fp, sr=args.sample_rate)\n y_filtered = butter_bandpass_filter(y, low_cut, high_cut, sr)\n melspectrogram_db = compute_melspectrogram_with_fixed_size(\n y_filtered, sample_rate, window_length, hop_length, n_mel, new_img_size\n )\n return melspectrogram_db", "def convert_wave_to_units(self, wave):\n return [self.convert_point_to_units(i) for i in wave]", "def _get_strided(waveform, window_size, window_shift, snip_edges):\n assert waveform.dim() == 1\n num_samples = waveform.size(0)\n strides = (window_shift * waveform.stride(0), waveform.stride(0))\n\n if snip_edges:\n if num_samples < window_size:\n return torch.empty((0, 0))\n else:\n m = 1 + (num_samples - window_size) // window_shift\n else:\n reversed_waveform = torch.flip(waveform, [0])\n m = (num_samples + (window_shift // 2)) // window_shift\n pad = window_size // 2 - window_shift // 2\n pad_right = reversed_waveform\n if pad > 0:\n # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'\n # but we want [2, 1, 0, 0, 1, 2]\n pad_left = reversed_waveform[-pad:]\n waveform = torch.cat((pad_left, waveform, pad_right), dim=0)\n else:\n # pad is negative so we want to trim the waveform at the front\n waveform = torch.cat((waveform[-pad:], pad_right), dim=0)\n\n sizes = (m, window_size)\n return waveform.as_strided(sizes, strides)", "def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute \"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of 
the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits", "def get_wavelength(start_wave, wave_per_pixel, size):\n\n return np.array([start_wave + i*wave_per_pixel for i in range(size)])", "def _feature_window_function(window_type, window_size, blackman_coeff):\n if window_type == HANNING:\n return torch.hann_window(window_size, periodic=False)\n elif window_type == HAMMING:\n return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46)\n elif window_type == POVEY:\n # like hanning but goes to zero at edges\n return torch.hann_window(window_size, periodic=False).pow(0.85)\n elif window_type == RECTANGULAR:\n return torch.ones(window_size, dtype=torch.get_default_dtype())\n elif window_type == BLACKMAN:\n a = 2 * math.pi / (window_size - 1)\n window_function = torch.arange(window_size, dtype=torch.get_default_dtype())\n # can't use torch.blackman_window as they use different coefficients\n return blackman_coeff - 0.5 * torch.cos(a * window_function) + \\\n (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)\n else:\n raise Exception('Invalid window type ' + window_type)", "def smooth(x, window_len=11, window=\"hanning\"):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in [\"flat\", \"hanning\", \"hamming\", \"bartlett\", \"blackman\"]:\n raise ValueError(\n \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n )\n\n s = np.r_[x[window_len - 1 : 0 : -1], x, x[-2 : -window_len - 1 : -1]]\n # print(len(s))\n if window == \"flat\": # moving average\n w = np.ones(window_len, \"d\")\n else:\n w = eval(\"np.\" + window + \"(window_len)\")\n\n y = np.convolve(w / w.sum(), s, mode=\"valid\")\n return y[(window_len // 2 - 1) : -(window_len // 2 + 1)]", "def wavedec(data: torch.Tensor,\n wavelet: pywt.Wavelet,\n level: int = None,\n mode: str = \"reflect\") 
-> list:\n if len(data.shape) == 1:\n # assume time series\n data = data.unsqueeze(0).unsqueeze(0)\n elif len(data.shape) == 2:\n # assume batched time series\n data = data.unsqueeze(1)\n\n dec_lo, dec_hi, _, _ = get_filter_tensors(\n wavelet, flip=True, device=data.device, dtype=data.dtype)\n filt_len = dec_lo.shape[-1]\n # dec_lo = torch.tensor(dec_lo[::-1]).unsqueeze(0)\n # dec_hi = torch.tensor(dec_hi[::-1]).unsqueeze(0)\n filt = torch.stack([dec_lo, dec_hi], 0)\n\n if level is None:\n level = pywt.dwt_max_level(data.shape[-1], filt_len)\n\n result_lst = []\n res_lo = data\n for s in range(level):\n res_lo = fwt_pad(res_lo, wavelet, level=s, mode=mode)\n res = torch.nn.functional.conv1d(res_lo, filt, stride=2)\n res_lo, res_hi = torch.split(res, 1, 1)\n result_lst.append(res_hi.squeeze(1))\n result_lst.append(res_lo.squeeze(1))\n return result_lst[::-1]", "def wavedec2(data, wavelet, level: int = None, mode: str = \"reflect\") -> list:\n dec_lo, dec_hi, _, _ = get_filter_tensors(\n wavelet, flip=True, device=data.device, dtype=data.dtype)\n dec_filt = construct_2d_filt(lo=dec_lo, hi=dec_hi)\n\n if level is None:\n level = pywt.dwtn_max_level([data.shape[-1], data.shape[-2]], wavelet)\n\n result_lst = []\n res_ll = data\n for s in range(level):\n res_ll = fwt_pad2d(res_ll, wavelet, level=s, mode=mode)\n res = torch.nn.functional.conv2d(res_ll, dec_filt, stride=2)\n res_ll, res_lh, res_hl, res_hh = torch.split(res, 1, 1)\n result_lst.append((res_lh, res_hl, res_hh))\n result_lst.append(res_ll)\n return result_lst[::-1]", "def _get_window_start(self, waveforms):", "def smooth_data(rawsong, samp_freq, freq_cutoffs=None, smooth_win=2):\n\n if freq_cutoffs is None:\n # then don't do bandpass_filtfilt\n filtsong = rawsong\n else:\n filtsong = bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs)\n\n squared_song = np.power(filtsong, 2)\n len = np.round(samp_freq * smooth_win / 1000).astype(int)\n h = np.ones((len,)) / len\n smooth = np.convolve(squared_song, h)\n offset = round((smooth.shape[-1] - filtsong.shape[-1]) / 2)\n smooth = smooth[offset:filtsong.shape[-1] + offset]\n return smooth", "def smooth_data(rawsong, samp_freq, freq_cutoffs=None, smooth_win=2):\n\n if freq_cutoffs is None:\n # then don't do bandpass_filtfilt\n filtsong = rawsong\n else:\n filtsong = bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs)\n\n squared_song = np.power(filtsong, 2)\n len = np.round(samp_freq * smooth_win / 1000).astype(int)\n h = np.ones((len,)) / len\n smooth = np.convolve(squared_song, h)\n offset = round((smooth.shape[-1] - filtsong.shape[-1]) / 2)\n smooth = smooth[offset:filtsong.shape[-1] + offset]\n return smooth", "def get_1d_features(waveforms):\n durations = []\n PTratio= []\n repolarizationslope= []\n recoveryslope = []\n for i in range(len(waveforms)): \n waveform=waveforms[i,:] \n durations.append(get_waveform_duration(waveform))\n PTratio.append(get_waveform_PTratio(waveform))\n repolarizationslope.append(get_waveform_repolarizationslope(waveform))\n recoveryslope.append(get_waveform_recoveryslope(waveform))\n return np.array(durations), np.array(PTratio), np.array(repolarizationslope), np.array(recoveryslope)", "def calc_psf(filterName, oversample=4, offset_r=0, offset_theta=0, instrument='nircam', fov=5):\n # TODO complete instrument selection list\n if instrument.lower() == 'nircam':\n instr = webbpsf.NIRCam()\n instr.filter = filterName\n instr.options['source_offset_r'] = offset_r\n instr.options['source_offset_theta'] = offset_theta\n PSF = instr.calc_psf(oversample=oversample, 
fov_arcsec=fov)\n # retern the oversampled data\n return PSF[0].data", "def extract_wavelet(self, freq, num_cyc=3, mode=\"complex\", ignore_sessions=False):\n wav = wavelet(freq, sampling_freq=self.sampling_freq, num_cyc=num_cyc)\n if self.sessions is None or ignore_sessions:\n convolved = self.__class__(\n pd.DataFrame(\n {x: convolve(y, wav, mode=\"same\") for x, y in self.iteritems()}\n ),\n sampling_freq=self.sampling_freq,\n )\n else:\n convolved = self.__class__(sampling_freq=self.sampling_freq)\n for k, v in self.itersessions():\n session = self.__class__(\n pd.DataFrame(\n {x: convolve(y, wav, mode=\"same\") for x, y in v.iteritems()}\n ),\n sampling_freq=self.sampling_freq,\n )\n convolved = convolved.append(session, session_id=k)\n if mode == \"complex\":\n convolved = convolved\n elif mode == \"filtered\":\n convolved = np.real(convolved)\n elif mode == \"phase\":\n convolved = np.angle(convolved)\n elif mode == \"magnitude\":\n convolved = np.abs(convolved)\n elif mode == \"power\":\n convolved = np.abs(convolved) ** 2\n else:\n raise ValueError(\n \"Mode must be ['complex','filtered','phase',\" \"'magnitude','power']\"\n )\n convolved = self.__class__(\n convolved,\n sampling_freq=self.sampling_freq,\n features=self.features,\n sessions=self.sessions,\n )\n convolved.columns = (\n \"f\" + \"%s\" % round(freq, 2) + \"_\" + mode + \"_\" + self.columns\n )\n return convolved", "def plot_wavelength_slice(self, offset, **kwargs):\n cumul_cube_lengths = np.cumsum(np.array([c.shape[self.common_axis]\n for c in self.data]))\n sequence_index, cube_index = cu._convert_cube_like_index_to_sequence_indices(\n offset, cumul_cube_lengths)\n plot = self[sequence_index].plot_wavelength_slice(cube_index, **kwargs)\n return plot", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n #return y\n return y[(window_len/2):-(window_len/2)]", "def get_st_features(signal, rate, window_step=0.025, window_length=0.05):\n\n sample_step = int(rate*window_step)\n sample_length = int(rate*window_length)\n\n (features, feature_names) = audioFeatureExtraction.stFeatureExtraction(signal, rate, sample_length, sample_step)\n\n return features, feature_names", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='same')\n return y", "def smooth(x,window_len=11,window='hanning'):\r\n\r\n if 
window_len<3:\r\n return x\r\n\r\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\r\n #print(len(s))\r\n if window == 'flat': #moving average\r\n w=np.ones(window_len,'d')\r\n else:\r\n w=eval('np.'+window+'(window_len)')\r\n\r\n y=np.convolve(w/w.sum(),s,mode='valid')\r\n return y[0:256]", "def smooth(x, window_len=11, window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y", "def smooth(x,window_len=11,window='hanning'): \n \n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n \n\n if window_len<3:\n return x\n \n \n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n \n\n s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n \n y=np.convolve(w/w.sum(),s,mode='valid')\n return y", "def window(data, f_interval=None, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- window')\n \n # Avoid overwritting data:\n data0 = data.copy()\n\n f_range = round(f_interval[0]+(f_interval[1]-f_interval[0])/2)\n picon = 2*np.pi*f_range*data[:,0]\n fsin = np.sin(picon)\n fcos = np.cos(picon)\n\n # Sinusoidal\n data0[:,1] = fsin\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]\n Psin = Pf_power[:,1]\n\n # Co-sinusoidal\n data0[:,1] = fcos\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]\n Pcos = Pf_power[:,1]\n\n # Output:\n P = 1./2*(Pcos+Psin)\n Pf_window = np.vstack([f, P]).T\n return Pf_window", "def wave_samples(self):\n return self._quantized_subsamples", "def sample(wave, factor):\n ys = np.zeros(len(wave))\n ys[::factor] = np.real(wave.ys[::factor])\n return Wave(ys, framerate=wave.framerate)", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise(ValueError, \"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise(ValueError, \"Input vector needs to be 
bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise (ValueError,\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n if len(y) is not len(x):\n y = y[window_len/2-1:-(window_len/2)]\n return y", "def smooth(x, window_len=11, window='hanning'):\n window_len = min(window_len, len(x) - 1)\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n return y", "def apply_window(audio):\n\treturn audio * numpy.hanning(len(audio))", "def unpack(self, pos, formatSpecifier, length):\n start = pos + self.posWAVEDESC\n x = np.frombuffer(self.data[start:start + length], self.endianness + formatSpecifier, count=1)[0]\n return x", "def smooth(x,window_len=11,window='hanning'):\n\n\t# if x.ndim != 1:\n\t# raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n\t# if x.size < window_len:\n\t# raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\tassert x.ndim==1\n\tassert x.size==window_len\n\n\tif window_len<3:\n\t\treturn x\n\n\tflag = (window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman'])\n\tassert flag==1\n\n\ts=numpy.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n\tif window == 'flat': #moving average\n\t\tw=numpy.ones(window_len,'d')\n\telse:\n\t\tw=eval('numpy.'+window+'(window_len)')\n\n\ty=numpy.convolve(w/w.sum(),s,mode='valid')\n\treturn y", "def waverec(coeffs: list, wavelet: pywt.Wavelet) -> torch.Tensor:\n _, _, rec_lo, rec_hi = get_filter_tensors(\n wavelet, flip=False, device=coeffs[-1].device,\n dtype=coeffs[-1].dtype\n )\n filt_len = rec_lo.shape[-1]\n filt = torch.stack([rec_lo, rec_hi], 0)\n\n res_lo = coeffs[0]\n for c_pos, res_hi in enumerate(coeffs[1:]):\n res_lo = torch.stack([res_lo, res_hi], 1)\n res_lo = torch.nn.functional.conv_transpose1d(\n res_lo, filt, stride=2).squeeze(1)\n\n # remove the padding\n padl = (2 * filt_len - 3) // 2\n padr = (2 * filt_len - 3) // 2\n if c_pos < len(coeffs) - 2:\n pred_len = res_lo.shape[-1] - (padl + padr)\n nex_len = coeffs[c_pos + 2].shape[-1]\n if nex_len != pred_len:\n padr += 1\n pred_len = res_lo.shape[-1] - (padl + padr)\n assert (\n nex_len == pred_len\n ), \"padding error, please open an issue on github \"\n if padl > 0:\n res_lo = res_lo[..., padl:]\n if padr > 0:\n res_lo = res_lo[..., :-padr]\n return res_lo", "def smooth(x,window_len=11,window='hanning'):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise 
ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n\n s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n else:\n w=eval('numpy.'+window+'(window_len)')\n\n y=numpy.convolve(w/w.sum(),s,mode='same')\n return y[window_len-1:-window_len+1]", "def gtgram(wave,fs,window_time, hop_time,channels,f_min,f_max):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gt.gtgram_strides(fs,window_time, hop_time, xe.shape[1])\n y = np.zeros((channels, ncols))\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n return y", "def smooth(x,window_len=11,window='bartlett',data = None):\n\n x = array(x)\n\n # use externally derieved window evaluation\n if data is not None:\n window_len = len(data)\n window = 'extern'\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n raise ValueError(\"window must not be shorter than 3\")\n\n if window_len%2 is 0:\n raise ValueError(\"window_len should be odd\")\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman','triang','extern']:\n raise ValueError(\"Window is none of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman','triang','extern'\")\n\n \n s=r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n\n elif window == 'triang':\n w = triang(window_len)\n\n elif window == 'extern':\n w = data\n \n else:\n w=eval(window+'(window_len)')\n\n y=convolve(w/w.sum(),s,mode='valid')\n \n return y[int((window_len-1)/2):len(y)-int((window_len-1)/2)]", "def smooth( x, window_len = 5, window = 'hanning' ):\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len<3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n\n s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]\n\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y", "def _get_pulse_shaping_waveform(self):\n self.pulse_shaping_list = []\n # Make the rise time be 3.3333% if the dot time.\n rise_time_in_msec = 0.03333333333333 * self.dot_time_in_msec\n # Limit the rise time to 2 milliseconds.\n if rise_time_in_msec > 0.002:\n rise_time_in_msec = 0.002\n rising_falling_count = int(rise_time_in_msec * self.sample_rate)\n step = math.pi / rising_falling_count\n # The first value is zero, so skip that value.\n # The last value is 1.0, so skip that value too.\n for i in range(1, rising_falling_count - 1):\n gain = 0.5 * (1.0 - math.cos(step * i))\n self.pulse_shaping_list.append(gain)", "def smooth_avg(arr, winlen):\n\n window = np.ones(winlen) / (1.0 * winlen)\n return fftconvolve(arr, window, 'same')", "def set_fake_regular_offsets(self, win_wd, win_gap=0):\n sample_onset = int((win_wd + win_gap)*self.sampling_rate)\n self.onset_samples = range(0, len(self.audio), sample_onset)\n # excluding windows that are too close to the beginning\n self.onset_samples = [x for x in self.onset_samples if x > self.beginning_buffer]\n self.onset_times = 
[x/self.sampling_rate for x in self.onset_samples]", "def offsetpolygon(polyx, offset):\n polyy = []\n # need three points at a time\n for counter in range(0, len(polyx) - 3):\n # get first offset intercept\n pt = getpt(polyx[counter],\n polyx[counter + 1],\n polyx[counter + 2],\n offset)\n # append new point to polyy\n polyy.append(pt)\n # last three points\n pt = getpt(polyx[-3], polyx[-2], polyx[-1], offset)\n polyy.append(pt)\n pt = getpt(polyx[-2], polyx[-1], polyx[0], offset)\n polyy.append(pt)\n pt = getpt(polyx[-1], polyx[0], polyx[1], offset)\n polyy.append(pt)\n return polyy", "def custom_sound(type_of, attack, decay, cutoff, coef, time, freq):\n dzw = np.zeros(time*44100)\n l=0\n for i in type_of:\n if i==\"sin\":\n dzw+= coef[l]*sin_custom(freq,time,attack[l],decay[l])\n if i==\"sq\":\n dzw+= coef[l]*sq_custom(freq,time,attack[l],decay[l])\n if i==\"saw\":\n dzw+= coef[l]*saw_custom(freq,time,attack[l],decay[l])\n l+=1 \n dzw[(1-cutoff)*time*44100 -1:]==0\n dzw = np.repeat(dzw,2).reshape(len(dzw),2)\n dzw = dzw/np.amax(dzw)\n return(dzw)", "def translate(self, offset):\n return BSplineFunc(self.kvs, self.coeffs + offset)", "def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = 
str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def windowed_means(out_features, param):\n sampling_fq = param.t_max * 1000 + 1\n temp_wnd = np.linspace(param.min_latency, param.max_latency, param.steps + 1)\n intervals = np.zeros((param.steps, 2))\n for i in range(0, temp_wnd.shape[0] - 1):\n intervals[i, 0] = temp_wnd[i]\n intervals[i, 1] = temp_wnd[i + 1]\n intervals = intervals - param.t_min\n output_features = []\n for i in range(out_features.shape[0]):\n feature = []\n for j in range(out_features.shape[1]):\n time_course = out_features[i][j]\n for k in range(intervals.shape[0]):\n borders = intervals[k] * sampling_fq\n feature.append(np.average(time_course[int(borders[0] - 1):int(borders[1] - 1)]))\n output_features.append(feature)\n out = preprocessing.scale(np.array(output_features), axis=1)\n return out", "def smooth(data, window_len=10, window='hanning', keep_original=False):\n # TODO: add comnparison\n window_len += (window_len + 1) % 2\n s = np.r_['-1', data[:, window_len - 1:0:-1], data, data[:, -2:-window_len - 1:-1]]\n\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n # y=np.convolve(w/w.sum(),s,mode='valid')\n surplus_data = int((window_len - 1) * 0.5)\n return np.apply_along_axis(lambda m: np.convolve(m, w / w.sum(), mode='valid'), axis=1, arr=s)[:,\n surplus_data:-surplus_data]", "def clip_motif_time_series(freq_preprocessed_data, all_offsets, all_bin_widths, motif_start_times, motif_length: int):\n # [Freq]->(Instances, Frequency, Channels, Time-Steps, Bin Width)\n # Only need to get the times around the first syllable\n\n motif_events_series = []\n for pred_data, offset, bin_width in zip(freq_preprocessed_data, all_offsets, all_bin_widths):\n # Grab the Neural Activity Centered on Each event\n set_window = (offset - bin_width, offset + motif_length)\n chunk_events = fet.get_event_related_nd_chunk(chunk_data=pred_data, chunk_indices=motif_start_times, fs=1000,\n window=set_window) # clip the data at the start times\n\n corrected_chunk_events = []\n for chunk in chunk_events:\n corrected_chunk_events.append(np.squeeze(chunk))\n\n chunk_events = fet.event_shape_correction(chunk_events=corrected_chunk_events,\n original_dim=2) # Reformat to be array-like\n\n chunk_events_series = get_time_series(data=chunk_events, bin_width=bin_width) # clip samples based on bin_width\n\n motif_events_series.append(np.squeeze(chunk_events_series)) # Remove Single axis and append to list\n\n return motif_events_series", "def _choose_x_slice(self, offset):\n arr = None\n axis = 0\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n unit = self.axes_wcs.wcs.cunit[-1]\n delta = self.axes_wcs.wcs.cdelt[-1] * unit\n wloffset = offset.to(unit) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr", "def smooth(x, window_len=11, window='hanning', mode='same'):\n import numpy\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs 
to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n s = numpy.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = numpy.ones(window_len, 'd')\n else:\n w = eval('numpy.' + window + '(window_len)')\n\n y = numpy.convolve(w / w.sum(), s, mode=mode)\n if mode == 'same':\n return y[np.int_(window_len) - 1:-np.int_(window_len) + 1]\n else:\n return y[np.int_(window_len / 2 - 1):-np.int_(window_len / 2)]", "def windowfunction(time, freq):\n \n Ntime = len(time)\n Nfreq = len(freq)\n winkernel = np.empty_like(freq)\n\n for i in range(Nfreq):\n winkernel[i] = np.sum(np.cos(2.0*pi*freq[i]*time))**2 \\\n + np.sum(np.sin(2.0*pi*freq[i]*time))**2\n\n # Normalise such that winkernel(nu = 0.0) = 1.0 \n\n return winkernel/Ntime**2", "def wave_create():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCRE, 0, 0))", "def smooth(x, window_len=3, window='hanning'):\n s = np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]\n w = getattr(np, window)(window_len)\n y = np.convolve(w/w.sum(), s, mode='same') \n return y[window_len-1:-window_len+1]", "def sliding_window_offsets(data, window_size=500, shift_size=1):\n offsets = np.asarray(_sliding_window_chunkoffsets(data, window_size, shift_size))\n return offsets", "def stft(self, wav: np.ndarray) -> np.ndarray:\n return librosa.stft(\n y=wav,\n n_fft=self.filter_length,\n hop_length=self.hop_length,\n win_length=self.win_length,\n pad_mode=\"reflect\",\n )", "def smooth(x,window_len=10,window='hanning'):\n #\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n #\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n #\n if window_len<3:\n return x\n #\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n #\n s=r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]\n #print(len(s))\n if window == 'flat': #moving average\n w=ones(window_len,'d')\n else:\n w=eval(window+'(window_len)')\n #\n y=convolve(w/w.sum(),s,mode='same')\n return y[window_len-1:-window_len+1]" ]
[ "0.6025386", "0.54781526", "0.5399577", "0.5278359", "0.50593245", "0.50171155", "0.50058156", "0.49881226", "0.49332815", "0.4863905", "0.4842912", "0.48232105", "0.47933024", "0.47808012", "0.4761141", "0.47247434", "0.47176567", "0.46723312", "0.46707693", "0.4661695", "0.46402556", "0.4639597", "0.46186784", "0.46114025", "0.4608447", "0.46076497", "0.4606876", "0.45965448", "0.45920363", "0.458755", "0.45724723", "0.4555235", "0.4553121", "0.45514292", "0.45302072", "0.45269695", "0.4523417", "0.45217964", "0.45184645", "0.45117757", "0.45093074", "0.44994822", "0.44598457", "0.44563434", "0.44531885", "0.4446621", "0.44431", "0.44371513", "0.44140264", "0.44138312", "0.44111976", "0.44103912", "0.4403402", "0.44027275", "0.44023755", "0.44001752", "0.44001752", "0.43984067", "0.43921185", "0.4387973", "0.43877175", "0.438461", "0.43836373", "0.43767306", "0.43753424", "0.43724602", "0.43710655", "0.43707544", "0.43703708", "0.4368711", "0.4368294", "0.4366272", "0.43646285", "0.43634224", "0.43505675", "0.43478945", "0.4345348", "0.43436226", "0.4337639", "0.4333266", "0.43300882", "0.43290666", "0.43268663", "0.4324746", "0.43231094", "0.4320785", "0.43207413", "0.43194956", "0.4318538", "0.4318028", "0.4316838", "0.43127605", "0.4311626", "0.43112874", "0.43058562", "0.43020067", "0.43019235", "0.43011823", "0.42997563", "0.4290066" ]
0.46706223
19
Load dumped object handled by file_name. If file_name is None, then default file name is used.
def model_load(file_name=None):
    if file_name is None :
        file_name = "./data/_oP5_SegmentClassifier.dump"
    else:
        pass
    return p5_util.object_load(file_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadFromFile(file_name=\"saved_object.pickle\"):\n\n try:\n with open(file_name, \"rb\") as inputToLoad:\n loaded_object = pickle.load(inputToLoad)\n return loaded_object\n except IOError:\n raise InvalidFilesPath\n except ImportError as e:\n raise InvalidFile(\n \"Structure of project has been changed since saving this object: %s\" % str(e))\n except TypeError:\n return pickle.load(file_name)", "def loadObj(name):\n\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_obj(name):\r\n with open('../pickle/' + name + '.pkl', 'rb') as fout:\r\n return pickle.load(fout)\r\n # end with\r", "def load_object(self, filename):\n with open(filename, 'rb') as inp: # Overwrites any existing file.\n data = pickle.load(inp)\n return data", "def _load_obj(name):\n with open('/bigdisk/pickles/' + name, 'r') as f:\n return pickle.load(f)", "def pickleLoad(filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'rb')\n object = pickle.load(filehandler)\n return object", "def load_object(self, name: str):\r\n with open_(self._path_for_pickle(name), \"rb\") as f:\r\n return dill.load(f)", "def load(self, filename):\n loader = GazpachoObjectBuilder(filename=filename, app=self._app)\n self._read_from_loader(loader)\n self.path = filename\n self.name = os.path.basename(filename)", "def load_object(filename):\n with open(filename, 'rb') as input_file: # Overwrites any existing file.\n obj = pickle.load(input_file)\n return obj", "def load(self, filename):\n pass", "def __init__(self, name, loadfile=None, loadpath=''):\n \n self.name = name\n \n if loadfile==None:\n self.data = []\n else:\n with open(loadpath+loadfile) as currentfile:\n self.data = pickle.load(currentfile)", "def load_obj(saved_name):\n with open( saved_name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_object(filename):\r\n with open(filename, 'rb') as input:\r\n obj = pickle.load(input)\r\n return obj", "def load(file_, name='_pkl', use_cpickle=False):\n file_.seek(0) # To be able to read several objects in one file\n if use_cpickle:\n unpickler = cPickle.Unpickler\n else:\n unpickler = pickle.Unpickler\n with tarfile.open(fileobj=file_, mode='r') as tar_file:\n p = unpickler(\n tar_file.extractfile(tar_file.getmember(name)))\n if '_parameters' in tar_file.getnames():\n p.persistent_load = _PersistentLoad(tar_file)\n return p.load()", "def load(cls, file_id):\n if not isinstance(file_id, file):\n handle = open(\n \"{:s}{:s}-{:d}.pckl\".format(\n DUMP_PATH,\n cls.__name__,\n file_id\n ),\n \"rb\")\n else:\n handle = file_id\n return pickle.load(handle)", "def load(self, filename):\n raise NotImplementedError", "def load(cls,filename):\n obj = None\n f = open(filename,'r')\n try:\n obj = pickle.load(f)\n obj.filename = filename\n finally:\n f.close()\n return obj", "def load_viz_object(filename: str) -> OrqVizObject:\n\n with open(filename, \"rb\") as f:\n loaded_object = pickle.load(f)\n\n return loaded_object", "def load(cls, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def load_instance(file_name):\n file1 = open(file_name, 'rb')\n instance = pickle.load(file1)\n file1.close()\n\n return instance", "def load(self, filename):\n\n return super().load(filename=filename)", "def load(self, which):\n\t\tpath = os.path.join(self.storagedir, 
which)\n\t\tprint(\"Loading from\", path)\n\t\twith open(path, \"rb\") as handle:\n\t\t\tsetattr(self, which, _pickle.load(handle))", "def load_object(fpath):\r\n with open(fpath, 'rb') as i:\r\n return pickle.load(i)", "def load(self, file_name):\n\n self._state.load(file_name)", "def load(cls, file_name, auto_cls=True):\n file_name = os.path.expanduser(file_name)\n file_name = os.path.abspath(file_name)\n obj = cls.loaddata(np.load(file_name), auto_cls)\n obj.file_name = file_name\n return obj", "def retrieve_object(self, name: str):\n file_path = self.__get_file_path(name)\n return self.__deserialize_object(file_path)", "def load(file_name):\n try:\n return _load(file_name)\n except KaleMarshalException as e:\n log.error(e)\n log.debug(\"Original Traceback\", exc_info=e.__traceback__)\n utils.graceful_exit(1)", "def read_from_file(name):\n print 'reading structures from pickle'\n print '------------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'rb')\n new_obj = pickle.load(file)\n file.close()\n\n return new_obj", "def load(self,filename=None): # return True\r\n pass", "def pickle_load(file_name: str) -> Any:\n with open(file_name, 'rb') as file:\n return pickle.load(file)", "def pickle_loader(fileobj):\n if isinstance(fileobj, bytes):\n data = pickle.loads(fileobj, encoding=\"latin1\")\n elif isinstance(fileobj, six.string_types):\n with open(fileobj, 'rb') as f:\n data = pickle.load(f, encoding=\"latin1\")\n elif hasattr(fileobj, 'read'):\n data = pickle.load(fileobj, encoding=\"latin1\")\n else:\n raise ValueError('fileobj is not a filename or a file object')\n return data", "def load_from_disk(name):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'load_from_disk(%s)' % shortname\n pkl_file = open(shortname, 'rb')\n object = pickle.load(pkl_file)\n pkl_file.close()\n return object", "def pkl_load(name, path = 'obj'):\n if '.p' not in name:\n name = name + '.pkl'\n path = os.path.join(path, name)\n try:\n obj = pickle.load(open(path, 'rb'))\n except FileNotFoundError:\n obj = None\n return obj", "def _load_file(name):\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)", "def load_from(filename):\n from .io import load\n return load(filename)", "def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))", "def from_pickle(filename):\n\t\tif isinstance(filename, strcomp):\n\t\t\tif os.path.exists(filename):\n\t\t\t\ttry:\n\t\t\t\t\tfile = open(filename, \"rb\")\n\t\t\t\t\tobj = pickle.load(file)\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn obj\n\t\t\t\t\t# return pickle.load(open(filename, \"rb\"))\n\t\t\t\texcept:\n\t\t\t\t\traise IOError(\"Could not unpickle file: %s\" % (filename))\n\t\t\telse:\n\t\t\t\traise FileNotFoundError(\"File does not exist: %s\" % (filename))\n\t\telse:\n\t\t\traise TypeError(\"Must be of type str. 
Got: %s\" % (type(filename)))", "def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()", "def load_dictionary(filename):\n filename = os.path.join(FILE_DIR, 'assets/obj/' + filename)\n try:\n with open(filename, 'rb') as input:\n return pickle.load(input)\n except Exception as e:\n print(\"exception\", e)", "def load_file(self, filename):\n with open(filename, \"rb\") as pickle_handle:\n return pickle.load(pickle_handle)", "def load(filename):\n with open(filename,'rb') as f:\n return pickle.load(self,f)", "def load(self, file_id):\n pass", "def read_object(filename: str) -> Any:\n with open(filename, 'rb') as read_file:\n obj = pickle.load(read_file)\n\n return obj", "def load_object(filename):\n\n with gzip.GzipFile(filename, 'rb') as source: result = source.read()\n ret = pickle.loads(result)\n source.close()\n\n return ret", "def load_model(file_name):\n with open(file_name, 'rb') as file:\n return pickle.load(file)", "def load_skel(self, file_name): \r\n\r\n fid = open(file_name, 'r')\r\n self.read_skel(fid)\r\n fid.close()\r\n self.name = file_name", "def load_object(filepath):\n with open(filepath, 'rb') as f:\n obj = pickle.load(f)\n return obj", "def load_pickle(filename):\n\n with open(filename, 'rb') as file:\n if filename.split('.')[-1] == 'dill':\n obj = dill.load(file)\n else:\n obj = pickle.load(file)\n return obj", "def load_object(path):\r\n with open(path,\"rb\") as f:\r\n object = pickle.load(f) \r\n return object", "def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()", "def load(self, filename):\n import pickle\n return pickle.load(open(filename, 'rb'))", "def __init__(self, file_name, load_uncertainty=False):\n if file_name[-3:] == 'npz':\n self._load_npz(file_name)\n else:\n self._load_3ddose(file_name, load_uncertainty)", "def readobject(filename):\n # import cPickle as pickle\n with open(filename, 'rb') as input_file:\n return pickle.load(input_file)", "def load(fname, objname=None):\r\n if not objname:\r\n objname = fname.split(\".\")[0]\r\n return f'\\ncmd.load(\"{fname}\", \"{objname}\")'", "def load_from_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.load_from_disk(file_name)", "def load(file_name):\n ferme_fenetre()\n Hitori(file_name)", "def load(self):\r\n self.read(self.filename)", "def loads(self, profile_name: Optional[str] = \"default\", **kwargs):\n bytes_pickle = self._decode_pickle(self.pickle_object)\n self.remote_object = cloudpickle.loads(bytes_pickle)\n self.remote_object.loads(profile_name, **kwargs)", "def loadPlayerFile (self):\n #print self.__filename\n if self.__filename == \"\":\n self.__setPlayerFilename()\n #print \"filename= \" + self.__filename \n try:\n #filename handled internally -- derive it from playerName\n# print self.__filename\n f = open(self.__filename, \"r\")\n tempIn = pickle.load(f)\n self.__playerName = tempIn.getPlayerName()\n self.setBestStepRun(tempIn.getBestStepRun())\n self.__songDictionary = tempIn.getAllSongs()\n self.setDifficulty(tempIn.getDifficulty())\n 
self.setHighScore(tempIn.getHighScore())\n self.setLevelReached(tempIn.getLevelReached())\n f.close() \n except IOError:\n raise PlayerIOError(\"Unable to read player info from file.\")", "def __init__(self, file_name=None):\n # deserialize\n if file_name:\n if os.path.isfile(file_name):\n self.__dict__ = load_json_object(file_name)\n else:\n raise IOError('The file {0} was not found.'.format(file_name))\n else:\n self.checking_entity = ''\n self.checking_level = '1'\n self.comments = ''\n self.contributors = ''\n self.publish_date = datetime.today().strftime('%Y-%m-%d')\n self.source_text = 'en'\n self.source_text_version = ''\n self.version = ''", "def load_pickle(filename):\n with open(filename, 'rb') as file:\n obj = pickle.load(file)\n return obj", "def load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)", "def load_pickle(filename):\n with open(filename, \"rb\") as f:\n obj = pickle.load(f)\n\n return obj", "def load(cls, filename):\n return cPickle.load(open(filename, \"rb\"))", "def load(self, file_name_with_path: str):\n\n if self.state._models is None:\n self.register_models()\n logger.info(\"Agent State loaded successfully\")\n for k, model in self.state._models.items():\n model.load(file_name_with_path=os.path.join(f'{file_name_with_path}_{model.name}.th'))\n logger.info(f'{file_name_with_path}_{model.name}.th loaded')\n logger.info(f\"{model.name} model loaded successfully\")\n self.state = Munch(json.load(open(file_name_with_path + \".meta\")))", "def load(self, filename=None):\n if filename is None:\n filename = BlockChainConf.DEFAULT_CHAIN_DUMP_FILENAME\n\n with open(filename, \"r\") as chain:\n chain = loads(chain)\n\n # verify the integrity of the chain\n # before simply assuming that it is\n # a valid one", "def load(self, file_name):\n self.file_name = file_name\n self.frd = FRDFile(file_name)\n self._build_node_kon()\n self._build_step_idx()", "def __init__(self, file_name=None):\n self.file_name = file_name\n self.frd = None\n self._steps = []\n if file_name is not None:\n self.load(file_name)", "def load(self, filename):\n\t\tif self.isInitialized():\n\t\t\tself.Loaded = self.loader.load(filename)", "def read_pickle(file_name):\n with open(file_name, 'rb') as f:\n obj = pickle.load(f)\n return obj", "def load(filename):\n file = gzip.GzipFile(filename, 'rb')\n buffer = \"\"\n while True:\n data = file.read()\n if data == \"\":\n break\n buffer += data\n object = pickle.loads(buffer)\n file.close()\n return object", "def load(self, sFilename):\n\n f = open(sFilename, \"r\")\n u = pickle.Unpickler(f)\n dObj = u.load()\n f.close()\n return dObj", "def load_obj(path: str):\n with open(path, 'rb') as h:\n return pickle.load(h)", "def load(cls, filename, format=None, mode='rb'):\n format = infer_format(filename, format)\n if not os.path.isfile(filename):\n raise RuntimeError(\"{0!r} not found.\".format(filename))\n if format == 'pkl.gz':\n f = gzip.open(filename, 'rb')\n data = pickle.loads(f.read())\n f.close()\n elif format == 'pkl':\n with io.open(filename, 'rb') as f:\n data = pickle.loads(f.read())\n x = cls(**data)\n return x", "def load(self, sFilename):\n\n f = open(sFilename, \"r\")\n u = pickle.Unpickler(f)\n dObj = u.load()\n f.close()\n return dObj", "def load(self, sFilename):\n\n f = open(sFilename, \"r\")\n u = pickle.Unpickler(f)\n dObj = u.load()\n f.close()\n return dObj", "def load(self, sFilename):\n\n f = open(sFilename, \"r\")\n u = pickle.Unpickler(f)\n dObj = u.load()\n f.close()\n return dObj", "def load_data_loader_from_file(cls, 
filename):\n print(\"Loading data loader from file: {}\".format(filename))\n\n with open(filename, \"rb\") as file:\n return pickle.load(file)", "def read_object_from_file(file_name):\n if os.path.exists(file_name) is False:\n print (\"Error read path: [%s]\" % file_name)\n return None\n with open(file_name, 'r') as f:\n try:\n obj = json.load(f)\n except Exception:\n print (\"Error json: [%s]\" % f.read()[0:10])\n return None\n return obj", "def loadVar(name):\n with open(name+'.pickle','rb') as fl:\n return pickle.load(fl)", "def open_pickle_file(file_name):\n print(\"Unpickling file \" + file_name)\n full_file_name = full_path(file_name)\n with open(full_file_name, mode='rb') as f:\n return pickle.load(f)", "def load(self, path):\n pass", "def load(self, path):\n pass", "def load_pkl(self, name, file_object=None):\n if file_object:\n f = file_object\n else:\n f = gzip.open(name, 'rb')\n temp = pickle.load(f)\n if temp.ht_version < HT_OLD_COMPAT_VERSION:\n raise ValueError('Version of ' + name + ' is ' + str(temp.ht_version)\n + ' which is not at least ' +\n str(HT_OLD_COMPAT_VERSION))\n # assert temp.ht_version >= HT_COMPAT_VERSION\n params = temp.params\n self.hashbits = temp.hashbits\n self.depth = temp.depth\n if hasattr(temp, 'maxtimebits'):\n self.maxtimebits = temp.maxtimebits\n else:\n self.maxtimebits = _bitsfor(temp.maxtime)\n if temp.ht_version < HT_COMPAT_VERSION:\n # Need to upgrade the database.\n print(\"Loading database version\", temp.ht_version,\n \"in compatibility mode.\")\n # Offset all the nonzero bins with one ID count.\n temp.table += np.array(1 << self.maxtimebits).astype(np.uint32) * (\n temp.table != 0)\n temp.ht_version = HT_VERSION\n self.table = temp.table\n self.ht_version = temp.ht_version\n self.counts = temp.counts\n self.names = temp.names\n self.hashesperid = np.array(temp.hashesperid).astype(np.uint32)\n self.dirty = False\n self.params = params", "def load_file(*args, **kwargs): # real signature unknown\n pass", "def __init__(self, name=None, pickle_file=None):\n self.pickle_file = pickle_file\n if pickle_file is not None:\n self.store = pickle.load(pickle_file)\n if not isinstance(self.store, dict):\n raise ValueError, \"pickle file '%s' does not contain a dict\" % pickle_file\n else:\n self.store = OrderedDict()", "def load_pickle(file_name):\n with open(file_name, \"rb\") as handle:\n pickle_file = pickle.load(handle)\n\n return pickle_file", "def load_model_custom(file, object):\n return getattr(load_module(file), object)", "def load(cls, from_file):\n raise NotImplementedError", "def load(path):\n pass", "def __init__(self, fname):\n self.fname = os.path.abspath(fname)\n self.restore()", "def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)", "def load(filename):\n import pickle\n return pickle.load(open(filename, 'r'))", "def load(self, file_name):\n self.file_name = file_name\n\n with open(file_name, 'rb') as in_file:\n eof = (in_file.read(1) == b'')\n\n while not eof:\n key = int(in_file.read(4))\n code = in_file.read(1).decode()\n block = None\n if key == 1:\n block = FRDHeader(in_file, code)\n self.headers.append(block)\n elif key == 2:\n block = FRDNodeBlock(in_file)\n self.node_block = block\n elif key == 3:\n block = FRDElemBlock(in_file)\n self.elem_block = block\n elif key == 100:\n block = FRDResultBlock(in_file)\n self.result_blocks.append(block)\n elif key == 9999:\n eof = True\n if block is not None:\n self.blocks.append(block)\n eof = (eof or (in_file.read(1) == b''))", "def 
load_pickle(cls, filename_or_file_object):\n\n if hasattr(filename_or_file_object, 'read'):\n fi = filename_or_file_object\n else:\n fi = open(filename_or_file_object, 'r+b')\n rw_instance = RandomWriter()\n rw_instance.model.state_dict = pickle.load(fi)\n fi.close()\n return rw_instance", "def load(file_choice = file):\n\t\tif file_choice!=file:\n\t\t\tfile = file_choice\n\n\t\tpass", "def load(cls, filename, **kwargs):\n with open(filename, 'rb') as fin:\n self = pickle.load(fin, **kwargs)\n self._check_types()\n return self", "def load(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n lpath_filename = path + filename + '.pkl'\n f = open(lpath_filename, 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n self.__dict__.update(tmp_dict)" ]
[ "0.71736634", "0.7085416", "0.7077506", "0.7077506", "0.6953144", "0.6927678", "0.6907037", "0.68987334", "0.680562", "0.67826605", "0.67519313", "0.6732998", "0.67002213", "0.6668907", "0.66492176", "0.6630586", "0.6611128", "0.6594634", "0.6578726", "0.65453917", "0.6457661", "0.6454327", "0.64249873", "0.6400728", "0.6352962", "0.63399297", "0.6319636", "0.6319089", "0.63183004", "0.62781584", "0.626302", "0.62593627", "0.6239833", "0.6193089", "0.61876196", "0.6177095", "0.61648655", "0.6161924", "0.61233556", "0.61084485", "0.60928214", "0.6089966", "0.60659826", "0.6062951", "0.6060498", "0.60558593", "0.60522324", "0.6042083", "0.6037445", "0.60346776", "0.60274553", "0.60186446", "0.60146797", "0.60066587", "0.60066456", "0.600114", "0.5993893", "0.59929883", "0.5979292", "0.5973888", "0.5956971", "0.595451", "0.5942193", "0.5931464", "0.5926608", "0.5920549", "0.59026504", "0.5897527", "0.589661", "0.58863384", "0.58779114", "0.5869241", "0.58646035", "0.5863652", "0.5849412", "0.58492434", "0.58421713", "0.58421713", "0.58421713", "0.583984", "0.58296484", "0.58225566", "0.5821756", "0.5817701", "0.5817701", "0.5813841", "0.58133477", "0.5812771", "0.5792459", "0.5778623", "0.57779396", "0.5774723", "0.5774216", "0.57690537", "0.57661533", "0.5764232", "0.5757204", "0.5751945", "0.57487625", "0.57461095" ]
0.6612186
16
Print percentage of rows that have been processed.
def _print_stat_rows(title,rows_before,rows_after):
    self.strprint(str(title)+" : Percent of processed rows = %1.2F"\
    %(np.abs(rows_before-rows_after)*100/rows_before))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage, # ending with comma prevents newline from being appended\n sys.stdout.flush()", "def PrintProgress(self):\n print ' Examined %d nodes, found %d unique...' % (\n self.nodes_examined, self.unique_nodes\n )", "def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')", "def report_progress(self):\r\n stats = self.simulation_stats.stats\r\n solutions = len(self.solutions)\r\n round = self.round\r\n scores = stats[round]\r\n best_score = min(scores)\r\n avg_score = sum(scores) / solutions\r\n line = f\"Round: {round}\\t\\tBest Score: {best_score}\\t\\t Average Score: {avg_score}\"\r\n print(line)", "def _log_progress(self):\n self.num_of_requests_in_pipeline += 1\n if self.num_of_requests_in_pipeline % 20 == 0:\n print('-' * 200)\n print(f'DB PIPELINE: {self.num_of_requests_in_pipeline} items wenth though pipeline.')\n print('-' * 200)", "def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))", "def _print_progress(self):\n \n print 'Completed %d of %d' %(self.progress_id, self.total_work)\n self.progress_id += 1", "def print_progress(done,total):\n \n percent = 100.0*done/(total) \n bar = int(0.2*percent) \n \n sys.stdout.write('\\r')\n sys.stdout.write('[%-20s] %d%%' % ('='*bar, percent))\n sys.stdout.flush()\n \n return", "def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return", "def print_progress(self, info_dict):\n if self.n_print != 0:\n t = info_dict['t']\n if t == 1 or t % self.n_print == 0:\n string = 'Iteration {0}'.format(str(t).rjust(len(str(self.n_iter))))\n string += ' [{0}%]'.format(str(int(t / self.n_iter * 100)).rjust(3))\n print(string)", "def progress_msg(processed, total):\n if total > 1:\n percent = int((float(processed) / total) * 100)\n stderr.write(\"\\r[%d/%d] %d%%\" % (processed, total, percent))\n stderr.flush()", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def _printProgress(self, progress):\n if not self._quiet:\n sys.stdout.write('\\rWriting store to CSV: [{0:50s}] {1:.2f}% '.format('#' * int(progress * 50.0), progress * 100.0))\n sys.stdout.flush()", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def __show_progress(self, _cur_file_idx, _file_count):\n if (self.__is_show_proegress == False):\n return\n\n if(_file_count == 0):\n raise StandardError('no file found.')\n\n # show progress for each 5% (20 steps)\n digit = math.modf(math.log10(_file_count))[1]\n if(digit < 3):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)\n else:\n digit = digit - 2\n skipstep10 = math.pow(10, digit)\n if 
((_cur_file_idx % skipstep10) == 0):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def _progressBar(self, percent, printEvery=10):\n floor = int(percent)\n sys.stdout.write('\\r' * (floor + 9))\n sys.stdout.write('[')\n sys.stdout.write('=' * (floor/printEvery))\n sys.stdout.write('>] {:02.2f}%'.format(percent))\n sys.stdout.flush()", "def repetitive(df):\r\n total_rows = df.shape[0] \r\n for col in df.columns:\r\n count = df[col].value_counts(dropna=False)\r\n high_percent = (count/total_rows).iloc[0] \r\n if high_percent > 0.95:\r\n print('{0}: {1:.1f}%'.format(col, high_percent*100))\r\n print(count)\r\n print()", "def print_progress(remaining_pairs, G):\n total_pairs = G.number_of_nodes()**2\n uncomputed_pairs = len(remaining_pairs)\n print(1 - uncomputed_pairs/total_pairs)", "def displaySummary(self, dictionnary, lineCount):\n for key in self.summaryDict.keys():\n dictionnary[key] = (dictionnary[key] / lineCount) * 100\n #print(str(key)+\" => \"+str(dictionnary[key])+\" %\")", "def occurance(row):\r\n # divide the row's highest counted cause by the row's total number of deaths\r\n percentage = row['max_count'] / row['all_count']\r\n percentage *= 100\r\n # round the percentage up so it's two digits\r\n return round(percentage)", "def dl_progress(count, block_size, total_size):\n percent = int(count*block_size*100/total_size)\n sys.stdout.write(\"\\r\" + 'Progress:' + \"...%d%%\" % percent)\n sys.stdout.flush()", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def _print_progress(self):\n print(\n 'E {} S {} TR {:6.2f} G {:6.2f} Reg {:6.5f} Loss {:6.5f} AvgQ {:6.2f}'\n ' MinR {:6.2f} MaxR {:6.2f}'.format(\n self.episode, self.episode_step, self.tracker.total_reward, self.tracker.discounted_rewards,\n self.reg_loss_val, self.critic_loss_val, self.mean_q_val,\n self.tracker.min_reward, self.tracker.max_reward))", "def update_progress(self, done):\r\n if done % 100 == 0:\r\n print >>sys.stderr, \" %d processed, run time %d secs\" % (done, (datetime.now() - self.started_at).seconds)", "def OnProgress(bytes_read, total_bytes, percent):\n sys.stdout.write(\"progress: %.2f%% \\r\" % (percent))\n sys.stdout.flush()", "def progress(count, total):\r\n bar_len = 45\r\n filled_len = int(round(bar_len * count / float(total)))\r\n\r\n percents = round(100 * count / float(total), 1)\r\n p_bar = '=' * filled_len + '.' * (bar_len - filled_len)\r\n try:\r\n sys.stdout.write(' File {} of {} [{}] {}{}\\r'.format(count, total, p_bar, percents, '%'))\r\n except:\r\n pass\r\n sys.stdout.flush()", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def emit_status(self):\n next_job_count = len(self.fe.get_next_jobs())\n if next_job_count:\n emoji = \"🤔\"\n else:\n emoji = \"👌\"\n remaining = len(self.fe.get_current_network())\n\n pct = (self.total_job_count - remaining) / self.total_job_count\n print(\n f\"{emoji} ———— {next_job_count} jobs running, {remaining} remaining ({int(100*pct)}%). 
\",\n end=\"\\r\",\n )", "def calculate_progress_percentage(d):\n successcounter = 0\n for test in d:\n if d[test][\"status\"] != \"not yet run\":\n successcounter += 1\n totalcounter = 0\n for test in d:\n totalcounter += 1\n return int(successcounter / totalcounter * 100)", "def download_progress_hook(count, blockSize, totalSize):\n percent = int(count * blockSize * 100 / totalSize)\n\n global last_percent_reported\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n last_percent_reported = percent", "def download_progress_hook(count, blockSize, totalSize):\n global last_percent_reported\n percent = int(count * blockSize * 100 / totalSize)\n\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n last_percent_reported = percent", "def print_progress(self):\n print(\n '\\rE {} S {} TR {:6.2f} G {:6.2f} Loss {:6.5f} AvgQ {:6.2f}'\n ' MinR {:6.2f} MaxR {:6.2f}'.format(\n self.episode, self.episode_step,\n self.tracker.total_reward, self.tracker.discounted_rewards,\n self.loss_val, self.total_max_q / self.episode_step,\n self.tracker.min_reward, self.tracker.max_reward,\n end=\"\"))", "def _print_progress(counter):\n\tif(slogviz.config.interactive):\n\t\tprint('parse log file entry nr: {}'.format(counter),end='\\r')", "def calc_progress(self):\n if self.is_prepared():\n self._sync_info_from_disk()\n self._num_sown_batches = len(\n glob.glob(\n os.path.join(self.location, \"batches\", BTCH_NM.format(\"*\"))\n )\n )\n self._num_results = len(\n glob.glob(\n os.path.join(self.location, \"results\", RSLT_NM.format(\"*\"))\n )\n )\n else:\n self._num_sown_batches = -1\n self._num_results = -1", "def displayed_percent(self):\n return (self.displayed_words / self.total_words) * 100", "def progress_func(completed, total):\n if not self.log:\n return\n dots = (completed * dot_count) / total\n if dots > dot_count:\n dots = dot_count\n self.progress_lock.acquire()\n if self.dots_written < dot_count:\n dots_to_write = dots - self.dots_written\n self.dots_written = dots\n os.write(old_stdout, '.' * dots_to_write)\n self.progress_lock.release()", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... 
', self.app.IGNORE_EXIT_FLAG)", "def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))", "def progress_iterator(collection: Collection, message: str) -> Iterable:\n num_items = len(collection)\n last_percentage = -1\n for i, item in enumerate(collection):\n percentage = 100 * i // num_items\n if percentage > last_percentage:\n last_percentage = percentage\n print(f\"{message} {percentage}%\", end='\\r')\n yield item\n print(f\"{message} 100%\")", "def summarize(data, verbal=False, using_files=True):\n\n if using_files:\n for file_name in tqdm(data):\n fill_table(pd.read_csv(file_name))\n else:\n for table in tqdm(data):\n fill_table(table)\n\n for cluster in table_summary:\n #total_genes = sum(table_summary[cluster][\"phylum\"].values) # number of genes\n #total_genes = table_summary[cluster][\"N\"] # number of samples\n total_genes = table_summary[cluster][\"eggNOG\"].eggNOG.sum() # number of genes in COGs with duplicates\n \n phylum_percent = table_summary[cluster][\"phylum\"].apply(lambda x: x/total_genes * 100)\n phylum_percent.columns = [\"percent\"]\n table_summary[cluster][\"phylum\"] = pd.concat([table_summary[cluster][\"phylum\"],phylum_percent],axis=1)\n\n #Read above for fix\n genus_percent = table_summary[cluster][\"genus\"].apply(lambda x: x/total_genes * 100)\n genus_percent.columns = [\"percent\"]\n table_summary[cluster][\"genus\"] = pd.concat([table_summary[cluster][\"genus\"],genus_percent],axis=1)\n\n #read above for fix\n cog_percent = table_summary[cluster][\"eggNOG\"].apply(lambda x: x/table_summary[cluster][\"gene_cog\"] * 100)\n cog_percent.columns = [\"percent\"]\n table_summary[cluster][\"eggNOG\"] = pd.concat([table_summary[cluster][\"eggNOG\"],cog_percent],axis=1)\n\n #Print the data\n if verbal:\n print \"Cluster %s:\\n\" % cluster\n print \"Number of Samples: %d\\n\" % table_summary[cluster][\"N\"]\n print \"Taxonomy:\"\n print table_summary[cluster][\"phylum\"].sort(\"percent\", ascending=False)\n print \"----------------------------------\"\n print table_summary[cluster][\"genus\"].sort(\"percent\", ascending=False)\n print \"-----------------------------------\"\n print \"COGS:\"\n print table_summary[cluster][\"eggNOG\"].sort(\"percent\", ascending=False)\n print \"------------------------------------\"\n print \"End Summary\"", "def printProgress(iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n\tfilledLength\t= int(round(barLength * iteration / float(total)))\n\tpercents\t\t= round(100.00 * (iteration / float(total)), decimals)\n\tbar\t\t\t = '#' * filledLength + '-' * (barLength - filledLength)\n\tsys.stdout.write('%s [%s] %s%s %s (%s/%s total)\\r' % (prefix, bar, percents, '%', suffix, iteration, total))\n\tsys.stdout.flush()\n\tif iteration == total:\n\t\tprint(\"\\n\")", "def status_statement(current, final, count, chr=None):\n if current % int(final/count) == 0:\n if chr is None:\n print(\"Processed %i out of %i\" % (current, final))\n else:\n print(\"Processed %i out of %i in %s\" % (current, final, chr))", "def percentage(count, total):\n return count / total * 100", "def overall_progress(app_id):\r\n sql = text('''SELECT task.id, n_answers,\r\n COUNT(task_run.task_id) AS n_task_runs\r\n FROM task LEFT OUTER JOIN task_run ON task.id=task_run.task_id\r\n WHERE task.app_id=:app_id GROUP BY task.id''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n n_expected_task_runs = 0\r\n n_task_runs = 0\r\n for row in results:\r\n tmp = row[2]\r\n if row[2] > row[1]:\r\n tmp = row[1]\r\n 
n_expected_task_runs += row[1]\r\n n_task_runs += tmp\r\n pct = float(0)\r\n if n_expected_task_runs != 0:\r\n pct = float(n_task_runs) / float(n_expected_task_runs)\r\n return (pct * 100)", "def print_output(self):\n print(\"Reference score: \" + str(self.PotTax_reference.sum().TFI))\n print(\"Intervention score: \" + str(self.PotTax_intervention.sum().TFI))\n return", "def print_leaf(self, counts):\n total = sum(counts.values()) * 1.0\n probs = {}\n for lbl in counts.keys():\n probs[lbl] = str(int(counts[lbl] / total * 100)) + \"%\"\n return probs", "def print_acc(class_matrix):\n total = 0\n num_index = len(class_matrix)\n for i in range(num_index):\n total += class_matrix[i][i]\n print(\"Accuracy: {0}%\".format(100 * total/np.sum(class_matrix)))", "def update_percent(self):", "def percent_done(self) -> int:\n percent = (self.downloaded_images/self.total_images) * 100\n return int(percent)", "def determine_progress_value(rows, total_rows, percent_1, percent_5, percent_10, percent_20, percent_30, percent_40,\n percent_50,\n percent_60, percent_70, percent_80, percent_90, percent_100):\n\n if rows >= total_rows * 1.0 and not percent_100:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 100\n percent_100 = True\n elif rows >= total_rows * 0.9 and not percent_90:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 90\n percent_90 = True\n elif rows >= total_rows * 0.8 and not percent_80:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 80\n percent_80 = True\n elif rows >= total_rows * 0.7 and not percent_70:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 70\n percent_70 = True\n elif rows >= total_rows * 0.6 and not percent_60:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 60\n percent_60 = True\n elif rows >= total_rows * 0.5 and not percent_50:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 50\n percent_50 = True\n elif rows >= total_rows * 0.4 and not percent_40:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 40\n percent_40 = True\n elif rows >= total_rows * 0.3 and not percent_30:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 30\n percent_30 = True\n elif rows >= total_rows * 0.2 and not percent_20:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 20\n percent_20 = True\n elif rows >= total_rows * 0.1 and not percent_10:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 10\n percent_10 = True\n elif rows >= total_rows * 0.05 and not percent_5:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 5\n percent_5 = True\n elif rows >= total_rows * 0.01 and not percent_1:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 1\n percent_1 = True\n\n return percent_1, percent_5, percent_10, percent_20, percent_30, percent_40, percent_50, percent_60, percent_70, percent_80, percent_90, percent_100", "def 
percent(self):\r\n return self._percent", "def display_results_line(stats):\n # Line output.\n template = ' %5d |%6.2f |%6.2f %6.2f %6.2f |%3d %3d %3d'\n\n num_bytes = stats['data_size']\n\n P_times = stats['P_times']\n val = [num_bytes]\n for p in P_times:\n val.append(p*1000.)\n\n val.append(stats['count_lost'])\n val.append(stats['count_timeout'])\n val.append(stats['count_corrupt'])\n val = tuple(val)\n\n print(template % val)", "def get_progress(count, block_size, total_size) -> None:\r\n percent = int(count * block_size * 100 / total_size)\r\n print(f\"Downloading clip... {percent}%\", end=\"\\r\", flush=True)", "def statistics(self, **kwargs) -> None:\n print(\n tabulate.tabulate(\n list(self._iter_statistics(**kwargs)),\n headers=[\"path\", \"type\", \"occurences\", \"%\"],\n floatfmt=\".3f\",\n )\n )", "def printProgress (iteration, total, msg, suffix = '', decimals = 0):\n barLength = 50\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '=' * filledLength + '-' * (barLength - filledLength)\n\n\n # sys.stdout.write('[ %s %s%s %s \\r%s' % ( msg, bar, percents, '%', suffix))\n sys.stdout.write('{:<30}[ {:<50} ]{}% \\r'.format(msg,bar,percents))\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def print_avg():", "def printProgressBar (self,iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def task_progress(project):\n complete = Task.objects.filter(project=project, status='C').count()\n total = Task.objects.filter(project=project).count()\n if total == 0:\n return 0\n\n return round(complete/total * 100, 2)", "def cb(complete,total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def fetch_progress(self):\n threads 
= len(opts.thread)\n files = len(self.files)\n t_width = len(str(threads))\n f_width = len(str(files))\n\n t_progress = f\"[{self.pos: >{t_width}}/{threads}]\"\n f_progress = f\"[{self.count: >{f_width}}/{files}]\"\n\n if self.count:\n progress = f\"{t_progress} {f_progress}\"\n else:\n progress = t_progress\n\n return progress", "def report_percent_complete(self):\n meta_str = 'Bundling percent complete: ' + \\\n str(int(self.percent_complete))\n\n TaskComm.set_state('PROGRESS', meta_str)", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def dump(self):\n fmt='%20s:%10.4fs%6.1f%%'\n print('\\n----------------TIME MANAGER PROFILE----------------\\n\\n')\n total_t=time.time()-self.tic0\n for rec in self.record:\n print(fmt % (rec[0],rec[1],100.0*rec[1]/total_t))\n print(fmt % ('TOTAL ELAPSED TIME', total_t, 100.0))\n print('\\n----------------TIME MANAGER PROFILE----------------\\n\\n')", "def prograssBar(val, final):\n end = \"\"\n maxlen = 50\n step = final // maxlen\n\n print(\"\\r[ \" + \"#\" * (val // step) + \" ] \" +\n str(int(val * 100.0 / final)) + \"% \", end=end)", "def print_rows():\n do_four(print_row)", "def print_rows():\n do_four(print_row)", "def __call__(self, param):\n count = param.nbatch\n filled_len = int(round(self.bar_len * count / float(self.total)))\n percents = math.ceil(100.0 * count / float(self.total))\n prog_bar = '=' * filled_len + '-' * (self.bar_len - filled_len)\n logging.info('[%s] %s%s\\r', prog_bar, percents, '%')", "def progress(i, my_list, message=\"\"):\n\tmy_progress = (i / len(my_list)) * 100\n\tmy_progress = str(round(my_progress, 1)) + \"% \" + message\n\tsys.stdout.write('\\r')\n\tsys.stdout.write(my_progress)\n\tsys.stdout.flush()", "def run(self):\n for i,p in enumerate(self.pairs):\n self.forPointPair(i)\n if i % 100000 == 0:\n print('Percentage Processed: ' + str(round(i * 100 / len(self.pairs), 3)) + '. 
Existing Cluster Labels: ', len(np.unique(self.labels)))", "def print_scores(self):\n ### FILL IN ###", "def printProgress(iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#'* filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '*' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def print_chunk_progress(self, actions):\r\n for action in actions:\r\n if (self._cur_print % constants.ES_BULK_CHUNK_SIZE == 0) & (self._cur_print > 0):\r\n print(\"{0} emails converted. Starting bulk import (chunk size: {1})...\".format(self._cur_print,\r\n constants.ES_BULK_CHUNK_SIZE))\r\n self._cur_print += 1\r\n yield action", "def cb(complete, total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))", "def _printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '$'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n sys.stdout.write('\\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix))\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()", "def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n import sys\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s ' %\n (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def print_progression(self, n):\n print(\" \".join(str(round(next(self), 5)) for i in range(n)))", "def report(self,f):\n f.write(\"sectorsize: %d\\n\" % self.sectorsize)\n for run in sorted(self.db):\n f.write(\" [@%8d ; %8d]\\n\" % (run.img_offset,run.bytes))\n f.write(\"total entries in database: %d\\n\\n\" % len(r))", "def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0", "def progress(loss, epoch, batch, batch_size, dataset_size):\n batches = math.ceil(float(dataset_size) / batch_size)\n count = batch * batch_size\n bar_len = 40\n filled_len = int(round(bar_len * count / float(dataset_size)))\n\n bar = '=' * 
filled_len + '-' * (bar_len - filled_len)\n\n status = 'Epoch {}, Loss: {:.4f}'.format(epoch, loss)\n _progress_str = \"\\r \\r [{}] ...{}\".format(bar, status)\n sys.stdout.write(_progress_str)\n sys.stdout.flush()\n\n if batch == batches:\n print()", "def print_batch_stats(self):\n\n # current epoch time, numfiles, numbytes, trans secs, status\n print(f\"TRANS_STATS_BATCH: {time.time()} {self.batchvals['transfer_name']} {self.batchvals['numfiles']} {self.filevals['totbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '█' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def print_progress(self, i, current_params):\n for split in range(1,11):\n if i == (round(self.iterations/10*split)-1):\n post = -self.full_neg_posterior(current_params)\n approx = self.create_normal_logq(current_params)\n diff = post - approx\n if not self.quiet_progress:\n print(str(split) + \"0% done : ELBO is \" + str(diff) + \", p(y,z) is \" + str(post) + \", q(z) is \" + str(approx))", "def _dl_progress_bar(self):\n if not self.show_progress:\n return\n\n if self.file_size:\n ratio = float(self.bytes_read) / self.file_size\n else:\n ratio = 1\n percent = int(ratio * 100)\n\n bar_len = 60\n done = int(bar_len * ratio)\n bar = ('=' * done) + (' ' * (bar_len - done))\n\n progress = '{percent: >3}%: [{bar}]'.format(percent=percent, bar=bar)\n backspace = '\\b' * len(progress)\n print(backspace + '\\r', end='')\n print(progress, end='')", "def calc_total_rows(self):\n #total_rows = len(self.file_list) - 1 # Minus header\n print('Total number of rows: ' + str(self.tot_rows))\n results.append('Total number of rows: ' + str(self.tot_rows))", "def progress(self):\n percent = self._infos.get(BulkInsertState.IMPORT_PROGRESS, \"0\")\n return int(percent)", "def show_data():\n with open(\"ScansforStudents.csv\", \"rU\") as csvfile:\n reader = csv.reader(csvfile, delimiter = ',', quotechar = '|')\n k = 0\n for row in reader:\n print(row)\n if k == 100:\n break\n k += 1", "def response_count_percentage(this_count):\n num_targets = db.session.query(ColourTargetColBG.id).count()\n return (this_count / num_targets) * 100.0", "def print_progress(self, i, current_params):\n for split in range(1,11):\n if i == (round(self.iterations/10*split)-1):\n post = -self.neg_posterior(current_params)\n approx = self.create_normal_logq(current_params)\n diff = post - approx\n if not self.quiet_progress:\n print(str(split) + \"0% done : ELBO is \" + str(diff) + \", p(y,z) is \" + str(post) + \", q(z) is \" + str(approx))", "def round_statistics(self):\n n_query = self.dbm.get_value(\"SELECT COUNT(*) FROM %s\" % self.query_train_table)\n n_keyword = self.dbm.get_value(\"SELECT COUNT(*) FROM keyword\")\n n_product = self.dbm.get_value(\"SELECT COUNT(DISTINCT product) FROM keyword_product_weight\")\n n_relation = self.dbm.get_value(\"SELECT COUNT(*) FROM keyword_product_weight\")\n\n self._round_results.append((n_query, self._not_enough_recs, n_keyword, 
n_product, n_relation))\n\n if config.verbose:\n print 'Round statistics: query: %d (not enough %d), keyword: %d, product: %d, relation: %d, A/M: %.2f%%' % (n_query, self._not_enough_recs, n_keyword, n_product, n_relation, 100.0*n_relation / (n_keyword*n_product))", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(barLength * iteration // total)\n bar = fill * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()" ]
[ "0.7295432", "0.690456", "0.6810523", "0.65595317", "0.64016294", "0.63650185", "0.63531333", "0.6352935", "0.6349985", "0.6310579", "0.6283685", "0.6247981", "0.6222485", "0.6173231", "0.61398053", "0.61388904", "0.61365205", "0.6132179", "0.61136687", "0.61033106", "0.6093487", "0.60699356", "0.6062022", "0.6045899", "0.6032401", "0.60060966", "0.60033005", "0.5999316", "0.59792435", "0.5973629", "0.59702843", "0.59191614", "0.59009856", "0.58947724", "0.5875985", "0.58543", "0.5846072", "0.58409685", "0.5840747", "0.58266807", "0.5823062", "0.58207005", "0.5812126", "0.57898015", "0.57810867", "0.57757926", "0.57676214", "0.57607377", "0.5753739", "0.5748765", "0.5746946", "0.5726183", "0.5722778", "0.5721043", "0.57160014", "0.5713159", "0.5712366", "0.57102334", "0.57090294", "0.5708932", "0.5707982", "0.5705203", "0.5691287", "0.5691287", "0.5689891", "0.5682899", "0.5682821", "0.5682739", "0.56805474", "0.5668209", "0.5664315", "0.5664315", "0.5661791", "0.56598896", "0.56596434", "0.5658683", "0.5656253", "0.5654978", "0.5653916", "0.5653916", "0.5652019", "0.5649998", "0.5647425", "0.5645385", "0.5641", "0.5640379", "0.56394637", "0.5636751", "0.5633389", "0.5632225", "0.5631828", "0.56184924", "0.5617016", "0.56110996", "0.5608741", "0.5606415", "0.56040037", "0.5600113", "0.55977774", "0.5593987" ]
0.74636936
0
Print percentage of rows that have been processed.
def print_stat_rows(title,rows_before,rows_after): _print_stat_rows(title,rows_before,rows_after)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_stat_rows(title,rows_before,rows_after):\n self.strprint(str(title)+\" : Percent of processed rows = %1.2F\"\\\n %(np.abs(rows_before-rows_after)*100/rows_before))", "def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage, # ending with comma prevents newline from being appended\n sys.stdout.flush()", "def PrintProgress(self):\n print ' Examined %d nodes, found %d unique...' % (\n self.nodes_examined, self.unique_nodes\n )", "def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')", "def report_progress(self):\r\n stats = self.simulation_stats.stats\r\n solutions = len(self.solutions)\r\n round = self.round\r\n scores = stats[round]\r\n best_score = min(scores)\r\n avg_score = sum(scores) / solutions\r\n line = f\"Round: {round}\\t\\tBest Score: {best_score}\\t\\t Average Score: {avg_score}\"\r\n print(line)", "def _log_progress(self):\n self.num_of_requests_in_pipeline += 1\n if self.num_of_requests_in_pipeline % 20 == 0:\n print('-' * 200)\n print(f'DB PIPELINE: {self.num_of_requests_in_pipeline} items wenth though pipeline.')\n print('-' * 200)", "def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))", "def _print_progress(self):\n \n print 'Completed %d of %d' %(self.progress_id, self.total_work)\n self.progress_id += 1", "def print_progress(done,total):\n \n percent = 100.0*done/(total) \n bar = int(0.2*percent) \n \n sys.stdout.write('\\r')\n sys.stdout.write('[%-20s] %d%%' % ('='*bar, percent))\n sys.stdout.flush()\n \n return", "def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return", "def print_progress(self, info_dict):\n if self.n_print != 0:\n t = info_dict['t']\n if t == 1 or t % self.n_print == 0:\n string = 'Iteration {0}'.format(str(t).rjust(len(str(self.n_iter))))\n string += ' [{0}%]'.format(str(int(t / self.n_iter * 100)).rjust(3))\n print(string)", "def progress_msg(processed, total):\n if total > 1:\n percent = int((float(processed) / total) * 100)\n stderr.write(\"\\r[%d/%d] %d%%\" % (processed, total, percent))\n stderr.flush()", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def _printProgress(self, progress):\n if not self._quiet:\n sys.stdout.write('\\rWriting store to CSV: [{0:50s}] {1:.2f}% '.format('#' * int(progress * 50.0), progress * 100.0))\n sys.stdout.flush()", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def __show_progress(self, _cur_file_idx, _file_count):\n if (self.__is_show_proegress == False):\n return\n\n if(_file_count == 0):\n raise StandardError('no file found.')\n\n # show progress for each 5% (20 steps)\n digit = math.modf(math.log10(_file_count))[1]\n if(digit < 3):\n 
print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)\n else:\n digit = digit - 2\n skipstep10 = math.pow(10, digit)\n if ((_cur_file_idx % skipstep10) == 0):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def _progressBar(self, percent, printEvery=10):\n floor = int(percent)\n sys.stdout.write('\\r' * (floor + 9))\n sys.stdout.write('[')\n sys.stdout.write('=' * (floor/printEvery))\n sys.stdout.write('>] {:02.2f}%'.format(percent))\n sys.stdout.flush()", "def repetitive(df):\r\n total_rows = df.shape[0] \r\n for col in df.columns:\r\n count = df[col].value_counts(dropna=False)\r\n high_percent = (count/total_rows).iloc[0] \r\n if high_percent > 0.95:\r\n print('{0}: {1:.1f}%'.format(col, high_percent*100))\r\n print(count)\r\n print()", "def print_progress(remaining_pairs, G):\n total_pairs = G.number_of_nodes()**2\n uncomputed_pairs = len(remaining_pairs)\n print(1 - uncomputed_pairs/total_pairs)", "def displaySummary(self, dictionnary, lineCount):\n for key in self.summaryDict.keys():\n dictionnary[key] = (dictionnary[key] / lineCount) * 100\n #print(str(key)+\" => \"+str(dictionnary[key])+\" %\")", "def occurance(row):\r\n # divide the row's highest counted cause by the row's total number of deaths\r\n percentage = row['max_count'] / row['all_count']\r\n percentage *= 100\r\n # round the percentage up so it's two digits\r\n return round(percentage)", "def dl_progress(count, block_size, total_size):\n percent = int(count*block_size*100/total_size)\n sys.stdout.write(\"\\r\" + 'Progress:' + \"...%d%%\" % percent)\n sys.stdout.flush()", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def _print_progress(self):\n print(\n 'E {} S {} TR {:6.2f} G {:6.2f} Reg {:6.5f} Loss {:6.5f} AvgQ {:6.2f}'\n ' MinR {:6.2f} MaxR {:6.2f}'.format(\n self.episode, self.episode_step, self.tracker.total_reward, self.tracker.discounted_rewards,\n self.reg_loss_val, self.critic_loss_val, self.mean_q_val,\n self.tracker.min_reward, self.tracker.max_reward))", "def update_progress(self, done):\r\n if done % 100 == 0:\r\n print >>sys.stderr, \" %d processed, run time %d secs\" % (done, (datetime.now() - self.started_at).seconds)", "def OnProgress(bytes_read, total_bytes, percent):\n sys.stdout.write(\"progress: %.2f%% \\r\" % (percent))\n sys.stdout.flush()", "def progress(count, total):\r\n bar_len = 45\r\n filled_len = int(round(bar_len * count / float(total)))\r\n\r\n percents = round(100 * count / float(total), 1)\r\n p_bar = '=' * filled_len + '.' 
* (bar_len - filled_len)\r\n try:\r\n sys.stdout.write(' File {} of {} [{}] {}{}\\r'.format(count, total, p_bar, percents, '%'))\r\n except:\r\n pass\r\n sys.stdout.flush()", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def emit_status(self):\n next_job_count = len(self.fe.get_next_jobs())\n if next_job_count:\n emoji = \"🤔\"\n else:\n emoji = \"👌\"\n remaining = len(self.fe.get_current_network())\n\n pct = (self.total_job_count - remaining) / self.total_job_count\n print(\n f\"{emoji} ———— {next_job_count} jobs running, {remaining} remaining ({int(100*pct)}%). \",\n end=\"\\r\",\n )", "def calculate_progress_percentage(d):\n successcounter = 0\n for test in d:\n if d[test][\"status\"] != \"not yet run\":\n successcounter += 1\n totalcounter = 0\n for test in d:\n totalcounter += 1\n return int(successcounter / totalcounter * 100)", "def download_progress_hook(count, blockSize, totalSize):\n percent = int(count * blockSize * 100 / totalSize)\n\n global last_percent_reported\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n last_percent_reported = percent", "def download_progress_hook(count, blockSize, totalSize):\n global last_percent_reported\n percent = int(count * blockSize * 100 / totalSize)\n\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n last_percent_reported = percent", "def print_progress(self):\n print(\n '\\rE {} S {} TR {:6.2f} G {:6.2f} Loss {:6.5f} AvgQ {:6.2f}'\n ' MinR {:6.2f} MaxR {:6.2f}'.format(\n self.episode, self.episode_step,\n self.tracker.total_reward, self.tracker.discounted_rewards,\n self.loss_val, self.total_max_q / self.episode_step,\n self.tracker.min_reward, self.tracker.max_reward,\n end=\"\"))", "def _print_progress(counter):\n\tif(slogviz.config.interactive):\n\t\tprint('parse log file entry nr: {}'.format(counter),end='\\r')", "def calc_progress(self):\n if self.is_prepared():\n self._sync_info_from_disk()\n self._num_sown_batches = len(\n glob.glob(\n os.path.join(self.location, \"batches\", BTCH_NM.format(\"*\"))\n )\n )\n self._num_results = len(\n glob.glob(\n os.path.join(self.location, \"results\", RSLT_NM.format(\"*\"))\n )\n )\n else:\n self._num_sown_batches = -1\n self._num_results = -1", "def displayed_percent(self):\n return (self.displayed_words / self.total_words) * 100", "def progress_func(completed, total):\n if not self.log:\n return\n dots = (completed * dot_count) / total\n if dots > dot_count:\n dots = dot_count\n self.progress_lock.acquire()\n if self.dots_written < dot_count:\n dots_to_write = dots - self.dots_written\n self.dots_written = dots\n os.write(old_stdout, '.' * dots_to_write)\n self.progress_lock.release()", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... 
', self.app.IGNORE_EXIT_FLAG)", "def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))", "def progress_iterator(collection: Collection, message: str) -> Iterable:\n num_items = len(collection)\n last_percentage = -1\n for i, item in enumerate(collection):\n percentage = 100 * i // num_items\n if percentage > last_percentage:\n last_percentage = percentage\n print(f\"{message} {percentage}%\", end='\\r')\n yield item\n print(f\"{message} 100%\")", "def summarize(data, verbal=False, using_files=True):\n\n if using_files:\n for file_name in tqdm(data):\n fill_table(pd.read_csv(file_name))\n else:\n for table in tqdm(data):\n fill_table(table)\n\n for cluster in table_summary:\n #total_genes = sum(table_summary[cluster][\"phylum\"].values) # number of genes\n #total_genes = table_summary[cluster][\"N\"] # number of samples\n total_genes = table_summary[cluster][\"eggNOG\"].eggNOG.sum() # number of genes in COGs with duplicates\n \n phylum_percent = table_summary[cluster][\"phylum\"].apply(lambda x: x/total_genes * 100)\n phylum_percent.columns = [\"percent\"]\n table_summary[cluster][\"phylum\"] = pd.concat([table_summary[cluster][\"phylum\"],phylum_percent],axis=1)\n\n #Read above for fix\n genus_percent = table_summary[cluster][\"genus\"].apply(lambda x: x/total_genes * 100)\n genus_percent.columns = [\"percent\"]\n table_summary[cluster][\"genus\"] = pd.concat([table_summary[cluster][\"genus\"],genus_percent],axis=1)\n\n #read above for fix\n cog_percent = table_summary[cluster][\"eggNOG\"].apply(lambda x: x/table_summary[cluster][\"gene_cog\"] * 100)\n cog_percent.columns = [\"percent\"]\n table_summary[cluster][\"eggNOG\"] = pd.concat([table_summary[cluster][\"eggNOG\"],cog_percent],axis=1)\n\n #Print the data\n if verbal:\n print \"Cluster %s:\\n\" % cluster\n print \"Number of Samples: %d\\n\" % table_summary[cluster][\"N\"]\n print \"Taxonomy:\"\n print table_summary[cluster][\"phylum\"].sort(\"percent\", ascending=False)\n print \"----------------------------------\"\n print table_summary[cluster][\"genus\"].sort(\"percent\", ascending=False)\n print \"-----------------------------------\"\n print \"COGS:\"\n print table_summary[cluster][\"eggNOG\"].sort(\"percent\", ascending=False)\n print \"------------------------------------\"\n print \"End Summary\"", "def printProgress(iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n\tfilledLength\t= int(round(barLength * iteration / float(total)))\n\tpercents\t\t= round(100.00 * (iteration / float(total)), decimals)\n\tbar\t\t\t = '#' * filledLength + '-' * (barLength - filledLength)\n\tsys.stdout.write('%s [%s] %s%s %s (%s/%s total)\\r' % (prefix, bar, percents, '%', suffix, iteration, total))\n\tsys.stdout.flush()\n\tif iteration == total:\n\t\tprint(\"\\n\")", "def status_statement(current, final, count, chr=None):\n if current % int(final/count) == 0:\n if chr is None:\n print(\"Processed %i out of %i\" % (current, final))\n else:\n print(\"Processed %i out of %i in %s\" % (current, final, chr))", "def percentage(count, total):\n return count / total * 100", "def overall_progress(app_id):\r\n sql = text('''SELECT task.id, n_answers,\r\n COUNT(task_run.task_id) AS n_task_runs\r\n FROM task LEFT OUTER JOIN task_run ON task.id=task_run.task_id\r\n WHERE task.app_id=:app_id GROUP BY task.id''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n n_expected_task_runs = 0\r\n n_task_runs = 0\r\n for row in results:\r\n tmp = row[2]\r\n if row[2] > row[1]:\r\n tmp = row[1]\r\n 
n_expected_task_runs += row[1]\r\n n_task_runs += tmp\r\n pct = float(0)\r\n if n_expected_task_runs != 0:\r\n pct = float(n_task_runs) / float(n_expected_task_runs)\r\n return (pct * 100)", "def print_output(self):\n print(\"Reference score: \" + str(self.PotTax_reference.sum().TFI))\n print(\"Intervention score: \" + str(self.PotTax_intervention.sum().TFI))\n return", "def print_leaf(self, counts):\n total = sum(counts.values()) * 1.0\n probs = {}\n for lbl in counts.keys():\n probs[lbl] = str(int(counts[lbl] / total * 100)) + \"%\"\n return probs", "def print_acc(class_matrix):\n total = 0\n num_index = len(class_matrix)\n for i in range(num_index):\n total += class_matrix[i][i]\n print(\"Accuracy: {0}%\".format(100 * total/np.sum(class_matrix)))", "def update_percent(self):", "def percent_done(self) -> int:\n percent = (self.downloaded_images/self.total_images) * 100\n return int(percent)", "def determine_progress_value(rows, total_rows, percent_1, percent_5, percent_10, percent_20, percent_30, percent_40,\n percent_50,\n percent_60, percent_70, percent_80, percent_90, percent_100):\n\n if rows >= total_rows * 1.0 and not percent_100:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 100\n percent_100 = True\n elif rows >= total_rows * 0.9 and not percent_90:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 90\n percent_90 = True\n elif rows >= total_rows * 0.8 and not percent_80:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 80\n percent_80 = True\n elif rows >= total_rows * 0.7 and not percent_70:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 70\n percent_70 = True\n elif rows >= total_rows * 0.6 and not percent_60:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 60\n percent_60 = True\n elif rows >= total_rows * 0.5 and not percent_50:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 50\n percent_50 = True\n elif rows >= total_rows * 0.4 and not percent_40:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 40\n percent_40 = True\n elif rows >= total_rows * 0.3 and not percent_30:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 30\n percent_30 = True\n elif rows >= total_rows * 0.2 and not percent_20:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 20\n percent_20 = True\n elif rows >= total_rows * 0.1 and not percent_10:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 10\n percent_10 = True\n elif rows >= total_rows * 0.05 and not percent_5:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 5\n percent_5 = True\n elif rows >= total_rows * 0.01 and not percent_1:\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 1\n percent_1 = True\n\n return percent_1, percent_5, percent_10, percent_20, percent_30, percent_40, percent_50, percent_60, percent_70, percent_80, percent_90, percent_100", "def 
percent(self):\r\n return self._percent", "def display_results_line(stats):\n # Line output.\n template = ' %5d |%6.2f |%6.2f %6.2f %6.2f |%3d %3d %3d'\n\n num_bytes = stats['data_size']\n\n P_times = stats['P_times']\n val = [num_bytes]\n for p in P_times:\n val.append(p*1000.)\n\n val.append(stats['count_lost'])\n val.append(stats['count_timeout'])\n val.append(stats['count_corrupt'])\n val = tuple(val)\n\n print(template % val)", "def get_progress(count, block_size, total_size) -> None:\r\n percent = int(count * block_size * 100 / total_size)\r\n print(f\"Downloading clip... {percent}%\", end=\"\\r\", flush=True)", "def statistics(self, **kwargs) -> None:\n print(\n tabulate.tabulate(\n list(self._iter_statistics(**kwargs)),\n headers=[\"path\", \"type\", \"occurences\", \"%\"],\n floatfmt=\".3f\",\n )\n )", "def printProgress (iteration, total, msg, suffix = '', decimals = 0):\n barLength = 50\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '=' * filledLength + '-' * (barLength - filledLength)\n\n\n # sys.stdout.write('[ %s %s%s %s \\r%s' % ( msg, bar, percents, '%', suffix))\n sys.stdout.write('{:<30}[ {:<50} ]{}% \\r'.format(msg,bar,percents))\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def print_avg():", "def printProgressBar (self,iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()", "def task_progress(project):\n complete = Task.objects.filter(project=project, status='C').count()\n total = Task.objects.filter(project=project).count()\n if total == 0:\n return 0\n\n return round(complete/total * 100, 2)", "def cb(complete,total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def fetch_progress(self):\n threads 
= len(opts.thread)\n files = len(self.files)\n t_width = len(str(threads))\n f_width = len(str(files))\n\n t_progress = f\"[{self.pos: >{t_width}}/{threads}]\"\n f_progress = f\"[{self.count: >{f_width}}/{files}]\"\n\n if self.count:\n progress = f\"{t_progress} {f_progress}\"\n else:\n progress = t_progress\n\n return progress", "def report_percent_complete(self):\n meta_str = 'Bundling percent complete: ' + \\\n str(int(self.percent_complete))\n\n TaskComm.set_state('PROGRESS', meta_str)", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def dump(self):\n fmt='%20s:%10.4fs%6.1f%%'\n print('\\n----------------TIME MANAGER PROFILE----------------\\n\\n')\n total_t=time.time()-self.tic0\n for rec in self.record:\n print(fmt % (rec[0],rec[1],100.0*rec[1]/total_t))\n print(fmt % ('TOTAL ELAPSED TIME', total_t, 100.0))\n print('\\n----------------TIME MANAGER PROFILE----------------\\n\\n')", "def prograssBar(val, final):\n end = \"\"\n maxlen = 50\n step = final // maxlen\n\n print(\"\\r[ \" + \"#\" * (val // step) + \" ] \" +\n str(int(val * 100.0 / final)) + \"% \", end=end)", "def print_rows():\n do_four(print_row)", "def print_rows():\n do_four(print_row)", "def __call__(self, param):\n count = param.nbatch\n filled_len = int(round(self.bar_len * count / float(self.total)))\n percents = math.ceil(100.0 * count / float(self.total))\n prog_bar = '=' * filled_len + '-' * (self.bar_len - filled_len)\n logging.info('[%s] %s%s\\r', prog_bar, percents, '%')", "def progress(i, my_list, message=\"\"):\n\tmy_progress = (i / len(my_list)) * 100\n\tmy_progress = str(round(my_progress, 1)) + \"% \" + message\n\tsys.stdout.write('\\r')\n\tsys.stdout.write(my_progress)\n\tsys.stdout.flush()", "def run(self):\n for i,p in enumerate(self.pairs):\n self.forPointPair(i)\n if i % 100000 == 0:\n print('Percentage Processed: ' + str(round(i * 100 / len(self.pairs), 3)) + '. 
Existing Cluster Labels: ', len(np.unique(self.labels)))", "def print_scores(self):\n ### FILL IN ###", "def printProgress(iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#'* filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '*' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def print_chunk_progress(self, actions):\r\n for action in actions:\r\n if (self._cur_print % constants.ES_BULK_CHUNK_SIZE == 0) & (self._cur_print > 0):\r\n print(\"{0} emails converted. Starting bulk import (chunk size: {1})...\".format(self._cur_print,\r\n constants.ES_BULK_CHUNK_SIZE))\r\n self._cur_print += 1\r\n yield action", "def cb(complete, total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))", "def _printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '$'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n sys.stdout.write('\\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix))\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()", "def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n import sys\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s ' %\n (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def print_progression(self, n):\n print(\" \".join(str(round(next(self), 5)) for i in range(n)))", "def report(self,f):\n f.write(\"sectorsize: %d\\n\" % self.sectorsize)\n for run in sorted(self.db):\n f.write(\" [@%8d ; %8d]\\n\" % (run.img_offset,run.bytes))\n f.write(\"total entries in database: %d\\n\\n\" % len(r))", "def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0", "def progress(loss, epoch, batch, batch_size, dataset_size):\n batches = math.ceil(float(dataset_size) / batch_size)\n count = batch * batch_size\n bar_len = 40\n filled_len = int(round(bar_len * count / float(dataset_size)))\n\n bar = '=' * 
filled_len + '-' * (bar_len - filled_len)\n\n status = 'Epoch {}, Loss: {:.4f}'.format(epoch, loss)\n _progress_str = \"\\r \\r [{}] ...{}\".format(bar, status)\n sys.stdout.write(_progress_str)\n sys.stdout.flush()\n\n if batch == batches:\n print()", "def print_batch_stats(self):\n\n # current epoch time, numfiles, numbytes, trans secs, status\n print(f\"TRANS_STATS_BATCH: {time.time()} {self.batchvals['transfer_name']} {self.batchvals['numfiles']} {self.filevals['totbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '█' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def print_progress(self, i, current_params):\n for split in range(1,11):\n if i == (round(self.iterations/10*split)-1):\n post = -self.full_neg_posterior(current_params)\n approx = self.create_normal_logq(current_params)\n diff = post - approx\n if not self.quiet_progress:\n print(str(split) + \"0% done : ELBO is \" + str(diff) + \", p(y,z) is \" + str(post) + \", q(z) is \" + str(approx))", "def _dl_progress_bar(self):\n if not self.show_progress:\n return\n\n if self.file_size:\n ratio = float(self.bytes_read) / self.file_size\n else:\n ratio = 1\n percent = int(ratio * 100)\n\n bar_len = 60\n done = int(bar_len * ratio)\n bar = ('=' * done) + (' ' * (bar_len - done))\n\n progress = '{percent: >3}%: [{bar}]'.format(percent=percent, bar=bar)\n backspace = '\\b' * len(progress)\n print(backspace + '\\r', end='')\n print(progress, end='')", "def calc_total_rows(self):\n #total_rows = len(self.file_list) - 1 # Minus header\n print('Total number of rows: ' + str(self.tot_rows))\n results.append('Total number of rows: ' + str(self.tot_rows))", "def progress(self):\n percent = self._infos.get(BulkInsertState.IMPORT_PROGRESS, \"0\")\n return int(percent)", "def show_data():\n with open(\"ScansforStudents.csv\", \"rU\") as csvfile:\n reader = csv.reader(csvfile, delimiter = ',', quotechar = '|')\n k = 0\n for row in reader:\n print(row)\n if k == 100:\n break\n k += 1", "def response_count_percentage(this_count):\n num_targets = db.session.query(ColourTargetColBG.id).count()\n return (this_count / num_targets) * 100.0", "def print_progress(self, i, current_params):\n for split in range(1,11):\n if i == (round(self.iterations/10*split)-1):\n post = -self.neg_posterior(current_params)\n approx = self.create_normal_logq(current_params)\n diff = post - approx\n if not self.quiet_progress:\n print(str(split) + \"0% done : ELBO is \" + str(diff) + \", p(y,z) is \" + str(post) + \", q(z) is \" + str(approx))", "def round_statistics(self):\n n_query = self.dbm.get_value(\"SELECT COUNT(*) FROM %s\" % self.query_train_table)\n n_keyword = self.dbm.get_value(\"SELECT COUNT(*) FROM keyword\")\n n_product = self.dbm.get_value(\"SELECT COUNT(DISTINCT product) FROM keyword_product_weight\")\n n_relation = self.dbm.get_value(\"SELECT COUNT(*) FROM keyword_product_weight\")\n\n self._round_results.append((n_query, self._not_enough_recs, n_keyword, 
n_product, n_relation))\n\n if config.verbose:\n print 'Round statistics: query: %d (not enough %d), keyword: %d, product: %d, relation: %d, A/M: %.2f%%' % (n_query, self._not_enough_recs, n_keyword, n_product, n_relation, 100.0*n_relation / (n_keyword*n_product))", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(barLength * iteration // total)\n bar = fill * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()" ]
[ "0.74636936", "0.7295432", "0.690456", "0.6810523", "0.65595317", "0.64016294", "0.63650185", "0.63531333", "0.6352935", "0.6349985", "0.6310579", "0.6283685", "0.6247981", "0.6222485", "0.6173231", "0.61398053", "0.61388904", "0.61365205", "0.6132179", "0.61136687", "0.61033106", "0.6093487", "0.60699356", "0.6062022", "0.6045899", "0.6032401", "0.60060966", "0.60033005", "0.5999316", "0.59792435", "0.5973629", "0.59702843", "0.59191614", "0.59009856", "0.58947724", "0.5875985", "0.58543", "0.5846072", "0.58409685", "0.5840747", "0.58266807", "0.5823062", "0.58207005", "0.5812126", "0.57898015", "0.57810867", "0.57757926", "0.57676214", "0.57607377", "0.5753739", "0.5748765", "0.5746946", "0.5726183", "0.5722778", "0.5721043", "0.57160014", "0.5713159", "0.5712366", "0.57102334", "0.57090294", "0.5708932", "0.5707982", "0.5705203", "0.5691287", "0.5691287", "0.5689891", "0.5682899", "0.5682821", "0.5682739", "0.56805474", "0.5668209", "0.5664315", "0.5664315", "0.5661791", "0.56598896", "0.56596434", "0.5658683", "0.5656253", "0.5654978", "0.5653916", "0.5653916", "0.5652019", "0.5649998", "0.5647425", "0.5645385", "0.5641", "0.5640379", "0.56394637", "0.5636751", "0.5633389", "0.5632225", "0.5631828", "0.56184924", "0.5617016", "0.56110996", "0.5608741", "0.5606415", "0.56040037", "0.5600113", "0.55977774", "0.5593987" ]
0.0
-1
Encapsulation of the print function. If the is_verbose flag is set to True, printing takes place.
def strprint(self, mystr): if self.is_verbose is True: print(mystr) else: pass return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _verbose(self,text):\n if self.verbose:\n print(text)", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def verbose_print(verbose, print_function=None):\n\n if verbose:\n return print_function or print\n else:\n def vprint(*args, **kwars):\n pass\n return vprint", "def print_verbose(args, msg):\n if args.verbose:\n print(msg)", "def verbose_print(msg: str = '') -> None:\n assert isinstance(msg, str)\n if __verbose:\n print(msg)", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def verbose_print(text,verbose_level):\n if Args.verbose >= verbose_level:\n print '\\t' * (verbose_level-1) + text", "def _vprint(self, string):\n if self.verbose:\n print(string)", "def vprint (*args, take_action=False, **kwargs):\n\n take_action = take_action and not opts.take_action\n\n if opts.verbose or take_action:\n print (*args, **kwargs)\n\n return take_action", "def verbosePrint(string, nonl=False):\n if not verbose:\n return\n if nonl:\n print(string, end=' ')\n else:\n print(string)", "def vprint(msg):\n if defaults.verbose:\n print(msg)", "def print_if_verbose(self, log):\n\n if self.verbose:\n print(log)\n return log", "def print_verbose(message:str):\n if params['verbose']:\n print(message)\n return", "def vprint(string):\n global verbose\n if verbose:\n print(string)", "def printv(self, string, **kwargs):\n if self.verbose:\n print(string, **kwargs)", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def v_print(msg):\n if (VERBOSE == 1):\n print(msg)", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def pr(string, verbose):\n if(verbose):\n print(string)", "def set_verboseprint(func=misc.init_verbose_print(verbose=True, vfunc=print, nvfunc=misc.log)):\n global verboseprint\n verboseprint = func\n ml.verboseprint = verboseprint\n transform.verboseprint = verboseprint", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def printmsg(msg, verbose):\n if verbose:\n print(msg)\n\n return None", "def println(message, verbose_only=False):\n if verbose_only and not system.config['verbose']:\n return\n print(message)", "def _print(self, msg):\n # XXX: Not using the logger framework: need to\n # learn to use logger better.\n if not self.verbose:\n return\n if self.verbose < 50:\n writer = sys.stderr.write\n else:\n writer = sys.stdout.write\n writer(f\"[{self}]: {msg}\\n\")", "def print_log(message, verbose):\n if verbose:\n print(message)", "def print(s, end='\\n'):\n if GLOBAL['VERBOSE']:\n builtins.print(s, end=end, flush=True) if (not GLOBAL['LOGGING'] or GLOBAL['LOGGER'] is None) else GLOBAL['LOGGER'].info(s)", "def init_verbose_print(verbose=True, vfunc=print, nvfunc=None):\n global verboseprint\n if verbose:\n verboseprint = vfunc\n else:\n if not nvfunc:\n verboseprint = lambda *a, **k: None\n else:\n verboseprint = nvfunc\n return verboseprint", "def printt(content, flag=False):\n if flag:\n print(content)", "def verbose(string, level, indent=None):\n if args.verbose:\n if args.verbose > level:\n if indent is None:\n if level <= LEVEL_4:\n indent = \" \" * level\n else:\n indent = \" \"\n print (indent + string)\n return", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn 
method(cls, *args)\n\t\treturn wrapper", "def if_verbose(message):\n if args.verbose:\n logging.info(message)\n global_timer()", "def is_verbose():\n return g_verbose", "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def verbose(self, *args):\n\n if self.is_on(_Log.VERBOSE):\n self._write(self._out, *args)", "def verbose():\n GLOBAL['VERBOSE'] = True", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def enable_verbose(self):\n self.verbose = True", "def print_warning(verbose, message):\n if verbose:\n print(message)", "def show(self, *args, prefix=None):\n if prefix is None:\n prefix = '$'\n if self.verbose >= 2:\n print(prefix, *args)", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def do_verbose(self, arg):\n global verbose\n if verbose == 1:\n verbose = 0\n # prtin and add to log file \n logmsg = \" INFO: verbose mode disable\"\n log(logmsg)\n else:\n verbose = 1\n # prtin and add to log file \n logmsg = \" INFO: verbose mode enable\"\n log(logmsg)", "def verbose():\n return _verbose", "def printMessage(Message, verbosity):\n if(verbosity == 1):\n print(Message)", "def verbose(ctx, msg, *args):\n if ctx.verbose:\n info(msg, *args)", "def _print(self, msg, msg_args):\r\n # XXX: Not using the logger framework: need to\r\n # learn to use logger better.\r\n if not self.verbose:\r\n return\r\n if self.verbose < 50:\r\n writer = sys.stderr.write\r\n else:\r\n writer = sys.stdout.write\r\n msg = msg % msg_args\r\n writer('[%s]: %s\\n' % (self, msg))", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def say(self, verbosity, msg):\n if self.verbosity >= verbosity:\n print(msg)", "def report(s):\n if opts[\"verbose\"]:\n print(\"%s: %s\" % (NAME, s))", "def is_verbose() -> bool:\n return VERBOSE", "def _verboseHeader(self):\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n print('{}\\n{}'.format(title, '-' * len(title)))", "def verbose(self, verbose):\n self._verbose = verbose", "def print_(self, s: str) -> None:", "def printer(message):\n if VERBOSITY:\n pprint(message)", "def DEBUG_PRINT(msg, obj='', suffix=''):\n if PRINT_DEBUGS:\n print msg, obj, suffix", "def debug_print(*args, sep=' ', end='\\n', file=sys.stdout, flush=False, lvl=1):\n if debuglvl >= lvl:\n print(*args, sep=sep, end=end, file=file, flush=flush)", "def set_verbose(self, verbose):\n self._verbose = verbose", "def print_out():\n pass", "def set_verbose(self, v):\n self._verbose = bool(v)", "def set_verbose(self, verbose):\n self._shared.set_verbose(verbose)", "def _redefine_print(is_main):\n import builtins as __builtin__\n\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop(\"force\", False)\n if is_main or force:\n builtin_print(*args, 
**kwargs)\n\n __builtin__.print = print", "def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t \tprint (\"number of histories:\\t%s\" % self.nps)\n\t\t print (\"number of pseudorandom numbers used:\\t%s\" % self.rnr)\n\t\t print (\"title: %s\" % self.title)\n\n\t\t if self.ntal>1:\n\t\t\t\tprint self.ntal, 'tallies:', self.ntals\n\t \telse:\n\t\t\t\tprint self.ntal, 'tally:', self.ntals\n\n\n\t\t if self.npert != 0:\n\t\t\t\tprint(\"number of perturbations: %s\" % self.npert)", "def debug(string):\n if verbose:\n print string\n return", "def verbose(self, enabled=True):\r\n self.verbose = verbose", "def __init__(self, enable_verbose=True):\n self.enable_verbose = enable_verbose\n if self.enable_verbose:\n self.show = self._print_screen\n else:\n self.show = self._not_print_screen", "def set_print_statements(self, print_flag: bool) -> None:\n if print_flag:\n self._print_statements_enabled = print_flag\n else:\n self._print_statements_enabled = print_flag", "def test():\n v_print(1, \"-vvv Verbose 1 - INFO\")\n v_print(2, \"-vv Verbose 2 - WARN\")\n v_print(3, \"-v Verbose 3 - ERROR\")", "def log(self, message):\n if VERBOSE:\n print self, message", "def _debug_print(message):\n\n if _debug == True:\n print(message)", "def print_info(message: str):\n global verbose\n if verbose:\n print(\"%s%s%s\" % (KYEL, message, KNRM))", "def pretty_print(\n self, verbose: Optional[bool] = None, log_level: Optional[infra.Level] = None\n ) -> None:\n if verbose is None:\n verbose = self.options.log_verbose\n if log_level is None:\n log_level = self.options.log_level\n\n formatter.pretty_print_title(\n f\"Diagnostic Run {self.name} version {self.version}\"\n )\n print(f\"verbose: {verbose}, log level: {log_level}\")\n diagnostic_stats = {level: 0 for level in infra.Level}\n for diagnostic in self.diagnostics:\n diagnostic_stats[diagnostic.level] += 1\n formatter.pretty_print_title(\n \" \".join(f\"{diagnostic_stats[level]} {level.name}\" for level in infra.Level)\n )\n\n for diagnostic in self.diagnostics:\n diagnostic.pretty_print(verbose, log_level)\n\n unprinted_diagnostic_stats = [\n (level, count)\n for level, count in diagnostic_stats.items()\n if count > 0 and level.value < log_level.value\n ]\n if unprinted_diagnostic_stats:\n print(\n f\"{' '.join(f'{count} {level.name}' for level, count in unprinted_diagnostic_stats)} \"\n \"were not printed due to the log level.\"\n )\n print()", "def isVerbose(self):\n return self.opts.verbose", "def test_func(verbose: bool) -> None:\n click.echo(verbose)", "def log(s):\n if VERBOSE_MODE:\n print(s)", "def hook_print():\n sys.stdout = PrintHook()", "def setVerbose(self, verbose):\n self._verbose = verbose", "def dprint(msg, debug):\n if debug:\n six.print_(msg)", "def debug(message: str) -> None:\n if is_verbose():\n print(message)\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()", "def log(self, msg: str) -> None:\n if self.args.verbose:\n print(msg)", "def debug(self, msg=\"\"):\n if self.verbose:\n print(\"Debug: \" + msg)", "def d_print(level, *args, **kwargs):\n if not isinstance(level, int):\n print(level, *args, **kwargs)\n elif debug >= level:\n print(*args, **kwargs)", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n 
['OFF','ON'][self.shell.outputcache.Pprint]", "def verbose(obj, msg):\n return obj.verbose(msg)", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def debug_print(text):\r\n if settings.debug:\r\n print (text)", "def setVerboseLevel(self,verbose):\n\tself.verbose=verbose\n\tif self.verbose and self.dbType=='sqlite':\n\t print \"db isolation\",self.db.isolation_level", "def Verbose(on_off=\"on\"):\n\n global verbose\n \n if on_off.isdigit():\n int_value = int(on_off)\n else:\n int_value = 1\n\n if on_off.lower() == \"off\":\n int_value = 0\n print \"Verbose disabled.\"\n elif on_off.lower() == \"on\":\n int_value = 1\n print \"Verbose enabled.\"\n \n if -1 < int_value < 3:\n verbose=int_value\n interface.VERBOSE=int_value\n else:\n raise TypeError", "def verbose():\n return Verbose.level()", "def debug_print(self, *args, **kwargs):\n print(\"APP_DEBUG_PRINT\", args, kwargs)", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def _default_vprint_worker(*args, **kwargs):\r\n print(*args, **kwargs)", "def printdebug(self, msg):\n if self.debug > 0:\n print(msg)", "def verbose ( self , message , *args , **kwargs ) :\n return self.logger.verbose ( message , *args , **kwargs )", "def VerboseOut(self, message):\n if self._verbose:\n self.StdErr(message, die=False)", "def SetVerbose(new_verbose=True):\n global _verbose\n _verbose = new_verbose", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def prnt(printstring, silent=False):\n if not silent:\n stdout.write(printstring)", "def toggleVerbose(self):\n self.__VERBOSE = not self.__VERBOSE" ]
[ "0.7844468", "0.7715159", "0.76063657", "0.75858647", "0.74731535", "0.7468149", "0.74547803", "0.7313309", "0.72769666", "0.72654116", "0.7237131", "0.7225605", "0.7189013", "0.7179139", "0.7146172", "0.70976067", "0.70916754", "0.70261127", "0.6895416", "0.68448365", "0.68261546", "0.6748139", "0.67068934", "0.6699468", "0.6620064", "0.6547103", "0.65321696", "0.6459374", "0.6458041", "0.64433736", "0.6401062", "0.63941365", "0.6387239", "0.6347025", "0.63256323", "0.6316767", "0.6308764", "0.63037646", "0.6295533", "0.62919927", "0.6260481", "0.62494683", "0.6244639", "0.6218137", "0.6212929", "0.6196374", "0.6185697", "0.6149705", "0.6143491", "0.61432713", "0.60858166", "0.6060487", "0.6041137", "0.6012855", "0.6003986", "0.5998094", "0.5992455", "0.59686714", "0.59646344", "0.59634846", "0.59515864", "0.594956", "0.5939157", "0.5934918", "0.5923894", "0.5923786", "0.5922279", "0.5921568", "0.5896167", "0.58939004", "0.58925414", "0.58765554", "0.58697236", "0.5858791", "0.58567214", "0.5841174", "0.5832198", "0.5829967", "0.5817249", "0.58086824", "0.57883686", "0.578453", "0.578452", "0.5779876", "0.5779548", "0.57663006", "0.57562065", "0.57561725", "0.575127", "0.57465094", "0.574497", "0.5743024", "0.57359344", "0.57318115", "0.5728354", "0.5699839", "0.56952924", "0.56937134", "0.5691153", "0.56850755" ]
0.68400043
20
Copy all attributes from a given P5_SegmentClassifier object into self.
def copy(self, other_object): #------------------------------------------------------------------------- # Debug parameters #------------------------------------------------------------------------- self._path_to_model = other_object.path_to_model #------------------------------------------------------------------------- # Data-model parameters #------------------------------------------------------------------------- self.is_verbose = other_object.is_verbose self._df_invoice_line = other_object._df_invoice_line.copy() self._total_outliers = other_object._total_outliers self._df_invoice_ref = other_object._df_invoice_ref.copy() self._list_quant_feature = other_object._list_quant_feature.copy() self._list_feature_to_drop = other_object._list_feature_to_drop.copy() self._df_invoice_original = other_object._df_invoice_original.copy() if other_object._arr_sample_customerID is not None: self._arr_sample_customerID = other_object._arr_sample_customerID.copy() else : self._arr_sample_customerID = None self._df_invoice_line_out_sample \ = other_object._df_invoice_line_out_sample.copy() #------------------------------------------------------------------------- # RFM features #------------------------------------------------------------------------- self._is_rfm_encode = other_object._is_rfm_encode self._encoder_rfm = other_object._encoder_rfm self._df_customers_rfm = other_object._df_customers_rfm.copy() self.df_customers_rfm_fileName = other_object.df_customers_rfm_fileName self.df_RFM_quantiles = other_object.df_RFM_quantiles self._day_now = other_object._day_now self._is_transform_rfm = other_object._is_transform_rfm #------------------------------------------------------------------------- # Time features #------------------------------------------------------------------------- self._list_new_feature = other_object._list_new_feature self._pca_timeFeature = other_object._pca_timeFeature self._std_scaler_timeFeature = other_object._std_scaler_timeFeature self._df_customers_timeFeature_fileName \ = other_object._df_customers_timeFeature_fileName if other_object._dict_timeFeature_encoder is not None: self._dict_timeFeature_encoder \ = other_object._dict_timeFeature_encoder.copy() else: self._dict_timeFeature_encoder = other_object._dict_timeFeature_encoder if other_object._df_customers_timeFeature is not None: self._df_customers_timeFeature \ = other_object._df_customers_timeFeature.copy() else: self._df_customers_timeFeature = other_object._df_customers_timeFeature self._is_transform_timeFeature = other_object._is_transform_timeFeature #------------------------------------------------------------------------- # NLP features #------------------------------------------------------------------------- self._vectorizer_nlp = other_object._vectorizer_nlp self._matrix_weights_nlp = other_object._matrix_weights_nlp self._df_customers_nlp_fileName = other_object._df_customers_nlp_fileName self._pca_nlp = other_object._pca_nlp self._df_customers_pca_nlp = other_object._df_customers_pca_nlp.copy() self._nlp_pca_ndim = other_object._nlp_pca_ndim self._is_transform_nlp = other_object._is_transform_nlp #------------------------------------------------------------------------- # All features #------------------------------------------------------------------------- self._df_customers_fileName = other_object._df_customers_fileName self._df_customers = other_object._df_customers.copy() #------------------------------------------------------------------------- # Classifier 
#------------------------------------------------------------------------- if other_object._y_clusters is not None: self._y_clusters = other_object._y_clusters.copy() else: self._y_clusters = other_object._y_clusters self._dict_classifier_param = other_object._dict_classifier_param.copy() self._classifier_name = other_object._classifier_name self._classifier_model = other_object._classifier_model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self, threshold):\n self.indicator = threshold['indicator']\n self.stage = threshold['stage']\n self.begin = threshold['begin']\n self.end = threshold['end']\n self.quality = threshold['quality']\n self.weight = threshold['weight']\n return self", "def postprocess_segments(self):\n # make segs a list of mask arrays, it's easier to store\n # as there is a hdf5 equivalent\n for iseg, seg in enumerate(self.segs):\n mask = np.zeros(self.X.shape[0], dtype=bool)\n mask[seg] = True\n self.segs[iseg] = mask\n # convert to arrays\n self.segs = np.array(self.segs)\n self.segs_tips = np.array(self.segs_tips)", "def _set_attributes(self):", "def postprocess_segments(self):\n # make segs a list of mask arrays, it's easier to store\n # as there is a hdf5 equivalent\n for iseg, seg in enumerate(self.segs):\n mask = np.zeros(self._adata.shape[0], dtype=bool)\n mask[seg] = True\n self.segs[iseg] = mask\n # convert to arrays\n self.segs = np.array(self.segs)\n self.segs_tips = np.array(self.segs_tips)", "def copy(self) -> \"BaseSegment\":\n new_seg = copy(self)\n # Position markers are immutable, and it's important that we keep\n # a reference to the same TemplatedFile, so keep the same position\n # marker.\n new_seg.pos_marker = self.pos_marker\n if self.segments:\n new_seg.segments = tuple(seg.copy() for seg in self.segments)\n return new_seg", "def classifierCopy(self, old_cl, explore_iter):\n self.specified_attributes = copy.deepcopy(old_cl.specified_attributes)\n self.condition = copy.deepcopy(old_cl.condition)\n self.action = copy.deepcopy(old_cl.action)\n self.ga_timestamp = explore_iter\n self.init_timestamp = explore_iter\n self.prediction = old_cl.prediction\n self.error = old_cl.error\n self.fitness = old_cl.fitness", "def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)", "def __init__(self, in_dir, crop_h, crop_w, transform):\n super(SegmentationDataset, self).__init__()\n\n self.im_dir = in_dir + 'grey/'\n self.annot_dir = in_dir + 'labels/'\n self.transform = transform\n self.crop_h = crop_h\n self.crop_w = crop_w\n\n self.images = []\n\n # Generate crop list\n for img_path in chain(*(glob.iglob(path.join(self.im_dir, ext)) for ext in SegmentationDataset._EXTENSIONS)):\n _, name_with_ext = path.split(img_path)\n idx, _ = path.splitext(name_with_ext)\n\n self.images.append({\n \"idx\": idx,\n \"path\": img_path,\n \"annot_path\": self.annot_dir + idx + '.png'\n })", "def clone(self):\n return _libsbml.LineSegment_clone(self)", "def __init__(self, header, roi, track_id, detected_object, min, max):\n self._header = header\n self._roi = roi\n self._track_id = track_id\n self._object = detected_object\n\n self._p1 = deepcopy(min)\n\n self._p2 = deepcopy(self._p1)\n self._p2.x = max.x\n\n self._p3 = deepcopy(self._p2)\n self._p3.z = max.z\n\n self._p4 = deepcopy(self._p3)\n self._p4.x = min.x\n\n self._p5 = deepcopy(min)\n self._p5.y = max.y\n\n self._p6 = deepcopy(self._p5)\n self._p6.x = max.x\n\n self._p7 = deepcopy(self._p6)\n self._p7.z = max.z\n\n self._p8 = deepcopy(self._p7)\n self._p8.x = min.x", "def construct_segment(self):\n segment = Segment(\n model_id = self.model_id,\n chain_id = self.chain_id)\n\n segment.chain = self.chain\n segment.model = self.model\n\n return segment", "def __init__(self, fast5_path):\n self.fast5_path = fast5_path\n super(CreateLabels, self).__init__(fast5_path)\n self.aligned_signal = self._initialize()\n self.kmer_index = 2\n self.rna = self.is_read_rna()", "def __init__(self):\n 
self.svclassifier = SVC(kernel='linear')", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def construct_segment(self):\n segment = Segment(\n model_id = self.model_id,\n chain_id = self.chain_id)\n\n segment.chain = self\n segment.model = self.model\n\n return segment", "def __init__(self, ebunch=None):\n super(BayesianModel, self).__init__()\n if ebunch:\n self.add_edges_from(ebunch)\n self.cpds = []\n self.cardinalities = self.get_cardinality()\n self.probs = dict()", "def __init__(self,\n threshold=0.6,\n subsample=1.,\n estimator=DecisionTreeClassifier(max_depth=6),\n n_folds=2,\n stratify=True,\n random_state=1,\n n_jobs=-1):\n self.threshold = threshold\n self.subsample = subsample\n self.estimator = estimator\n self.n_folds = n_folds\n self.stratify = stratify\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.__Ddrifts = dict()\n self.__fitOK = False", "def copy(self):\n return Segment([p.copy() for p in self.endpoints])", "def __init__(self, slice_number: int = -1):\n super().__init__()\n self.metric = 'SEGAREA'\n self.slice_number = slice_number", "def readAssembledObjects(self):\n # get the classifier to use, if any, from the Assembler\n ## this is used to cluster the ROM segments\n self._divisionClassifier = self._assembledObjects.get('Classifier', [[None]*4])[0][3]\n self._metricClassifiers = self._assembledObjects.get('Metric', None)", "def prepare(self):\n orig_vobject_item = self._vobject_item\n self.serialize()\n self.etag\n self.uid\n self.name\n self.time_range\n self.component_name\n self._vobject_item = orig_vobject_item", "def __init__(self, **attrs):\n \n self.minifig_id = None\n self.name = None\n self.pieces = None\n \n self.img_url = None\n \n self.count = None\n \n super().__init__(**attrs)", "def __init__(self):\n self.s_sect = []", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def __init__(self):\n self.c_sect = []", "def __init__(self,dataframe,splitRatio,classLabel=\"classLabel\"):\n\t\tself.data = dataframe\n\t\tprint('Class feature for classification: '+classLabel)\n\t\tself.classLabel = 
classLabel\n\t\tself.splitRatio = splitRatio\n\n\t\t#Split data into training and testing dataframes\n\t\ttemp = self.data.loc[np.random.permutation(self.data.index)] \n\t\tsplitIndex = int(self.data.shape[0]*self.splitRatio)\n\t\tprint('Split {0} rows into train={1} and test={2} rows').format(len(self.data),\n\t\t\t\t\t\t\t\t\t splitIndex, len(self.data)-splitIndex)\n\t\tself.training = temp[0:splitIndex]\n\t\tself.testing = temp[splitIndex::]\n\n\t\t#Separate Training data by class\n\t\tself.separated = self.training.groupby(self.classLabel)\n\n\t\t#Compute first and second moments per class label for Training Data\n\t\tself.trainingMeans = self.separated.mean()\n\t\tself.trainingStds = self.separated.std()\n\n\t\t#Compute priors\n\t\tself.priors = self.training[self.classLabel].value_counts(True)", "def __init__(self, scaled_signal):\n self.scaled_signal = None\n self.raw_signal = None\n self._add_scaled_signal(scaled_signal)\n self.signal_length = len(self.scaled_signal)\n self.minus_strand = None\n # label can be used for neural network training with all signal continuously labelled\n self.label = defaultdict()\n # predictions can have multiple labels for different sections of current\n self.prediction = defaultdict()\n # guides are sections that we are confident in (guide alignments)\n self.guide = defaultdict()", "def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))", "def setAttributesfromConfigParser(cp, obj=None):\n\n if obj == None:\n class Object(object):\n pass\n obj = Object()\n for s in cp.sections():\n obj.__dict__[s] = cp.items(s)\n for f in cp.items(s):\n try: val=int(f[1])\n except: val=f[1]\n obj.__dict__[f[0]] = val\n return obj", "def process(self):\n self.output_info = self.attributes.copy()", "def __init__(self):\n self.X = None\n self.Y = None\n self.features = None\n self.max = self.min = None\n self._look_up = None\n self.attr_weight = None", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def fetch(self, segment):\n pass", "def __init__(self):\n self.size_joint_feature = None", "def __copy__(self):\n # prepare unnamed arguments\n args = [getattr(self, arg) for arg in self._copy_conf['args']]\n\n # prepare named arguments\n kwargs = {}\n for arg in self._copy_conf['kwargs']:\n # if arg is a tuple, the first entry will be the named kwargs, and\n # the second will be the name of the attribute to copy\n name = arg\n if isinstance(arg, tuple):\n name, arg = arg\n if hasattr(self, arg):\n kwargs[name] = getattr(self, arg)\n\n # create the new instance\n new_copy = self.__class__(*args, **kwargs)\n\n # then copy attributes\n for attr_name in self._copy_conf['attrs']:\n if hasattr(self, attr_name):\n setattr(new_copy, attr_name, getattr(self, attr_name))\n\n return new_copy", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def _set_attributes(self, model):\n\n if model:\n self._get_dict(model)", "def new_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n if kwargs['objectname'] is None or kwargs['gateway'] is None:\n print(\"Please specify a name for the segment, and 
the gateway/network.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"flexible\" and kwargs['tier1_id'] is None:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n sys.exit(1)\n if kwargs['segment_type'] == \"fixed\" and kwargs['tier1_id'] is not None:\n print(\"Invalid configuration - 'fixed' segments may only be connected to the default CGW. To attach to a customer Tier1, please create a 'flexible' segment.\")\n sys.exit(1)\n rt_set = [None, \"ROUTED\", \"DISCONNECTED\"]\n if kwargs['segment_type'] == \"fixed\" and kwargs['routing_type'] not in rt_set:\n print(\"Invalid configuration. For a 'fixed' segment, the routing type must be left blank or set explicitly to 'ROUTED' or 'DISCONNECTED.'\")\n sys.exit(1)\n\n segment_name = kwargs[\"objectname\"]\n gateway = kwargs['gateway']\n\n # Search for segment to determine if it already exists\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n print(\"The segment already appears to exist.\")\n sys.exit(1)\n\n\n # Establish baseline json payload\n json_data = {\n \"display_name\":segment_name,\n \"id\":segment_name,\n \"advanced_config\":{\"connectivity\":\"ON\"},\n \"subnets\":[\n {\n \"gateway_address\": gateway\n }\n ]\n }\n #set segment type as either \"fixed\" or \"flexible\"\n segment_type = kwargs['segment_type']\n tier1_id = kwargs['tier1_id']\n\n if segment_type == \"fixed\":\n json_data[\"connectivity_path\"] = \"/infra/tier-1s/cgw\"\n if kwargs['routing_type'] == \"DISCONNECTED\":\n json_data[\"advanced_config\"][\"connectivity\"] = \"OFF\"\n else:\n json_data[\"advanced_config\"][\"connectivity\"] = \"ON\"\n elif segment_type == \"flexible\" and tier1_id is not None:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{tier1_id}'\n else:\n print(\"Please specify either the segment type as 'fixed' (-st fixed) OR segment type as 'flexible' as well as the ID of the Tier1 for connectivity (-t1id TIER1ID). Use pyVMC -h for additional options.\")\n if kwargs['dhcp_range'] is not None:\n json_data[\"subnets\"][0][\"dhcp_ranges\"] = [f'{kwargs[\"dhcp_range\"]}']\n if kwargs['domain_name'] is not None:\n json_data[\"domain_name\"] = kwargs[\"domain_name\"]\n\n print(json.dumps(json_data, indent = 2))\n\n status = new_segment_json(proxy, sessiontoken, segment_name, segment_type, json_data)\n if status == 200:\n print(f'The following network has been created: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not created. 
Please check your syntax and try again.\")\n sys.exit(1)", "def __init__(self, **attributes):\n self.set(**attributes)", "def assign_actual(segments_path, training_path):\n pass", "def _make_segment_dict(obj):\n #NOTE(jrichard) drop change in next rebase.\n return {'id': obj.id,\n NETWORK_TYPE: obj.network_type,\n PHYSICAL_NETWORK: obj.physical_network,\n SEGMENTATION_ID: obj.segmentation_id,\n NETWORK_ID: getattr(obj, 'network_id', None)}", "def consolidate_instances_all_way(self, stats, segmented_instances):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n #get all pixel labels in the segmented_instances mask\n segment_numbers = np.unique(segmented_instances)\n\n # remove the background label\n segment_numbers=segment_numbers[segment_numbers!=0]\n\n end_points = np.empty((len(segment_numbers),),dtype=np.object_)\n end_points.fill([])\n\n for curr_segment in segment_numbers:\n idx=[]\n i=curr_segment-1\n if curr_segment!=0:\n #Show all segments of curr_segment. Only useful to view results\n img[segmented_instances== curr_segment]= 255\n #get indeces of the segments for curr_segment\n idx = np.argwhere(segmented_instances == curr_segment)\n if len(idx>0):\n end_points[i]= self._get_end_points(segmented_instances, i, \\\n stats, idx)\n # add point markers and lines connecting each end point to centroid.\n # useful only to view results\n \"\"\"for pt_num, pt in enumerate(end_points[i]):\n cv2.circle(img, (pt[0],pt[1]), 3, 100, -1)\n cv2.line(img,(pt[0],pt[1]),\\\n (stats['centroid'][i,0], stats['centroid'][i,1]),150,2)\n cv2.circle(img, (stats['centroid'][i,0], stats['centroid'][i,1]), 3, 200, -1)\"\"\"\n #self.showme(img, 'line '+str(i))\n\n # cluster segments into stem instances\n cluster_mask, clustered_instances = self._cluster_segments_all_way(segmented_instances,\\\n segment_numbers, end_points, \\\n stats)\n\n #put all instances in one layer\n if len(cluster_mask)>0:\n single_layer_cluster_mask=np.zeros(cluster_mask[0].shape)\n for i in xrange(len(cluster_mask)):\n single_layer_cluster_mask[cluster_mask[i]>0]= i+1\n\n # self.showObjects(clustered_instances);\n return single_layer_cluster_mask, clustered_instances", "def load_object(self, obj):\n\n self.poses = obj.poses\n self.selected_point = obj.selected_point\n self.calibration_changed()", "def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n\n s('oself.coef_ = oself.model.coef_')\n s('oself.intercept_ = oself.model.intercept_')\n\n self.time_prepare = None\n s('oself.time_prepare = oself.model.time_prepare')\n self.time_upload_data = None\n s('oself.time_upload_data = oself.model.time_upload_data')\n self.time_fitonly = None\n s('oself.time_fitonly = oself.model.time_fitonly')", "def __init__(self, dset, centroid):\r\n assert isinstance(dset,a6dataset.Dataset)\r\n assert type(centroid)==list\r\n assert len(centroid)==dset.getDimension()\r\n assert a6checks.is_point(centroid)\r\n\r\n copy=[]\r\n for k in centroid:\r\n copy.append(k)\r\n self._dataset=dset\r\n self._centroid=copy\r\n self._indices=[]", "def prepareParrallelize(self,segs):\n\n angles = numpy.array([s.angle for s in segs ])\n angles[numpy.where(angles<0)] += _pi # we care about direction, not angle orientation\n clList = clusterValues(angles, 0.15, refScaleAbs='abs')\n\n for cl in clList:\n meanA = angles[list(cl)].mean()\n for i in cl:\n seg = segs[i]\n seg.newAngle = meanA if seg.angle>=0. 
else meanA-_pi", "def __init__(self, df, num_classes, image_size, device):\n self.maps = df['map_path'].tolist() \n self.contours = df['contourLevel'].tolist()\n self.points = df['tagged_points_path'].tolist()\n self.masks = df['tagged_path'].tolist()\n self.num_classes = num_classes\n self.image_size = image_size\n self.device = device", "def __init__(self, annoPath, classes , datasetName, setID,useDiff = True,\n convertToPerson = None, convertIdToCls = None,\n is_image_index_flattened=False, splitIndex=1):\n self._annoPath = annoPath\n self._classes = classes\n self._datasetName = datasetName\n self._setID = setID\n self.num_classes = len(classes)\n self.useDiff = useDiff\n self._classToIndex = self._create_classToIndex(classes)\n self._convertToPerson = convertToPerson\n self._convertIdToCls = convertIdToCls\n self._is_image_index_flattened = is_image_index_flattened\n self._splitIndex = 1", "def __init__(self, pointFile, labelFile):\n UnsupervisedLearning.__init__(self, pointFile, labelFile)\n self.distances = {} # caches distance computations we may need again\n self.centroids = {} # maps clusters to centroids\n self.pointcounts = {} # maps clusters to num_points\n self.labelcount = 0\n # replace this with another function to explore other\n # measures of cluster similarity\n\n self.closestClusters = self.averageLinkClosest\n self.tree = self.buildTree()", "def __init__(self, clf, **kwargs):\n\n # Is done before parents __init__ since we need\n # it for _set_retrainable called during __init__\n self.__clf = clf\n \"\"\"Store the classifier to use.\"\"\"\n\n Classifier.__init__(self, **kwargs)\n\n # adhere to slave classifier capabilities\n # TODO: unittest\n self.__tags__ = self.__tags__[:] + ['meta']\n if clf is not None:\n self.__tags__ += clf.__tags__", "def __init__(self,image_in):\r\n \r\n image = image_in.astype(\"uint8\")\r\n self.image = cv2.resize(image,(st_width, st_height),interpolation = cv2.INTER_AREA)\r\n \r\n # define contours of objects on the image\r\n ret, thresh = cv2.threshold(image,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\r\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\r\n \r\n self.contours = contours\r\n self.hierarchy = hierarchy\r\n self.top_contours = top_level_contours(hierarchy)\r\n \r\n # define bounding shapes of objects in the image\r\n self.contours_poly, self.boundRect, self.centers, self.radius = bound_shapes(contours)\r\n self.masked_image = image\r\n self.compressed_path = (-1,-1)", "def model_load(file_name=None):\n if file_name is None :\n file_name = \"./data/_oP5_SegmentClassifier.dump\"\n else:\n pass\n\n return p5_util.object_load(file_name)", "def CopyAllocate(self, vtkDataSetAttributes, p_int, p_int_1, p_int_2):\n ...", "def copy(self):", "def __init__(self, threshold = 0.65):\n \n p = os.path.dirname(os.path.realpath(__file__)) + '/models/'\n self.face_detector = cv2.dnn.readNetFromTensorflow(p + \"opencv_face_detector_uint8.pb\",\n p + \"opencv_face_detector.pbtxt\")\n self.align_predictor = dlib.shape_predictor(p +'shape_predictor_68_face_landmarks.dat')\n self.gender_svm = joblib.load(p + 'svm_classifier.joblib')\n self.vgg_feature_extractor = VGGFace(include_top = False, input_shape = (224, 224, 3), pooling ='avg')\n self.threshold = threshold", "def __init__(self, threshold=0.5, num_points=11, *args, **kwargs):\n super(SSD_AP, self).__init__(threshold=threshold, num_points=num_points, *args, **kwargs)", "def __init__(self, obj, *args, **kwargs):\n self.obj_ = obj\n super(ArtificialRV, 
self).__init__(*args, **kwargs)", "def __init__(self, segments, lemma = None, case = None):\n self.segments = segments\n if isinstance(self.segments, str):\n self.segments = [Segment.new_segment(s) for s in self.segments]\n self.lemma = lemma\n self.case = case", "def __init__(self, harvest_attribs=None, copy_attribs='copy', **kwargs):\n ClassWithCollections.__init__(self, **kwargs)\n\n self.__atribs = harvest_attribs\n self.__copy_attribs = copy_attribs\n\n self._setAttribs(harvest_attribs)", "def __setAttributes(self):\n values = {\"f\":\"json\"}\n layerInfo = self._getEsriRESTJSON(self.url,values)\n #Geometry Type\n geometryType = getGeometryType(layerInfo['geometryType'])\n self.geometryType = geometryType\n #Name\n name=arcpy.ValidateTableName(layerInfo['name'])\n self.name=name\n #Spatial Reference - both the wkid and the arcpy SpatialReference object\n #in case it's in a wkt\n try:\n wkid = layerInfo['extent']['spatialReference']['wkid']\n except:\n wkid = 4326\n sr = arcpy.SpatialReference()\n sr.factoryCode = int(wkid)\n sr.create()\n self.sr = sr\n self.wkid = wkid\n #field used to update the feature class are a subset of all the fields in a feature class\n fields = layerInfo['fields']\n updateFields = []\n for field in fields:\n if (field['type'] in ['esriFieldTypeOID','esriFieldTypeGeometry','esriFieldTypeGUID'] or 'shape' in field['name'].lower() or field['name'] in self.userFields):\n pass\n else:\n updateFields.append(field)\n updateFields.insert(0, {\"name\":'Shape@', \"type\":\"esriFieldTypeGeometry\"})\n self.updateFields = updateFields\n #Max values\n if layerInfo.has_key('maxRecordCount'):\n self.maxRecordCount = int(layerInfo['maxRecordCount'])\n else:\n self.maxRecordCount = 1000", "def restore(self):\n if self.obj:\n for attrib in self.attribs:\n setattr(self.obj, attrib, getattr(self, attrib))", "def _cluster_model_bundle(self, model, model_clust_thr, identifier=None):\n thresholds = [30, 20, 15, model_clust_thr]\n model_cluster_map = qbx_and_merge(model, thresholds,\n nb_pts=self.nb_points,\n rng=self.rng,\n verbose=False)\n self.model_centroids = model_cluster_map.centroids\n len_centroids = len(self.model_centroids)\n if len_centroids > 1000:\n logging.warning('Model {0} simplified at threshod '\n '{1}mm with {2} centroids'.format(identifier,\n str(model_clust_thr),\n str(len_centroids)))", "def _initialize_attributes(self):\n height, width = self.image.shape[:2]\n\n self.confidence = (1 - self.mask).astype(float)\n self.data = np.zeros([height, width])\n\n self.working_image = np.copy(self.image)\n self.working_mask = np.copy(self.mask)", "def __init__(self, obj, **adapted_methods):\n self.obj = obj\n self.__dict__.update(adapted_methods) # 将传入的实例属性作为适配器实例的属性", "def copy(self):\n r = PredictionJobRequest()\n r.__dict__.update(self.__dict__)\n\n return r", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def __init__(\n self,\n cns,\n merged_vcf,\n out_dir,\n ):\n # pylint: disable=line-too-long\n self.cns = cns\n self.out_dir = os.path.join(out_dir)\n self.vcf = str(merged_vcf)\n self.segs = os.path.join(self.out_dir, \"segs.csv\")\n self.circos_out = os.path.join(self.out_dir, \"circos.png\")\n self.svs = {}\n\n if os.path.isfile(self.vcf):\n self.svs = self.get_svs()", "def 
__init__(self):\n self.g_sect = []", "def _update_state_from_infos(self) -> None:\n # update the keys that is the integer label_value of the SegmentInfo\n self.infos = {\n si.label_value : si for si in self.infos.values()\n }\n return None\n\n # TODO Legacy branch\n # for idx, seginfo in enumerate(self.infos.values()):\n # prefix = f'Segment{idx}_'\n # self.metadata.update(\n # seginfo.to_dict(keystyle='slicer', prefix=prefix)\n # )", "def __init__(self,DA_obj,filename,state='unknown',pool='unknown',status='unknown',element_type='unknown'):\n\t\tself.DA = DA_obj\n\t\tself.filename = filename\n\t\tself.target = \"%s/%s\" % (DA_obj.segment_path,filename)\n\t\tself.state = state\n\t\tself.pool = pool\n\t\tself.element_type = element_type\n\t\tself.mod_date = fileutil.mod_date(self.target)\n\t\tself.st_size = fileutil.st_size(self.target)", "def merge_annotation(self, other_seg):\n try:\n assert isinstance(other_seg, SFFSegmentation)\n except AssertionError:\n print_date(_encode(u\"Invalid type for other_seg: {}\".format(type(other_seg)), u'utf-8'))\n sys.exit(65)\n # global data\n self.name = other_seg.name\n self.software = other_seg.software\n self.global_external_references = other_seg.global_external_references\n self.details = other_seg.details\n # loop through segments\n for segment in self.segments:\n other_segment = other_seg.segments.get_by_id(segment.id)\n segment.biological_annotation = other_segment.biological_annotation\n segment.complexes_and_macromolecules = other_segment.complexes_and_macromolecules", "def new_segments_center_of_mass_set(self, segments_center_of_mass):\n if segments_center_of_mass.time.size != 1:\n raise IndexError(\"Segments center of mass should be from one frame only\")\n self.segments_center_of_mass = segments_center_of_mass\n\n # Remove previous actors from the scene\n for actor in self.segments_center_of_mass_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.segments_center_of_mass_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(segments_center_of_mass.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.segments_center_of_mass_actors.append(vtkActor())\n self.segments_center_of_mass_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.segments_center_of_mass_actors[i])\n\n # Update marker position\n self.update_segments_center_of_mass(self.segments_center_of_mass)", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def copyAttributes(fromNcVar, toNcVar):\r\n for attrName in fromNcVar.ncattrs():\r\n attrVal = getattr(fromNcVar, attrName)\r\n setattr(toNcVar, attrName, attrVal)", "def CopyData(self, p_int, vtkDataSetAttributes, p_int_1, vtkDataSetAttributes_1, p_int_2):\n ...", "def _prep_attributes(self):\n self.coeff_ = self._coeff_to_df()\n self.results_ = self._results()\n self.cv_results_ = self._cv_results()\n 
self.intercept_ = self.model_.intercept_\n self.params_ = self.model_.get_params()\n\n return None", "def _post_process(self, preds) -> List[Dict]:\n if isinstance(preds, tuple):\n dets = preds[0]\n segms = preds[1]\n else:\n dets = preds\n segms = [[]] * len(dets)\n\n classes = self.model.CLASSES\n if isinstance(classes, str):\n classes = (classes, )\n\n assert len(dets) == len(classes)\n assert len(segms) == len(classes)\n\n objects = []\n\n for i, (label, bboxes, masks) in enumerate(zip(classes, dets, segms)):\n\n for bbox, mask in zip_longest(bboxes, masks):\n if bbox[4] < self.bbox_thr:\n continue\n obj = {\n 'class_id': i,\n 'label': label,\n 'bbox': bbox,\n 'mask': mask,\n 'det_model_cfg': self.model.cfg\n }\n objects.append(obj)\n\n return objects", "def __init__(self,clip_list):\n self.requested_clips=clip_list", "def copyfragment(fragment0, newobj):\n\n # Copy attribute data\n for item in fragment0.attributes():\n newobj[item] = fragment0.get_attribute(item)\n # Copy tables\n for tbl in fragment0.tables():\n newobj.addtable(tbl)\n # Copy keytexts\n for i in range(0, fragment0.nkeytexts()):\n keytext = fragment0.keytext(i)\n newobj.addkeytext(keytext.name(), keytext.junk_text(), keytext.message())\n # Try to copy other attributes that fragment subclasses\n # have (such as keywords)\n try:\n for line in fragment0.keywords():\n newobj.addkeyword(line)\n except AttributeError:\n # Either the source or target doesn't support\n # keyword storage\n pass\n # Return the populated object\n return newobj", "def __init__(self, df):\n self._binarized_df = None\n self.__schema__ = 'viNet'\n self._confidence = viNetDataframeColumn.confidence.value\n self._predicted = viNetDataframeColumn.predicted.value\n self._groundtruth = viNetDataframeColumn.groundtruth.value\n\n self._validate(df)\n self._dataframe = df\n self._customer = None\n self._windfarm = None\n self._tag = 'Unknown Tag'\n self._config = 'Unknown Config'", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def __init__(self, name, attributes, incident_edges):\n self.name = name # initialize all necessary fields\n self.attributes = attributes\n self.incident_edges = incident_edges", "def __init__(self, name, attributes, incident_edges):\n self.name = name # initialize all necessary fields\n self.attributes = attributes\n self.incident_edges = incident_edges", "def __init__(self, nfolds, instances, labels):\n self.nfolds = nfolds\n self.instances = instances\n self.labels = labels\n self.mergefolds = self.stratify()", "def copy(self, extra=None):\n java_p_map = self._to_scala_param_map(extra)\n 
new_java_obj = self._java_obj.copy(java_p_map)\n ret_val = MLPipeline([])\n ret_val._java_obj = new_java_obj\n return ret_val", "def __init__(self):\n # Create an empty list of features\n self.features = []\n # Create an empty list of edges\n self.constraints = []", "def __init__(self, obj, adapted_methods):\n self.obj = obj\n self.__dict__.update(adapted_methods)", "def process(self, method):\n process_dicts = []\n for d in self.data_dicts:\n dd = copy.deepcopy(d)\n for ap in self.aps:\n dd[ap] = method(d[ap])\n process_dicts.append(dict2str(dd))\n\n # print(process_dicts)\n # print(type(process_dicts[0]))\n return Dataset(process_dicts)", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def __init__(self):\n\n self.points = None\n self.centroid_activation_frames = None\n self.noiseless_frames = None\n self.frames = None", "def __init__(self, scn_line_list):\n self.scn_line_list = scn_line_list", "def add_segment(self, segment):\n assert segment is None or isinstance(segment, Segment)\n\n self.segment = segment\n if segment is None:\n return\n\n ## reset Strand description with the description derived\n ## from the new Segment\n try:\n frag1 = segment[0]\n frag2 = segment[-1]\n except IndexError:\n return\n\n self.chain_id1 = frag1.chain_id\n self.fragment_id1 = frag1.fragment_id\n self.res_name1 = frag1.res_name\n\n self.chain_id2 = frag2.chain_id\n self.fragment_id2 = frag2.fragment_id\n self.res_name2 = frag2.res_name", "def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)", "def __init__(self, centroid):\n self.label = ''\n self.centroid = centroid\n self.points = []\n self.radius = 0.0 # used to draw plot\n self.neighbour = {}\n self.inter_cost = 0\n self.intra_cost = 0\n self.dm_cost = 0", "def from_extract(self, extract: List[Chain], X: np.ndarray, y: np.ndarray) -> 'SklearnModel':\n new_model = deepcopy(self)\n combined_chain = self._combine_chains(extract)\n self._model_samples, self._prediction_samples = combined_chain[\"model\"], combined_chain[\"in_sample_predictions\"]\n self._acceptance_trace = combined_chain[\"acceptance\"]\n new_model.data = self._convert_covariates_to_data(X, y)\n return new_model", "def consolidate_instances(self, stats, segmented_instances, idx_map):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n labels = np.unique(segmented_instances)\n labels=labels[labels!=0]\n reverse_idx_map = np.zeros(len(idx_map)).astype(np.int)\n for l in labels:\n reverse_idx_map[idx_map[l]]=np.int(l)\n\n #calculate slope of line between centroids.\n # TO DO: make this more efficient.\n centroid_slopes = self._calc_centroid_slopes(segmented_instances, labels, stats, idx_map)\n seg_slopes = np.zeros(len(labels))\n #for each instance i\n for i in range(0, len(labels)):\n idx=[]\n curr_label = reverse_idx_map[i]\n if curr_label!=0:\n #Show all segments of curr_label\n img[segmented_instances== curr_label]= 255\n #calculate slope m of instance i\n idx = np.argwhere(segmented_instances == curr_label)\n if len(idx>0):\n max_y= max(idx[:,0])\n min_y= min(idx[:,0])\n x_for_max = idx[idx[:,0]==max_y, 1][0]\n x_for_min = idx[idx[:,0]==min_y, 1][0]\n if x_for_max < x_for_min:\n x1= x_for_max\n y1= max_y\n x2= x_for_min\n y2= min_y\n else:\n x1= x_for_min\n y1= min_y\n x2= x_for_max\n y2= max_y\n m = self._slope(x1,y1,x2,y2)\n seg_slopes[i]=m\n cv2.line(img,(x1, y1),(x2, y2),(0,100,0),4)\n cv2.circle(img, 
(stats['centroid'][i,0], stats['centroid'][i,1]), 3, (200, 0, 0), -1)\n #self.showme(img, 'line '+str(i))\n\n # cluster segments\n clusters, clustered_instances = self._cluster_segments(segmented_instances, centroid_slopes, seg_slopes, reverse_idx_map)\n #find the closest centroid to a line with slope m that starts at the instances centroid\n # self.showObjects(clustered_instances);\n return clusters, clustered_instances", "def _update_classification_mask(self, obj_class, mask_coords):\n # Remove background where object resides\n self.classification_mask[0, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = 0\n # Set classification mask where object resides\n self.classification_mask[obj_class, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = 1", "def configure_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n # Quick search to see if the segment exists of not.\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n # If the segment exists, capture the path for the API call, and the existing configuration in JSON.\n if len(segment['results']) > 0:\n json_init=segment['results'][0]\n segment_path = segment['results'][0]['path']\n else:\n print(\"The segment does not exist. Please create a segment using 'new-segment'.\")\n sys.exit(1)\n # Establish a list of keys to keep - these represent the values we are willing/able to update.\n keep_list = ['display_name', 'connectivity_path', 'advanced_config', 'resource_type', 'subnets']\n # Construct a new JSON using just the keys we want to keep\n json_data = dict([(key, val) for key, val in \n json_init.items() if key in keep_list])\n # Update the json_data with the configuration specified by the user.\n if kwargs['connectivity'] is not None:\n json_data[\"advanced_config\"][\"connectivity\"] = f'{kwargs[\"connectivity\"]}'\n if kwargs['tier1_id'] is not None:\n if segment_path == \"/infra/tier-1s/cgw\":\n print(\"This is a fixed segment - you may not alter the connectivity path. Please create a 'flexible' segment.\")\n else:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{kwargs[\"tier1_id\"]}'\n#\n # make the call to the API\n status = configure_segment_json(proxy, sessiontoken, segment_path, json_data)\n # present results.\n if status == 200:\n print(f'The following network has been modified: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not modified. Please check your syntax and try again.\")\n sys.exit(1)", "def __init__(self, proportion, splits):\n\t\tself.proportion = proportion\n\t\tself.splits = splits", "def __copy__(self,iClass=None):\n iClass = iClass or self.__class__\n clone = iClass(GPath(self.archive))\n copier = copy.copy\n getter = object.__getattribute__\n setter = object.__setattr__\n for attr in Installer.__slots__:\n setter(clone,attr,copier(getter(self,attr)))\n return clone" ]
[ "0.5413871", "0.51940006", "0.5167523", "0.5163446", "0.5083279", "0.49784464", "0.49112073", "0.48626807", "0.48342088", "0.48338348", "0.482826", "0.48198396", "0.48187032", "0.4814801", "0.48118797", "0.4783213", "0.4774069", "0.47707164", "0.47706905", "0.47557005", "0.47368872", "0.47045633", "0.46992242", "0.46973717", "0.46813193", "0.46777764", "0.46617073", "0.46511912", "0.46497196", "0.46303564", "0.46290654", "0.46251616", "0.46094564", "0.4589829", "0.45877218", "0.45875326", "0.45799446", "0.45742327", "0.4572919", "0.4555654", "0.4551197", "0.45466352", "0.45448613", "0.45414782", "0.45374185", "0.45349512", "0.45307496", "0.45300686", "0.45232844", "0.4515142", "0.4507471", "0.4505522", "0.44912887", "0.4486622", "0.4482687", "0.44825912", "0.44822034", "0.4470457", "0.44695202", "0.4467874", "0.44596595", "0.44571865", "0.4453976", "0.44448623", "0.44423184", "0.44423127", "0.44349018", "0.44305232", "0.44303715", "0.44293463", "0.44255883", "0.4424196", "0.44212765", "0.44199222", "0.4416618", "0.44150147", "0.44048184", "0.4393619", "0.4391093", "0.4389245", "0.43852", "0.43840736", "0.43840736", "0.43837893", "0.43787858", "0.43773633", "0.43711486", "0.43694162", "0.43685913", "0.4365937", "0.43629047", "0.43628466", "0.43625364", "0.43591097", "0.4355896", "0.4353909", "0.43476975", "0.43460765", "0.43442145", "0.43437663" ]
0.47427928
20
Remove rows with countries other than 'United Kingdom', then remove the Country feature.
def _feature_country_process(self): if 'Country' not in self._df_invoice_line.columns: return list_countries_keep = ['United Kingdom'] rows_before = self._df_invoice_line.shape[0] df_invoice_line_new = pd.DataFrame() for country in list_countries_keep : df_invoice_line_new = df_invoice_line_new.append(\ self._df_invoice_line[self._df_invoice_line['Country']==country]\ , ignore_index=True) self.df_invoice_line = df_invoice_line_new del(df_invoice_line_new) rows_after = self._df_invoice_line.shape[0] _print_stat_rows("Countries filtering : ",rows_before, rows_after) #------------------------------------------------------------------------- # Due to the fact only one country is used, then this feature is dropped #------------------------------------------------------------------------- list_col_to_keep = [col for col in self._df_invoice_line.columns \ if col not in 'Country'] self._df_invoice_line = self._df_invoice_line[list_col_to_keep] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_feature_filter(self):\n self.features = set()\n for language in self.data.values():\n features_in_data = set(language.keys())\n features_to_keep = features_in_data & self.feature_filter\n self.features |= features_to_keep\n features_to_remove = features_in_data - features_to_keep\n for feat in features_to_remove:\n language.pop(feat)\n self.features = sorted(list(self.features))", "def clean_iso_country(spark, input_data):\n try:\n #read file\n df_iso_country = spark.read.option(\"header\",\"true\").csv(input_data+'wikipedia-iso-country-codes.csv')\n df = (df_iso_country.withColumnRenamed('English short name lower case','country_name') \\\n .withColumnRenamed('Alpha_2', 'country_iso2') \\\n .withColumnRenamed('Alpha_3', 'country_iso3') \\\n .withColumnRenamed('Num_code','country_num'))\n\n df_clean_iso_country = df_iso_country.drop(\"ISO_3166-2\") \\\n .select(F.col(\"Country\").alias(\"country_name\"), \\\n F.col(\"Alpha_2\").alias(\"country_iso2\"), \\\n F.col(\"Alpha_3\").alias(\"country_iso3\"), \\\n F.col(\"Num_code\").alias(\"country_num\") \\\n .cast(\"int\")) \\\n .dropDuplicates()\n print('***** Make df_clean_iso_country processing ')\n df_clean_iso_country.printSchema()\n #df_clean_iso_country.show(2)\n except Exception as e:\n print(\"Unexpected error: %s\" % e)\n else:\n return(df_clean_iso_country)", "def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry", "def clean_countries(event_db):\n event_db[\"country_edb\"] = event_db[\"country_edb\"].apply(_clean_country_str)\n event_db = my_utils.split_strings_at_comma_and_distribute_to_new_rows(event_db, 'country_edb')\n return event_db", "def clean_country(raw_country):\n #\n if raw_country[0:2]==\"l'\":\n raw_country = raw_country[2:]\n country = ''.join((c for c in unicodedata.normalize('NFD', raw_country) if unicodedata.category(c) != 'Mn'))\n country = re.sub(r\"(\\s|')\", \"-\", country) # replace space and quotes with dash\n return country", "def mask_foreign_country(column):\n codes = misc_utils.load_country_code()\n # Remove New Zealand from foreign country list\n codes = codes.drop(codes[codes['Alpha-2'] == 'nz'].index)\n # Remove texts in brackets: belgian franc (convertible) -> belgian franc\n codes['Country'] = codes['Country'].replace({r'\\(.*\\)': ''}, regex=True).str.strip()\n regex = list()\n regex.append('|'.join(r'\\s' + codes['Country'] + r'\\b'))\n # Don't use Alpha-2 and Alpha-3 since there are lots of misreplacement\n # regex.append('|'.join(r'\\s' + codes['Alpha-2'] + r'\\b'))\n # regex.append('|'.join(r'\\s' + codes['Alpha-3'] + r'\\b'))\n regex_str = '|'.join(regex)\n column = column.replace(regex_str, ' $FOREIGN_COUNTRY ', regex=True)\n return column", "def clean_data():\n datapath = Path(os.getcwd()) / \"data\"\n files = [str(file) for file in datapath.glob(\"*.csv\")]\n for file in files:\n if file.endswith(\"confirmed.csv\"):\n Confirmed = pd.read_csv(file)\n elif file.endswith(\"deaths.csv\"):\n Deaths = pd.read_csv(file)\n elif 
file.endswith(\"recovered.csv\"):\n Recovered = pd.read_csv(file)\n\n dataFrames = [Confirmed, Deaths, Recovered]\n countryList = list(dataFrames[0][\"Country/Region\"]) #list of valid countries\n countryList = list(dict.fromkeys(countryList))\n\n #create country population dictionary and align values with those in countryList\n countriesPop = {}\n countriesPop[\"US\"] = CountryInfo(\"USA\").population()\n countriesPop[\"Czechia\"] = CountryInfo(\"Czech Republic\").population()\n countriesPop[\"Taiwan*\"] = CountryInfo(\"Taiwan\").population()\n countriesPop[\"Korea, South\"] = CountryInfo(\"South Korea\").population()\n countriesPop[\"Eswatini\"] = CountryInfo(\"Swaziland\").population()\n countriesPop[\"Cote d'Ivoire\"] = CountryInfo(\"Ivory Coast\").population()\n\n for country in countryList:\n try:\n countriesPop[country] = CountryInfo(country).population()\n except KeyError:\n pass\n\n #remove unnecessary information from dataframes\n for count in range(len(dataFrames)):\n dataFrames[count] = dataFrames[count].drop(\"Province/State\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Lat\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Long\",axis=1)\n dataFrames[count] = dataFrames[count].rename(columns={\"Country/Region\": \"Country\"})\n dataFrames[count][\"Country\"] = dataFrames[count][\"Country\"].replace({\"Korea, South\": \"South Korea\"})\n dataFrames[count] = dataFrames[count].groupby(\"Country\").sum()\n\n # create per 100k capita values by dividing country data by population\n ConfirmedPC = dataFrames[0].copy()\n DeathsPC = dataFrames[1].copy()\n RecoveredPC = dataFrames[2].copy()\n countryList.append(\"South Korea\")\n\n for country in countryList:\n try:\n ConfirmedPC.loc[country] = ConfirmedPC.loc[country].divide(countriesPop[country]).multiply(100000) #confirmed cases per 100k inhabitants\n DeathsPC.loc[country] = DeathsPC.loc[country].divide(countriesPop[country]).multiply(100000) #deaths per 100k inhabitants\n RecoveredPC.loc[country] = RecoveredPC.loc[country].divide(countriesPop[country]).multiply(100000) #recovered cases per 100k inhabitants\n except KeyError:\n pass\n\n dataFrames.extend([ConfirmedPC, DeathsPC, RecoveredPC])\n\n return dataFrames, countryList", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return", "def clean_information(data):\n\n\t# create a list dict\n\tcountries = []\n\t\n\t# open csv file\n\twith open('input.csv') as csvfile:\n\n\t\t# read in file as dictionary\n\t\tdatareader = csv.DictReader(csvfile)\n\n\t\t# for every row in data reader\n\t\tfor row in datareader:\n\n\t\t\t# create space for a dictionary\n\t\t\tdictionary = {}\n\n\t\t\t# if value is unknown go to next country\n\t\t\tif row['Pop. Density (per sq. mi.)'] == 'unknown':\n\t\t\t\tcontinue\n\n\t\t\tif row['GDP ($ per capita) dollars'] == 'unknown':\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\n\t\t\tif not row['Pop. Density (per sq. 
mi.)']:\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\t\n\t\t\tif not row['Infant mortality (per 1000 births)']:\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\n\t\t\tif not row['GDP ($ per capita) dollars']:\n\t\t\t\tcontinue\n\n\t\t\t# find country and strip for white space\n\t\t\tdictionary['Country'] = row['Country'].rstrip()\n\n\t\t\t# get region and put it in a dictionary\n\t\t\tdictionary['Region'] = row['Region'].rstrip()\n\n\t\t\t# add population density to dictionary\n\t\t\tdictionary['Pop. Density (per sq. mi.)'] = row['Pop. Density (per sq. mi.)']\n\n\t\t\t# add infant mortality to dictionary\n\t\t\tdictionary['Infant mortality (per 1000 births)'] = row['Infant mortality (per 1000 births)']\n\n\t\t\t# add GDP per capita to dictionary and keep only numbers\n\t\t\tdictionary['GDP ($ per capita) dollars'] = row['GDP ($ per capita) dollars'].split()[0]\n\n\t\t\t# append everything to a list\n\t\t\tcountries.append(dictionary)\n\n\t\treturn countries", "def remove_redundant_regions(self):\r\n self.flanking_region.attributes.id = self._flanking_region.attributes.id\r\n self.flanking_region.attributes.parent = ''\r\n for feature in self.pcr_product:\r\n feature.attributes.id = feature.attributes.parent\r\n feature.attributes.parent = ''\r\n self._flanking_region = None\r\n self.gt_seq_region = []\r\n if self.pcr_product:\r\n snp_parent = self.pcr_product[0].attributes.id\r\n else:\r\n snp_parent = self.flanking_region.attributes.id\r\n for snp in self.snp:\r\n snp.attributes.parent = snp_parent", "def populate_countries(self):\n # For each country in population.\n for name, pop in self.population.iterrows():\n p = pop['Population']\n # Get all relevant time series based on country name.\n c = self.raw_confirmed.loc[self.raw_confirmed['Country/Region'] == name].sum(numeric_only=True)\n d = self.raw_deceased.loc[self.raw_deceased['Country/Region'] == name].sum(numeric_only=True)\n r = self.raw_recovered.loc[self.raw_recovered['Country/Region'] == name].sum(numeric_only=True)\n # Create new country object.\n self.countries.append(country.Country(name, p, c, d, r))", "def trim_features():\n pass", "def country_code_update(df):\n from pycountry import countries as ct\n new_df = country_grouping(df)\n # country names in the data set that are not fit ISO standard\n completion = pd.DataFrame(np.array([['Bolivia', 'BO'],\n ['Brunei', 'BN'],\n ['Congo (Brazzaville)', 'CG'],\n ['Congo (Kinshasa)', 'CD'],\n ['Cote d\\'Ivoire', 'CI'],\n ['Holy See', 'VA'],\n ['Iran', 'IR'],\n ['Korea, South', 'KR'],\n ['Moldova', 'MD'],\n ['Russia', 'RU'],\n ['Taiwan*', 'TW'],\n ['Tanzania', 'TZ'],\n ['US', 'US'],\n ['Venezuela', 'VE'],\n ['Vietnam', 'VN'],\n ['Syria', 'SY'],\n ['Laos', 'LA'],\n ['West Bank and Gaza', 'PS'],\n ['Kosovo', 'XK'],\n ['Burma', 'MM']\n ]),\n columns=['c_name', 'c_code']\n )\n country_code_list = []\n for country_name in new_df['Country/Region']:\n try:\n if country_name in completion['c_name'].tolist():\n # print('exception covered: ', country_name)\n country_code = completion['c_code'].loc[completion['c_name'] == country_name].item()\n # identifies the cruise ships in the data set considered as a 'country'\n elif country_name == 'Diamond Princess' or country_name == 'MS Zaandam':\n country_code = 'Cruise Ship'\n else:\n country_code = ct.get(name=country_name).alpha_2\n except KeyError:\n print('no result: ', country_name)\n country_code = 'None'\n pass\n country_code_list.append(country_code)\n # print(country_code_list)\n new_df.insert(0, \"country_code\", 
country_code_list, True)\n new_df = new_df.drop(columns='Country/Region')\n unknown_index = new_df[new_df['country_code'] == 'Cruise Ship'].index\n new_df.drop(unknown_index, inplace=True) # drop when country_code = 'None', most likely are Cruise ships\n # new_df.set_index(new_df['country_code'])\n return new_df", "def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_code', 'country', 'year']).unstack(level = 0)\n df.columns = df.columns.get_level_values(1)\n df = df.rename(columns = {'NY.GDP.PCAP.KD.ZG': 'pc_GDP_growth',\n 'NY.GDP.PCAP.PP.CD': 'pc_GDP_PPP'})\n df = df.reset_index()\n df = df.loc[(df.year >= (start - 1)) & (df.year <= stop)]\n df = df.dropna()\n return df", "def get_China_exhubei(df) -> pandas.core.frame.DataFrame:\n return df[(df['countryCode']=='CN') & (df['province']!='Hubei Province') & ~(df['province'].isnull()) \\\n & ~(df['city'].isnull())]", "def test_country_name_not_in_countries(self):\n\t\tcountry_code = get_country_code('Venezuela, RB')\n\t\tself.assertEqual(country_code, 've')", "def remove_empty_sources(self):\n for source in [\"dxf\", \"edilizia\", \"easyroom\", \"merged\"]:\n if source in self and not self[source]:\n del self[source]", "def process_country_shapes():\n path_processed = os.path.join(\n SHAPEFILE_DIR, 'national_outline_{}.shp'.format(COUNTRY_ABBRV))\n\n single_country = None\n if not os.path.exists(path_processed):\n print('Working on national outline')\n path_raw = os.path.join(BASE_DIR, 'data', 'gadm36_levels_shp', 'gadm36_0.shp')\n countries = geopandas.read_file(path_raw)\n\n for name in countries.GID_0.unique():\n if not name == COUNTRY_ABBRV:\n continue\n\n print('Working on {}'.format(name))\n single_country = countries[countries.GID_0 == name]\n\n print('Excluding small shapes')\n single_country['geometry'] = single_country.apply(\n exclude_small_shapes,axis=1)\n\n print('Simplifying geometries')\n single_country['geometry'] = single_country.simplify(\n tolerance = 0.005, preserve_topology=True\n ).buffer(0.01).simplify(tolerance = 0.005,\n preserve_topology=True)\n\n print('Writing national outline to file')\n single_country.to_file(path_processed, driver='ESRI Shapefile')\n found = True\n break\n \n if not found:\n raise ValueError(f'country abbrv {COUNTRY_ABBRV} does not exist')\n\n else:\n single_country = geopandas.read_file(path_processed)\n\n return single_country", "def prune_features(self, verbose=False):\n # Collect all features and prune those occurring only once.\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)\n\n self.remove_features = []\n for k in features:\n if features[k] <= 2:\n self.remove_features.append(k)\n\n if verbose:\n print \"Number of unique features: \", len(self.remove_features)\n\n self.remove_features = set(self.remove_features)\n for k in self.utterance_features:\n self.utterance_features[k].prune(self.remove_features)\n\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)", "def remove_city(g, city_name):\n code = g.convert[city_name]\n \n for key in g.city_dict:\n \n 
old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n del g.city_dict[code]\n del g.convert[city_name]\n \n return g", "def clean_and_save_country(country_name, df):\n drop_columns = ['Lat', \n 'Long', \n 'Province/State']\n\n df.drop(columns=drop_columns, inplace = True)\n df_group = df.groupby(['Country/Region'])\n\n country = df_group.get_group(country_name)\n country.drop(columns = ['Country/Region'], inplace=True)\n country = country.agg(['sum'])\n country = country.T\n country.reset_index(level=0, inplace=True)\n country['index'] = pd.to_datetime(country['index'])\n country.rename(columns={'index': 'date'}, inplace=True)\n\n country.to_csv('../data/' + country_name + '_timeseries.csv', index=False)", "def load_country_code_data():\n name_conversion = {\n 'East Timor': 'Timor-Leste',\n 'Republic of the Congo': 'Congo (Kinshasa)',\n 'Ivory Coast': 'Cote d\\'Ivoire',\n 'Macedonia': 'North Macedonia',\n 'Myanmar': 'Burma',\n 'Republic of Serbia': 'Serbia',\n 'Taiwan': 'Taiwan*',\n 'The Bahamas': 'Bahamas',\n 'United Republic of Tanzania': 'Tanzania',\n 'United States of America': 'US'\n }\n\n shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')\n\n gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)\n\n return gdf", "def clean_and_save_worldwide(df):\n drop_columns = ['FIPS',\n 'Lat', \n 'Long_', \n 'Combined_Key', \n 'Admin2', \n 'Province_State']\n\n df.drop(columns=drop_columns, inplace=True)\n\n df_cases = df.groupby(['Country_Region'], as_index=False).sum()\n df_cases.to_csv('../data/Total_cases_worldwide.csv', index=False)", "def exclude_small_shapes(x,regionalized=False):\n # if its a single polygon, just return the polygon geometry\n if x.geometry.geom_type == 'Polygon':\n return x.geometry\n\n # if its a multipolygon, we start trying to simplify and\n # remove shapes if its too big.\n elif x.geometry.geom_type == 'MultiPolygon':\n\n if regionalized == False:\n area1 = 0.1\n area2 = 250\n\n elif regionalized == True:\n area1 = 0.01\n area2 = 50\n\n # dont remove shapes if total area is already very small\n if x.geometry.area < area1:\n return x.geometry\n # remove bigger shapes if country is really big\n\n if x['GID_0'] in ['CHL','IDN']:\n threshold = 0.01\n elif x['GID_0'] in ['RUS','GRL','CAN','USA']:\n if regionalized == True:\n threshold = 0.01\n else:\n threshold = 0.01\n\n elif x.geometry.area > area2:\n threshold = 0.1\n else:\n threshold = 0.001\n\n # save remaining polygons as new multipolygon for the\n # specific country\n new_geom = []\n for y in x.geometry:\n if y.area > threshold:\n new_geom.append(y)\n\n return MultiPolygon(new_geom)", "def getUniqCountries(df):\n # getting a unique list of the countries\n countries_uniq_list = df.geo_country.unique().tolist()\n # delete the \"nan\"\n del countries_uniq_list[0]\n # sort list alphabatically\n return sorted(countries_uniq_list)", "def removeHindiNames(data):\n\thi = r\"\\w+:hi$\"\n\tfor each in data:\n\t\tk = list(filter(lambda e: re.findall(hi, e) != 
[], each['k']))\n\t\tif k:\n\t\t\tfor e in k:\n\t\t\t\tindex = each['k'].index(e)\n\t\t\t\teach['k'].pop(index)\n\t\t\t\teach['v'].pop(index)\n\t\tyield each", "def test_countries():\n test_path = tempfile.mkdtemp()\n x_train, metadata = countries(test_path)\n try:\n assert x_train.shape == (288, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def data_clean(countryfile, folder):\n with open(r\"C:\\Users\\User\\Documents\\LUCAS2015_spectra\\LUCAS2015_Soil_Spectra_EU28\\spectra_ \" + countryfile + \" .csv\") as f:\n # maakt csv reader aan\n reader = csv.reader(f)\n # Open\n with open(folder + r\"\\spectra_ \" + countryfile + \" .csv\", 'w', newline='') as file:\n writer = csv.writer(file)\n for c, row in enumerate(reader):\n if c == 0:\n writer.writerow(row[:5] + row[205:-200:2])\n else:\n x = np.array(row[205:-200:2], dtype='float64')\n reflectance = 10 ** (-x)\n writer.writerow(row[:5] + list(reflectance))", "def removeLanguage(language):", "def load_random_countries(data):\n country_codes = list(set([elem['country'] for elem in data]))\n cc_objects = [CountryCode(data=country_code)\n for country_code\n in country_codes]\n CountryCode.objects.bulk_create(cc_objects)", "def updateCountryNames(self):\n try:\n with open('countryNameMapping.json', 'r') as file:\n name_mapping = json.loads(file.read())\n except:\n sys.exit('countryNameMapping.json file is unavailable in current directory.')\n \n for key, value in name_mapping.items():\n self.covid_df.replace(key, value, inplace=True)\n \n try:\n with open('countryNameISO2.json', 'r') as file:\n self.name_iso2_mapping = json.loads(file.read())\n except:\n print('countryNameISO2.json file is unavailable in current directory, creating file...')\n self.writeCountryCodeFile()\n print('Re-importing required JSONs...')\n self.updateCountryNames()", "def cleanup_regions(self, timestamp, bid, ofr):\n regions = []\n\n for region in self.regions:\n if not region.can_delete(timestamp, bid, ofr):\n regions.append(region)\n\n # replace the regions list\n self.regions = regions", "def apply_language_filter(self):\n all_langs = self.data.keys()\n langs_to_remove = [l for l in all_langs if not self.config.filter_language(l)]\n for lang in langs_to_remove:\n self.data.pop(lang)\n # Make sure we've not removed all languages\n if not self.data.keys():\n raise ValueError(\"Language filters leave nothing in the dataset for model '%s'!\" % self.name)\n # Keep a sorted list so that the order of things in XML is deterministic\n self.languages = sorted(list(self.data.keys()))", "def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.dataset.pop(name)", "def binarize_production_countries(df):\n df['production_countries'] = df['production_countries'].apply(lambda x: get_literal_eval(x))\n countries = {\n 'United States of America': 'usa',\n 'United Kingdom': 'uk',\n 'France': 'france'\n }\n\n for country, short in countries.iteritems():\n df['prod_{}'.format(short)] = df['production_countries'].apply(lambda x: 1 if country in x else 0)\n\n def check_other(prod_countries):\n for c in prod_countries:\n if c not in countries:\n return 1\n return 0\n\n df['prod_other'] = df['production_countries'].apply(check_other)\n\n return df", "def filter_market_country(self, bdaqmark):\n\n if self._COUNTRIES == 'UKIRE':\n nm = bdaqmark.name.split('|')[2]\n if (nm == 'UK Racing') or (nm == 'Irish Racing'):\n return True\n else:\n return False\n elif self._COUNTRIES == 'ALL':\n return True\n else:\n raise InternalError, 
'countries must be \\'UKIRE\\' or \\'ALL\\''", "def clear_features(self):\n self.features_group_list = []", "def remove_other_elements(data):\n charset = ['F','l','B','r','I','i','M','g','L','b','a','e','K','V','d','R','Z','G','A','Y','u']\n x = []\n for i in range(data.shape[0]):\n for j in range(len(data.iloc[i,1])):\n if data.iloc[i,1][j] in charset:\n x.append(i)\n break\n df = data[(True^data['Index'].isin(x))]\n df.reset_index(drop=True, inplace=True)\n return df", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def clean_data(inputFile, cutoff=0.95):\r\n ISOcodes = {'sk': 0, 'fr': 1, 'es': 2, 'de': 3, 'pl': 4}\r\n\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df['text'].replace('', np.nan, inplace=True)\r\n df.dropna(subset=['text'], inplace=True)\r\n total = len(df)\r\n englishCount, misclassifiedCount, count = 0, 0, 0\r\n hitList = []\r\n startTime = time()\r\n for line in df.iterrows():\r\n label = line[1][\"label\"]\r\n text = line[1][\"text\"]\r\n try:\r\n detectedLanguage = detect_langs(text)\r\n language = str(detectedLanguage[0]).split(\":\")\r\n if language[0] == 'en':\r\n if float(language[1]) > cutoff:\r\n englishCount += 1\r\n hitList.append(count)\r\n elif label != ISOcodes[language[0]]:\r\n if float(language[1]) > cutoff:\r\n misclassifiedCount += 1\r\n hitList.append(count)\r\n except:\r\n pass\r\n\r\n count += 1\r\n if count % 1000 == 0:\r\n percentComplete = count*100/total\r\n now = time()\r\n timeLeft = (1 - count/total)*((now-startTime)/60)/(count/total)\r\n timeLeft = str(round(timeLeft, 2)).split(\".\")\r\n minutes = timeLeft[0]\r\n seconds = (float(timeLeft[1])/100)*60\r\n print(\"Percent Complete: {}%\".format(round(percentComplete, 2)))\r\n print(\"Time Left: {}:{:02d}\".format(minutes, int(seconds)))\r\n df.drop(df.index[hitList], inplace=True)\r\n\r\n now = time()\r\n print(\"Number of English examples removed: {}\".format(englishCount))\r\n print(\"Number of misclassified examples removed: {}\".format(misclassifiedCount))\r\n print(\"Number of rows originally in dataframe: {}\".format(total))\r\n print(\"Percent of training examples classified as English: {}%\".format(round(englishCount*100/total, 2)))\r\n print(\"Percent of training examples classified as incorrect: {}%\".format(round(misclassifiedCount*100/total, 2)))\r\n print(\"New dataframe length: {}\".format(len(df)))\r\n print(\"Actual time taken in minutes: {}\".format((now-startTime)/60))\r\n\r\n return df", "def _derive_country_GB(place):\n _LOGGER.debug(\"derive_country_gb: %r\", place)\n alt = _GB_SUPPORT[\"alternative_names\"]\n try:\n derived = alt[place.name.lower()]\n except KeyError:\n derived = []\n return [DerivedName(text, \"en\") for text in derived]", "def nativity_race_with_countries(row): \n if row['nativity'] != 'United States':\n return row['nativity']\n else:\n return row['race']", "def get_country(self, data: dict):\n country_entries = data.get(\"P27\")\n if country_entries is None or len(country_entries) == 0:\n country_entries = data.get(\"P19\")\n if country_entries is None or len(country_entries) == 0:\n return [{\"country\": \"Unknown\", \"region\": \"Unknown\"}]\n countries = []\n for entry in country_entries:\n country = 
entry.get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n countries.append(self._reference.get_country(country))\n return countries", "def filter_region_graph(data, region):\r\n MetaDct = data[1]\r\n f_MetaDct = {}\r\n for idx in MetaDct:\r\n if idx != ',':\r\n if MetaDct[idx].region == region:\r\n f_MetaDct[idx] = MetaDct[idx].country\r\n return f_MetaDct", "def prune_gbm_features(schema: Dict):\n gbm_feature_types = ['binary', 'category', 'number']\n pruned_all_of = []\n for cond in schema['items']['allOf']:\n if_type = cond['if']['properties']['type']['const']\n if if_type in gbm_feature_types:\n pruned_all_of += [cond]\n schema['items']['allOf'] = pruned_all_of", "def get_countryes(db_name=_db_indicators, country_txt_file=os.path.join('Source', 'work_countries.txt')):\n imf = cmm.READ_DB(db_name=None)\n country_list = cmm.read_countries(file_name=country_txt_file)\n print('CREATE IMF: reading countries from all neede datasets...', end=' ')\n coni = sa.create_engine('sqlite+pysqlite:///{db_name}'.format(db_name=db_name))\n dbSETS=pd.read_sql('SELECT DISTINCT Dataset from {INDI_NAME}'.format(INDI_NAME=cmm.strINDI_db_name), con=coni)\n\n cntrl=list()\n\n for k, d in dbSETS.iterrows():\n try:\n cntrl.append(pd.DataFrame(imf.get_datastructure_list(d['Dataset'])['Geographical Areas']).set_index('CL_AREA_{}'.format(d['Dataset'])))\n except KeyError:\n pass\n\n # pdfC = pd.concat([pd.DataFrame(imf.get_datastructure_list(d['Dataset'])['Geographical Areas']).set_index('CL_AREA_{}'.format(d['Dataset'])) for k, d in dbSETS.iterrows() ])\n pdfC = pd.concat(cntrl)\n\n pdfC=pdfC[pdfC.index.isin(country_list)]\n pdfC = pdfC[~pdfC.index.duplicated()]\n pdfC.index.name='id'\n pdfC=pdfC.rename(columns={'Geographical Areas':'Country'})\n print('done reading countries', end='\\n')\n return pdfC\n\n\n #print(dbSETS)", "def countries_from_iso_list(countriesset):\n countries = list()\n for countryiso in sorted(list(countriesset)):\n if countryiso == WORLD:\n countries.append({\"iso3\": WORLD, \"name\": \"World\"})\n else:\n countryname = Country.get_country_name_from_iso3(countryiso)\n if countryname is None:\n continue\n countries.append({\"iso3\": countryiso, \"name\": countryname})\n return countries", "def test_remove_from_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.feature_test.remove_from_whitelist(3)\n self.assertFalse(3 in Feature(\"testing\").whitelist)", "def g_mob_preproc(g_mob: pd.DataFrame) -> pd.DataFrame:\n\n # Assert sub_region_2 exists\n assert (\n \"sub_region_2\" in g_mob.columns\n ), \"sub_region_2 no longer in google mobility data. Check renamed/redefined columns\"\n\n # Filter out county and country level aggregations\n g_mob = g_mob[g_mob[\"sub_region_2\"].isna()]\n g_mob.drop([\"census_fips_code\", \"sub_region_2\"], axis=1, inplace=True)\n\n return g_mob", "def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.train.pop(name)\n self.test.pop(name)", "def get_countries_geo_df() -> geopandas.GeoDataFrame:\n\n geo_df: geopandas.GeoDataFrame = geopandas.read_file(\n GEO_DATA_DIR / \"ne_110m_admin_0_map_units\" / \"ne_110m_admin_0_map_units.shp\"\n )\n\n geo_df = geo_df.rename(columns={\"ADMIN\": CODE}, errors=\"raise\")\n\n # Keys are what's in the geo df, values are what we want to rename them to\n # Values must match the names in the original data source. 
If you don't like those\n # names, change them there and then come back and change the values here.\n geo_df[CODE] = (\n geo_df[CODE]\n .map(\n {\n \"Central African Republic\": \"Central African Rep.\",\n \"Democratic Republic of the Congo\": \"Dem. Rep. Congo\",\n \"Equatorial Guinea\": \"Eq. Guinea\",\n \"eSwatini\": \"Eswatini\",\n \"Georgia (Country)\": \"Georgia\",\n \"Republic of Serbia\": \"Serbia\",\n \"United Arab Emirates\": \"UAE\",\n \"United Kingdom\": \"Britain\",\n \"United Republic of Tanzania\": \"Tanzania\",\n \"Western Sahara\": \"W. Sahara\",\n \"United States of America\": \"United States\",\n }\n )\n .fillna(geo_df[CODE])\n )\n geo_df = geo_df[geo_df[CODE] != \"Antarctica\"]\n\n colonial_power_main_countries = {\n \"Britain\": \"England\",\n \"France\": \"France, Metropolitan\",\n \"Norway\": \"Norway\",\n \"Papua New Guinea\": \"Papua New Guinea\",\n }\n\n is_main_country_idx = geo_df[CODE].map(colonial_power_main_countries).isna() | (\n geo_df[\"NAME_SORT\"] == geo_df[CODE].map(colonial_power_main_countries)\n )\n\n geo_df[CODE] = geo_df[CODE].where(\n is_main_country_idx, geo_df[CODE].str.cat(geo_df[\"NAME_SORT\"], sep=\" - \"),\n )\n geo_df[\"name\"] = geo_df[CODE]\n\n geo_df = geo_df[\n [\n \"featurecla\",\n \"scalerank\",\n \"LABELRANK\",\n # \"SOVEREIGNT\",\n # \"SOV_A3\",\n # \"ADM0_DIF\",\n \"LEVEL\",\n # \"TYPE\",\n CODE,\n \"name\",\n # \"ADM0_A3\",\n # \"GEOU_DIF\",\n # \"GEOUNIT\",\n # \"GU_A3\",\n # \"SU_DIF\",\n # \"SUBUNIT\",\n # \"SU_A3\",\n # \"BRK_DIFF\",\n # \"NAME\",\n # \"NAME_LONG\",\n # \"BRK_A3\",\n # \"BRK_NAME\",\n # \"BRK_GROUP\",\n \"ABBREV\",\n # \"POSTAL\",\n # \"FORMAL_EN\",\n # \"FORMAL_FR\",\n # \"NAME_CIAWF\",\n # \"NOTE_ADM0\",\n # \"NOTE_BRK\",\n \"NAME_SORT\",\n # \"NAME_ALT\",\n # \"MAPCOLOR7\",\n # \"MAPCOLOR8\",\n # \"MAPCOLOR9\",\n # \"MAPCOLOR13\",\n # \"POP_EST\",\n # \"POP_RANK\",\n # \"GDP_MD_EST\",\n # \"POP_YEAR\",\n # \"LASTCENSUS\",\n # \"GDP_YEAR\",\n \"ECONOMY\",\n \"INCOME_GRP\",\n # \"WIKIPEDIA\",\n # \"FIPS_10_\",\n # \"ISO_A2\",\n # \"ISO_A3\",\n # \"ISO_A3_EH\",\n # \"ISO_N3\",\n # \"UN_A3\",\n # \"WB_A2\",\n # \"WB_A3\",\n # \"WOE_ID\",\n # \"WOE_ID_EH\",\n # \"WOE_NOTE\",\n # \"ADM0_A3_IS\",\n # \"ADM0_A3_US\",\n # \"ADM0_A3_UN\",\n # \"ADM0_A3_WB\",\n \"CONTINENT\",\n \"REGION_UN\",\n \"SUBREGION\",\n \"REGION_WB\",\n # \"NAME_LEN\",\n # \"LONG_LEN\",\n # \"ABBREV_LEN\",\n # \"TINY\",\n # \"HOMEPART\",\n # \"MIN_ZOOM\",\n # \"MIN_LABEL\",\n # \"MAX_LABEL\",\n # \"NE_ID\",\n # \"WIKIDATAID\",\n # \"NAME_AR\",\n # \"NAME_BN\",\n # \"NAME_DE\",\n # \"NAME_EN\",\n # \"NAME_ES\",\n # \"NAME_FR\",\n # \"NAME_EL\",\n # \"NAME_HI\",\n # \"NAME_HU\",\n # \"NAME_ID\",\n # \"NAME_IT\",\n # \"NAME_JA\",\n # \"NAME_KO\",\n # \"NAME_NL\",\n # \"NAME_PL\",\n # \"NAME_PT\",\n # \"NAME_RU\",\n # \"NAME_SV\",\n # \"NAME_TR\",\n # \"NAME_VI\",\n # \"NAME_ZH\",\n \"geometry\",\n ]\n ]\n\n return geo_df", "def _clean_feature(feature: schema_pb2.Feature) -> schema_pb2.Feature:\n copy = schema_pb2.Feature()\n copy.CopyFrom(feature)\n copy.ClearField(\"name\")\n if copy.HasField(\"struct_domain\"):\n del copy.struct_domain.feature[:]\n return copy", "def cleanFeatures(merged, featureType):\n minimumInningsPitched = 10\n minimumAtBats = 10\n merged = merged[(merged['ip'] > minimumInningsPitched) | (merged['ab_y'] > minimumAtBats)]\n\n if featureType == 'historical':\n merged['is_winner'] = merged['Winner'].apply(lambda x: 0 if isinstance(x, float) else 1)\n modelData = merged[['h_x', 'bb_x', 'so_x', 'er', 'sv', 'w', 'ip', 'h_y', 
'hr_y',\n 'rbi', 'bb_y', 'so_y', 'ab_y', 'is_winner']]\n elif featureType == 'projected':\n modelData = merged[['player_id', 'league_y', 'h_x', 'bb_x', 'so_x', 'er', 'sv', 'w', 'ip', 'h_y', 'hr_y',\n 'rbi', 'bb_y', 'so_y', 'ab_y']]\n modelData.rename(columns={\n 'league_y': 'league'\n }, inplace=True)\n\n modelData.is_copy = False\n modelData = modelData.replace([np.inf, -np.inf], np.nan).fillna(0)\n\n return modelData.drop_duplicates()", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def clean_wrong_counties(sparkdf):\n\tcounties=['burgos','ávila','león','segovia','palencia','zamora','soria','salamanca','valladolid']\n\n\t# First we replace common errors (avila -> ávila, leon -> león)\n\tsparkdf=sparkdf.withColumn('county',lower(col('county'))).withColumn('county', regexp_replace('county', 'avila', 'ávila')).withColumn('county', regexp_replace('county', 'leon', 'león'))\n\n\treturn sparkdf.filter(col('county').isin(counties) == True)", "def test_unknown_countries(self):\n # Currently, there are no Countries or Regions\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)\n\n # Call the command with countries that are not recognized by the iso3166 library\n self.call_command(filename='power_plant_import/tests/data/unknown_countries.csv')\n\n # No Countries or Regions were created during the test\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)", "def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True", "def country(alpha_2_code: str) -> None:", "def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X", "def removeLegacy(self, path=None):\n\n df = pd.read_csv(path, compression='gzip')\n print(df.shape)\n gamelist = pd.read_csv('Resources/Genres.csv.gz', usecols=['appid'])\n gamelist = pd.DataFrame(gamelist.appid.unique(), columns=['appid'])\n print(gamelist)\n filter_df = pd.merge(df, gamelist, on='appid', how='inner')\n filter_df = filter_df.dropna()\n filter_df = filter_df.sort_values(['steamid', 'appid'], ascending=[True, True])\n print('done')\n print(filter_df.shape)\n print(filter_df)\n print(np.setdiff1d(df['appid'].unique(), filter_df['appid'].unique()))\n filter_df.to_csv(path, compression='gzip', columns=['steamid', 'appid', 'rating'], index=None)", "def test_default_country_unset(self):\n response = self.client.get(reverse(\"billing_info\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n response, '<option value=\"\" selected>---------</option>', html=True\n )", "def load_all_countries(self):\n core = self.core\n regionNodes = core.load_children(self.META[\"Countries\"])\n countryNodes = []\n if regionNodes:\n for 
regionNode in regionNodes:\n if core.get_base_type(regionNode) == self.META[\"Region\"]:\n countryNodes += core.load_children(regionNode)\n return countryNodes\n else:\n print(\"There are no regions in the database\")", "def remove_rows_with_non_english_movies(df):\n df = df[df['original_language'] == 'en']\n return df", "def remove_unwanted_features(self):\n\n bad_feats = []\n for f in self.features:\n\n # Exclude features with no data\n if self.valuecounts[f] == 0:\n self.messages.append(\"\"\"[INFO] Model \"%s\": Feature %s excluded because there are no datapoints for selected languages.\"\"\" % (self.name, f))\n bad_feats.append(f)\n continue\n\n # Exclude features with lots of missing data\n missing_ratio = self.missing_ratios[f]\n if int(100*(1.0-missing_ratio)) < self.minimum_data:\n self.messages.append(\"\"\"[INFO] Model \"%s\": Feature %s excluded because of excessive missing data (%d%%).\"\"\" % (self.name, f, int(missing_ratio*100)))\n bad_feats.append(f)\n continue\n\n # Exclude constant features\n if self.valuecounts[f] == 1:\n if self.remove_constant_features:\n self.constant_feature_removed = True\n self.messages.append(\"\"\"[INFO] Model \"%s\": Feature %s excluded because its value is constant across selected languages. Set \"remove_constant_features=False\" in config to stop this.\"\"\" % (self.name, f))\n bad_feats.append(f)\n continue\n else:\n self.constant_feature = True\n\n for bad in bad_feats:\n self.features.remove(bad)\n for lang in self.languages:\n if bad in self.data[lang]:\n self.data[lang].pop(bad)\n\n # Make sure there's something left\n if not self.features:\n raise ValueError(\"No features specified for model %s!\" % self.name)\n self.features.sort()\n self.messages.append(\"\"\"[INFO] Model \"%s\": Using %d features from data source %s\"\"\" % (self.name, len(self.features), self.data_filename))\n if self.constant_feature and self.rate_variation:\n self.messages.append(\"\"\"[WARNING] Model \"%s\": Rate variation enabled with constant features retained in data. 
This *may* skew rate estimates for non-constant features.\"\"\" % self.name)", "def clean_data(data):\n data.dropna(inplace=True)\n for feature in data:\n if ((feature != 'lat') and (feature != 'long') and (feature != 'date')):\n data.drop(data[(data[feature] < 0)].index, inplace=True)\n data.drop(data[(data['price'] == 0)].index, inplace=True)\n data.drop(data[(data['bedrooms'] == 0) & (data['bathrooms'] == 0.0)].index, inplace=True)\n return data", "def removeAutoSaveFilter(filter):", "def cleanup_callback(self):\n\n # Remove from include\n ghtin = self.idf.output_directory / \"GHTIn.idf\"\n if ghtin.exists():\n try:\n self.idf.include.remove(ghtin)\n ghtin.remove()\n except ValueError:\n log(\"nothing to remove\", lg.DEBUG)", "def target_extract(path, country, lat_col, lon_col, crs='EPSG:4326'):\n # Read input from path\n df = pd.read_table(path, sep=None, engine='python')\n\n # Create GeoDataFrame with geometry\n gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(\n df[lon_col], df[lat_col]), crs=crs)\n\n # Get and read the country boundaries\n world = gpd.read_file(shpreader.natural_earth(resolution='10m',\n category='cultural',\n name='admin_0_countries')\n )\n\n country_geom = world[world['ADMIN'] == country.capitalize()].geometry\n country_geom.crs = 'EPSG:4326'\n\n # Clip to records within country\n subset = gpd.clip(gdf, country_geom).reset_index(drop=True)\n # subset = gdf.cx[country_geom]\n\n return subset", "def clean(img):\n\n label_img = label(img, connectivity=2)\n props = sorted(regionprops(label_img), key=lambda x: x.area)\n clean = morphology.binary_closing(img)\n\n clean = morphology.remove_small_holes(clean)\n return morphology.remove_small_objects(clean,\n int(np.floor(props[-1].area) / 10), connectivity=2)", "def remove_regions_from_codes(self, regions_to_remove):\n reduced_regions = []\n reduced_regions_indx = []\n for indx, r in enumerate(self.Rs):\n if r in regions_to_remove:\n pass\n else:\n reduced_regions_indx.append(indx)\n reduced_regions.append(r)\n\n self.Rs = reduced_regions\n _, nCMs, nDs = self.ActiveCMs.shape\n self.reduce_regions_from_index(reduced_regions_indx)", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def countryAndTowns(countryName):\n try:\n country = db.Country.selectBy(name=countryName).getOne()\n except SQLObjectNotFound as e:\n raise type(e)(\n \"Country `{}` could not be found in the database.\".format(countryName)\n )\n\n woeidList = [x.woeid for x in country.hasTowns]\n woeidList.append(country.woeid)\n\n return woeidList", "def filter_out_geoserver_layers_that_are_registered(all_gs_layers, registered_geoserver_layer_names):\n\tdangling_gs_layers = []\n\tfor layer in all_gs_layers:\n\t\tif layer.name not in registered_geoserver_layer_names:\n\t\t\tdangling_gs_layers.append(layer)\n\n\treturn dangling_gs_layers", "def _remove_unconfirmed_transactions(frame):\n\n frame.drop(frame.loc[frame['posted'] == False].index, inplace=True)\n return frame", "def strip_ds(ds):\n if 'brain' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'brain'), :]\n print('excluded the rest of the brain from the dataset')\n if 'overlap' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'overlap'), :]\n print('excluded overlap from the dataset')\n return ds", "def __extract_series_per_country(self, countries):\n frames = []\n for country in countries:\n country_series = self.series.loc[self.series[COUNTRY_LABEL] == country]\n frames.append(country_series)\n df = pd.concat(frames)\n mask = 
[False, True, False, False]\n mask = mask + [True for i in range(4, len(list(df)))]\n return df.iloc[:,mask]", "def filter_by_country(df, cont_list):\n return df.filter((df.country.isin(cont_list)))", "def remove_unk_oov(self):\n unk_oov_nodes = []\n for node in self.nodes:\n if node.sym in [UNK, OOV]:\n unk_oov_nodes.append(node)\n self.remove_nodes(unk_oov_nodes)", "def remove_feature(self,colName):\n if isinstance(self.time_series_data,Time_Series_Data_Collection):\n for i in self.time_series_data:\n self.time_series_data[i].remove(colName)\n return self\n self.time_series_data.remove(colName)\n return self", "def remove_low_locality_tags(locality, tags_list):\n LOCALITY_THRESHOLD = 1 # Locality of 'newyorkcity' is 0.057\n tags_to_remove = []\n for tag in tags_list:\n if tag not in locality:\n tags_to_remove.append(tag)\n else:\n locality_score = locality[tag]\n if locality_score[0] < LOCALITY_THRESHOLD:\n tags_to_remove.append(tag)\n for tag in tags_to_remove:\n tags_list.remove(tag)", "def prune_taxa(self, taxa, update_taxon_set=False):\n for taxon in taxa:\n if taxon in self.taxon_seq_map:\n del self.taxon_seq_map[taxon]\n if update_taxon_set and taxon in self.taxon_set:\n self.taxon_set.remove(taxon)", "def clean_embargoed_countries(self):\r\n embargoed_countries = self.cleaned_data[\"embargoed_countries\"]\r\n if not embargoed_countries:\r\n return ''\r\n\r\n error_countries = []\r\n\r\n for country in embargoed_countries.split(','):\r\n country = country.strip().upper()\r\n if not self._is_valid_code(country):\r\n error_countries.append(country)\r\n\r\n if error_countries:\r\n msg = 'COULD NOT PARSE COUNTRY CODE(S) FOR: {0}'.format(error_countries)\r\n msg += ' Please check the list of country codes and verify your entries.'\r\n raise forms.ValidationError(msg)\r\n\r\n return embargoed_countries", "def writeCountryCodeFile(self):\n try:\n geojson = requests.get(self.GEOJSON_URL).json()\n except:\n sys.exit('GeoJSON data unavailable at source.')\n \n country_mapping = {}\n for country in geojson['features']:\n iso_2 = country['properties']['ISO_A2']\n country_name = country['properties']['ADMIN']\n country_mapping.update({country_name: iso_2})\n \n with open('countryNameISO2.json', 'w') as file:\n json.dump(country_mapping, file)", "def delete(feature_set, population):\n features = [x for x in list(feature_set)]\n pop = [x for y in population for x in y]\n min = float(\"+inf\")\n rem = features[0]\n for i in range(0, len(features)):\n x = pop.count(features[i])\n if x < min:\n min = x\n rem = features[i]\n features.remove(rem)\n return set(features)", "def unused_featurevalues():\n\n fvs = FeatureValue.objects.filter(feature__active=True)\n unused_fvs = fvs.filter(languages__isnull=True)\n natlang_only_fvs = fvs.filter(languages__language__natlang=True).exclude(languages__language__natlang=False)\n\n if not natlang_only_fvs:\n # Natlangs had no unique features so return early\n return unused_fvs\n\n # dsd\n decorate = ((fv.id, fv) for fv in set(unused_fvs) | set(natlang_only_fvs))\n sort = sorted(decorate)\n return [fv for (_, fv) in sort]", "def test_get_country_by_geo_location(self):\n pass", "def _remove_ucr_uncorrected_vals():\n global _CONN\n global _UCR_INDICATOR_DICT\n\n try:\n c = _CONN.cursor()\n c.execute(f'SELECT max(year) FROM SimpleCount WHERE fk_simplecount_indicator = 1100')\n year = c.fetchone()[0]\n c.close()\n \n ucr_indicator_str = ', '.join([str(x) for x in _UCR_INDICATOR_DICT.values()])\n sql = f'DELETE FROM SimpleCount WHERE year = {year} AND 
fk_simplecount_indicator IN ({ucr_indicator_str})'\n database.execute_simple_sql(sql)\n print(\"NOTE: Uncorrected UCR data values are removed from the database prior to inserting the corrected values.\")\n except:\n raise", "def test_country_unknown(self):\n survey = SurveyFactory.create()\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n\n 'question_text': 'ou812?',\n 'variation_id': '1',\n 'country': 'unknown'\n }\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 201\n ans = Answer.objects.latest('id')\n assert ans.country == 'UNK'", "def filterByCountry(partners) -> dict():\n countries = dict()\n for partner in partners:\n countries.setdefault(partner['country'],[]).append(partner)\n return countries", "def country_codes():\n\n iso_sel = [\n Freedom_short.iso_code,\n Freedom_short.year,\n Freedom_short.country,\n Freedom_short.region,\n Freedom_short.hf_score,\n Freedom_short.hf_rank,\n Freedom_short.hf_quartile,\n ]\n\n # Use Pandas to perform the sql query\n #Grab 2017 Data Only for Dropdown\n codes_stmt = db.session.query(*iso_sel).filter(Freedom_short.year == 2017).order_by(Freedom_short.iso_code).statement\n codes_df = pd.read_sql_query(codes_stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(codes_df[\"iso_code\"]))", "def get_shapes4country(country='South Africa'):\n # location of data\n URL = \"http://www.naturalearthdata.com/downloads/10m-cultural-vectors\"\n URL += \"/10m-admin-1-states-provinces/\"\n # Shapefiles locally?\n # TODO - update to download automatically and store in AC_tools' data directory\n shapefiles = 'ne_10m_admin_1_states_provinces_lakes'\n# shapefiles = 'ne_10m_admin_1_states_provinces'\n folder = '/mnt/lustre/users/ts551/labbook/Python_progs/'\n folder += '/AC_tools/data/shapefiles/{}'.format(shapefiles, shapefiles)\n states = geopandas.read_file(folder)\n # Just select state of interest\n choosen_states = states.query(\"admin == '{}'\".format(country))\n choosen_states = choosen_states.reset_index(drop=True)\n # Get the shapes\n shapes = zip(choosen_states.geometry, range(len(choosen_states)))\n return shapes", "def get_countries(d: pd.DataFrame, filter_: Union[dict, bool] = True):\n good = set(d['confirmed'].index)\n if filter_ and not isinstance(filter_, dict):\n filter_ = JHU_FILTER_DEFAULTS\n if filter_:\n for key, minimum in filter_.items():\n enough = d[key].index[d[key].max(axis=1) >= minimum].tolist()\n good = good.intersection(enough)\n bad = set(d['confirmed'].index).difference(good)\n # print(\"JHU data acceptable for %s\" % ','.join(good))\n # print(\"JHU data not acceptable for %s\" % ','.join(bad))\n return good", "def filter_layers(self, root, name_dict):\n for g in root.xpath(\"//svg:g\", namespaces=inkex.NSS):\n attr = inkex.addNS('label', ns='inkscape')\n if attr not in g.attrib:\n # Not a layer, skip.\n continue\n label = g.attrib[attr]\n if '%' not in label:\n # Nothing to be done, skip.\n continue\n\n # Treat %IF_???% layers\n match = re.match('.*%IF_([^%]*)%', label)\n if match is not None:\n lookup = match.groups()[0]\n try:\n var = name_dict[lookup]\n except KeyError:\n errormsg(_('Column \"' + lookup + '\" not in the csv file'))\n continue\n if var and (var.lower() not in ('0', 'false', 'no')):\n # Set group visibility to true.\n if 
'style' in g.attrib:\n del g.attrib['style']\n # Include the group.\n continue\n else:\n # Remove the group's content.\n g.clear()\n\n # Treat %UNLESS_???% layers\n match = re.match('.*%UNLESS_([^%]*)%', label)\n if match is not None:\n lookup = match.groups()[0]\n try:\n var = name_dict[lookup]\n except KeyError:\n errormsg(_('Column \"' + lookup + '\" not in the csv file'))\n continue\n if not(var) or (var.lower() in ('0', 'false', 'no')):\n # Set group visibility to true.\n if 'style' in g.attrib:\n del g.attrib['style']\n # Include the group.\n continue\n else:\n # Remove the group's content.\n g.clear()", "def test_remove_word_with_one_bifurcation(multi_trie):\n multi_trie.remove(\"howdy\")\n assert multi_trie.contains(\"howdy\") is False", "def create_countries(name_countries,origin='united kingdom',beta=0.2,gamma=0.1,I0=10,Horizon=horizon):\n countries = []\n for country in name_countries:\n if country == origin:\n c = Country(name=country,N=df_countries['population'].loc[country],beta=beta,gamma=gamma,I0=I0,H=Horizon)\n else:\n c = Country(name=country,N=df_countries['population'].loc[country],beta=beta,gamma=gamma,I0=0,H=Horizon)\n countries.append(c)\n return countries", "def test_remove_one_bifurcation_word_retains_all_other_words(multi_trie):\n multi_trie.remove(\"howdy\")\n assert multi_trie.contains(\"hey\") is True\n assert multi_trie.contains(\"hell\") is True\n assert multi_trie.contains(\"head\") is True\n assert multi_trie.contains(\"hello\") is True", "def remove_crds_filter(self, filter):\n if filter in self.filters:\n self.filters.remove(filter)", "def test_clean_country_flag(self):\n # country_flag = self.cleaned_data.get('country_flag', None)\n # field = self.fields.get(self.country_field_name, None)\n # if not field and hasattr(self, 'computed_fields'):\n # field = self.computed_fields.get(self.country_field_name, None)\n # if field.initial == self.cleaned_data.get(self.country_field_name, None)\n pass", "def test_get_countries(self):\n pass" ]
[ "0.59036386", "0.5863824", "0.5825038", "0.5797917", "0.5704074", "0.5514831", "0.55131525", "0.5411934", "0.5399952", "0.53786814", "0.53708786", "0.53605676", "0.5328933", "0.53094655", "0.51719415", "0.5159425", "0.51442134", "0.5126341", "0.51245403", "0.50806504", "0.50648564", "0.50499237", "0.5031425", "0.50312304", "0.5019079", "0.4963156", "0.49614692", "0.49172637", "0.49153462", "0.49069163", "0.49006146", "0.48944938", "0.48930264", "0.48672247", "0.4858254", "0.4851925", "0.4832946", "0.48299587", "0.4826899", "0.48219997", "0.4816298", "0.4807285", "0.48068687", "0.48063794", "0.48016682", "0.4786959", "0.4780745", "0.47753686", "0.47726667", "0.47635028", "0.4760382", "0.4755779", "0.47523683", "0.47482252", "0.4741027", "0.47375008", "0.47351193", "0.4726388", "0.4726379", "0.4718312", "0.47096246", "0.47009984", "0.47003135", "0.46820617", "0.46683022", "0.46676412", "0.46562684", "0.46538574", "0.46339026", "0.4633342", "0.4631797", "0.46210185", "0.46093464", "0.4595693", "0.45937043", "0.45917785", "0.45875767", "0.45846203", "0.4582929", "0.45828828", "0.45693716", "0.4562213", "0.45607272", "0.4554853", "0.45464066", "0.45420554", "0.4541623", "0.4540876", "0.45386636", "0.4536356", "0.4532403", "0.4521236", "0.4520293", "0.4519428", "0.45050168", "0.4504603", "0.45006797", "0.4499239", "0.4494625", "0.44938955" ]
0.70100856
0
Builds features issued from InvoiceDate. A dataframe is built per new feature and dumped into a file. Each one of the dataframes holds encoded features issued from InvoiceDate.
def data_transform_timeFeature(self): #------------------------------------------------------------------------- # All new features are built into separate dataframes # and each of them are dumped into a separate file. #------------------------------------------------------------------------- self.strprint("self.df_invoice_line : "+str(self.df_invoice_line.shape)) self._dict_timeFeature_encoder, df_customers_timeFeature \ = p5_util.time_list_feature_build(self.df_invoice_line\ , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\ ,is_verbose=self.is_verbose) #------------------------------------------------------------------------- # New time features are aggregated into a single dataframe. # Values are scaled. #------------------------------------------------------------------------- df_customers_timeFeature, self._std_scaler_timeFeature \ = p5_util.time_list_feature_restore(self._list_new_feature \ , std_scale = self._std_scaler_timeFeature\ , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose) self.strprint("df_customers_timeFeature : "+str(df_customers_timeFeature.shape)) #------------------------------------------------------------------------- # Dimension reduction thanks to PCA #------------------------------------------------------------------------- n_dim=30 root_name = 'time_pca_' # Column CustomerID is used into df_pca_reduce df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index df_customers_timeFeature, pca_timeFeature \ = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\ , p_is_scale=False, pca = self._pca_timeFeature) self.strprint(df_customers_timeFeature.shape) if self._pca_timeFeature is None: #---------------------------------------------------------------------- # Data-model is in built process with part of data-set. #---------------------------------------------------------------------- self._pca_timeFeature = pca_timeFeature p5_util.object_dump(df_customers_timeFeature\ , self._df_customers_timeFeature_fileName) else: #---------------------------------------------------------------------- # Data-model is already built and this method is called # for a customer classification. #---------------------------------------------------------------------- self._df_customers_timeFeature = df_customers_timeFeature.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_date_features(df = None, date = None):\n #TODO", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return", "def features_past_generation(features_creation_function,\n days,\n feature_names_prefix,\n data,\n indices):\n matches_outcomes=[]\n for i,match_indice in enumerate(indices):\n match=data.iloc[match_indice,:]\n past_matches=data[(data.Date<match.Date)&(data.Date>=match.Date-datetime.timedelta(days=days))]\n match_features_outcome_1=features_creation_function(1,match,past_matches)\n 
match_features_outcome_2=features_creation_function(2,match,past_matches)\n matches_outcomes.append(match_features_outcome_1)\n matches_outcomes.append(match_features_outcome_2)\n if i%100==0:\n print(str(i)+\"/\"+str(len(indices))+\" matches treated. \"+ features_creation_function.__name__ + str(days))\n train=pd.DataFrame(matches_outcomes)\n train.columns=[feature_names_prefix + \"_\" + str(days) +\"_\" +str(i) for i in range(len(train.columns))]\n \n \n \n return train", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def make_features(self, x_hits, y_hits, dow, lagged_hits, pf_age, pf_si, pf_network, pf_gender, page_ix, pf_price_cat,\n page_popularity, quarter_autocorr):\n # Split day of week to train and test\n x_dow, y_dow = tf.split(dow, [self.train_window, self.predict_window], axis=0)\n\n # Normalize hits\n mean = tf.reduce_mean(x_hits)\n std = tf.sqrt(tf.reduce_mean(tf.squared_difference(x_hits, mean)))\n norm_x_hits = (x_hits - mean) / std\n norm_y_hits = (y_hits - mean) / std\n norm_lagged_hits = (lagged_hits - mean) / std\n\n # Split lagged hits to train and test\n x_lagged, y_lagged = tf.split(norm_lagged_hits, [self.train_window, self.predict_window], axis=0)\n\n # Combine all page features into single tensor\n stacked_features = tf.stack([page_popularity, quarter_autocorr])\n flat_ucdoc_features = tf.concat([pf_age, pf_si, pf_network, pf_gender, pf_price_cat, stacked_features], axis=0) #pf_region\n ucdoc_features = tf.expand_dims(flat_ucdoc_features, 0)\n\n # Train features\n x_features = tf.concat([\n # [n_days] -> [n_days, 1]\n tf.expand_dims(norm_x_hits, -1),\n x_dow,\n x_lagged,\n # Stretch ucdoc_features to all training days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, 
[self.train_window, 1])\n ], axis=1)\n\n # Test features\n y_features = tf.concat([\n # [n_days] -> [n_days, 1]\n y_dow,\n y_lagged,\n # Stretch ucdoc_features to all testing days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, [self.predict_window, 1])\n ], axis=1)\n\n return x_hits, x_features, norm_x_hits, x_lagged, y_hits, y_features, norm_y_hits, mean, std, flat_ucdoc_features, page_ix", "def new_features(df):\n print(\"Add new features ...\")\n # distinguish Spring, Fall and pregnant females (don't care about juvenilles/unknown)\n df[\"gender_plus\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_plus\"] = \"f_gra\"\n\n df[\"gender_seasons\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_seasons\"] = \"f_gra\"\n\n # add features\n df[\"Age_To_Weight\"] = df[\"Annuli\"] / df[\"Weight\"]\n\n # Calcuate Number of recaptures\n df_captures = df[[\"ID\", \"Date\"]].groupby(\"ID\").count()\n df_captures.columns = [\"recapture_count\"]\n df_captures.reset_index(inplace=True)\n df = pd.merge(df, df_captures, how=\"outer\", on=\"ID\")\n\n # recalculate annuli\n df_min = pd.pivot_table(\n df[df.Annuli > 0],\n values=[\"Date\", \"Annuli\"],\n index=[\"ID\"],\n aggfunc={\"Date\": min, \"Annuli\": min},\n )\n df_min.columns = [\"annuli_min\", \"date_min\"]\n df_min.reset_index(inplace=True)\n\n df = pd.merge(df, df_min, how=\"outer\", on=\"ID\")\n df[\"year\"] = df.Date.map(lambda x: x.year)\n df[\"year_min\"] = df.date_min.map(lambda x: x.year)\n df[\"Annuli_orig\"] = df.Annuli\n df.Annuli = df.year - df.year_min + df.annuli_min\n df.Annuli = np.nan_to_num(df.Annuli)\n df[\"Annuli\"] = pd.to_numeric(df[\"Annuli\"], downcast=\"integer\")\n\n # Annuli Buckets\n buckets = 5\n interval = int(df[\"Annuli\"].max() / buckets)\n buckets = [i for i in range(0, df[\"Annuli\"].max() + interval, interval)]\n labels = [\"'{0} - {1}'\".format(i, i + interval) for i in buckets]\n df[\"Annuli_Group\"] = pd.cut(\n df.Annuli, buckets, labels=labels[:-1], include_lowest=True\n )\n\n return df", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = 
energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n 
#----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return", "def generate_data(filename_in, filename_out):\n file_in = open(filename_in, 'r')\n file_out = open(filename_out, 'w+')\n\n df = pd.read_csv(file_in, header=None, sep=' ', quoting=csv.QUOTE_NONE)\n x = df.iloc[:, 0].values\n y_class = df.iloc[:, -1].values\n file_in.close()\n\n y_class = np.where(y_class == 'O', 0, 1)\n\n x_features = []\n size_x = len(x)\n for i in range(3, size_x):\n if i % 5000 == 0:\n print(i, \"/\", size_x)\n x_features.append(features(x[i-2], x[i-1], x[i], y_class[i]))\n\n df_write = pd.DataFrame(x_features)\n\n tab = [x for x in range(1, NUMBER_OF_FEATURE + 2)]\n df_write.columns = tab\n write_csv(df_write, file_out)\n file_out.close()", "def create_features(\r\n df:pd.DataFrame,\r\n path_data_dir:str\r\n ) -> pd.DataFrame:\r\n # Check input.\r\n # Copy dataframe to avoid in place modification.\r\n df = df.copy()\r\n # Check file path.\r\n if not os.path.exists(path_data_dir):\r\n raise IOError(textwrap.dedent(\"\"\"\\\r\n Path does not exist:\r\n path_data_dir = {path}\"\"\".format(\r\n path=path_data_dir)))\r\n ########################################\r\n # Returned_asm\r\n # Interpretation of assumptions:\r\n # If DSEligible=0, then the vehicle is not eligible for a guarantee.\r\n # * And Returned=-1 (null) since we don't know whether or not it would have been returned,\r\n # but given that it wasn't eligible, it may have been likely to have Returned=1.\r\n # If DSEligible=1, then the vehicle is eligible for a guarantee.\r\n # * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.\r\n # * And if Returned=1 then the guarantee was purchased and the vehicle was returned.\r\n # * And if Returned=-1 (null) then the guarantee was not purchased.\r\n # We don't know whether or not it would have been returned,\r\n # but given that the dealer did not purchase, it may have been likely to have Returned=0.\r\n # Assume:\r\n # If Returned=-1 and DSEligible=0, then Returned_asm=1\r\n # If Returned=-1 and DSEligible=1, then Returned_asm=0\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n Returned_asm: Assume returned status to fill nulls as new feature.\r\n If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))\r\n If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))\"\"\"))\r\n df['Returned_asm'] = df['Returned']\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 0),\r\n 'Returned_asm'] = 1\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 1),\r\n 'Returned_asm'] = 0\r\n logger.info(\"Relationship between DSEligible and Returned:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned']].astype(str),\r\n index='DSEligible', columns='Returned',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between DSEligible and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned_asm']].astype(str),\r\n index='DSEligible', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between Returned and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['Returned', 'Returned_asm']].astype(str),\r\n index='Returned', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n ########################################\r\n # SellingLocation_lat, SellingLocation_lon\r\n # Cell takes ~1 min 
to execute if shelf does not exist.\r\n # Google API limit: https://developers.google.com/maps/documentation/geocoding/usage-limits\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n SellingLocation: Geocode.\r\n Scraping webpages for addresses and looking up latitude, longitude coordinates.\"\"\"))\r\n path_shelf = os.path.join(path_data_dir, 'sellloc_geoloc.shelf')\r\n seconds_per_query = 1.0/50.0 # Google API limit\r\n sellloc_geoloc = dict()\r\n with shelve.open(filename=path_shelf, flag='c') as shelf:\r\n for loc in df['SellingLocation'].unique():\r\n if loc in shelf:\r\n raw = shelf[loc]\r\n if raw is None:\r\n location = raw\r\n else:\r\n address = raw['formatted_address']\r\n latitude = raw['geometry']['location']['lat']\r\n longitude = raw['geometry']['location']['lng']\r\n location = geopy.location.Location(\r\n address=address, point=(latitude, longitude), raw=raw)\r\n else: \r\n url = r'https://www.manheim.com/locations/{loc}/events'.format(loc=loc)\r\n page = requests.get(url)\r\n tree = bs4.BeautifulSoup(page.text, 'lxml')\r\n address = tree.find(name='p', class_='loc_address').get_text().strip()\r\n try:\r\n components = {\r\n 'country': 'United States',\r\n 'postal_code': address.split()[-1]}\r\n location = geopy.geocoders.GoogleV3().geocode(\r\n query=address,\r\n exactly_one=True,\r\n components=components)\r\n except:\r\n logger.warning(textwrap.dedent(\"\"\"\\\r\n Exception raised. Setting {loc} geo location to `None`\r\n sys.exc_info() =\r\n {exc}\"\"\".format(loc=loc, exc=sys.exc_info())))\r\n location = None\r\n finally:\r\n time.sleep(seconds_per_query)\r\n if location is None:\r\n shelf[loc] = location\r\n else:\r\n shelf[loc] = location.raw\r\n sellloc_geoloc[loc] = location\r\n logger.info(\"Mapping SellingLocation to latitude, longitude coordinates.\")\r\n sellloc_lat = {\r\n sellloc: (geoloc.latitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n sellloc_lon = {\r\n sellloc: (geoloc.longitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n df['SellingLocation_lat'] = df['SellingLocation'].map(sellloc_lat)\r\n df['SellingLocation_lon'] = df['SellingLocation'].map(sellloc_lon)\r\n # # TODO: experiment with one-hot encoding (problems is that it doesn't scale)\r\n # df = pd.merge(\r\n # left=df,\r\n # right=pd.get_dummies(df['SellingLocation'], prefix='SellingLocation'),\r\n # how='inner',\r\n # left_index=True,\r\n # right_index=True)\r\n ########################################\r\n # JDPowersCat: One-hot encoding\r\n # TODO: Estimate sizes from Wikipedia, e.g. 
https://en.wikipedia.org/wiki/Vehicle_size_class.\r\n logger.info(\"JDPowersCat: One-hot encoding.\")\r\n # Cast to string, replacing 'nan' with 'UNKNOWN'.\r\n df['JDPowersCat'] = (df['JDPowersCat'].astype(str)).str.replace(' ', '').apply(\r\n lambda cat: 'UNKNOWN' if cat == 'nan' else cat)\r\n # One-hot encoding.\r\n df = pd.merge(\r\n left=df,\r\n right=pd.get_dummies(df['JDPowersCat'], prefix='JDPowersCat'),\r\n left_index=True,\r\n right_index=True)\r\n ########################################\r\n # LIGHT_N0G1Y2R3\r\n # Rank lights by warning level.\r\n logger.info(\"LIGHT_N0G1Y2R3: Rank lights by warning level (null=0, green=1, yellow=2, red=3).\")\r\n df['LIGHT_N0G1Y2R3'] = df['LIGHTG']*1 + df['LIGHTY']*2 + df['LIGHTR']*3\r\n ########################################\r\n # SaleDate_*: Extract timeseries features.\r\n logger.info(\"SaleDate: Extract timeseries features.\")\r\n df['SaleDate_dow'] = df['SaleDate'].dt.dayofweek\r\n df['SaleDate_doy'] = df['SaleDate'].dt.dayofyear\r\n df['SaleDate_day'] = df['SaleDate'].dt.day\r\n df['SaleDate_decyear'] = df['SaleDate'].dt.year + (df['SaleDate'].dt.dayofyear-1)/366\r\n ########################################\r\n # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n # Make cumulative informative priors (*_num*, *_frac*) for string features.\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n Make cumulative informative priors (*_num*, *_frac*) for string features.\"\"\"))\r\n # Cumulative features require sorting by time.\r\n df.sort_values(by=['SaleDate'], inplace=True)\r\n df.reset_index(drop=True, inplace=True)\r\n for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:\r\n logger.info(\"Processing {col}\".format(col=col))\r\n ####################\r\n # Cumulative count of transactions and DSEligible:\r\n # Cumulative count of transactions (yes including current).\r\n df[col+'_numTransactions'] = df[[col]].groupby(by=col).cumcount().astype(int) + 1\r\n df[col+'_numTransactions'].fillna(value=1, inplace=True)\r\n # Cumulative count of transactions that were DealShield-eligible (yes including current).\r\n df[col+'_numDSEligible1'] = df[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)\r\n df[col+'_numDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).\r\n df[col+'_fracDSEligible1DivTransactions'] = (df[col+'_numDSEligible1']/df[col+'_numTransactions'])\r\n df[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)\r\n ####################\r\n # DSEligible and Returned\r\n # Note:\r\n # * DealShield-purchased ==> Returned != -1 (not null)\r\n # * below requires\r\n # DSEligible == 0 ==> Returned == -1 (is null)\r\n # Returned != -1 (not null) ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned'] == -1).all()\r\n assert (df.loc[df['Returned']!=-1, 'DSEligible'] == 1).all()\r\n # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1\r\n df[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)\r\n df[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).\r\n 
df[col+'_fracReturnedNotNullDivDSEligible1'] = df[col+'_numReturnedNotNull']/df[col+'_numDSEligible1']\r\n df[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['Returned1'] = df_tmp['Returned'] == 1\r\n df[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)\r\n df[col+'_numReturned1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).\r\n # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.\r\n df[col+'_fracReturned1DivReturnedNotNull'] = df[col+'_numReturned1']/df[col+'_numReturnedNotNull']\r\n df[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)\r\n # Check that weighted average of return rate equals overall return rate.\r\n # Note: Requires groups sorted by date, ascending.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned']==1)/sum(df['Returned'] != -1),\r\n equal_nan=True)\r\n ####################\r\n # DSEligible and Returned_asm\r\n # NOTE:\r\n # * Below requires\r\n # DSEligible == 0 ==> Returned_asm == 1\r\n # Returned_asm == 0 ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned_asm'] == 1).all()\r\n assert (df.loc[df['Returned_asm']==0, 'DSEligible'] == 1).all()\r\n # Cumulative number of transactions that were assumed to be returned.\r\n df_tmp = df[[col, 'Returned_asm']].copy()\r\n df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1\r\n df[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)\r\n df[col+'_numReturnedasm1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of transactions that were assumed to be returned (0=mode).\r\n df[col+'_fracReturnedasm1DivTransactions'] = df[col+'_numReturnedasm1']/df[col+'_numTransactions']\r\n df[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)\r\n # Check that weighted average of assumed return rate equals overall assumed return rate.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned_asm']==1)/sum(df['Returned_asm'] != -1),\r\n equal_nan=True)\r\n # Note:\r\n # * Number of transactions that were DealShield-eligible and assumed to be returned ==\r\n # number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned\r\n # (numReturned1)\r\n return df", "def make_data(input_filepath, output_filepath):\n\n df_train = pd.read_csv(input_filepath+'train_u6lujuX_CVtuZ9i.csv', index_col=0)\n df_test = pd.read_csv(input_filepath+'test_Y3wMUE5_7gLdaTN.csv', index_col=0)\n print('Sizes', df_train.shape, df_test.shape)\n print(\"Outcome dispersion:\\n\", df_train['Loan_Status'].value_counts())\n\n\n # recode and save outcome vector\n y = df_train['Loan_Status'].map({'N': 0, 'Y': 1})\n\n del df_train['Loan_Status']\n\n # all in one dataframe\n df = pd.concat([df_train, df_test])\n 
print(df.shape)\n\n from src.features.build_features import make_features\n df = make_features(df)\n\n # Divide data on train and test again and save\n data_train = df[df.index.isin(df_train.index)]\n data_test = df[df.index.isin(df_test.index)]\n print(data_train.shape, data_test.shape)\n\n data_tmp = data_train.copy()\n data_tmp['y'] = y\n\n\n data_tmp.to_csv(output_filepath + 'train_ready.csv', index=False)\n data_test.to_csv(output_filepath + 'test_ready.csv', index=False)\n id_test = pd.DataFrame(data=df_test.index, columns=['Loan_ID'])\n id_test.to_csv(output_filepath + 'id_test.csv', index=False)", "def mistakes_dataframe(filename, outfilename):\n counter = 0\n data_list = []\n list_of_lists = []\n feature_dict = dict()\n mistake_counter = 0\n \n #The crf file only makes use of the token and assigns a label. \n #For the mistakes file, we are using the features of the gold file.\n #The features of the gold file are used together with the labels of the \n #crf file to provide the reader with a better understanding of the mistakes.\n if filename == crf_file:\n file_rows = []\n for system, gold in zip(file_to_listrows(crf_file), file_to_listrows(dev_file)):\n system_label = [system[-1]]\n line = gold + system_label\n file_rows.append(line)\n else: \n #The baseline and SVM classifier have a file with all the features \n #present, for that reason we just apply the file_to_listrows-function.\n file_rows = file_to_listrows(filename)\n \n for features in file_rows[1:]:\n counter += 1\n mistake_counter += 1\n feature_dict = {\n 'IndexInDataset': counter+1, #The number from the original \n #dataset is inserted so that the \n #tokens are easy to find.\n 'Mistake-type': None,\n 'Token': features[0],\n 'lemma': features[1],\n 'UPOS': features[2],\n 'XPOS': features[3],\n 'DepRel': features[4],\n 'head': features[5],\n 'PrevTok': features[6],\n 'PrevPOS': features[7],\n 'NextTok': features[8],\n 'NextPOS': features[9],\n 'NegPrefix': features[10],\n 'NegPostfix': features[11],\n 'NegExpList': features[12],\n 'GoldLabel': features[13],\n 'SystemLabel': features[14] #This is the label that the system gave to the token\n }\n if features[13] == 'O' and features[14] =='NEG':\n feature_dict['Mistake-type'] = 'FalsePositive'\n elif features[13] == 'NEG' and features[14] == 'O':\n feature_dict['Mistake-type'] = 'FalseNegative'\n \n data_list.append(feature_dict)\n if counter == 13567: #The last line of every classifier is empty, to prevent the code from breaking,this if-statement is inserted.\n break\n \n filename = filename.replace('-out.conll', '')\n mistakes = f'This system ({filename}) made {mistake_counter} mistakes' #The function shows the amount of mistakes the system made\n df = pd.DataFrame(data_list)\n df.to_csv(outfilename, sep='\\t')\n return data_list, df, mistakes #The list of dictionaries, together with the dataframe and the mistakes are returned", "def gen_features(log_file_path: str, out_path: str):\n raise RuntimeError(\"Feature extraction is not supported yet in AutoScheduler dialect\")", "def make_claim_df(claim_list, columns = ['Sl','Name of Bank','Name of Branch','A/C Number (15 digit)','A/C Title','Amount of Remittance in BDT','Date of A/C Credit','Remittance Received through BEFTN/RTGS','Name of Remittance Collecting/BEFTN Processing Bank','Date of Claim']):\n sl=[]\n nrbc_bank = []\n branch = []\n ac_no = []\n ac_title = []\n amount=[]\n date_account_credit=[]\n channel = []\n other_bank=[]\n claim_date=[]\n i=1\n for claim in claim_list:\n sl.append(i)\n i=i+1\n 
nrbc_bank.append(\"NRBC Bank Ltd.\")\n branch.append(claim.branch.name.upper())\n ac_no.append(claim.account_no)\n ac_title.append(claim.account_title)\n amount.append(claim.remittance_amount)\n date_account_credit.append(claim.date_account_credit)\n channel.append(claim.get_channel_display())\n other_bank.append(claim.collecting_bank.name)\n claim_date.append(claim.date_claim.date())\n dc = {\n 'SL':sl,\n 'Name of Bank':nrbc_bank,\n 'Name of Branch': branch,\n 'A/C Number': ac_no,\n 'A/C Title': ac_title,\n 'Amount of Remittance in BDT': amount,\n 'Date of A/C Credit': date_account_credit,\n 'Remittance Received Through BEFTN/RTGS': channel,\n 'Name of Remittance Processing Bank': other_bank,\n 'Date of Claim': claim_date\n }\n df = pd.DataFrame(dc)\n return df.sort_values(by=['Name of Remittance Processing Bank',])", "def extract_temporal_info(self, featurelist, strExpDate, strOnsetDate, strReceiveDate):\n \n expDateInput = self.parse_time_string(strExpDate)\n onsetDateInput = self.parse_time_string(strOnsetDate) \n receiveDate = self.parse_time_string(strReceiveDate) \n \n self.exposureDate = expDateInput\n self.onsetDate = onsetDateInput\n self.receiveDate = receiveDate\n self.exposureDateConfidence = 0\n self.onsetDateConfidence = 0\n \n ##: Obtain timex list\n timexList = timexan.annotateTimexes(self.text, expDateInput) \n \n self.sentence_full_tags = self.create_sentence_full_tags(featurelist, timexList)\n \n timexList = self.preprocess_timex_list(timexList, featurelist)\n \n ###: divide features that contain multiple timexes\n featurelist = self.divide_feature_containing_multiple_timexes(featurelist, timexList)\n \n featurelist = self.create_feature_timex_association(featurelist, timexList)\n \n timexList = self.construct_timeline(timexList, featurelist)\n \n# (expDate, onsetDate, state) = self.calculate_exposure_onset_dates(\n# timexList, featurelist, sentences, taggedSentences, expDateInput, onsetDateInput, expDate)\n \n featurelist = self.process_feature_durations(featurelist)\n \n featurelist = self.postprocess_features(featurelist)\n \n if self.exposureDateConfidence==1:\n if self.onsetDateConfidence==1:\n datesConfidence = 1\n else:\n datesConfidence = 0.9\n else:\n datesConfidence = 0.8\n \n ##: Create DocumentFeature object for return\n docFeature = DocumentFeature(featurelist, timexList, self.exposureDate, self.onsetDate, self.receiveDate, datesConfidence, expDateInput, onsetDateInput) \n \n return docFeature", "def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))", "def create_features(df,rsi_window = 14,macd_feat = [12,26,9]):\n 
df.dropna(inplace=True)\n ## day and month\n df['Date'] = pd.to_datetime(df['Date'])\n df['Month'] = df['Date'].dt.month\n df['dayowk'] = df['Date'].dt.dayofweek\n df = pd.get_dummies(data = df,columns = ['Month','dayowk'])\n \n ##Previos n-day pct_changes\n df['1day_pct'] = df['Adj Close'].pct_change()\n df['2day_pct'] = df['Adj Close'].pct_change(periods = 2)\n df['3day_pct'] = df['Adj Close'].pct_change(periods = 3)\n df['4day_pct'] = df['Adj Close'].pct_change(periods = 4)\n df['5day_pct'] = df['Adj Close'].pct_change(periods = 5)\n df['7day_pct'] = df['Adj Close'].pct_change(periods = 7)\n \n ##Cumulative sum of 1day_pct\n df['1day_pct_cs'] = df['Adj Close'].pct_change().cumsum()\n \n ##EWMA of 7, 50 and 200 days\n df['ewma_7'] = df['Adj Close'].ewm(span=7).mean()/df['Adj Close']\n df['ewma_50'] = df['Adj Close'].ewm(span=50).mean()/df['Adj Close']\n df['ewma_200'] = df['Adj Close'].ewm(span=200).mean()/df['Adj Close']\n ## Golden Cross vs Death Cross etc.\n #df['7g(50&200)'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n #df['7l(50&200)'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g50'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g200'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n \n ##RSI and MACD\n df = RSI(df,14)\n df = MACD_mod(df,nl=macd_feat[0],nh=macd_feat[1],nsig=macd_feat[2])\n \n df['day_var'] = (df['High'] - df['Low'])/df['Close']## Days variance\n df['open_close'] = (df['Open'] - df['Close'])/df['Close'] ## Days Open-Close\n df['high_close'] = (df['High'] - df['Close'])/df['Close'] ##Days High-Close\n df['open_prev_close'] = (df['Open'] - df['Close'].shift(1))/df['Close'] ## Days open - Previos Dyas Close\n \n ##Classification target\n df['target'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2) ## Target for classification\n #df['1_day_target'] = df['Adj Close'].shift(-1) - df['Adj Close'] ## Target for Regression\n #df['target2'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2)## Will the price go up intra-day\n \n ## IS the stock Overbought or Oversold based on RSI?\n df['RSI_overbought'] = df['RSI']>70\n df['RSI_oversold'] = df['RSI']<30\n \n \n #df.drop(['Open','High','Low','Close'],axis=1,inplace=True)\n# df = df.dropna()\n \n #df = df.reset_index(drop=True)\n \n ## Calculating how large the previos hot and cold streaks were\n f = 0\n df['prev_hot_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==1:\n f += 1\n if df['target'][i+1] ==0:\n df['prev_hot_streak'][i+1] = f\n f = 0\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_hot_streak'][i]==0:\n df['prev_hot_streak'][i]=df['prev_hot_streak'][i-1]\n \n \n df['prev_cold_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==0:\n f += 1\n if df['target'][i+1] ==1:\n df['prev_cold_streak'][i+1] = f\n f = 0\n\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_cold_streak'][i]==0:\n df['prev_cold_streak'][i] = df['prev_cold_streak'][i-1]\n \n ## Calculating current hot and cold streaks\n df['current_hot_streak'] = np.zeros(df.shape[0])\n df['current_cold_streak'] = np.zeros(df.shape[0])\n fhot=0\n fcold=0\n for i in range(df.shape[0]):\n if df['target'][i]==1:\n fhot += 1\n fcold = 0\n df['current_hot_streak'][i] = fhot\n elif df['target'][i]==0:\n fcold += 1\n fhot = 0\n df['current_cold_streak'][i] = fcold\n \n df['prev_hot_streak'] = df['prev_hot_streak'].shift(1)\n df['prev_cold_streak'] = 
df['prev_cold_streak'].shift(1)\n df['current_hot_streak'] = df['current_hot_streak'].shift(1)\n df['current_cold_streak'] = df['current_cold_streak'].shift(1)\n \n ## Combinations of previos streaks\n df['prev_current_hot'] = df['prev_hot_streak'] - df['current_hot_streak']\n df['prev_current_cold'] = df['prev_cold_streak'] - df['current_cold_streak']\n df['current_hot_prev_cold'] = df['current_hot_streak'] - df['prev_cold_streak']\n df['current_cold_prev_hot'] = df['current_cold_streak'] - df['prev_hot_streak']\n \n ##Calculating days since max\n current_max = df['Adj Close'][0]\n df['days_from_max'] = np.zeros(df.shape[0])\n df['pct_from_max'] = np.zeros(df.shape[0])\n #print('blah')\n for i in range(1,df.shape[0]):\n if df['Adj Close'][i] > current_max:\n current_max = df['Adj Close'][i]\n # print(current_max)\n else:\n df['days_from_max'][i] = df['days_from_max'][i-1]+1\n df['pct_from_max'][i] = (df['Adj Close'][i]-current_max)/current_max\n #print(df['days_from_max'][i])\n \n \n \n df.dropna(inplace=True)\n df = df.reset_index(drop=True)\n return df", "def generate_features(df):\n df_new = pd.DataFrame()\n \n # 6 original features\n df_new['open'] = df['open']\n df_new['open_1'] = df['open'].shift(1)\n df_new['close_1'] = df['close'].shift(1)\n df_new['high_1'] = df['high'].shift(1)\n df_new['low_1'] = df['low'].shift(1)\n df_new['volume_1'] = df['volume'].shift(1)\n \n # 50 original features\n # average price\n df_new['avg_price_5'] = df['close'].rolling(window=5).mean().shift(1)\n df_new['avg_price_30'] = df['close'].rolling(window=21).mean().shift(1)\n df_new['avg_price_90'] = df['close'].rolling(window=63).mean().shift(1)\n df_new['avg_price_365'] = df['close'].rolling(window=252).mean().shift(1)\n \n # average price ratio\n df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']\n df_new['ratio_avg_price_905_'] = df_new['avg_price_5'] / df_new['avg_price_90']\n df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']\n df_new['ratio_avg_price_30_90'] = df_new['avg_price_30'] / df_new['avg_price_90']\n df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']\n df_new['ratio_avg_price_90_365'] = df_new['avg_price_90'] / df_new['avg_price_365'] \n \n \n # average volume\n df_new['avg_volume_5'] = df['volume'].rolling(window=5).mean().shift(1)\n df_new['avg_volume_30'] = df['volume'].rolling(window=21).mean().shift(1)\n df_new['avg_volume_90'] = df['volume'].rolling(window=63).mean().shift(1)\n df_new['avg_volume_365'] = df['volume'].rolling(window=252).mean().shift(1)\n \n #average volume ratio\n df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']\n df_new['ratio_avg_volumee_5_90'] = df_new['avg_volume_5'] / df_new['avg_volume_90'] \n df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_30_90'] = df_new['avg_volume_30'] / df_new['avg_volume_90']\n df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_90_365'] = df_new['avg_volume_90'] / df_new['avg_volume_365'] \n \n \n # standard deviation of prices\n df_new['std_price_5'] = df['close'].rolling(window=5).std().shift(1)\n df_new['std_price_30'] = df['close'].rolling(window=21).std().shift(1)\n df_new['std_price_90'] = df['close'].rolling(window=63).std().shift(1) \n df_new['std_price_365'] = df['close'].rolling(window=252).std().shift(1)\n \n # standard deviation ratio of prices \n 
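# ---- Aside: minimal runnable sketch of the rolling-window features used in the surrounding snippets; not part of the original entry ----
# Lagged rolling means/stds are shifted by one bar so each row only uses information available before it
# (no lookahead), mirroring the .rolling(...).mean().shift(1) calls above. The synthetic 'close' series is
# an illustrative assumption.
import numpy as np
import pandas as pd

close = pd.Series(np.random.default_rng(0).normal(100, 1, 300).cumsum())
feats = pd.DataFrame({'close': close})
for window in (5, 21, 63):
    feats[f'avg_{window}'] = close.rolling(window).mean().shift(1)
    feats[f'std_{window}'] = close.rolling(window).std().shift(1)
feats['ratio_avg_5_21'] = feats['avg_5'] / feats['avg_21']
feats['return_1'] = close.pct_change().shift(1)
feats = feats.dropna()
# ---- end of aside ----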
df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30']\n df_new['ratio_std_price_5_90'] = df_new['std_price_5'] / df_new['std_price_90']\n df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']\n df_new['ratio_std_price_30_90'] = df_new['std_price_30'] / df_new['std_price_90'] \n df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365'] \n df_new['ratio_std_price_90_365'] = df_new['std_price_90'] / df_new['std_price_365'] \n \n \n # standard deviation of volumes\n df_new['std_volume_5'] = df['volume'].rolling(window=5).std().shift(1)\n df_new['std_volume_30'] = df['volume'].rolling(window=21).std().shift(1)\n df_new['std_volume_90'] = df['volume'].rolling(window=63).std().shift(1)\n df_new['std_volume_365'] = df['volume'].rolling(window=252).std().shift(1)\n \n #standard deviation ratio of volumes\n df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30']\n df_new['ratio_std_volume_5_90'] = df_new['std_volume_5'] / df_new['std_volume_90']\n df_new['ratio_std_volume_5_365'] = df_new['std_volume_5'] / df_new['std_volume_365'] \n df_new['ratio_std_volume_30_90'] = df_new['std_volume_30'] / df_new['std_volume_90']\n df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365']\n df_new['ratio_std_volume_90_365'] = df_new['std_volume_90'] / df_new['std_volume_365'] \n \n # return\n df_new['return_1'] = ((df['close'] - df['close'].shift(1)) / df['close'].shift(1)).shift(1)\n df_new['return_5'] = ((df['close'] - df['close'].shift(5)) / df['close'].shift(5)).shift(1)\n df_new['return_30'] = ((df['close'] - df['close'].shift(21)) / df['close'].shift(21)).shift(1)\n df_new['return_90'] = ((df['close'] - df['close'].shift(63)) / df['close'].shift(63)).shift(1) \n df_new['return_365'] = ((df['close'] - df['close'].shift(252)) / df['close'].shift(252)).shift(1)\n \n #average of return\n df_new['moving_avg_5'] = df_new['return_1'].rolling(window=5).mean()\n df_new['moving_avg_30'] = df_new['return_1'].rolling(window=21).mean()\n df_new['moving_avg_90'] = df_new['return_1'].rolling(window=63).mean()\n df_new['moving_avg_365'] = df_new['return_1'].rolling(window=252).mean()\n \n # the target\n df_new['close'] = df['close']\n df_new = df_new.dropna(axis=0)\n return df_new", "def generate_features(self, df):\n df = df.reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return extract_features(df, column_id=\"id\", impute_function=impute,\n default_fc_parameters=self.extraction_settings)", "def create_date_data(gt_id,\n target_horizon,\n experiment,\n date_features=[\"mei\", \"mjo\",\n \"pca_sst_2010\", \"pca_icec_2010\",\n \"pca_wind_hgt_10_2010\",\n \"pca_wind_hgt_100_2010\",\n \"pca_wind_hgt_500_2010\",\n \"pca_wind_hgt_850_2010\"]):\n\n time_start = time.time()\n\n # --------\n # Prepare experiment cache directory and saved file names\n # --------\n\n # Name of cache directory for storing non-submission-date specific\n # intermediate files\n cache_dir = os.path.join('results', experiment, 'shared',\n '{}_{}'.format(gt_id, target_horizon))\n # e.g., cache_dir = 'results/regression/shared/contest_precip_34w'\n\n # if cache_dir doesn't exist, create it\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n # Filenames for data file to be stored in cache_dir\n date_data_file = os.path.join(\n cache_dir, \"date_data-{}_{}.h5\".format(gt_id, target_horizon))\n\n # --------\n # Creates date_data dataframe.\n # --------\n # Get number of days 
between start date of observation period used for prediction\n # (2 weeks behind) and start date of target period (2 or 4 weeks ahead)\n start_deltas = [get_start_delta(target_horizon, gt_id)\n for gt_id in date_features]\n\n # Load masked date features\n print \"Loading date features\"\n date_data = get_date_features(gt_ids=date_features, gt_shifts=start_deltas,\n first_year=get_first_year(gt_id))\n\n print \"Loading additional date features\"\n t = time.time()\n if 'mjo' in date_features:\n # Add cosine and sine transforms of MJO phase\n mjo_phase_name = 'phase_shift'+str(get_start_delta(target_horizon, 'mjo'))\n date_data['cos_'+mjo_phase_name] = np.cos((2*np.pi*date_data[mjo_phase_name])/8)\n date_data['sin_'+mjo_phase_name] = np.sin((2*np.pi*date_data[mjo_phase_name])/8)\n print \"Elapsed: {}s\".format(time.time() - t)\n # Save date features to disk\n print \"Saving date features to \"+date_data_file\n t = time.time()\n date_data.to_hdf(date_data_file, key=\"data\", mode=\"w\")\n subprocess.call(\"chmod a+w \"+date_data_file, shell=True)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n print \"Finished generating date_data matrix.\"\n print \"Total time elapsed: {}s\".format(time.time()-time_start)\n return list(date_data)", "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def finalizeFeatures(featureSet, path_out):\r\n features = featureSet.columns.tolist()\r\n features.remove('url')\r\n features.remove('country')\r\n features.remove('file extension')\r\n for f in features:\r\n featureSet[f] = featureSet[f].astype(int)\r\n\r\n # Data Imputation by filling NA to the country and file extension column\r\n featureSet['country']=featureSet['country'].fillna('None')\r\n featureSet['file extension']=featureSet['file extension'].fillna('None')\r\n logger.info('The NAs in feature columns country and file extension have been filled with string None.')\r\n \r\n # clean country code\r\n country = featureSet.country\r\n new_country = []\r\n for i in range(len(country)):\r\n c = str(country[i])\r\n if c.upper() in iso3166.countries_by_name:\r\n new_country.append(iso3166.countries_by_name[c.upper()][1])\r\n elif len(c) == 2 and not c.isupper():\r\n new_country.append(c.upper())\r\n elif len(c) != 2 and c != 'REDACTED FOR PRIVACY':\r\n new_country.append('None')\r\n else:\r\n new_country.append(c)\r\n featureSet['country'] = new_country\r\n\r\n ## Create a new feature called Risk Indicator\r\n c1 = featureSet['no of dots'] >= 1\r\n c2 = featureSet['no of hyphen'] >= 1\r\n c3 = featureSet['no of subdir'] >= 6\r\n c4 = featureSet['contains IP'] == 1\r\n c5 = featureSet['presence of Suspicious_TLD'] == 1\r\n\r\n featureSet['risk indicator'] = np.array([c1 | c2| c3| c4| c5]).astype(int).T\r\n logger.info('New feature Risk Indicator has been created!')\r\n featureSet.to_csv(path_out)\r\n logger.info('Feature Set has been saved to the data folder.')", "def classify_incidents(in_features, date_field, report_location, 
repeatdist,\n spatial_bands, temporal_bands, out_lines_dir,\n out_lines_name, *args):\n try:\n # Fix for potential issue with xlsx files as report locations\n if not path.isdir(report_location):\n report_location = path.dirname(report_location)\n\n # Build sorted lists of band values\n spatial_bands = [float(b) for b in spatial_bands.split(';')]\n temporal_bands = [float(b) for b in temporal_bands.split(';')]\n\n repeatdist = float(repeatdist)\n spatial_bands.append(repeatdist)\n\n spatial_bands = list(set(spatial_bands))\n temporal_bands = list(set(temporal_bands))\n\n spatial_bands.sort()\n temporal_bands.sort()\n\n arcpy.env.overwriteOutput = True\n\n # Report run time used for file names\n now = dt.strftime(dt.now(), \"%Y-%m-%d_%H-%M-%S\")\n now_nice = dt.strftime(dt.now(), \"%Y-%m-%d %H:%M:%S\")\n\n # Check for and delete existing fields necessary for classification\n reset_fields(in_features)\n\n # Get name of OID field\n oidname = arcpy.Describe(in_features).oidFieldName\n\n # Get sorted list of unique incident date values\n with arcpy.da.SearchCursor(in_features, date_field) as rows:\n date_vals = [row[0] for row in rows]\n\n date_vals = list(set(date_vals))\n date_vals.sort()\n\n # Range of incident dates\n min_date = date_vals[0]\n max_date = date_vals[-1]\n\n # Keep track of origins and nrs\n oids = []\n nrids = []\n rids = []\n\n # Connecting line segments and table rows\n new_lines = []\n new_rows = []\n\n # Build empty dictionary to hold type tallies\n type_counts = {}\n for sband in spatial_bands:\n type_counts[sband] = {}\n for tband in temporal_bands:\n type_counts[sband][tband] = {'oids': [],\n 'nrids': [],\n 'rids': []}\n\n # Value lists for half life calculations\n all_distances = {}\n for sband in spatial_bands:\n all_distances[sband] = []\n\n all_lives = {}\n for tband in temporal_bands:\n all_lives[tband] = []\n\n found_connections = []\n\n # Build table of all records within the max spatial band of anther feature\n near_table = arcpy.GenerateNearTable_analysis(in_features, in_features, search_radius=temporal_bands[-1], closest='ALL', method='GEODESIC')\n\n # Identify and process relevent near features\n with arcpy.da.SearchCursor(near_table, field_names=['IN_FID', 'NEAR_FID', 'NEAR_DIST']) as nearrows:\n\n # Process each identified connection within the spatial bands\n for nearrow in nearrows:\n dist = nearrow[2]\n if not dist <= spatial_bands[-1]:\n continue\n\n links= []\n\n # Find the two features that are part of the connection\n where_clause = \"\"\"{} in ({},{})\"\"\".format(oidname, nearrow[0], nearrow[1])\n fields = [oidname, date_field, z_value_field, 'SHAPE@X','SHAPE@Y']\n with arcpy.da.UpdateCursor(in_features, field_names=fields, where_clause=where_clause) as cur_link:\n for feat in cur_link:\n # Calculate the z values of each incident in the pair\n zval = feat[1] - min_date\n feat[2] = zval.days\n cur_link.updateRow(feat)\n links.append([feat[0], feat[1], feat[3], feat[4], feat[2]])\n\n # Identify which feature is the oldest and id it as the source\n if links[0][1] > links[1][1]:\n oid, odate, ox, oy, oz = links[1]\n fid, fdate, fx, fy, fz = links[0]\n\n else:\n oid, odate, ox, oy, oz = links[0]\n fid, fdate, fx, fy, fz = links[1]\n\n # test for new connection\n if (oid, fid) in found_connections:\n continue\n\n # Calculate the days between the two dates\n datediff = fdate - odate\n daydiff = datediff.days\n\n # only process rows within defined temporal bands\n if daydiff > temporal_bands[-1]:\n continue\n\n # Identify the spatial bands that are covered 
by this relationship and create a connecting line feature\n link_found = False\n for sband in spatial_bands:\n if dist <= sband:\n for tband in temporal_bands:\n if daydiff <= tband:\n if not link_found:\n # track distances and lives for half measures\n all_distances[sband].append(dist)\n all_lives[tband].append(daydiff)\n incident_sband = sband\n incident_tband = tband\n\n link_found = True\n\n # id classification\n if oid not in type_counts[sband][tband]['oids']:\n type_counts[sband][tband]['oids'].append(oid)\n if dist <= spatial_bands[0]:\n if fid not in type_counts[sband][tband]['rids']:\n type_counts[sband][tband]['rids'].append(fid)\n elif fid not in type_counts[sband][tband]['nrids']:\n type_counts[sband][tband]['nrids'].append(fid)\n\n if link_found:\n found_connections.append((oid, fid))\n\n # create connecting line from x, y, z values of two pts\n end = arcpy.Point(X=fx, Y=fy, Z=fz)\n start = arcpy.Point(X=ox, Y=oy, Z=oz)\n vertices = arcpy.Array([start, end])\n feature = arcpy.Polyline(vertices, None, True, False)\n new_lines.append([fid, oid, dist, daydiff, incident_sband, incident_tband, feature])\n\n # Delete near table\n arcpy.Delete_management(near_table)\n\n # Create feature class for connecting lines\n sr = arcpy.Describe(in_features).spatialReference\n connectors = arcpy.CreateFeatureclass_management(out_lines_dir,\n out_lines_name,\n 'POLYLINE',\n has_z='ENABLED',\n spatial_reference=sr)\n arcpy.AddField_management(connectors, 'FEATUREID', \"LONG\")\n arcpy.AddField_management(connectors, origin_feat_field, \"LONG\")\n arcpy.AddField_management(connectors, dist_orig_field, \"FLOAT\")\n arcpy.AddField_management(connectors, 'RPTDAYS', \"FLOAT\")\n arcpy.AddField_management(connectors, spatial_band_field, \"FLOAT\")\n arcpy.AddField_management(connectors, temporal_band_field, \"FLOAT\")\n\n # Insert connecting line features from the array of values\n fields = ['FEATUREID', origin_feat_field, dist_orig_field, 'RPTDAYS', spatial_band_field, temporal_band_field, 'SHAPE@']\n with arcpy.da.InsertCursor(connectors, fields) as rows:\n for new_line in new_lines:\n rows.insertRow(new_line)\n\n # Manage classification fields\n fieldnames = []\n for sband in spatial_bands:\n for tband in temporal_bands:\n fieldnames.append('s{}t{}'.format(int(sband), int(tband)))\n\n cur_fields = [f.name for f in arcpy.ListFields(in_features)]\n for fieldname in fieldnames:\n if fieldname in cur_fields:\n arcpy.DeleteField_management(in_features, fieldname)\n arcpy.AddField_management(in_features, fieldname, 'TEXT', field_length=2)\n\n # Classify & count incidents by type\n for sband in spatial_bands:\n for tband in temporal_bands:\n band = type_counts[sband][tband]\n type_counts[sband][tband]['oids'] = [id for id in band['oids'] if id not in band['nrids'] and id not in band['rids']]\n type_counts[sband][tband]['nrids'] = [id for id in band['nrids'] if id not in band['rids']]\n\n fields = [\"OID@\", date_field, z_value_field]\n fields.extend(fieldnames)\n\n with arcpy.da.UpdateCursor(in_features, fields) as rows:\n inc_count = 0\n for row in rows:\n inc_count += 1\n\n # calc z value if missing\n if not row[2]:\n zval = row[1] - min_date\n row[2] = zval.days\n\n classifications = []\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if row[0] in type_counts[sband][tband]['nrids']:\n classifications.append('NR')\n elif row[0] in type_counts[sband][tband]['rids']:\n classifications.append('R')\n elif row[0] in type_counts[sband][tband]['oids']:\n classifications.append('O')\n else:\n 
classifications.append(None)\n row[3:] = classifications\n\n rows.updateRow(row)\n\n # Build empty dictionary to hold spatial and temporal band tallies\n band_counts = {}\n for sband in spatial_bands:\n band_counts[sband] = {}\n for tband in temporal_bands:\n band_counts[sband][tband] = 0\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if sband == spatial_bands[0]:\n band_counts[sband][tband] = len(type_counts[sband][tband]['rids'])\n else:\n band_counts[sband][tband] = len(type_counts[sband][tband]['nrids'])\n\n # Get unit of feature class spatial reference system\n try:\n unit = units[sr.linearUnitName]\n except KeyError:\n unit = ''\n\n # Get half-life and half-distance\n test_distances = []\n half_distances = {}\n for sband in spatial_bands:\n test_distances.extend(all_distances[sband])\n test_distances.sort()\n if len(test_distances) > 0:\n half_distances[sband] = test_distances[int(len(test_distances)/2)]\n else:\n half_distances[sband] = 'Not Calculated'\n\n test_lives = []\n half_lives = {}\n for tband in temporal_bands:\n test_lives.extend(all_lives[tband])\n test_lives.sort()\n if len(test_lives) > 0:\n half_lives[tband] = test_lives[int(len(test_lives)/2)]\n else:\n half_lives[tband] = 'Not Calculated'\n\n # Build report content\n report_header = ('Repeat and Near Repeat Incident Summary\\n'\n 'Created {}\\n'.format(now_nice))\n\n data_info = ('Data Source: {}\\n'\n 'Incident Date Range: {} - {}\\n'\n '# Incidents Processed: {}'.format(in_features, min_date, max_date, inc_count))\n\n## inc_type_reports = ''\n## console_type_rpts = ''\n##\n## for sband in spatial_bands:\n## for tband in temporal_bands:\n## cnt_o = len(type_counts[sband][tband]['oids'])\n## cnt_n = len(type_counts[sband][tband]['nrids'])\n## cnt_r = len(type_counts[sband][tband]['rids'])\n##\n## perc_o = \"{:.1f}\".format(100.0*float(cnt_o)/inc_count)\n## perc_n = \"{:.1f}\".format(100.0*float(cnt_n)/inc_count)\n## perc_r = \"{:.1f}\".format(100.0*float(cnt_r)/inc_count)\n##\n## inc_type_reports += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ', Count, Percentage\\n'\n## 'All Incidents,{}, 100\\n'\n## 'Originators,{},{}\\n'\n## 'Near Repeats,{},{}\\n'\n## 'Repeats,{},{}\\n\\n'.format(sband, unit, tband,\n## inc_count,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n## console_type_rpts += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ' Count Percentage\\n'\n## 'All Incidents {:^10} {:^13}\\n'\n## 'Originators {:^10} {:^13}\\n'\n## 'Near Repeats {:^10} {:^13}\\n'\n## 'Repeats {:^10} {:^13}\\n\\n'.format(sband, unit, tband,\n## inc_count, 100,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n\n half_lives_str = 'Estimated incident half-life\\n'\n half_lives_str_console = 'Estimated incident half-life\\n'\n for tband in temporal_bands:\n half_lives_str += '{} days temporal band, {:.1f} days\\n'.format(tband, half_lives[tband])\n half_lives_str_console += '{} days temporal band: {:.1f} days\\n'.format(tband, half_lives[tband])\n\n half_distance_str = 'Estimated incident half-distance\\n'\n half_distance_str_console = 'Estimated incident half-distance\\n'\n for sband in spatial_bands[1:]:\n half_distance_str += '{0} {1} spatial band, {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n half_distance_str_console += '{0} {1} spatial band: {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n\n temp_band_strs = [\"<={} days\".format(b) for b in 
temporal_bands]\n temporal_band_labels = ','.join(temp_band_strs)\n console_tband_labels = ' '.join(['{:^12}'.format(bnd) for bnd in temp_band_strs])\n\n counts_title = 'Number of Repeat and Near-Repeat incidents per spatial and temporal band\\n'\n percent_title = 'Percentage of all incidents classified as Repeat or Near-Repeat and appearing in each spatial and temporal band\\n'\n\n counts_header = ',{}\\n'.format(temporal_band_labels)\n console_counts_header = ' {}'.format(console_tband_labels)\n\n percent_header = ',{}\\n'.format(temporal_band_labels)\n console_perc_header = ' {}'.format(console_tband_labels)\n\n counts_table = \"\"\n percent_table = \"\"\n console_count = \"\"\n console_perc = \"\"\n\n row_sum = [0 for tband in temporal_bands]\n\n for sband in spatial_bands:\n\n # get temporal bands and their incident counts\n vals = [band_counts[sband][tband] for tband in temporal_bands]\n\n # Get spatial band count in each temporal band\n # Sums include counts from smaller bands\n## row_counts = [vals[tband] for tband in temporal_bands]\n## try:\n## row_sums = [sum(row_counts[0:i]) for i in xrange(1,len(row_counts)+1)]\n## except:\n## row_sums = [sum(row_counts[0:i]) for i in range(1,len(row_counts)+1)]\n##\n## row_sum = [x + y for (x, y) in zip(row_sums, row_sum)]\n row_perc = [100.0 * float(val)/inc_count for val in vals]\n\n # append counts & percentages to the table\n if sband == spatial_bands[0]:\n counts_table += '<={} {},{}\\n'.format(sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '<={} {},{}\\n'.format(sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n else:\n counts_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n\n # Write report\n reportname = path.join(report_location, \"{}_{}.csv\".format('Summary', now))\n with open(reportname, 'w') as report:\n\n report.write(report_header)\n report.write('\\n')\n report.write(data_info)\n report.write('\\n')\n report.write(half_distance_str)\n report.write('\\n')\n report.write(half_lives_str)\n report.write('\\n')\n## report.write(inc_type_reports)\n report.write(counts_title)\n report.write(counts_header)\n report.write(counts_table)\n report.write('\\n')\n report.write(percent_title)\n report.write(percent_header)\n report.write(percent_table)\n\n arcpy.SetParameterAsText(9, path.join(out_lines_dir, out_lines_name))\n arcpy.AddMessage(\"\\nView incident summary report: {}\\n\".format(reportname))\n\n arcpy.AddMessage(report_header)\n arcpy.AddMessage('')\n arcpy.AddMessage(data_info)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_distance_str_console)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_lives_str_console)\n arcpy.AddMessage('')\n## arcpy.AddMessage(console_type_rpts)\n arcpy.AddMessage(counts_title)\n 
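# ---- Aside: sketch of the "half-life"/"half-distance" estimate used in classify_incidents above; not part of the original entry ----
# The estimate is simply the middle element of the sorted observed values (for even-length lists this
# differs slightly from statistics.median, which averages the two middle values). Values are illustrative.
lives_in_days = [3, 1, 14, 7, 2, 30, 5]
lives_in_days.sort()
half_life = lives_in_days[len(lives_in_days) // 2]   # -> 5 days for this sample
# ---- end of aside ----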
arcpy.AddMessage(console_counts_header)\n arcpy.AddMessage(console_count)\n arcpy.AddMessage('')\n arcpy.AddMessage(percent_title)\n arcpy.AddMessage(console_perc_header)\n arcpy.AddMessage(console_perc)\n\n## print(\"\\nView incident summary report: {}\\n\".format(reportname))\n##\n## print(report_header)\n## print('')\n## print(data_info)\n## print('')\n## print(half_distance_str_console)\n## print('')\n## print(half_lives_str_console)\n## print('')\n#### arcpy.AddMessage(console_type_rpts)\n## print(counts_title)\n## print(console_counts_header)\n## print(console_count)\n## print('')\n## print(percent_title)\n## print(console_perc_header)\n## print(console_perc)\n\n except arcpy.ExecuteError:\n # Get the tool error messages\n msgs = arcpy.GetMessages()\n arcpy.AddError(msgs)\n print(msgs)\n\n except:\n # Return error messages for use in script tool or Python Window\n arcpy.AddError(str(sys.exc_info()[1]))\n\n # Print Python error messages for use in Python / Python Window\n print(str(sys.exc_info()[1]) + \"\\n\")", "def gen_main_df(add_list: list):\r\n # 由Bert 计算得来的 sentiment信息\r\n if 'sentiment' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sentiment')\r\n sentiment = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'daily_svm_sentiment_6class' , 'csv')[0],\r\n 'date', ['0'], 'sentiment') # 'daily_svm_sentiment_2class' '0', '1', '2', '3', '4', '5'\r\n data_manipulator.add_column(sentiment)\r\n # 中国CPI指数\r\n if 'cpi' in add_list and 'cpi' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('cpi')\r\n cpi = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'CPI', 'csv')[0],\r\n '日期', ['最新值', '涨跌幅', '近3月涨跌幅'], 'CPI')\r\n data_manipulator.add_column(cpi)\r\n # 上海银行间同业拆放利率\r\n if 'shibor' in add_list and 'shibor' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shibor')\r\n shibor = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'shibor', 'csv')[0],\r\n 'date', ['on', '1w', '2w', '1m', '3m'], 'Shibor')\r\n data_manipulator.add_column(shibor)\r\n # 上证综指\r\n if 'shangzheng' in add_list and 'shangzheng' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shangzheng')\r\n shangzheng = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng', 'csv')[0],\r\n 'trade_date', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount',\r\n 'total_mv', 'float_mv', 'total_share', 'float_share',\r\n 'free_share', 'turnover_rate', 'turnover_rate_f', 'pe',\r\n 'pe_ttm', 'pb'],\r\n 'ShangZheng')\r\n data_manipulator.add_column(shangzheng)\r\n data_manipulator.shift_columns(['ShangZheng_pct_chg'], (-1,),\r\n add=True) # name has changed to shift-1_ShangZheng_pct_chg\r\n data_manipulator.rank_df_column(['shift-1_ShangZheng_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n shangzheng_30min = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng_index_30min', 'csv')[0],\r\n 'trade_time', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount'],\r\n 'ShangZheng_30min')\r\n data_manipulator.news_df_add_column(shangzheng_30min)\r\n data_manipulator.shift_minute_columns(['ShangZheng_30min_pct_chg'], (-1,),\r\n add=True)\r\n data_manipulator.rank_minute_df_columns(['shift-1_ShangZheng_30min_pct_chg'],\r\n 
rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n\r\n # M2 广义货币量\r\n if 'm2' in add_list and 'm2' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('m2')\r\n m2 = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'M2', 'csv')[0],\r\n '月份', ['M2数量(亿元)', 'M2同比增长', 'M2环比增长'], 'M2')\r\n m2 = data_manipulator.complement_df(m2, 'date')\r\n data_manipulator.add_column(m2)\r\n\r\n # 人民币美元汇率\r\n if 'rmb_usd' in add_list and 'rmb_usd' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('rmb_usd')\r\n rmb_usd = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'RMB_USD', 'csv')[0],\r\n 'trade_date',\r\n ['bid_open', 'bid_close', 'bid_high', 'bid_low', 'ask_open',\r\n 'ask_close', 'ask_high', 'ask_low', 'tick_qty'], 'exchange')\r\n data_manipulator.add_column(rmb_usd)\r\n\r\n # 沪港通 沪深通 到岸 离岸资金流\r\n if 'fund_flow' in add_list and 'fund_flow' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('fund_flow')\r\n fund_flow = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'fund_flow', 'csv')[0],\r\n 'trade_date', ['north_money', 'south_money'], 'fund_flow')\r\n data_manipulator.add_column(fund_flow)\r\n\r\n # 债券回购日行情\r\n if 'repo' in add_list and 'repo' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('repo')\r\n repo = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'repo', 'csv')[0],\r\n 'trade_date', ['repo_maturity', 'open', 'high', 'low', 'close',\r\n 'amount'], 'repo', data_manipulator.cut_time_string,\r\n (0, 10,))\r\n repo = data_manipulator.select_col_group_by(repo, 'repo_repo_maturity', ['GC001', 'GC007', 'GC014', 'GC028'],\r\n 'date')\r\n data_manipulator.add_column(repo)\r\n\r\n # 新浪新闻\r\n if 'sina_news' in add_list and 'sina_news' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sina_news')\r\n columns_type = {'create_time': str, 'text': str}\r\n sina_news = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'sina', 'csv')[0],\r\n 'create_time', ['text', ], 'sina', dtypes=columns_type)\r\n data_manipulator.add_change_news('sina', (7, 9), columns_type, sina_news, time_col_name='create_time')\r\n data_manipulator.add_minute_change_news('sina', columns_type, sina_news, time_col_name='create_time')\r\n if 'scale' in add_list:\r\n data_manipulator.scaling_col()\r\n if 'clear' in add_list:\r\n data_manipulator.clear()", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def create_raw_data():\r\n for csv_file in glob.glob(raw_loc + 'ticket_data/PRR_*'):\r\n filestring =os.path.basename(csv_file)\r\n index_start = 1\r\n j = 0\r\n start = dt.datetime.now()\r\n print('{} file started at {}'.format(filestring, start.strftime(\"%H:%M\")))\r\n df = pd.read_csv(csv_file, encoding = 'utf-8', parse_dates = ['Tick Issue Date'])\r\n df = df.rename(columns = {c: c.replace(' ', '') for c in df.columns})\r\n try:\r\n df.to_sql('raw_ticket_data', con = conn, if_exists='append')\r\n except:\r\n print('File read error')\r\n\r\n\r\n print ('{} file 
finished in {:03.2f} minutes '.format(filestring, (dt.datetime.now()-start).seconds / 60))", "def gen_feats_file(data_feats,ids,feat_filename):\n if not os.path.isfile(feat_filename) :\n new_feats=np.empty((0,2))\n for iid in ids:\n print(iid)\n indices = [i for i, v in enumerate(data_feats[:,0]) if iid in v]\n new_feats=np.vstack((new_feats,data_feats[indices,:]))\n np.savetxt(feat_filename,new_feats,fmt=\"%s\")", "def get_srs_features(df):\n\n #test result classified by labels\n li = df.TestResultsCode.tolist()\n labels = [ item.split('_') for item in li ]\n for item in labels:\n if len(item)==4:\n add = item[0]+item[1]\n item = item.insert( 0, add )\n for item in labels:\n if 'not' in item:\n item.remove('not')\n if 'detected' in item:\n item.remove('detected')\n\n\n #one-hot encode the test results\n disease = [ la[0] for la in labels ]\n spread = [ la[1] for la in labels ]\n risk = [ la[2] for la in labels ]\n\n disease_encode = pd.Series( disease ).str.get_dummies()\n spread_encode = pd.Series( spread ).str.get_dummies()\n risk_encode = pd.Series( risk ).str.get_dummies()\n\n disease_encode = pd.DataFrame( disease_encode )\n spread_encode = pd.DataFrame( spread_encode )\n risk_encode = pd.DataFrame( risk_encode)\n\n #interate one hot encoding of test results back to df\n df=df.drop(['PatientID', 'Address', 'CurrentLocation'],axis=1)\n df2 = df\n df2 = df2.drop(columns = 'TestResultsCode')\n\n results = pd.concat( [risk_encode, spread_encode, disease_encode], axis=1 )\n results = results.drop(['NotSpreader', 'NotatRisk'], axis=1)\n\n from sklearn.model_selection import train_test_split\n\n X_train, X_val, y_train, y_val = train_test_split( df2, results, test_size=0.33, random_state=33 ) #tr is test results numerically coded\n X_val, X_test, y_val, y_test = train_test_split( X_val, y_val , test_size=0.4, random_state=33)\n\n #REMOVED LOCATION FROM FEATURES\n\n # choosing from those features\n cols =['AgeGroup','AvgHouseholdExpenseOnPresents','AvgHouseholdExpenseOnSocialGames',\n 'AvgHouseholdExpenseParkingTicketsPerYear','AvgMinSportsPerDay','AvgTimeOnSocialMedia','AvgTimeOnStuding','BMI',\n 'DisciplineScore','HappinessScore','Job','NrCousins','StepsPerYear','SyndromeClass','TimeOnSocialActivities']\n\n X_train_sfs = X_train[cols]\n X_train_sfs = X_train_sfs.fillna(X_train_sfs.mean())\n\n from sklearn.neighbors import KNeighborsClassifier\n from mlxtend.feature_selection import SequentialFeatureSelector as SFS #import from mlxtend library\n knn = KNeighborsClassifier(n_neighbors=2) # ml_algo used = knn\n sfs = SFS(knn,\n k_features=10,\n forward=True, # if forward = True then SFS otherwise SBS\n floating=False,\n verbose=2,\n scoring='accuracy'\n )\n\n\n #after applying sfs fit the data:\n sfs.fit(X_train_sfs, y_train)\n\n return sfs.k_feature_names_", "def _create_ts_features(df, tscol):\r\n df = copy.deepcopy(df)\r\n dt_adds = []\r\n try:\r\n df[tscol+'_hour'] = df[tscol].dt.hour.fillna(0).astype(int)\r\n df[tscol+'_minute'] = df[tscol].dt.minute.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_hour')\r\n dt_adds.append(tscol+'_minute')\r\n except:\r\n print(' Error in creating hour-second derived features. 
Continuing...')\r\n try:\r\n df[tscol+'_dayofweek'] = df[tscol].dt.dayofweek.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofweek')\r\n if tscol+'_hour' in dt_adds:\r\n DAYS = dict(zip(range(7),['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']))\r\n df[tscol+'_dayofweek'] = df[tscol+'_dayofweek'].map(DAYS)\r\n df.loc[:,tscol+'_dayofweek_hour_cross'] = df[tscol+'_dayofweek'] +\" \"+ df[tscol+'_hour'].astype(str)\r\n dt_adds.append(tscol+'_dayofweek_hour_cross')\r\n df[tscol+'_quarter'] = df[tscol].dt.quarter.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_quarter')\r\n df[tscol+'_month'] = df[tscol].dt.month.fillna(0).astype(int)\r\n MONTHS = dict(zip(range(1,13),['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',\r\n 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']))\r\n df[tscol+'_month'] = df[tscol+'_month'].map(MONTHS)\r\n dt_adds.append(tscol+'_month')\r\n #### Add some features for months ########################################\r\n festives = ['Oct','Nov','Dec']\r\n name_col = tscol+\"_is_festive\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in festives else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n summer = ['Jun','Jul','Aug']\r\n name_col = tscol+\"_is_summer\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in summer else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n winter = ['Dec','Jan','Feb']\r\n name_col = tscol+\"_is_winter\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in winter else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n cold = ['Oct','Nov','Dec','Jan','Feb','Mar']\r\n name_col = tscol+\"_is_cold\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in cold else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n warm = ['Apr','May','Jun','Jul','Aug','Sep']\r\n name_col = tscol+\"_is_warm\"\r\n df[name_col] = 0\r\n df[name_col] = df[tscol+'_month'].map(lambda x: 1 if x in warm else 0).values\r\n df[name_col].fillna(0,inplace=True)\r\n dt_adds.append(name_col)\r\n #########################################################################\r\n if tscol+'_dayofweek' in dt_adds:\r\n df.loc[:,tscol+'_month_dayofweek_cross'] = df[tscol+'_month'] +\" \"+ df[tscol+'_dayofweek']\r\n dt_adds.append(tscol+'_month_dayofweek_cross')\r\n df[tscol+'_year'] = df[tscol].dt.year.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_year')\r\n today = date.today()\r\n df[tscol+'_age_in_years'] = today.year - df[tscol].dt.year.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_age_in_years')\r\n df[tscol+'_dayofyear'] = df[tscol].dt.dayofyear.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofyear')\r\n df[tscol+'_dayofmonth'] = df[tscol].dt.day.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_dayofmonth')\r\n df[tscol+'_weekofyear'] = df[tscol].dt.weekofyear.fillna(0).astype(int)\r\n dt_adds.append(tscol+'_weekofyear')\r\n weekends = (df[tscol+'_dayofweek'] == 'Sat') | (df[tscol+'_dayofweek'] == 'Sun')\r\n df[tscol+'_typeofday'] = 'weekday'\r\n df.loc[weekends, tscol+'_typeofday'] = 'weekend'\r\n dt_adds.append(tscol+'_typeofday')\r\n if tscol+'_typeofday' in dt_adds:\r\n df.loc[:,tscol+'_month_typeofday_cross'] = df[tscol+'_month'] +\" \"+ df[tscol+'_typeofday']\r\n dt_adds.append(tscol+'_month_typeofday_cross')\r\n except:\r\n print(' Error in creating date time derived features. 
Continuing...')\r\n print(' created %d columns from time series %s column' %(len(dt_adds),tscol))\r\n return df, dt_adds", "def sequence_ingest(self,sequence):\n\t\tdata=self.data\n\t\t\n\t\tcounter=0\n\n\t\tfor item in data[sequence]:\n\t\t\tdatestring=item['specimenDate']\n\t\t\tdate=fetchdate(datestring)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=item['areaCode'])\n\t\t\trow.areaname=item['areaName']\n\t\t\trow.dailyLabConfirmedCases=item['dailyLabConfirmedCases']\n\t\t\trow.totalLabConfirmedCases=item['totalLabConfirmedCases']\n\t\t\trow.changeInDailyCases=item['changeInDailyCases']\n\t\t\trow.dailyTotalLabConfirmedCasesRate=item['dailyTotalLabConfirmedCasesRate']\n\t\t\trow.previouslyReportedDailyCases=item['previouslyReportedDailyCases']\n\t\t\trow.previouslyReportedTotalCases=item['previouslyReportedTotalCases']\n\t\t\trow.changeInTotalCases=item['changeInTotalCases']\n\t\t\trow.save()\n\t\t\tcounter+=1\n\t\tlog.info(f'Processed: {counter} rows')", "def make_data(dataFname, enc, features=None):\n\n origData = pandas.read_csv(dataFname)\n ids = origData['id']\n\n # remove unused columns\n if 'Unnamed: 0' in origData.columns: del origData['Unnamed: 0']\n del origData['id']\n\n # remove \"data leakage\" columns\n for f in prohobitedFeatures:\n del origData[f]\n\n # separate into X & y values\n xData = origData[[col for col in origData.columns if not col=='loss']]\n set_vars_as_type(xData, discreteVars, object)\n yVec = origData.loss if 'loss' in origData.columns else None\n\n # try f528 - f274\n xData['f528f274'] = xData['f528'] - xData['f274']\n\n # encode the categorical features f776 and f777\n if enc is None:\n enc = OneHotEncoder(n_values=[2, 2])\n enc.fit(xData[['f776', 'f777']])\n\n xData[['f776_isZero', 'f776_isOne', 'f777_isZero', 'f777_isOne']] = pandas.DataFrame(enc.transform(xData[['f776', 'f777']]).toarray())\n del xData['f776']\n del xData['f777']\n\n print_missing_values_info(origData)\n\n # feature selection\n if features:\n filteredXData = xData[features]\n else: # use ALL features\n filteredXData = xData\n\n return filteredXData, yVec, ids, enc", "def main():\n \n Y1, Y2 = 2005, 2017 ### range with coordinates supplied in pre-2018 generated archive\n\n if len(sys.argv) > 1 and int(sys.argv[1]) > 0:\n Y1 = int(sys.argv[1])\n \n if len(sys.argv) > 2 and int(sys.argv[2]) > Y1:\n Y2 = int(sys.argv[2])\n \n with open('data/audit.log','w') as output:\n for Y in range(Y1, Y2):\n df = pd.read_csv('data/{}.csv'.format(Y), low_memory = False)\n output.write('\\n--- {} --------------------\\n'.format(Y))\n\n # remove `deleted` records\n df['deleted'] = df['deleted'].apply(yes_no)\n df = df[df['deleted'] == 0]\n\n # remove misc misdemeanors\n df = df[~df['category'].isin(drop)]\n\n # validate date and expand into Y,N,D,W,H\n df['dt'] = df['incident_date'].apply(extract)\n df = df[~df['dt'].isnull()]\n\n # convert from plane state to longitude-latitude\n df['ll'] = df.apply(to_lnglat, axis = 1)\n\n # init features\n features = df.loc[:,['category','stat','address','city','zip']]\n features['id'] = df['incident_id']\n dt = ['year','month','day','weekday','hour']\n for i in range(len(dt)):\n features[dt[i]] = df['dt'].apply(lambda x: x[i] )\n\n features['lng'] = df['ll'].apply(lambda x: x[0])\n features['lat'] = df['ll'].apply(lambda x: x[1])\n\n features['gang'] = df['gang_related'].apply(yes_no)\n features['category'] = df['category'].apply(collapse)\n cat = 
set(features.groupby(['category']).size().reset_index(name='count')['category'].tolist())\n output.write('Categories: {}\\n'.format(len(cat)))\n\n output.write('Date miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['year'] > 2000) & (~features['weekday'].isnull())])/len(features))))\n output.write('Location miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['zip'] > 0) | (features['lat'] > 0)])/len(features))))\n\n # keep records with valid date\n features['date'] = df['dt'].apply(lambda x: datetime.date(x[0], x[1], x[2]))\n features = features[(features['year'] > 2000) & (~features['weekday'].isnull())]\n output.write('Time miss: {:.4f}%\\n'.format(100 * len(features[features['hour'] == -1])/len(features)))\n\n # potential `time-unknown` issue\n output.write('Hour ZERO: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 0])/len(features)))\n output.write('Hour NOON: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 12])/len(features)))\n\n features = features[(features['zip'] > 0) | (features['lat'] > 0)]\n\n # get the best possible coordinates + zipcode assessment\n features[['zip','lng','lat']] = features[['zip','lng','lat']].apply(fix_location, axis = 1)\n output.write('Failed location: {:.4f}%\\n'.format(100 * len(features[features['zip'].isnull()])/len(features)))\n features = features[~features['zip'].isnull()]\n features['zip'] = df['zip'].apply(lambda x: str(x)[:5])\n \n # normalize city attr\n features = features.join(zipcodes[['zip','city']].set_index('zip'), on = 'zip', lsuffix = '_orig', rsuffix = '')\n features.loc[features['city'].isnull(), 'city'] = features.loc[features['city'].isnull(), 'city_orig']\\\n .apply(lambda x: x if type(x) == float else ' '.join([l[0].upper() + l[1:] for l in x.split()]))\n\n # reduce to LA bounding-box\n features = features[(features['lng'] > -119) & (features['lng'] < -116)]\n features = features[(features['lat'] > 32) & (features['lat'] < 35)]\n\n # save csv\n features[fields].to_csv('data/F{}.csv'.format(Y), index = False)\n features[fields].to_json('data/F{}.json'.format(Y), orient = 'records')\n output.close()", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = 
get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def create_train_feats():\n features = read_process_labelled(AUDIO_DIR, debug=True)\n df = pd.DataFrame(features)\n p = './Features/dataset_features/data_features.csv'\n df.to_csv(p, index=False)\n return p", "def _process_features(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature'))\n logger.info(\"building labels for features\")\n\n line_counter = 0\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (feature_id, dbxref_id, organism_id, name, uniquename,\n residues, seqlen, md5checksum, type_id, is_analysis,\n timeaccessioned, timelastmodified) = line\n\n feature_key = feature_id\n if re.search(r'[\\|\\s\\[\\]\\{\\}\\\\<\\>]', uniquename):\n # some uniquenames have pipes or other nasty chars!\n # for example: FB||||FBrf0133242|Hugh-u1\n feature_id = self._makeInternalIdentifier(\n 'feature', feature_key)\n else:\n feature_id = 'FlyBase:'+uniquename\n self.idhash['feature'][feature_key] = feature_id\n self.feature_types[feature_key] = type_id\n self.label_hash[feature_id] = name\n\n if feature_key not in self.feature_to_organism_hash:\n self.feature_to_organism_hash[feature_key] = set()\n self.feature_to_organism_hash[feature_key].add(organism_id)\n\n # HACK - FBgn are genes, and therefore classes,\n # all else be individuals\n is_gene = False\n if re.search(r'(FBgn|FBog)', feature_id):\n self.idhash['gene'][feature_key] = feature_id\n is_gene = True\n elif re.search(r'FBa[lb]', feature_id):\n self.idhash['allele'][feature_key] = feature_id\n elif re.search(r'FBt[ip]', feature_id):\n self.idhash['feature'][feature_key] = feature_id\n\n if self.testMode and \\\n int(feature_key) not in self.test_keys['gene'] + \\\n self.test_keys['allele'] + self.test_keys['feature']:\n continue\n\n # now do something with it!\n # switch on type_id\n if name.strip() == '':\n name = uniquename\n\n type_key = type_id\n type_id = self.idhash['cvterm'][type_key]\n\n # skip some features by type\n types_to_skip = [\n 'SO:0000316', # CDS\n 'SO:0000696', # oligos\n 'SO:0000358', # polypeptide\n 'SO:0000234', # transcripts\n ]\n\n type_keys_to_skip = [\n 596, # pcr_product\n 57096, # mature peptide\n 57097, # signal_peptide\n 57270, # repeat masker\n 58210, # alignment\n 59643, # cDNA_clone\n 60006, # uncharacterized_change_in_nucleotide_sequence\n 61351, # oligo\n 61467, # polypeptide_domain\n 257, # exon\n 286, # intron\n ]\n\n organisms_to_skip = [\n 2 # computational result\n ]\n\n if type_id in types_to_skip \\\n or int(type_key) in type_keys_to_skip\\\n or int(organism_id) in organisms_to_skip:\n continue\n\n line_counter += 1\n\n if int(type_key) == 604: # RNAi_reagent\n # TODO add other reagents?\n 
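# ---- Aside: minimal runnable sketch of the manual TF-IDF weighting in featurize above; not part of the original entry ----
# Each document row is weighted as tf(i, d) / max_k(d) * log10(N / df(i)) and stored as a 1-row sparse
# matrix, as in the snippet. Requires scipy; the tiny two-document corpus is an illustrative assumption.
import math
from collections import Counter
from scipy.sparse import csr_matrix

docs = [['comedy', 'drama'], ['drama', 'drama', 'horror']]
vocab = {t: i for i, t in enumerate(sorted({t for d in docs for t in d}))}
df_counts = Counter(t for d in docs for t in set(d))        # document frequency per term
rows = []
for doc in docs:
    tf = Counter(doc)
    max_k = tf.most_common(1)[0][1]                         # count of the most frequent term in this doc
    data, cols = [], []
    for term, count in tf.items():
        weight = (count / max_k) * math.log10(len(docs) / df_counts[term])
        data.append(weight)
        cols.append(vocab[term])
    rows.append(csr_matrix((data, ([0] * len(cols), cols)), shape=(1, len(vocab))))
# ---- end of aside ----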
self.idhash['reagent'][feature_key] = feature_id\n\n # deal with the taxonomy\n # only get taxa for features that are actually used in our set\n tax_internal_id = self._makeInternalIdentifier(\n 'organism', organism_id)\n if organism_id not in self.checked_organisms:\n # will get the NCBITax if necessary\n tax_id = self._get_organism_id(organism_id)\n self.checked_organisms.add(organism_id)\n else:\n tax_id = self.idhash['organism'][organism_id]\n\n tax_label = self.label_hash.get(tax_id)\n if not re.search(r'FBog', feature_id) \\\n and re.search(r'Drosophila', tax_label):\n # make only fly things leaders\n model.makeLeader(feature_id)\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if is_gene:\n model.addClassToGraph(\n feature_id, name, type_id)\n g.addTriple(\n feature_id, model.object_properties['in_taxon'],\n tax_id)\n else:\n if re.search('FBa[lb]', feature_id):\n type_id = Genotype.genoparts['allele']\n model.addIndividualToGraph(feature_id, name, type_id)\n\n # stop adding what we do not appreciate\n # if is_obsolete == 't':\n # if is_gene:\n # model.addDeprecatedClass(feature_id)\n # else:\n # model.addDeprecatedIndividual(feature_id)\n # self.deprecated_features.add(feature_key)\n\n model.addClassToGraph(tax_id)\n if tax_id != tax_internal_id:\n model.addEquivalentClass(tax_id, tax_internal_id)\n\n model.addComment(\n feature_id,\n self._makeInternalIdentifier('feature', feature_key))\n\n # TODO save checked_organisms fbid to ncbitax mapping to\n # a local file to speed up subsequent searches\n\n return", "def preprocess_data_pandas(raw_data_file: str, features_file: str, cols_to_save: List[str]) -> None:\n\n df = pd.read_csv(raw_data_file)\n\n df.sort_values(by=[\"id\", \"loan_date\"], inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n df[\"loan_date\"] = pd.to_datetime(df['loan_date'], errors='coerce')\n df[\"birthday\"] = pd.to_datetime(df['birthday'], errors='coerce')\n df[\"job_start_date\"] = pd.to_datetime(df['job_start_date'], errors='coerce')\n\n df_grouped_by_id = df.groupby('id')\n\n # Feature nb_previous_loans\n df[\"nb_previous_loans\"] = df_grouped_by_id[\"loan_date\"].rank(method=\"first\") - 1\n\n # Feature avg_amount_loans_previous\n df[\"avg_amount_loans_previous\"] = df_grouped_by_id[\"loan_amount\"].transform(lambda x: x.expanding().mean())\n\n # Feature age\n df['age'] = (pd.to_datetime('today').normalize() - df['birthday']).dt.days // 365\n\n # Feature years_on_the_job\n df['years_on_the_job'] = (pd.to_datetime('today').normalize() - df['job_start_date']).dt.days // 365\n\n # Feature flag_own_car\n df['flag_own_car'] = df.flag_own_car.apply(lambda x: 0 if x == 'N' else 1)\n\n df = df[cols_to_save]\n df.to_csv(features_file, index=False)", "def generate_new_features(data):\n utils.save_log('{0} :: {1}'.format(\n generate_new_features.__module__,\n generate_new_features.__name__))\n\n data = create_feature_is_credit_debit(data)\n data = create_feature_value_category(data)\n data = create_features_from_transaction_timestamp(data)\n data = create_feature_based_on_spent_by_timestamp(data)\n list_of_categories = config.feature_categorical_to_check_spent_value\n data = create_features_avg_ratio_value_by_categories(data,\n list_of_categories)\n return data", "def executeFeatures(dfIn, train = True):\n\n if train == True:\n dfOut = dfIn['TARGET'] #update this with numerical columns that don't need cleaning\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = 
createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = cleanNames(dfOut)\n dfOut = createPolyFeatures(dfOut)\n else:\n dfOut = dfIn['SK_ID_CURR'] ## tags from test set\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = dfOut.drop('CODE_GENDER', axis = 1) ## Need to fix this\n #print(dfOut.columns)\n dfOut = cleanNamesTest(dfOut)\n dfOut = createPolyFeatures(dfOut)\n\n return dfOut", "def create_frequency_feature(temp_df):\n start = time.time()\n cat_dfs = []\n for num in np.arange(1080,0,-30):\n temp_df.loc[temp_df['event_time'] > int(num), 'event_time'] = np.nan\n for col in ['event_name', 'specialty', 'plan_type']:\n cat_df = temp_df.groupby([\"id\", col],).agg({\"event_time\": 'count'}).unstack(level=col)\n cat_df.columns = ['__'.join(['frequency', col, name, str(int(num))]) for name in cat_df.columns.droplevel()]\n cat_dfs.append(cat_df)\n res_df = pd.concat(cat_dfs, axis = 1)\n res_df = res_df.fillna(0)\n end = time.time()\n print('time taken (in secs) for frequency feature creation:', end-start)\n \n res_idx, res_col = np.array(res_df.index), np.array(res_df.columns)\n res_data = get_sparse_matrix(res_df.values)\n \n del res_df\n # get data\n return res_idx, res_col, res_data", "def getUserHistFeatures(transaction_list, coupon_dict, model_start_date, purchase_date):\n feat_header = [\"NoOfPurchases\", \"DaysSinceLastPurchase\", \"NoOfPurchasesLastweek\", \"NoOfPurchasesLast15Days\", \"NoOfPurchasesLast30Days\", \"NoOfPurchasesLast60Days\", \"NoOfPurchasesLast90Days\", \"NoOfPurchasesLast180Days\", \"DaysSincePrevPurchase\", \"NoOfPurchasesPrevweek\", \"NoOfPurchasesPrev15Days\", \"NoOfPurchasesPrev30Days\", \"NoOfPurchasesPrev60Days\", \"NoOfPurchasesPrev90Days\", \"NoOfPurchasesPrev180Days\"]\n\n # getting number of purchases #\n feat_list = [len(transaction_list)]\n\n # initializing variables #\n purchase_small_area_name_dict = {}\n puchase_date_list = []\n capsule_text_dict = {}\n genre_name_dict = {}\n price_rate_list = []\n catalog_price_list = []\n discount_price_list = []\n dispperiod_list = []\n valid_period_list = []\n usable_date_mon_list = {}\n usable_date_tue_list = {}\n usable_date_wed_list = {}\n usable_date_thu_list = {}\n usable_date_fri_list = {}\n usable_date_sat_list = {}\n usable_date_sun_list = {}\n usable_date_hol_list = {}\n usable_date_before_hol_list = {}\n coupon_large_area_name_dict = {}\n coupon_small_area_name_dict = {}\n coupon_ken_name_dict = {}\n days_since_last_purchase = 9999\n last_week_purchase = 0\n last_fifteendays_purchase = 0\n last_thirtydays_purchase = 0\n last_sixtydays_purchase = 0\n last_nintydays_purchase = 0\n\tlast_oneeightydays_purchase = 0\n\tdays_since_prev_purchase = 9999\n\tprev_week_purchase = 0\n prev_fifteendays_purchase = 0\n prev_thirtydays_purchase = 0\n prev_sixtydays_purchase = 0\n prev_nintydays_purchase = 0\n prev_oneeightydays_purchase = 0\n for transaction in transaction_list:\n diff_days = (model_start_date - datetime.datetime.strptime(transaction['I_DATE'], \"%Y-%m-%d %H:%M:%S\").date()).days\n if diff_days < days_since_last_purchase:\n days_since_last_purchase = diff_days\n if diff_days <= 7:\n last_week_purchase += 1\n if diff_days <= 15:\n 
last_fifteendays_purchase += 1\n            if diff_days <= 30:\n                last_thirtydays_purchase += 1\n            if diff_days <= 60:\n                last_sixtydays_purchase += 1\n            if diff_days <= 90:\n                last_nintydays_purchase += 1\n            if diff_days <= 180:\n                last_oneeightydays_purchase += 1\n\n            diff_days = (purchase_date - datetime.datetime.strptime(transaction['I_DATE'], \"%Y-%m-%d %H:%M:%S\").date()).days\n            if diff_days < days_since_prev_purchase:\n                days_since_prev_purchase = diff_days\n            if diff_days <= 7:\n                prev_week_purchase += 1\n            if diff_days <= 15:\n                prev_fifteendays_purchase += 1\n            if diff_days <= 30:\n                prev_thirtydays_purchase += 1\n            if diff_days <= 60:\n                prev_sixtydays_purchase += 1\n            if diff_days <= 90:\n                prev_nintydays_purchase += 1\n            if diff_days <= 180:\n                prev_oneeightydays_purchase += 1\n\n            coupon_id_dict = coupon_dict[ transaction['COUPON_ID_hash'] ]\n            purchase_small_area_name_dict[transaction['SMALL_AREA_NAME']] = purchase_small_area_name_dict.get( transaction['SMALL_AREA_NAME'],0) + 1\n            capsule_text_dict[ coupon_id_dict['CAPSULE_TEXT'] ] = capsule_text_dict.get( coupon_id_dict['CAPSULE_TEXT'], 0) + 1\n            genre_name_dict[ coupon_id_dict['GENRE_NAME'] ] = genre_name_dict.get( coupon_id_dict['GENRE_NAME'],0 ) + 1\n            coupon_large_area_name_dict[ coupon_id_dict['large_area_name'] ] = coupon_large_area_name_dict.get( coupon_id_dict['large_area_name'],0 ) + 1\n            coupon_small_area_name_dict[ coupon_id_dict['small_area_name'] ] = coupon_small_area_name_dict.get( coupon_id_dict['small_area_name'],0 ) + 1\n            coupon_ken_name_dict[ coupon_id_dict['ken_name'] ] = coupon_ken_name_dict.get( coupon_id_dict['ken_name'],0 ) + 1\n            price_rate_list.append( float(coupon_id_dict['PRICE_RATE']) )\n            catalog_price_list.append( float(coupon_id_dict['CATALOG_PRICE']) )\n            discount_price_list.append( float(coupon_id_dict['DISCOUNT_PRICE']) )\n            dispperiod_list.append( float(coupon_id_dict['DISPPERIOD']) )\n            if coupon_id_dict['VALIDPERIOD'] not in ('','NA'):\n                valid_period_list.append( float(coupon_id_dict['VALIDPERIOD']) )\n            if coupon_id_dict['USABLE_DATE_MON'] not in ('','NA'):\n                usable_date_mon_list[ float(coupon_id_dict['USABLE_DATE_MON']) ] = usable_date_mon_list.get( float(coupon_id_dict['USABLE_DATE_MON']),0 ) + 1\n                usable_date_tue_list[ float(coupon_id_dict['USABLE_DATE_TUE']) ] = usable_date_tue_list.get( float(coupon_id_dict['USABLE_DATE_TUE']),0 ) + 1\n                usable_date_wed_list[ float(coupon_id_dict['USABLE_DATE_WED']) ] = usable_date_wed_list.get( float(coupon_id_dict['USABLE_DATE_WED']),0 ) + 1\n                usable_date_thu_list[ float(coupon_id_dict['USABLE_DATE_THU']) ] = usable_date_thu_list.get( float(coupon_id_dict['USABLE_DATE_THU']),0 ) + 1\n                usable_date_fri_list[ float(coupon_id_dict['USABLE_DATE_FRI']) ] = usable_date_fri_list.get( float(coupon_id_dict['USABLE_DATE_FRI']),0 ) + 1\n                usable_date_sat_list[ float(coupon_id_dict['USABLE_DATE_SAT']) ] = usable_date_sat_list.get( float(coupon_id_dict['USABLE_DATE_SAT']),0 ) + 1\n                usable_date_sun_list[ float(coupon_id_dict['USABLE_DATE_SUN']) ] = usable_date_sun_list.get( float(coupon_id_dict['USABLE_DATE_SUN']),0 ) + 1\n                usable_date_hol_list[ float(coupon_id_dict['USABLE_DATE_HOLIDAY']) ] = usable_date_hol_list.get( float(coupon_id_dict['USABLE_DATE_HOLIDAY']),0 ) + 1\n                usable_date_before_hol_list[ float(coupon_id_dict['USABLE_DATE_BEFORE_HOLIDAY']) ] = usable_date_before_hol_list.get( float(coupon_id_dict['USABLE_DATE_BEFORE_HOLIDAY']),0 )+1\n            else:\n                usable_date_mon_list[3.0] = usable_date_mon_list.get( 3.0,0 ) + 1\n                usable_date_tue_list[3.0] = usable_date_tue_list.get( 3.0,0 ) + 1\n                
usable_date_wed_list[3.0] = usable_date_wed_list.get( 3.0,0 ) + 1\n usable_date_thu_list[3.0] = usable_date_thu_list.get( 3.0,0 ) + 1\n usable_date_fri_list[3.0] = usable_date_fri_list.get( 3.0,0 ) + 1\n usable_date_sat_list[3.0] = usable_date_sat_list.get( 3.0,0 ) + 1\n usable_date_sun_list[3.0] = usable_date_sun_list.get( 3.0,0 ) + 1\n usable_date_hol_list[3.0] = usable_date_hol_list.get( 3.0,0 ) + 1\n usable_date_before_hol_list[3.0] = usable_date_before_hol_list.get( 3.0,0 ) + 1\n\n feat_list.extend([days_since_last_purchase, last_week_purchase, last_fifteendays_purchase, last_thirtydays_purchase, last_sixtydays_purchase, last_nintydays_purchase, last_oneeightydays_purchase, days_since_prev_purchase, prev_week_purchase, prev_fifteendays_purchase, prev_thirtydays_purchase, prev_sixtydays_purchase, prev_nintydays_purchase, prev_oneeightydays_purchase])\n return feat_list, feat_header, [purchase_small_area_name_dict, capsule_text_dict, genre_name_dict, coupon_large_area_name_dict, coupon_small_area_name_dict, coupon_ken_name_dict, price_rate_list, catalog_price_list, discount_price_list, dispperiod_list, valid_period_list, usable_date_mon_list, usable_date_tue_list, usable_date_wed_list, usable_date_thu_list, usable_date_fri_list, usable_date_sat_list, usable_date_sun_list, usable_date_hol_list, usable_date_before_hol_list]", "def _extract_ticket_purchasing_patterns(self):\n list_df_purchase_count = []\n\n for feature in self.purchase_features:\n feature_count = self._get_one_purchase_feature(feature)\n list_df_purchase_count.append(feature_count.drop(['riderID'], axis=1))\n df_purchase_count = pd.concat(list_df_purchase_count, axis=1)\n\n # append the riderID columns\n df_purchase_count.insert(0, 'riderID', feature_count['riderID'])\n\n return df_purchase_count", "def featuretest(self, args):\n db_engine = create_engine(self.root.db_url)\n feature_config = yaml.load(args.feature_config_file)\n\n FeatureGenerator(db_engine, 'features_test').create_features_before_imputation(\n feature_aggregation_config=feature_config,\n feature_dates=[args.as_of_date]\n )\n logging.info('Features created for feature_config %s and date %s', feature_config, args.as_of_date)", "def onehot_features(data):\n\n# Binary Features\n columns = ['Weekend', 'Revenue']\n for col in columns:\n data[col] = data[col].apply(lambda x: float(1) if x else float(0))\n\n columns = ['Month', 'OperatingSystems', 'Browser', 'Region', 'TrafficType',\n 'VisitorType']\n for col in columns:\n enc = OneHotEncoder()\n data_array = enc.fit_transform(data[[col]]).toarray()\n enc_data = pd.DataFrame(data_array)\n enc_data.columns = list(enc.get_feature_names([col]))\n data = data.join(enc_data)\n\n data = data.drop(columns={'Month', 'Month_May', 'OperatingSystems',\n 'OperatingSystems_2', 'Browser', 'Browser_2',\n 'Region', 'Region_1.0', 'TrafficType',\n 'TrafficType_2', 'VisitorType',\n 'VisitorType_Returning_Visitor'})\n return data", "def __feature_set__(self):\r\n import numpy as np\r\n import datetime\r\n import time\r\n cols_norm = [col for col in self.columns]\r\n cols_lower = [col.lower() for col in self.columns]\r\n fields = []\r\n features = []\r\n date_fields = []\r\n _geom_types = {\r\n arcgis.geometry._types.Point : \"esriGeometryPoint\",\r\n arcgis.geometry._types.Polyline : \"esriGeometryPolyline\",\r\n arcgis.geometry._types.MultiPoint : \"esriGeometryMultipoint\",\r\n arcgis.geometry._types.Polygon : \"esriGeometryPolygon\"\r\n }\r\n if self.sr is None:\r\n sr = {'wkid' : 4326}\r\n else:\r\n sr = self.sr\r\n fs = {\r\n 
\"objectIdFieldName\" : \"\",\r\n \"globalIdFieldName\" : \"\",\r\n \"displayFieldName\" : \"\",\r\n \"geometryType\" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],\r\n \"spatialReference\" : sr,\r\n \"fields\" : [],\r\n \"features\" : []\r\n }\r\n if 'objectid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n elif 'fid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]\r\n elif 'oid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]\r\n else:\r\n self['OBJECTID'] = list(range(1, self.shape[0] + 1))\r\n res = self.__feature_set__\r\n del self['OBJECTID']\r\n return res\r\n if 'objectIdFieldName' in fs:\r\n fields.append({\r\n \"name\" : fs['objectIdFieldName'],\r\n \"type\" : \"esriFieldTypeOID\",\r\n \"alias\" : fs['objectIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))\r\n if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:\r\n fields.append({\r\n \"name\" : fs['globalIdFieldName'],\r\n \"type\" : \"esriFieldTypeGlobalID\",\r\n \"alias\" : fs['globalIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))\r\n elif 'globalIdFieldName' in fs and \\\r\n len(fs['globalIdFieldName']) == 0:\r\n del fs['globalIdFieldName']\r\n if self._geometry_column_name in cols_norm:\r\n cols_norm.pop(cols_norm.index(self._geometry_column_name))\r\n for col in cols_norm:\r\n try:\r\n idx = self[col].first_valid_index()\r\n col_val = self[col].loc[idx]\r\n except:\r\n col_val = \"\"\r\n if isinstance(col_val, (str, np.str)):\r\n l = self[col].str.len().max()\r\n if str(l) == 'nan':\r\n l = 255\r\n\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeString\",\r\n \"length\" : int(l),\r\n \"alias\" : col\r\n })\r\n if fs['displayFieldName'] == \"\":\r\n fs['displayFieldName'] = col\r\n elif isinstance(col_val, (datetime.datetime,\r\n pd.Timestamp,\r\n np.datetime64,\r\n pd.datetime)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDate\",\r\n \"alias\" : col\r\n })\r\n date_fields.append(col)\r\n elif isinstance(col_val, (np.int32, np.int16, np.int8)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSmallInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (int, np.int, np.int64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (float, np.float64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDouble\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (np.float32)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSingle\",\r\n \"alias\" : col\r\n })\r\n fs['fields'] = fields\r\n for row in self.to_dict('records'):\r\n geom = {}\r\n if self._geometry_column_name in row:\r\n geom = row[self._geometry_column_name]\r\n del row[self._geometry_column_name]\r\n for f in date_fields:\r\n try:\r\n row[f] = int(row[f].to_pydatetime().timestamp() * 1000)\r\n except:\r\n row[f] = None\r\n features.append(\r\n {\r\n \"geometry\" : dict(geom),\r\n \"attributes\" : row\r\n }\r\n )\r\n del row\r\n del geom\r\n fs['features'] = features\r\n return fs", "def preprocess(self):\n\n print('[ INFO ]: Preprocessing forest fires 
data...')\n\n        # Rename headers of data frame\n        forestfires_data = pd.read_csv(self.forestfires_path, header=0)\n        forestfires_data.columns = [\n            'x_axis','y_axis','month','day','ffmc','dmc','dc','isi','temp','rh',\n            'wind','rain','area'\n        ]\n        categorical_features = [\n            'month','day'\n        ]\n        predictor = 'area'\n\n        df = alg.one_hot_encode(self, forestfires_data, categorical_features)\n\n        features = [df.columns[j] for j in range(len(df.columns)) if df.columns[j] != predictor]\n\n        return df, features, predictor", "def addDummyFeatures(inputDf, feature):\n\n\n    ## TODO ##\n    if feature not in inputDf.columns:\n        return('Feature not in dataset')\n    rows,columns = inputDf.shape\n    feature_List = []\n    OHE_Matrix = np.array([[]]) #Create a matrix to store the OHE values\n    for i in range(rows):\n        if pd.isna(inputDf.loc[i,feature]):\n            OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((1,len(feature_List)))),axis=0) #If missing data, create a new row of zeros\n        elif str(inputDf.loc[i,feature]) not in feature_List:\n            feature_List.append(str(inputDf.loc[i,feature]))\n            OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((i+1,1))),axis=1)#if there is a new feature, create a new column of zeros\n        if str(inputDf.loc[i,feature]) in feature_List:\n            OHE_Matrix = np.concatenate((OHE_Matrix,np.zeros((1,len(feature_List)))),axis=0)#if this it is alreay in feature list , create a new row of zeros and set the feature related column to 1\n            OHE_Matrix[i,feature_List.index(str(inputDf.loc[i,feature]))]=1\n    for i in range(len(feature_List)):\n        feature_List[i] = feature + '_'+feature_List[i]#New column names for OHE\n\n    OHE_Matrix = np.delete(OHE_Matrix,rows,0)#Delete the extra row created\n\n    dataOut= pd.DataFrame(OHE_Matrix,columns=feature_List) #Create a dataframe with OHE as matrix and the new feature list\n    outDf = pd.concat([inputDf,dataOut],axis=1)#Concate new features to original matrix\n    outDf = outDf.drop(feature,axis=1)#drop the original feature\n    return outDf", "def create_feats_and_preds(price_df, feat_days, pred_days):\n\n    # create shifted percent features\n    df_feats = 
price_n_days_out(price_df, days=feat_days)\n df_help = df_feats.copy()[['ticker', 'prediction_date', f'price_{feat_days}_out']]\n df_help.columns = ['ticker', 'date', 'close']\n df_preds = price_n_days_out(df_help, days=pred_days)\n\n # do some cleaning\n full_df = pd.merge(df_feats, df_preds, left_on=['ticker', 'prediction_date'], right_on=['ticker', 'date'])\n full_df.columns = ['ticker', 'past_date', 'past_close', 'current_date', 'current_price',\n 'percent_change_feat', 'date_y', 'close_y', 'prediction_date',\n 'price_5_out_y', 'percent_change_pred']\n full_df = full_df[['ticker', 'past_date', 'current_date', 'prediction_date',\n 'percent_change_feat', 'percent_change_pred']]\n return full_df", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! 
')", "def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return", "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = 
list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def prepare_lv1_data(feature_choice, file_name):\n\n\tif feature_choice == \"xgb_bin\":\n\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Get Id and response\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\t# Drop Id and Response\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with missing values\n\t\tprint \"Dealing with NaN\"\n\t\tdf[\"NULL\"] = df.isnull().sum(axis=1)\n\t\tdf = df.fillna(-1)\n\t\t#Get tsne data\n\t\tprint \"Getting tsne data\"\n\t\tdf_tsne_full = pd.read_csv(\"./Data/Raw/tsne_full_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_full\", \"V2_full\"]] = df_tsne_full[[\"V1\", \"V2\"]]\n\t\tdf_tsne_binary = pd.read_csv(\"./Data/Raw/tsne_binary_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_binary\", \"V2_binary\"]] = df_tsne_binary[[\"V1\", \"V2\"]]\n\t\tdf_tsne_distance = pd.read_csv(\"./Data/Raw/tsne_distance_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_distance\", \"V2_distance\"]] = df_tsne_distance[[\"V1\", \"V2\"]]\n\n\t\tprint \"Comparison features\"\n\t\tdf[\"COMP_IH4_IH7\"] = df[\"Insurance_History_4\"].values == df[\"Insurance_History_7\"].values\n\t\tdf[\"COMP_IH4_IH3\"] = np.abs(df[\"Insurance_History_4\"].values - df[\"Insurance_History_3\"].values)\n\t\tdf[\"COMP_IH9_IH7\"] = np.abs(df[\"Insurance_History_9\"].values - df[\"Insurance_History_7\"].values)\n\t\tdf[\"COMP_MH6_MK48\"] = np.abs(df[\"Medical_History_6\"].values - df[\"Medical_Keyword_48\"].values)\n\t\tdf[\"COMP_MH33_MK23\"] = np.abs(df[\"Medical_History_33\"].values - df[\"Medical_Keyword_23\"].values)\n\t\tdf[\"COMP_MH37_MK11\"] = np.abs(df[\"Medical_History_37\"].values - df[\"Medical_Keyword_11\"].values)\n\t\tdf[\"COMP_MH25_MH26\"] = np.abs(df[\"Medical_History_25\"].values - df[\"Medical_History_26\"].values)\n\t\t\n\t\t# factorize categorical variables\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\n\t\treturn X,y,Id\n\n\telif feature_choice == \"knn\" or feature_choice == \"cosine\":\n\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Save then drop Id and y\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with columns with missing values\n\t\tdf = 
df.fillna(-1)\n\t\t# Encode categorical\t\t\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\t\t\n\t\tdf['BMI_Age'] = df['BMI'] * df['Ins_Age']\n\t\tmed_keyword_columns = df.columns[df.columns.str.startswith('Medical_Keyword_')]\n\t\tdf['Med_Keywords_Count'] = df[med_keyword_columns].sum(axis=1)\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\t\t# # Standardize\n\t\tX = StandardScaler().fit_transform(X)\n\n\t\treturn X,y,Id\n\n\telif feature_choice in [\"linreg\", \"logistic\", \"keras_reg1\"]:\n\n\t\tprint \"Preprocessing\"\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Get Id and response\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\t# Drop Id and Response\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with missing values\n\t\tprint \"Dealing with NaN\"\n\t\tdf[\"NULLCOUNT\"] = df.isnull().sum(axis=1)\n\t\tdf = df.fillna(df.median())\n\t\t#Get tsne data\n\t\tprint \"Getting tsne data\"\n\t\tdf_tsne_full = pd.read_csv(\"./Data/Raw/tsne_full_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_full\", \"V2_full\"]] = df_tsne_full[[\"V1\", \"V2\"]]\n\t\tdf_tsne_binary = pd.read_csv(\"./Data/Raw/tsne_binary_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_binary\", \"V2_binary\"]] = df_tsne_binary[[\"V1\", \"V2\"]]\n\t\tdf_tsne_ternary = pd.read_csv(\"./Data/Raw/tsne_ternary_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_ternary\", \"V2_ternary\"]] = df_tsne_ternary[[\"V1\", \"V2\"]]\n\t\tdf_tsne_distance = pd.read_csv(\"./Data/Raw/tsne_distance_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_distance\", \"V2_distance\"]] = df_tsne_distance[[\"V1\", \"V2\"]]\n\t\tdf_tsne_cosine = pd.read_csv(\"./Data/Raw/tsne_cosine_%s.csv\" % file_name, usecols = [\"V1\", \"V2\"])\n\t\tdf[[\"V1_cosine\", \"V2_cosine\"]] = df_tsne_cosine[[\"V1\", \"V2\"]]\n\n\t\t# Get correlation distance data\n\t\tprint \"Getting correlation data\"\n\t\tdf_distance = pd.read_csv(\"./Data/Raw/%s_distance_correlation.csv\" % file_name)\n\t\tlist_col_corr = [col for col in df_distance.columns.values if col != \"Id\" and col !=\"Response\"]\n\t\tdf[list_col_corr] = df_distance[list_col_corr]\n\n\t\t# Add custom features\n\t\tprint \"Feature engineering\"\n\t\tdf[\"SUMKEYWORD\"] = np.zeros(len(df))\n\t\tdf[\"SUMINSURED\"] = np.zeros(len(df))\n\t\tfor col in df.columns.values :\n\t\t\tif \"Key\" in col :\n\t\t\t\tdf[\"SUMKEYWORD\"]+=df[col]\n\t\t\tif \"Insured\" in col :\n\t\t\t\tdf[\"SUMINSURED\"]+=df[col]\n\n\t\tdf[\"CINSINF\"] = np.zeros(len(df))\n\t\tdf[\"CINSINFMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,8):\n\t\t\tcol = \"InsuredInfo_\" + str(i)\n\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\tdf[\"CINSINF\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\tdf[\"CINSINFMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CINSHIST\"] = np.zeros(len(df))\n\t\tdf[\"CINSHISTMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,10):\n\t\t\tif i !=6:\n\t\t\t\tcol 
= \"Insurance_History_\" + str(i)\n\t\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\t\tdf[\"CINSHIST\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\t\tdf[\"CINSHISTMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CMEDKEY\"] = np.zeros(len(df))\n\t\tdf[\"CMEDKEYMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,49):\n\t\t\tcol = \"Medical_Keyword_\" + str(i)\n\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\tdf[\"CMEDKEY\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\tdf[\"CMEDKEYMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CMEDHIST\"] = np.zeros(len(df))\n\t\tdf[\"CMEDHISTMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,42):\n\t\t\tif i not in [1,2,10,15,24]:\n\t\t\t\tcol = \"Medical_History_\" + str(i)\n\t\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\t\tdf[\"CMEDHIST\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\t\tdf[\"CMEDHISTMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CPRODINFO\"] = np.zeros(len(df))\n\t\tdf[\"CPRODINFOMAX\"] = np.zeros(len(df))\n\t\tfor i in range(1,8):\n\t\t\tif i not in [2,4]:\n\t\t\t\tcol = \"Product_Info_\" + str(i)\n\t\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\t\tdf[\"CPRODINFO\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\t\tdf[\"CPRODINFOMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tdf[\"CEMPINFO\"] = np.zeros(len(df))\n\t\tdf[\"CEMPINFOMAX\"] = np.zeros(len(df))\n\t\tfor i in range(2,6):\n\t\t\tcol = \"Employment_Info_\" + str(i)\n\t\t\tmin_val = df[col].value_counts().idxmin()\n\t\t\tmax_val = df[col].value_counts().idxmax()\n\t\t\tdf[\"CEMPINFO\"] += (df[col]==min_val).apply(lambda x : 1 if x else 0)\n\t\t\tdf[\"CEMPINFOMAX\"] += (df[col]==max_val).apply(lambda x : 1 if x else 0)\n\n\t\tprint \"Comparison features\"\n\t\tdf[\"COMP_IH4_IH7\"] = df[\"Insurance_History_4\"].values == df[\"Insurance_History_7\"].values\n\t\tdf[\"COMP_IH4_IH3\"] = np.abs(df[\"Insurance_History_4\"].values - df[\"Insurance_History_3\"].values)\n\t\tdf[\"COMP_IH9_IH7\"] = np.abs(df[\"Insurance_History_9\"].values - df[\"Insurance_History_7\"].values)\n\t\tdf[\"COMP_MH6_MK48\"] = np.abs(df[\"Medical_History_6\"].values - df[\"Medical_Keyword_48\"].values)\n\t\tdf[\"COMP_MH33_MK23\"] = np.abs(df[\"Medical_History_33\"].values - df[\"Medical_Keyword_23\"].values)\n\t\tdf[\"COMP_MH37_MK11\"] = np.abs(df[\"Medical_History_37\"].values - df[\"Medical_Keyword_11\"].values)\n\t\tdf[\"COMP_MH25_MH26\"] = np.abs(df[\"Medical_History_25\"].values - df[\"Medical_History_26\"].values)\n\t\t\n\t\t# factorize categorical variables\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\n\t\t# Custom variables\n\t\tprint \"Kaggle features\"\n\t\tdf['custom_var_1'] = df['Medical_History_15'] < 10\n\t\tdf['custom_var_3'] = df['Product_Info_4'] < 0.075\n\t\tdf['custom_var_4'] = df['Product_Info_4'] == 1\n\t\tdf['custom_var_6'] = (df['BMI'] + 1)**2\n\t\tdf['custom_var_7'] = df['BMI']**0.8\n\t\tdf['custom_var_8'] = 
df['Ins_Age']**8.5\n\t\tdf['BMI_Age'] = (df['BMI'] * df['Ins_Age'])**2.5\n\t\tdf['custom_var_10'] = df['BMI'] > np.percentile(df['BMI'], 0.8)\n\t\tdf['custom_var_11'] = (df['BMI'] * df['Product_Info_4'])**0.9\n\t\tage_BMI_cutoff = np.percentile(df['BMI'] * df['Ins_Age'], 0.9)\n\t\tdf['custom_var_12'] = (df['BMI'] * df['Ins_Age']) > age_BMI_cutoff\n\t\tdf['custom_var_13'] = (df['BMI'] * df['Medical_Keyword_3'] + 0.5)**3\n\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\n\t\tprint \"Standardizing\"\n\t\tX = StandardScaler().fit_transform(X)\n\n\t\treturn X,y,Id\n\n\telif feature_choice == \"xgb_reg\":\n\n\t\tprint \"Preprocessing\"\n\t\t# Get data\n\t\tdf = pd.read_csv(\"./Data/Raw/%s.csv\" % file_name)\n\t\tif file_name == \"test\":\n\t\t\tdf[\"Response\"]=-1\n\t\t# Get Id and response\n\t\tId = df[\"Id\"].values\n\t\ty = df[\"Response\"].values\n\t\t# Drop Id and Response\n\t\tdf = df.drop([\"Id\", \"Response\"], 1)\n\t\t# Deal with missing values\n\t\tprint \"Dealing with NaN\"\n\t\tdf[\"NULLCOUNT\"] = df.isnull().sum(axis=1)\n\t\t#Get tsne data\n\t\t\n\t\t# factorize categorical variables\n\t\tdf['Product_Info_2_char'] = df.Product_Info_2.str[0]\n\t\tdf['Product_Info_2_num'] = df.Product_Info_2.str[1]\n\t\tdf['Product_Info_2'] = pd.factorize(df['Product_Info_2'])[0]\n\t\tdf['Product_Info_2_char'] = pd.factorize(df['Product_Info_2_char'])[0]\n\t\tdf['Product_Info_2_num'] = pd.factorize(df['Product_Info_2_num'])[0]\n\n\t\t# Custom variables\n\t\tprint \"Kaggle features\"\n\t\tdf['custom_var_1'] = df['Medical_History_15'] < 10\n\t\tdf['custom_var_3'] = df['Product_Info_4'] < 0.075\n\t\tdf['custom_var_4'] = df['Product_Info_4'] == 1\n\t\tdf['custom_var_6'] = (df['BMI'] + 1)**2\n\t\tdf['custom_var_7'] = df['BMI']**0.8\n\t\tdf['custom_var_8'] = df['Ins_Age']**8.5\n\t\tdf['BMI_Age'] = (df['BMI'] * df['Ins_Age'])**2.5\n\t\tdf['custom_var_10'] = df['BMI'] > np.percentile(df['BMI'], 0.8)\n\t\tdf['custom_var_11'] = (df['BMI'] * df['Product_Info_4'])**0.9\n\t\tage_BMI_cutoff = np.percentile(df['BMI'] * df['Ins_Age'], 0.9)\n\t\tdf['custom_var_12'] = (df['BMI'] * df['Ins_Age']) > age_BMI_cutoff\n\t\tdf['custom_var_13'] = (df['BMI'] * df['Medical_Keyword_3'] + 0.5)**3\n\n\t\t# Shuffle data\n\t\tpermut = np.random.choice(len(df), len(df), replace = False)\n\t\tdf = df.iloc[permut,:]\n\t\tX = df.values\n\t\ty = y[permut]\n\t\tId = Id[permut]\n\n\t\treturn X,y,Id", "def extractFeatures(sample, features):\n sample = pd.merge(sample, features, on=['Store', 'Date'])\n\n # Extract features from the Date\n sample['Date'] = pd.to_datetime(sample['Date'])\n sample['WeekOfYear'] = sample['Date'].dt.weekofyear\n sample['Year'] = sample['Date'].dt.year\n return sample", "def __init__(self):\n # File settings and locations.\n self.DATA_DIR = 'data'\n self.DATA_COL_DIR = 'data_collated'\n\n self.FIRE_DATABASE = 'FPA_FOD_20170508.sqlite'\n self.CLIMATE_DATA = 'GlobalLandTemperaturesByCity.csv'\n self.STOCK_DATA = 'historical_stock_prices.csv'\n self.COMBINED_DATA = 'combined_data.db'\n\n self.MODEL_PATH = 'models/dnn_wildfires.ckpt'\n\n # Setting to use reduced data for prototyping purposes.\n self.prototyping = False\n self.sample_size = 80000\n\n # Start date of data\n self.start = pd.to_datetime('1992-01-01')\n\n # Stocks in stock data to keep for analysis.\n self.stocks = ['MSFT', 'AAPL', 'GE', 'JNJ', 'JPM', 'PG']\n\n # Settings for validation and test set partitioning.\n 
self.val_set_ratio = 0.15\n self.test_set_ratio = 0.15\n\n # Separation of features for pipeline preparation \n self.cat_attribs = ['STATE', 'FIRE_SIZE_CLASS', 'OWNER_CODE', 'City']\n self.num_attribs = ['FIRE_YEAR', 'LATITUDE', 'LONGITUDE', 'FIRE_SIZE', \n 'FIRE_LENGTH', 'DIST_TO_MAJOR_CITY', 'AverageTemperature',\n 'AverageTemperatureUncertainty', 'AAPL', 'GE', 'JNJ', \n 'JPM', 'MSFT', 'PG']\n self.cycle_cols = ['DISC_MONTH', 'DISC_DAY_OF_WEEK', 'DISCOVERY_TIME', \n 'DISCOVERY_DOY', 'CONT_MONTH', 'CONT_DAY_OF_WEEK',\n 'CONT_TIME']\n\n # Define the ranges of the cycles in cycle_cols and whether any offset for\n # zero-indexing is needed (i.e., 'DISC_MONTH' cycles over a 12 month period\n # and the months need an offset of one to start the indicies at 0 for Jan.).\n self.cycle_ranges = [12, 7, 2400, 365, 12, 7, 2400]\n self.cycle_offsets = [1, 0, 0, 1, 1, 0, 0]\n\n # Parameters for deep learning model determined from randomized \n # hyperparameter search.\n self.n_hidden_layers = 4\n self.n_neurons = 200\n self.batch_size = 500\n self.batch_norm_momentum = 0.999\n self.dropout_rate = 0.4\n self.learning_rate = 0.01\n self.activation = tf.nn.elu\n\n # Hyperparameter settings .\n self.hp_search = False", "def add_features(df_in, rolling_win_size=15):\n cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']\n other_cols = []\n for i in df_in.columns:\n if i not in cols:\n other_cols.append(i)\n all_cols = cols + other_cols\n\n df_in = df_in[all_cols]\n\n sensor_cols = []\n for i in df_in.columns[5:]:\n sensor_cols.append(i)\n\n sensor_av_cols = [nm+'_av' for nm in sensor_cols]\n sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]\n\n df_out = pd.DataFrame()\n\n ws = rolling_win_size\n\n #calculate rolling stats for each engine id\n\n for m_id in pd.unique(df_in.Turbine_ID):\n\n # get a subset for each engine sensors\n df_engine = df_in[df_in['Turbine_ID'] == m_id]\n df_sub = df_engine[sensor_cols]\n\n # get rolling mean for the subset\n av = df_sub.rolling(ws, min_periods=1).mean()\n av.columns = sensor_av_cols\n\n # get the rolling standard deviation for the subset\n sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)\n sd.columns = sensor_sd_cols\n\n # combine the two new subset dataframes columns to the engine subset\n new_ftrs = pd.concat([df_engine,av,sd], axis=1)\n\n # add the new features rows to the output dataframe\n df_out = pd.concat([df_out,new_ftrs])\n df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )\n return df_out", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n 
Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()", "def create_feature_csv(faces, objects, rgb, hsv, gray):\n faces_columns = ['faces_zs']\n objects_columns = ['comp_1', 'comp_2', 'comp_3', 'comp_4', 'comp_5', 'comp_6', 'comp_7', 'comp_8', 'comp_9',\n 'comp_10', 'comp_11', 'comp_12', 'comp_13', 'comp_14', 'comp_15', 'comp_16', 'comp_17', 'comp_18',\n 'comp_19', 'comp_20', 'comp_21', 'comp_22', 'comp_23', 'comp_24', 'comp_25']\n rgb_columns = ['max_b_zs', 'max_g_zs', 'max_r_zs', 'med_b_zs', 'med_g_zs', 'med_r_zs', 'std_b', 'std_g', 'std_r']\n hsv_columns = ['hmax1_zs', 'hmax2_zs', 'hmax3_zs', 'smax1_zs', 'smax2_zs', 'smax3_zs', 'smed_zs', 'sstd',\n 'vmax1_zs', 'vmax2_zs', 'vmax3_zs', 'vmed_zs', 'vstd']\n gray_columns = ['gray_max_zs', 'gray_med_zs', 'gray_std_zs']\n\n with open(feature_file, 'r', encoding='utf-8') as infile:\n new_csv = pd.read_csv(infile, sep=',')\n\n if not faces:\n for col in faces_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not objects:\n for col in objects_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not rgb:\n for col in rgb_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not hsv:\n for col in hsv_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not gray:\n for col in gray_columns:\n new_csv = new_csv.drop(col, 1)\n\n if not os.path.exists(os.path.join(workdir, \"output\")):\n os.makedirs(os.path.join(workdir, \"output\"))\n with open(output_file, 'w', encoding='utf-8') as outfile:\n new_csv = new_csv.drop(['Unnamed: 0'], 1)\n new_csv.to_csv(outfile, sep=',')", "def add_sf1_features(sf1_art: pd.DataFrame, sf1_arq: pd.DataFrame, metadata: pd.DataFrame):\n\n metadata_empty = True if (len(metadata) == 0) else False\n\n if metadata_empty == True:\n return sf1_art\n\n if isinstance(metadata, pd.DataFrame):\n metadata = metadata.iloc[0]\n\n ticker = metadata[\"ticker\"]\n\n first_calendardate = None\n calendardate_1y_after_first_calendardate = None\n\n # SF1_ART PREPARATION_____________________________________________________________\n # All new filings added by the forward_fill_gaps function will be dates other than the once contained in the current index.\n # This means that when later downsampling according to this index, all the ones added by forwad_fill_gaps will be dropped.\n sf1_art_index_snapshot = sf1_art.index\n\n # Foreward fill gaps\n sf1_art = forward_fill_gaps(sf1_art, 3)\n\n sf1_arq_empty = True if (len(sf1_arq) == 0) else False\n if sf1_arq_empty == False:\n sf1_arq = forward_fill_gaps(sf1_arq, 3)\n\n # This gives the forward filled dataframe a numerical index and sets the old index as a column.\n sf1_art = sf1_art.reset_index()\n\n for index_cur, art_row_cur in sf1_art.iterrows():\n caldate_cur = art_row_cur[\"calendardate\"]\n datekey_cur = art_row_cur[\"datekey\"]\n # The row might be an update of a earlier report. But this is ok, because I simply\n # want to calculate the most up to date features for the given datekey. 
It is up to \n # the extend_sep_for_sampling function to find the appropriate datekey (filing or update)\n # for each sample.\n \n if first_calendardate is None:\n first_calendardate = art_row_cur[\"calendardate\"]\n calendardate_1y_after_first_calendardate = get_calendardate_x_quarters_later(first_calendardate, 4)\n\n # Skip if not two 10K filings are available, being 1 year appart.\n if art_row_cur[\"calendardate\"] < calendardate_1y_after_first_calendardate:\n # print(\"CONTINUE\", art_row_cur[\"calendardate\"], calendardate_1y_after_first_calendardate)\n continue\n\n #What to do when a 10K is missing?\n art_row_1y_ago = get_most_up_to_date_10k_filing(sf1_art, caldate_cur, datekey_cur, 1) # How is this working as I am comparing a data to a numerical index????\n \n art_row_1y_ago_empty = art_row_1y_ago.dropna().empty # dont know if this is working\n\n # _____________________QUARTER FILING BASED FEATURES START_______________________\n\n if sf1_arq_empty == False:\n \"\"\"\n At this point up to tree quarters have been forward filled. Any greater gaps is not acceptable, so\n being strict in requiring arq_row_xq_ago to be aviable is warranted when calculating features below.\n \"\"\"\n arq_row_cur = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 0)\n arq_row_1q_ago = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 1)\n arq_row_2q_ago = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 2) \n arq_row_3q_ago = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 3)\n arq_row_4q_ago = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 4)\n arq_row_5q_ago = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 5)\n arq_row_6q_ago = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 6)\n arq_row_7q_ago = get_most_up_to_date_10q_filing(sf1_arq, caldate_cur, datekey_cur, 7)\n \n arq_rows = [arq_row_cur, arq_row_1q_ago, arq_row_2q_ago, arq_row_3q_ago, arq_row_4q_ago, \\\n arq_row_5q_ago, arq_row_6q_ago, arq_row_7q_ago]\n\n arq_row_cur_empty = arq_row_cur.dropna().empty\n arq_row_1q_ago_empty = arq_row_1q_ago.dropna().empty\n arq_row_2q_ago_empty = arq_row_2q_ago.dropna().empty\n arq_row_3q_ago_empty = arq_row_3q_ago.dropna().empty\n arq_row_4q_ago_empty = arq_row_4q_ago.dropna().empty\n arq_row_5q_ago_empty = arq_row_5q_ago.dropna().empty\n arq_row_6q_ago_empty = arq_row_6q_ago.dropna().empty\n arq_row_7q_ago_empty = arq_row_7q_ago.dropna().empty\n \n\n # I might want to implement approximations for those companies that do not have quarterly statements\n if (not arq_row_cur_empty) and (not arq_row_1q_ago_empty):\n \n # Return on assets (roaq), Formula: SF1[netinc]q-1 / SF1[assets]q-2\n if arq_row_1q_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"roaq\"] = arq_row_cur[\"netinc\"] / arq_row_1q_ago[\"assets\"]\n\n if (not arq_row_cur_empty) and (not arq_row_4q_ago_empty):\n # CALCULATE FEATURES BASED ON THE SAME QUARTER FOR THAT LAST TWO YEARS\n \n # Change in tax expense (chtx), Formula: (SF1[taxexp]q-1 / SF1[taxexp]q-5) - 1\n if arq_row_4q_ago[\"taxexp\"] != 0:\n sf1_art.at[index_cur, \"chtx\"] = (arq_row_cur[\"taxexp\"] / arq_row_4q_ago[\"taxexp\"]) - 1\n\n # Revenue surprise (rsup), Formula: ( SF1[revenueusd]q-1 - SF1[revenueusd]q-5 ) / SF1[marketcap]q-1\n if arq_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"rsup\"] = (arq_row_cur[\"revenueusd\"] - arq_row_4q_ago[\"revenueusd\"]) / arq_row_cur[\"marketcap\"]\n\n # Earnings Surprise (sue), Formula: (SF1[netinc]q-1 - SF1[netinc]q-5) / 
SF1[marketcap]q-1\n if arq_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"sue\"] = (arq_row_cur[\"netinc\"] - arq_row_4q_ago[\"netinc\"]) / arq_row_cur[\"marketcap\"]\n \n\n #____MORE ADVANCED MULTI-QUARTER CALCULATIONS____\n\n # Corporate investment (cinvest), \n # \"Change over one quarter in net PP&E (ppentq) divided by sales (saleq) - average of this variable for prior 3 quarters; if saleq = 0, then scale by 0.01.\"\n # Formula: (SF1[ppnenet]q-1 - SF1[ppnenet]q-2) / SF1[revenueusd]q-1 - avg((SF1[ppnenet]q-i - SF1[ppnenet]q-i-1) / SF1[revenueusd]q-i, i=[2,3,4]) NB: if sales is zero scale change in ppenet by 0.01\n if (not arq_row_cur_empty) and (not arq_row_1q_ago_empty) and (not arq_row_2q_ago_empty) and \\\n (not arq_row_3q_ago_empty) and (not arq_row_4q_ago_empty):\n \n # Most recent quarter's chppne/sales\n if arq_row_cur[\"revenueusd\"] != 0:\n chppne_sales_cur = (arq_row_cur[\"ppnenet\"] - arq_row_1q_ago[\"ppnenet\"]) / arq_row_cur[\"revenueusd\"]\n else:\n chppne_sales_cur = (arq_row_cur[\"ppnenet\"] - arq_row_1q_ago[\"ppnenet\"]) * 0.01\n \n # Previous three quarters of chppne/sales\n if arq_row_1q_ago[\"revenueusd\"] != 0:\n chppne_sales_q_1 = (arq_row_1q_ago[\"ppnenet\"] - arq_row_2q_ago[\"ppnenet\"]) / arq_row_1q_ago[\"revenueusd\"]\n else:\n chppne_sales_q_1 = (arq_row_1q_ago[\"ppnenet\"] - arq_row_2q_ago[\"ppnenet\"]) * 0.01\n\n if arq_row_2q_ago[\"revenueusd\"] != 0:\n chppne_sales_q_2 = (arq_row_2q_ago[\"ppnenet\"] - arq_row_3q_ago[\"ppnenet\"]) / arq_row_2q_ago[\"revenueusd\"]\n else:\n chppne_sales_q_2 = (arq_row_2q_ago[\"ppnenet\"] - arq_row_3q_ago[\"ppnenet\"]) * 0.01\n\n if arq_row_3q_ago[\"revenueusd\"] != 0:\n chppne_sales_q_3 = (arq_row_3q_ago[\"ppnenet\"] - arq_row_4q_ago[\"ppnenet\"]) / arq_row_3q_ago[\"revenueusd\"]\n else:\n chppne_sales_q_3 = (arq_row_3q_ago[\"ppnenet\"] - arq_row_4q_ago[\"ppnenet\"]) * 0.01\n \n sf1_art.at[index_cur, \"cinvest\"] = chppne_sales_cur - ( (chppne_sales_q_1 + chppne_sales_q_2 + chppne_sales_q_3) / 3 )\n \n \n # Number of earnings increases (nincr)\tBarth, Elliott & Finn \t1999, JAR \t\"Number of consecutive quarters (up to eight quarters) with an increase in earnings\n # (ibq) over same quarter in the prior year.\"\tfor (i = 1, i++, i<=8) { if(SF1[netinc]q-i > SF1[netinc]q-i-4): counter++; else: break }\n \n # What if one is missing...\n nr_of_earnings_increases = 0\n for i in range(4):\n cur_row = arq_rows[i]\n prev_row = arq_rows[i+4]\n cur_row_empty = cur_row.dropna().empty\n prev_row_empty = prev_row.dropna().empty\n if (not cur_row_empty) and (not prev_row_empty):\n cur_netinc = cur_row[\"netinc\"]\n prev_netinc = prev_row[\"netinc\"]\n if cur_netinc > prev_netinc:\n nr_of_earnings_increases += 1\n else:\n break\n else:\n break\n\n sf1_art.at[index_cur, \"nincr\"] = nr_of_earnings_increases\n\n # Earnings volatility (roavol)\tFrancis, LaFond, Olsson & Schipper \t2004, TAR \t\n # \"Standard deviaiton for 16 quarters of income before extraordinary items (ibq) divided by average total assets (atq).\"\t\n # Formula: std(SF1[netinc]q) / avg(SF1[assets]q) for 8 - 16 quarters\n # Here I simplify by restricting the calculation to 2 years of data (quarter 0 to -7)\n \"\"\"\n if (not arq_row_cur.empty) and (not arq_row_1q_ago.empty) and (not arq_row_2q_ago.empty) \\\n and (not arq_row_3q_ago.empty) and (not arq_row_4q_ago.empty) and (not arq_row_5q_ago.empty) \\\n and (not arq_row_6q_ago.empty) and (not arq_row_7q_ago.empty):\n \"\"\"\n netinc_assets_list = []\n for row in arq_rows:\n row_empty = row.dropna().empty\n if 
(row[\"assets\"] != 0) and (not row_empty):\n netinc_assets_list.append(row[\"netinc\"] / row[\"assets\"])\n\n std_netinc_assets = np.std(netinc_assets_list)\n \n sf1_art.at[index_cur, \"roavol\"] = std_netinc_assets\n\n # _____________________QUARTER FILING BASED FEATURES END_______________________\n\n\n # _______________________FEATURES USING LAST TWO YEARS OF DATA START_________________________\n\n # ____CALCULATIONS USING ONLY CURRENT SF1_ART ROW____\n # Cash productivity (cashpr), Formula: (SF1[marketcap]t-1 + SF1[debtnc]t-1 - SF1[assets]t-1) / SF1[cashneq]t-1\n if art_row_cur[\"cashneq\"] != 0:\n sf1_art.at[index_cur, \"cashpr\"] = (art_row_cur[\"marketcap\"] + art_row_cur[\"debtnc\"] - art_row_cur[\"assets\"]) / art_row_cur[\"cashneq\"]\n\n # Cash (cash), Formula: SF1[cashnequsd]t-1 / SF1[assetsavg]t-1\n if art_row_cur[\"assetsavg\"] != 0:\n sf1_art.at[index_cur, \"cash\"] = art_row_cur[\"cashnequsd\"] / art_row_cur[\"assetsavg\"]\n\n # Book to market (bm), Formula: SF1[equityusd]t-1 / SF1[marketcap]t-1\n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"bm\"] = art_row_cur[\"equityusd\"] / art_row_cur[\"marketcap\"]\n\n # Cash flow to price ratio (cfp), Formula: SF1[ncfo]t-1 / SF1[marketcap]t-1\n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"cfp\"] = art_row_cur[\"ncfo\"] / art_row_cur[\"marketcap\"]\n\n # Current ratio (currat), Formula: SF1[assetsc]t-1 / SF1[liabilitiesc]t-1\n if art_row_cur[\"liabilitiesc\"] != 0:\n sf1_art.at[index_cur, \"currat\"] = art_row_cur[\"assetsc\"] / art_row_cur[\"liabilitiesc\"]\n\n # Depreciation over PP&E (depr), Formula: SF1[depamor]t-1 / SF1[ppnenet]t-1\n if art_row_cur[\"ppnenet\"] != 0:\n sf1_art.at[index_cur, \"depr\"] = art_row_cur[\"depamor\"] / art_row_cur[\"ppnenet\"]\n\n # Earnings to price (ep), Formula: SF1[netinc]t-1 / SF1[marketcap]t-1 \n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"ep\"] = art_row_cur[\"netinc\"] / art_row_cur[\"marketcap\"]\n\n # Leverage (lev), Formula: SF1[liabilities]t-1 / SF1[marketcap]t-1\n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"lev\"] = art_row_cur[\"liabilities\"] / art_row_cur[\"marketcap\"]\n\n\n # Quick ratio (quick), Formula: (SF1[assetsc]t-1 - SF1[inventory]t-1) / SF1[liabilitiesc]t-1\n if (art_row_cur[\"liabilitiesc\"] != 0):\n sf1_art.at[index_cur, \"quick\"] = (art_row_cur[\"assetsc\"] - art_row_cur[\"inventory\"]) / art_row_cur[\"liabilitiesc\"]\n\n # R&D to market capitalization (rd_mve), Formula: SF1[rnd]t-1 / SF1[marketcap]t-1\n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"rd_mve\"] = art_row_cur[\"rnd\"] / art_row_cur[\"marketcap\"]\n \n # R&D to sales (rd_sale), Formula: SF1[rnd]t-1 / SF1[revenueusd]t-1\n if art_row_cur[\"revenueusd\"] != 0:\n sf1_art.at[index_cur, \"rd_sale\"] = art_row_cur[\"rnd\"] / art_row_cur[\"revenueusd\"]\n\n\n # Return on invested capital (roic), Formula: (SF1[ebit]t-1 - [nopinc]t-1) / (SF1[equity]t-1 + SF1[liabilities]t-1 + SF1[cashneq]t-1 - SF1[investmentsc]t-1)\n if (art_row_cur[\"equity\"] + art_row_cur[\"liabilities\"] + art_row_cur[\"cashneq\"] - art_row_cur[\"investmentsc\"]) != 0:\n # Non-iperating income = SF1[revenueusd]t-1 - art_row_cur[\"cor\"] - SF1[opinc]t-1\n nopic_t_1 = art_row_cur[\"revenueusd\"] - art_row_cur[\"cor\"] - art_row_cur[\"opinc\"]\n sf1_art.at[index_cur, \"roic\"] = (art_row_cur[\"ebit\"] - nopic_t_1) / (art_row_cur[\"equity\"] + art_row_cur[\"liabilities\"] + art_row_cur[\"cashneq\"] - art_row_cur[\"investmentsc\"])\n\n # Sales to cash (salecash), Formula: 
SF1[revenueusd]t-1 / SF1[cashneq]t-1\n if art_row_cur[\"cashneq\"] != 0:\n sf1_art.at[index_cur, \"salecash\"] = art_row_cur[\"revenueusd\"] / art_row_cur[\"cashneq\"]\n\n # Sales to inventory (saleinv), Formula: SF1[revenueusd]t-1 / SF1[inventory]t-1\n if art_row_cur[\"inventory\"] != 0:\n sf1_art.at[index_cur, \"saleinv\"] = art_row_cur[\"revenueusd\"] / art_row_cur[\"inventory\"]\n\n\n # Sales to receivables (salerec), Formula: SF1[revenueusd]t-1 / SF1[receivables]t-1\n if art_row_cur[\"receivables\"] != 0:\n sf1_art.at[index_cur, \"salerec\"] = art_row_cur[\"revenueusd\"] / art_row_cur[\"receivables\"]\n\n # Sales to price (sp)\tSF1[revenueusd]t-1 / SF1[marketcap]t-1\n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"sp\"] = art_row_cur[\"revenueusd\"] / art_row_cur[\"marketcap\"]\n\n # Tax income to book income (tb), Formula: (SF1[taxexp]t-1 / 0.21) / SF1[netinc]t-1\n if art_row_cur[\"netinc\"] != 0:\n sf1_art.at[index_cur, \"tb\"] = art_row_cur[\"taxexp\"] / art_row_cur[\"netinc\"]\n \n # Sin stocks (sin)\tif TICKER[industry].isin([\"Beverages - Brewers\", \"Beverages - Wineries & Distilleries\", \"Electronic Gaming & Multimedia\", \"Gambling\", \"Tobacco\"]): 1; else: 0\n industry_cur = metadata[\"industry\"]\n if industry_cur in [\"Beverages - Brewers\", \"Beverages - Wineries & Distilleries\", \"Gambling\", \"Tobacco\"]: # \"Electronic Gaming & Multimedia\"\n sf1_art.at[index_cur, \"sin\"] = 1\n else:\n sf1_art.at[index_cur, \"sin\"] = 0\n\n # Debt capacity/firm tangibility (tang), Formula: SF1[cashnequsd]t-1 + 0.715*SF1[recievables]t-1 + 0.547*SF1[inventory]t-1 + 0.535*(SF1[ppnenet]t-1 / SF1[assets]t-1)\n if art_row_cur[\"assets\"] != 0:\n sf1_art.at[index_cur, \"tang\"] = (art_row_cur[\"cashnequsd\"] + 0.715*art_row_cur[\"receivables\"] + 0.547*art_row_cur[\"inventory\"] + 0.535*art_row_cur[\"ppnenet\"]) / art_row_cur[\"assets\"]\n\n\n # DLC/SALE (debtc_sale), Formula: SF1[debtc]t-1 / SF1[revenueusd]t-1\n if art_row_cur[\"revenueusd\"] != 0:\n sf1_art.at[index_cur, \"debtc_sale\"] = art_row_cur[\"debtc\"] / art_row_cur[\"revenueusd\"]\n\n # CEQT/MKTCAP (eqt_marketcap), Formula: (SF1[equity]t-1 - SF1[intangibles]t-1) / SF1[marketcap]t-1\n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"eqt_marketcap\"] = ( art_row_cur[\"equityusd\"] - art_row_cur[\"intangibles\"]) / art_row_cur[\"marketcap\"]\n\n # DPACT/PPENT\t(dep_ppne), Formula: SF1[depamor]t-1 / sf1[ppnenet]t-1\n if art_row_cur[\"ppnenet\"] != 0:\n sf1_art.at[index_cur, \"dep_ppne\"] = art_row_cur[\"depamor\"] / art_row_cur[\"ppnenet\"]\n\n\n # CEQL/MKTCAP\t(tangibles_marketcap), Formula: SF1[tangibles]t-1 / SF1[marketcap]t-1\n if art_row_cur[\"marketcap\"] != 0:\n sf1_art.at[index_cur, \"tangibles_marketcap\"] = art_row_cur[\"tangibles\"] / art_row_cur[\"marketcap\"]\n\n\n\n #____Calculate regular year over year SF1 features____\n if not art_row_1y_ago_empty:\n \n # Asset Growth (arg), Formula: (SF1[assets]t-1 / SF1[assets]t-2) - 1\n if art_row_1y_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"agr\"] = (art_row_cur[\"assets\"] / art_row_1y_ago[\"assets\"] ) - 1\n\n # Cash flow to debt (cashdebt), Formula: (SF1[revenueusd]t-1+SF1[depamor]t-1) / ((SF1[liabilities]t-1 - SF1[liabilities]t-2) / 2)\n if (art_row_cur[\"liabilities\"] - art_row_1y_ago[\"liabilities\"]) != 0:\n sf1_art.at[index_cur, \"cashdebt\"] = (art_row_cur[\"revenueusd\"] + art_row_cur[\"depamor\"]) / ((art_row_cur[\"liabilities\"] - art_row_1y_ago[\"liabilities\"]) / 2)\n\n # Change in shared outstanding (chcsho), Formula: 
(SF1[sharesbas]t-1 - SF1[sharesbas]t-2) - 1\n if art_row_1y_ago[\"sharesbas\"] != 0:\n sf1_art.at[index_cur, \"chcsho\"] = (art_row_cur[\"sharesbas\"] / art_row_1y_ago[\"sharesbas\"]) - 1\n\n # Change in inventory (chinv), Formula: (SF1[inventory]t-1 - SF1[inventory]t-2) / SF1[assetsavg]t-1\n if art_row_cur[\"assetsavg\"] != 0:\n sf1_art.at[index_cur, \"chinv\"] = ( art_row_cur[\"inventory\"] - art_row_1y_ago[\"inventory\"] ) / art_row_cur[\"assetsavg\"]\n\n # Growth in common shareholder equity (egr), Formula: (SF1[equityusd]t-1 / SF1[equityusd]t-2) - 1\n if art_row_1y_ago[\"equityusd\"] != 0:\n sf1_art.at[index_cur, \"egr\"] = (art_row_cur[\"equityusd\"] / art_row_1y_ago[\"equityusd\"]) - 1\n\n # Gross profitability (gma), Formula: (SF1[revenueusd]t-1 - SF1[cor]t-1) / SF1[assets]t-2\n if art_row_1y_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"gma\"] = (art_row_cur[\"revenueusd\"] - art_row_cur[\"cor\"]) / art_row_1y_ago[\"assets\"]\n\n\n # Capital expenditures and inventory (invest), Formula: ((SF1[ppnenet]t-1 - SF1[ppnenet]t-2) + (SF1[inventory]t-1 - SF1[inventory]t-2)) / SF1[assets]t-2\n if art_row_1y_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"invest\"] = ((art_row_cur[\"ppnenet\"] - art_row_1y_ago[\"ppnenet\"]) + (art_row_cur[\"inventory\"] - art_row_1y_ago[\"inventory\"])) / art_row_1y_ago[\"assets\"]\n\n # Growth in long-term debt (lgr), Formula: (SF1[liabilities]t-1 / SF1[liabilities]t-2) - 1\n if art_row_1y_ago[\"liabilities\"] != 0:\n sf1_art.at[index_cur, \"lgr\"] = ( art_row_cur[\"liabilities\"] / art_row_1y_ago[\"liabilities\"] ) - 1\n \n # Operating profitability (operprof), Formula: (SF1[revenueusd]t-1 - SF1[cor]t-1 - SF1[sgna]t-1 - SF1[intexp]t-1) / SF1[equityusd]t-2 \n if art_row_1y_ago[\"equityusd\"] != 0:\n sf1_art.at[index_cur, \"operprof\"] = ( art_row_cur[\"revenueusd\"] - art_row_cur[\"cor\"] - art_row_cur[\"sgna\"] - art_row_cur[\"intexp\"] ) / art_row_1y_ago[\"equityusd\"]\n\n\n # Percent change in current ratio (pchcurrat), Formula: (SF1[assetsc]t-1 / SF1[liabilitiesc]t-1) / (SF1[assetsc]t-2 / SF1[liabilitiesc]t-2) - 1\n if (art_row_cur[\"liabilitiesc\"] != 0) and (art_row_1y_ago[\"liabilitiesc\"] != 0) and (art_row_1y_ago[\"assetsc\"] != 0):\n sf1_art.at[index_cur, \"pchcurrat\"] = ( (art_row_cur[\"assetsc\"] / art_row_cur[\"liabilitiesc\"]) / (art_row_1y_ago[\"assetsc\"] / art_row_1y_ago[\"liabilitiesc\"]) ) - 1\n\n # Percent chang ein depreciation (pchdepr), Formula: (SF1[depamor]t-1 / SF1[ppnenet]t-1) / (SF1[depamor]t-2 / SF1[ppnenet]t-2) - 1\n if (art_row_cur[\"ppnenet\"] != 0) and (art_row_1y_ago[\"ppnenet\"] != 0) and (art_row_1y_ago[\"depamor\"] != 0):\n sf1_art.at[index_cur, \"pchdepr\"] = ( (art_row_cur[\"depamor\"]/ art_row_cur[\"ppnenet\"]) / (art_row_1y_ago[\"depamor\"] / art_row_1y_ago[\"ppnenet\"]) ) - 1\n\n\n # Percent change in gross margin - Percent change in sales (pchgm_pchsale), Formula: ( ([gross_margin]t-1 / [gross_margin]t-2) - 1 ) - ( (SF1[revenueusd]t-1 / SF1[revenueusd]t-2) - 1 ) \n # gross_margin = (SF1[revenueusd]t-1 - SF1[cor]t-1) / SF1[revenueusd]t-1\n if (art_row_cur[\"revenueusd\"] != 0) and (art_row_1y_ago[\"revenueusd\"] != 0):\n gross_margin_t_1 = (art_row_cur[\"revenueusd\"] - art_row_cur[\"cor\"]) / art_row_cur[\"revenueusd\"]\n gross_margin_t_2 = (art_row_1y_ago[\"revenueusd\"] - art_row_1y_ago[\"cor\"]) / art_row_1y_ago[\"revenueusd\"]\n if gross_margin_t_2 != 0:\n sf1_art.at[index_cur, \"pchgm_pchsale\"] = ((gross_margin_t_1 / gross_margin_t_2) - 1) - ((art_row_cur[\"revenueusd\"] / art_row_1y_ago[\"revenueusd\"]) - 
1)\n\n\n # Percent change in quick ratio (pchquick), Formula: ([quick_ratio]t-1 / [quick_ratio]t-2) - 1\n # Quick ratio = (SF1[assetsc]t-1 - SF1[inventory]t-1) / SF1[liabilitiesc]t-1\n if (art_row_cur[\"liabilitiesc\"] != 0) and (art_row_1y_ago[\"liabilitiesc\"] != 0):\n quick_ratio_cur = ( art_row_cur[\"assetsc\"] - art_row_cur[\"inventory\"] ) / art_row_cur[\"liabilitiesc\"]\n quick_ratio_1y_ago = ( art_row_1y_ago[\"assetsc\"] - art_row_1y_ago[\"inventory\"] ) / art_row_1y_ago[\"liabilitiesc\"]\n if quick_ratio_1y_ago != 0:\n sf1_art.at[index_cur, \"pchquick\"] = (quick_ratio_cur / quick_ratio_1y_ago) - 1\n\n # Percent change in sales - percent change in inventory (pchsale_pchinvt), Formula: ((SF1[revenueusd]t-1 / SF1[revenueusd]t-2) - 1) - ((SF1[inventory]t-1 / SF1[inventory]t-2) - 1)\n if (art_row_1y_ago[\"revenueusd\"] != 0) and (art_row_1y_ago[\"inventory\"] != 0):\n sf1_art.at[index_cur, \"pchsale_pchinvt\"] = ((art_row_cur[\"revenueusd\"] / art_row_1y_ago[\"revenueusd\"]) - 1) - ((art_row_cur[\"inventory\"] / art_row_1y_ago[\"inventory\"]) - 1)\n\n # % change in sales - % change in A/R (pchsale_pchrect), Formula: ((SF1[revenueusd]t-1 / SF1[revenueusd]t-2) - 1) - ((SF1[receivables]t-1 / SF1[receivables]t-2) - 1)\n if (art_row_1y_ago[\"revenueusd\"] != 0) and (art_row_1y_ago[\"receivables\"] != 0):\n sf1_art.at[index_cur, \"pchsale_pchrect\"] = ((art_row_cur[\"revenueusd\"] / art_row_1y_ago[\"revenueusd\"]) - 1) - ((art_row_cur[\"receivables\"] / art_row_1y_ago[\"receivables\"]) - 1)\n\n # % change in sales - % change in SG&A (pchsale_pchxsga ), Formula: ((SF1[revenueusd]t-1 / SF1[revenueusd]t-2) - 1) - ((SF1[sgna]t-1 / SF1[sgna]t-2) - 1)\n if (art_row_1y_ago[\"revenueusd\"] != 0) and (art_row_1y_ago[\"sgna\"] != 0):\n sf1_art.at[index_cur, \"pchsale_pchxsga\"] = ((art_row_cur[\"revenueusd\"] / art_row_1y_ago[\"revenueusd\"]) - 1) - ((art_row_cur[\"sgna\"] / art_row_1y_ago[\"sgna\"]) - 1)\n \n # % change sales-to-inventory (pchsaleinv), Formula: ((SF1[revenueusd]t-1 / SF1[inventory]t-1) / (SF1[revenueusd]t-2 / SF1[inventory]t-2)) - 1\n if (art_row_cur[\"inventory\"] != 0) and (art_row_1y_ago[\"inventory\"] != 0) and (art_row_1y_ago[\"revenueusd\"] != 0):\n sf1_art.at[index_cur, \"pchsaleinv\"] = ((art_row_cur[\"revenueusd\"] / art_row_cur[\"inventory\"]) / (art_row_1y_ago[\"revenueusd\"] / art_row_1y_ago[\"inventory\"])) - 1\n\n # R&D increase (rd), Formula: if (((SF1[rnd]t-1 / SF1[assets]t-1) - 1) - ((SF1[rnd]t-2 / SF1[assets]t-2) - 1)) > 0.05: 1; else: 0;\n if (art_row_cur[\"assets\"] != 0) and (art_row_1y_ago[\"assets\"] != 0) and (art_row_1y_ago[\"rnd\"] != 0):\n rd_cur = (art_row_cur[\"rnd\"] / art_row_cur[\"assets\"])\n rd_1y_ago = (art_row_1y_ago[\"rnd\"] / art_row_1y_ago[\"assets\"])\n pch_rd = rd_cur/rd_1y_ago - 1\n if pch_rd > 0.05:\n sf1_art.at[index_cur, \"rd\"] = 1\n else:\n sf1_art.at[index_cur, \"rd\"] = 0\n\n # Return on equity (roeq), Formula: SF1[netinc]t-1 / SF1[equity]t-2\n if art_row_1y_ago[\"equityusd\"] != 0:\n sf1_art.at[index_cur, \"roeq\"] = art_row_cur[\"netinc\"] / art_row_1y_ago[\"equityusd\"]\n\n\n # Sales growth (sgr), Formula: (SF1[revenueusd]t-1 / SF1[revenueusd]t-2) - 1\n if art_row_1y_ago[\"revenueusd\"] != 0:\n sf1_art.at[index_cur, \"sgr\"] = (art_row_cur[\"revenueusd\"] / art_row_1y_ago[\"revenueusd\"]) - 1\n\n \n # Growth in capital expenditure (grcapx), Formula: (SF1[capex]t-1 / SF1[capex]t-2) - 1\n if art_row_1y_ago[\"capex\"] != 0:\n sf1_art.at[index_cur, \"grcapx\"] = (art_row_cur[\"capex\"] / art_row_1y_ago[\"capex\"]) - 1\n\n\n # ΔLT/LAGAT 
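# Illustrative sketch (assumed helper, not from the source): the "(x_t-1 / x_t-2) - 1"
# pattern shared by agr, egr, lgr, sgr, pchppne, pchlt and the other percent-change
# features above can be factored into a single function.
import math

def yoy_pct_change(cur: float, prev: float) -> float:
    # Year-over-year growth; NaN when the base value is zero (the loop simply skips those rows).
    return (cur / prev) - 1 if prev != 0 else math.nan

assert math.isclose(yoy_pct_change(110.0, 100.0), 0.10)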
(chtl_lagat), Formula: (SF1[liabilities]t-1 - SF1[liabilities]t-2) / SF1[assets]t-2\n if art_row_1y_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"chtl_lagat\"] = (art_row_cur[\"liabilities\"] - art_row_1y_ago[\"liabilities\"]) / art_row_1y_ago[\"assets\"]\n\n # ΔLT/LAGICAPT (chlt_laginvcap), Formula: (SF1[liabilities]t-1 - SF1[liabilities]t-2) / SF1[invcap]t-2\n if art_row_1y_ago[\"invcap\"] != 0:\n sf1_art.at[index_cur, \"chlt_laginvcap\"] = (art_row_cur[\"liabilities\"] - art_row_1y_ago[\"liabilities\"]) / art_row_1y_ago[\"invcap\"]\n\n # ΔLCT/LAGAT (chlct_lagat), Formula: (SF1[liabilitiesc]t-1 - SF1[liabilitiesc]t-2) / SF1[assets]t-2\n if art_row_1y_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"chlct_lagat\"] = (art_row_cur[\"liabilitiesc\"] - art_row_1y_ago[\"liabilitiesc\"]) / art_row_1y_ago[\"assets\"]\n\n\n # ΔXINT/LAGAT\t(chint_lagat), Formula: (SF1[intexp]t-1 - SF1[intexp]t-2)/SF1[assets]t-2\n if art_row_1y_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"chint_lagat\"] = (art_row_cur[\"intexp\"] - art_row_1y_ago[\"intexp\"]) / art_row_1y_ago[\"assets\"]\n\n\n # ΔINVT/LAGSALE (chinvt_lagsale), Formula: (SF1[inventory]t-1 - SF1[inventory]t-2) / SF1[revenueusd]t-2\n if art_row_1y_ago[\"revenueusd\"] != 0:\n sf1_art.at[index_cur, \"chinvt_lagsale\"] = (art_row_cur[\"inventory\"] - art_row_1y_ago[\"inventory\"]) / art_row_1y_ago[\"revenueusd\"]\n\n # ΔXINT/LAGXSGA (chint_lagsgna), Formula: (SF1[intexp]t-1 - SF1[intexp]t-2) / SF1[sgna]t-2\n if art_row_1y_ago[\"sgna\"] != 0:\n sf1_art.at[index_cur, \"chint_lagsgna\"] = (art_row_cur[\"intexp\"] - art_row_1y_ago[\"intexp\"]) / art_row_1y_ago[\"sgna\"]\n\n\n # ΔLCT/LAGICAPT (chltc_laginvcap), Formula: (SF1[liabilitiesc]t-1 - SF1[liabilitiesc]t-2) / SF1[invcap]t-2\n if art_row_1y_ago[\"invcap\"] != 0:\n sf1_art.at[index_cur, \"chltc_laginvcap\"] = (art_row_cur[\"liabilitiesc\"] - art_row_1y_ago[\"liabilitiesc\"]) / art_row_1y_ago[\"invcap\"]\n\n # ΔXINT/LAGLT\t(chint_laglt), Formula: (SF1[intexp]t-1 - SF1[intexp]t-2) / SF1[liabilities]t-2\n if art_row_1y_ago[\"liabilities\"] != 0:\n sf1_art.at[index_cur, \"chint_laglt\"] = (art_row_cur[\"intexp\"] - art_row_1y_ago[\"intexp\"]) / art_row_1y_ago[\"liabilities\"]\n\n # ΔDLTT/LAGAT (chdebtnc_lagat), Formula: (SF1[debtnc]t-1 - SF1[debtnc]t-2) / SF1[assets]t-2\n if art_row_1y_ago[\"assets\"] != 0:\n sf1_art.at[index_cur, \"chdebtnc_lagat\"] = (art_row_cur[\"debtnc\"] - art_row_1y_ago[\"debtnc\"]) / art_row_1y_ago[\"assets\"]\n\n\n # ΔINVT/LAGCOGS (chinvt_lagcor), Formula:\t(SF1[inventory]t-1 - SF1[inventory]t-2) / SF1[cor]t-2\n if art_row_1y_ago[\"cor\"] != 0:\n sf1_art.at[index_cur, \"chinvt_lagcor\"] = (art_row_cur[\"inventory\"] - art_row_1y_ago[\"inventory\"]) / art_row_1y_ago[\"cor\"]\n\n # ΔPPENT/LAGLT (chppne_laglt), Formula: (SF1[ppnenet]t-1 - SF1[ppnenet]t-2) / SF1[liabilities]t-2\n if art_row_1y_ago[\"liabilities\"] != 0:\n sf1_art.at[index_cur, \"chppne_laglt\"] = (art_row_cur[\"ppnenet\"] - art_row_1y_ago[\"ppnenet\"]) / art_row_1y_ago[\"liabilities\"]\n\n # ΔAP/LAGACT (chpay_lagact), Formula: (SF1[payables]t-1 - SF1[payables]t-2) / SF1[assetsc]t-2\n if art_row_1y_ago[\"assetsc\"] != 0:\n sf1_art.at[index_cur, \"chpay_lagact\"] = (art_row_cur[\"payables\"] - art_row_1y_ago[\"payables\"]) / art_row_1y_ago[\"assetsc\"]\n \n \n # ΔXINT/LAGICAPT (chint_laginvcap), Formula: (SF1[intexp]t-1 - SF1[intexp]t-2) / SF1[invcap]t-2\n if art_row_1y_ago[\"invcap\"] != 0:\n sf1_art.at[index_cur, \"chint_laginvcap\"] = (art_row_cur[\"intexp\"] - art_row_1y_ago[\"intexp\"]) / 
art_row_1y_ago[\"invcap\"]\n\n # ΔINVT/LAGACT (chinvt_lagact), Formula:\t(SF1[inventory]t-1 - SF1[inventory]t-2) / SF1[assetsc]t-2\n if art_row_1y_ago[\"assetsc\"] != 0:\n sf1_art.at[index_cur, \"chinvt_lagact\"] = (art_row_cur[\"inventory\"] - art_row_1y_ago[\"inventory\"]) / art_row_1y_ago[\"assetsc\"]\n\n # %Δ in PPENT\t(pchppne), Formula: (SF1[ppnenet]t-1 / SF1[ppnenet]t-2) - 1\n if art_row_1y_ago[\"ppnenet\"] != 0:\n sf1_art.at[index_cur, \"pchppne\"] = (art_row_cur[\"ppnenet\"] / art_row_1y_ago[\"ppnenet\"]) - 1\n\n\n # %Δ in LT (pchlt), Formula: (SF1[liabilities]t-1 / SF1[liabilities]t-2) - 1\n if art_row_1y_ago[\"liabilities\"] != 0:\n sf1_art.at[index_cur, \"pchlt\"] = (art_row_cur[\"liabilities\"] / art_row_1y_ago[\"liabilities\"]) - 1\n\n # %Δ in XINT (pchint), Formula: (SF1[intexp]t-1 - SF1[intexp]t-2) - 1\n if art_row_1y_ago[\"intexp\"] != 0:\n sf1_art.at[index_cur, \"pchint\"] = (art_row_cur[\"intexp\"] / art_row_1y_ago[\"intexp\"]) - 1\n\n # DLTIS/PPENT\t(chdebtnc_ppne), Formula: (SF1[debtnc]t-1 - SF1[debtnc]t-2) / SF1[ppnenet]t-1\n if art_row_cur[\"ppnenet\"] != 0:\n sf1_art.at[index_cur, \"chdebtnc_ppne\"] = (art_row_cur[\"debtnc\"] - art_row_1y_ago[\"debtnc\"]) / art_row_cur[\"ppnenet\"]\n \n # NP/SALE\t(chdebtc_sale), Formula: (SF1[debtc]t-1 - SF1[debtc]t-2) / SF1[revenueusd]t-1\n if art_row_cur[\"revenueusd\"] != 0:\n sf1_art.at[index_cur, \"chdebtc_sale\"] = (art_row_cur[\"debtc\"] - art_row_1y_ago[\"debtc\"]) / art_row_cur[\"revenueusd\"]\n\n\n # Financial statements score (ps): Piotroski \t2000, JAR \tSum of 9 indicator variables to form fundamental health score.\tSee link in notes\n ps = get_ps(art_row_cur, art_row_1y_ago)\n sf1_art.at[index_cur, \"ps\"] = ps\n \n # _______________________ FEATURES USING LAST TWO YEARS OF DATA END_________________________\n\n \n # _________________________________OTHER_______________________________________\n # Age (age): Formula: SF1[datekey]t-1 - TICKERS[firstpricedate]\n sf1_art.at[index_cur, \"age\"] = round((art_row_cur[\"datekey\"] - metadata[\"firstpricedate\"]).days / 365)\n\n # Initial public offering (ipo), Formula: if (SF1[datekey]t-1 - TICKERS[firstpricedate]) <= 1 year: 1; else: 0\n days_since_ipo = (art_row_cur[\"datekey\"] - metadata[\"firstpricedate\"]).days\n if days_since_ipo <= 365:\n sf1_art.at[index_cur, \"ipo\"] = 1\n else:\n sf1_art.at[index_cur, \"ipo\"] = 0\n\n\n #_________________IN PREPARATION FOR INDUSTRY ADJUSTED VALUES___________________ ?????\n\n # Profit margin (profitmargin), Formula: SF1[netinc]t-1 / SF1[revenueusd]t-1\n if art_row_cur[\"revenueusd\"] != 0:\n sf1_art.at[index_cur, \"profitmargin\"] = art_row_cur[\"netinc\"] / art_row_cur[\"revenueusd\"]\n \n if not art_row_1y_ago_empty:\n # Change in profit margin (chprofitmargin), Formula: SF1[netinc]t-1 - SF1[netinc]t-2) / SF1[revenueusd]t-1\n if (art_row_cur[\"revenueusd\"] != 0) and (art_row_1y_ago[\"revenueusd\"] != 0):\n sf1_art.at[index_cur, \"chprofitmargin\"] = (art_row_cur[\"netinc\"] - art_row_cur[\"revenueusd\"]) - (art_row_1y_ago[\"netinc\"] / art_row_1y_ago[\"revenueusd\"])\n\n # change in sales (revenueusd) used in industry related feature calculations\n sf1_art.at[index_cur, \"change_sales\"] = art_row_cur[\"revenueusd\"] - art_row_1y_ago[\"revenueusd\"]\n\n\n # Drop forward filled rows by selecting based on index snapshot: sf1_art_index_snapshot.\n sf1_art = sf1_art.set_index(\"calendardate\")\n \n # sf1_art = sf1_art.loc[sf1_art_index_snapshot]\n\n sf1_art[\"industry\"] = metadata[\"industry\"]\n\n \"\"\"\n At this point as many 
features as possible have beeen calculated. Due to some missing values in the 10K and 10Q \n reports, some features for some companies will be have a NAN value.\n Before having an ML ready datasets, rows with missing values must be removed or amended. This is \n however not the job of this function. Selecing features and final preperation of the dataset will be conducted\n in the final_dataset_generation.py script.\n \"\"\" \n return sf1_art # This are forward filled, sf1_art has calendardate index, sf1_arq is not needed any more", "def generate_features(df):\n return np.array([np.array(xi) for xi in pd.to_datetime(df).apply(lambda x: [x.year, x.month, x.day, x.hour, x.minute, x.second, x.weekday()])])", "def generate_testing_matrix(full_df, feat_days):\n pred_ticker = full_df.ticker.unique()[0]\n feature_tickers = [i for i in full_df.ticker.unique() if i != pred_ticker]\n dfml = full_df[full_df.ticker == pred_ticker].drop('ticker', axis=1)\n dfml.rename({'percent_change_feat': f'{pred_ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n for ticker in feature_tickers:\n help_df = full_df[full_df.ticker == ticker][['past_date', 'current_date', 'prediction_date', 'percent_change_feat']]\n help_df.rename({'percent_change_feat': f'{ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n dfml = pd.merge(dfml, help_df,\n left_on=['past_date', 'current_date', 'prediction_date'],\n right_on=['past_date', 'current_date', 'prediction_date'],\n how='left')\n return dfml.drop('percent_change_pred', axis=1)", "def ingest_single_fec(contrib_file : str,\n contrib_header_file : str,\n min_date = \"2020-04-01\") -> pd.DataFrame:\n print(contrib_file)\n fec_df = pd.read_csv(contrib_file,\n low_memory = False,\n delimiter= '|',\n header= None\n # error_bad_lines= False\n )\n col_names = pd.read_csv(contrib_header_file)\n col_names = list(col_names.columns)\n \n fec_df.columns = [x.lower() for x in col_names]\n fec_df['transaction_dt'] = pd.to_datetime(fec_df['transaction_dt'], format=\"%m%d%Y\")\n fec_df.drop(columns = [\"image_num\", \"sub_id\", \"memo_cd\", \"file_num\", \"tran_id\"], inplace = True)\n\n fec_df = fec_df[fec_df['transaction_dt'] > pd.to_datetime(min_date)]\n return fec_df", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def produce_init(filename):\n training_dataset = pd.read_csv(f'../Modified Data/{filename}')\n test_dataset = pd.read_csv(f'../Raw Data/test.csv')\n features = list(training_dataset.columns)\n features.remove('SalePrice')\n predict_feature = ['SalePrice']\n\n # Produce Test Data\n test_X = test_dataset.loc[:, features]\n ids_test = test_dataset.loc[:, 'Id']\n\n for column in features:\n if str(training_dataset.loc[:, column].dtype) == 'object':\n # Initialize encoder\n labelencoder = LabelEncoder()\n # Encode Train Data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna('Missing')\n training_dataset.loc[:, column] = pd.Series(labelencoder.fit_transform(training_dataset.loc[:, column]))\n # Encode Test 
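# Illustrative usage check (toy data, added for clarity) for the date-decomposition
# helpers above; each row expands to [year, month, day, hour, minute, second, weekday].
import pandas as pd

dates = pd.Series(["2021-03-01 12:30:00", "2021-03-02 08:00:00"])
parts = pd.to_datetime(dates).apply(
    lambda x: [x.year, x.month, x.day, x.hour, x.minute, x.second, x.weekday()]
)
print(parts.tolist())  # [[2021, 3, 1, 12, 30, 0, 0], [2021, 3, 2, 8, 0, 0, 1]]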
Data\n test_X.loc[:, column] = test_X.loc[:, column].fillna('Missing')\n test_X.loc[:, column] = pd.Series(labelencoder.fit_transform(test_X.loc[:, column]))\n else:\n # Fix missing values for train data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna(int(training_dataset.loc[:, column].mean()))\n # Fix missing values for test data\n test_X.loc[:, column] = test_X.loc[:, column].fillna(int(test_X.loc[:, column].mean()))\n\n return training_dataset, test_X, ids_test", "def processDataFrame(vcfDF, FilterStep=0, outputFile=''): \n if FilterStep==0:\n\n #drop column 'ID'\n vcfDF.drop(['ID'],axis=1, inplace=True) \n\n #remove non-variable sites\n vcfDF[vcfDF.ALT != '.']\n\n #remove multi-alleleic sites\n vcfDF[vcfDF.ALT.str.len() < 2]\n \n #add new column 'ID' which is concatenation of CHROM and POS\n #moving last column to correct position in dataframe\n vcfDF.insert(loc=2, column=\"ID\", value= vcfDF.CHROM.astype(str).str.cat(vcfDF.POS.astype(str),sep='_').replace('\\n',''))\n if FilterStep=='final':\n for column in vcfDF.iloc[:,9:]:\n vcfDF[column]=vcfDF[column].str.split(':').str.get(0).map({'0/0': '0', '1/0': '1', '0/1': '1',\n '1/1': '2', './.': '.'})\n columns2drop=[0,1,3,4,5,6,7,8]\n vcfDF.drop(vcfDF.columns[columns2drop],axis=1, inplace=True)\n vcfDF.set_index('ID', inplace=True)\n vcfDF.to_csv(outputFile)\n return None\n individualDict={}\n #individualDict is a dictionary of key being individual column ids from vcfDF and value being a dictionary of key=genotypes and value=frequency\n #explain why continuous\n for column in vcfDF.iloc[:,9:]:\n individualDict[column]= vcfDF[column].str.split(':').str.get(0).map({'0/0': '0', '1/0': '1', '0/1': '1',\n '1/1': '2', './.': '.'}).value_counts(normalize=True).to_dict()\n\n #markerDict is a dictionary of key being marker \"ID\" column from vdfDF and value being a dictionary of key=genotypes and value=frequency\n markerDict={}\n for i, row in vcfDF.iterrows(): \n markerDict[i]= row[9:].str.split(':').str.get(0).map({'0/0': '0', '1/0': '1', '0/1': '1',\n '1/1': '2', './.': '.'}).value_counts(normalize=True).to_dict()\n\n return individualDict, markerDict", "def create_features_new_data(\r\n df_prev:pd.DataFrame,\r\n df_next:pd.DataFrame,\r\n path_data_dir:str,\r\n debug:bool=False\r\n ) -> pd.DataFrame:\r\n # Check input.\r\n # Copy dataframe to avoid in place modification.\r\n (df_prev, df_next) = (df_prev.copy(), df_next.copy())\r\n # Check file path.\r\n if not os.path.exists(path_data_dir):\r\n raise IOError(textwrap.dedent(\"\"\"\\\r\n Path does not exist:\r\n path_data_dir = {path}\"\"\".format(\r\n path=path_data_dir)))\r\n ########################################\r\n # Returned_asm\r\n # Interpretation of assumptions:\r\n # If DSEligible=0, then the vehicle is not eligible for a guarantee.\r\n # * And Returned=-1 (null) since we don't know whether or not it would have been returned,\r\n # but given that it wasn't eligible, it may have been likely to have Returned=1.\r\n # If DSEligible=1, then the vehicle is eligible for a guarantee.\r\n # * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.\r\n # * And if Returned=1 then the guarantee was purchased and the vehicle was returned.\r\n # * And if Returned=-1 (null) then the guarantee was not purchased.\r\n # We don't know whether or not it would have been returned,\r\n # but given that the dealer did not purchase, it may have been likely to have Returned=0.\r\n # Assume:\r\n # If Returned=-1 and DSEligible=0, then 
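# Hedged note (alternative sketch, not the source's approach): fitting a separate
# LabelEncoder on train and test, as produce_init does above, can assign different
# integer codes to the same category. A common alternative is to fit one encoder on
# the union of both columns and reuse it; data below is illustrative.
import pandas as pd
from sklearn.preprocessing import LabelEncoder

train_col = pd.Series(["A", "B", "Missing"])
test_col = pd.Series(["B", "C", "Missing"])
encoder = LabelEncoder().fit(pd.concat([train_col, test_col]))
train_codes = encoder.transform(train_col)
test_codes = encoder.transform(test_col)  # "B" and "Missing" now share codes across both sets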
Returned_asm=1\r\n # If Returned=-1 and DSEligible=1, then Returned_asm=0\r\n # For new data:\r\n # If DSEligible=0, then Returned=-1, then Returned_asm=1\r\n # If DSEligible=1, then Returned_asm is the average of the buyer's Returned_asm, or if new buyer, then 0.\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n Returned_asm: Assume returned status to fill nulls as new feature.\r\n If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))\r\n If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))\"\"\"))\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n For new data:\r\n If DSEligible=0, then Returned=-1, then Returned_asm=1\r\n If DSEligible=1, then Returned_asm is the average of the buyer's Returned_asm, or if new buyer, then 0.\"\"\"))\r\n df_next.loc[df_next['DSEligible']==0, 'Returned_asm'] = 1\r\n prev_nums = df_prev.loc[df_prev['DSEligible']==1, ['BuyerID', 'Returned_asm']].groupby(by='BuyerID').mean()\r\n df_next.loc[df_next['DSEligible']==1, 'Returned_asm'] = \\\r\n df_next.loc[df_next['DSEligible']==1, 'BuyerID'].map(prev_nums['Returned_asm']).fillna(value=0)\r\n ########################################\r\n # SellingLocation_lat, SellingLocation_lon\r\n # Cell takes ~1 min to execute if shelf does not exist.\r\n # Google API limit: https://developers.google.com/maps/documentation/geocoding/usage-limits\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n SellingLocation: Geocode.\r\n Scraping webpages for addresses and looking up latitude, longitude coordinates.\"\"\"))\r\n path_shelf = os.path.join(path_data_dir, 'sellloc_geoloc.shelf')\r\n seconds_per_query = 1.0/50.0 # Google API limit\r\n sellloc_geoloc = dict()\r\n with shelve.open(filename=path_shelf, flag='c') as shelf:\r\n for loc in df_next['SellingLocation'].unique():\r\n if loc in shelf:\r\n raw = shelf[loc]\r\n if raw is None:\r\n location = raw\r\n else:\r\n address = raw['formatted_address']\r\n latitude = raw['geometry']['location']['lat']\r\n longitude = raw['geometry']['location']['lng']\r\n location = geopy.location.Location(\r\n address=address, point=(latitude, longitude), raw=raw)\r\n else: \r\n url = r'https://www.manheim.com/locations/{loc}/events'.format(loc=loc)\r\n page = requests.get(url)\r\n tree = bs4.BeautifulSoup(page.text, 'lxml')\r\n address = tree.find(name='p', class_='loc_address').get_text().strip()\r\n try:\r\n components = {\r\n 'country': 'United States',\r\n 'postal_code': address.split()[-1]}\r\n location = geopy.geocoders.GoogleV3().geocode(\r\n query=address,\r\n exactly_one=True,\r\n components=components)\r\n except:\r\n logger.warning(textwrap.dedent(\"\"\"\\\r\n Exception raised. 
Setting {loc} geo location to `None`\r\n sys.exc_info() =\r\n {exc}\"\"\".format(loc=loc, exc=sys.exc_info())))\r\n location = None\r\n finally:\r\n time.sleep(seconds_per_query)\r\n if location is None:\r\n shelf[loc] = location\r\n else:\r\n shelf[loc] = location.raw\r\n sellloc_geoloc[loc] = location\r\n logger.info(\"Mapping SellingLocation to latitude, longitude coordinates.\")\r\n sellloc_lat = {\r\n sellloc: (geoloc.latitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n sellloc_lon = {\r\n sellloc: (geoloc.longitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n df_next['SellingLocation_lat'] = df_next['SellingLocation'].map(sellloc_lat)\r\n df_next['SellingLocation_lon'] = df_next['SellingLocation'].map(sellloc_lon)\r\n # # TODO: experiment with one-hot encoding (problems is that it doesn't scale)\r\n # df_next = pd.merge(\r\n # left=df_next,\r\n # right=pd.get_dummies(df_next['SellingLocation'], prefix='SellingLocation'),\r\n # how='inner',\r\n # left_index=True,\r\n # right_index=True)\r\n ########################################\r\n # JDPowersCat: One-hot encoding\r\n # TODO: Estimate sizes from Wikipedia, e.g. https://en.wikipedia.org/wiki/Vehicle_size_class.\r\n logger.info(\"JDPowersCat: One-hot encoding.\")\r\n # Cast to string, replacing 'nan' with 'UNKNOWN'.\r\n df_next['JDPowersCat'] = (df_next['JDPowersCat'].astype(str)).str.replace(' ', '').apply(\r\n lambda cat: 'UNKNOWN' if cat == 'nan' else cat)\r\n # One-hot encoding.\r\n df_next = pd.merge(\r\n left=df_next,\r\n right=pd.get_dummies(df_next['JDPowersCat'], prefix='JDPowersCat'),\r\n left_index=True,\r\n right_index=True)\r\n ########################################\r\n # LIGHT_N0G1Y2R3\r\n # Rank lights by warning level.\r\n logger.info(\"LIGHT_N0G1Y2R3: Rank lights by warning level (null=0, green=1, yellow=2, red=3).\")\r\n df_next['LIGHT_N0G1Y2R3'] = df_next['LIGHTG']*1 + df_next['LIGHTY']*2 + df_next['LIGHTR']*3\r\n ########################################\r\n # SaleDate_*: Extract timeseries features.\r\n logger.info(\"SaleDate: Extract timeseries features.\")\r\n df_next['SaleDate_dow'] = df_next['SaleDate'].dt.dayofweek\r\n df_next['SaleDate_doy'] = df_next['SaleDate'].dt.dayofyear\r\n df_next['SaleDate_day'] = df_next['SaleDate'].dt.day\r\n df_next['SaleDate_decyear'] = df_next['SaleDate'].dt.year + (df_next['SaleDate'].dt.dayofyear-1)/366\r\n ########################################\r\n # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n # Make cumulative informative priors (*_num*, *_frac*) for string features.\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n Make cumulative informative priors (*_num*, *_frac*) for string features.\"\"\"))\r\n # Cumulative features require sorting by time.\r\n # Note: df_prev and df_next have overlapping indexes after `reset_index`.\r\n df_next.sort_values(by=['SaleDate'], inplace=True)\r\n df_next.reset_index(drop=True, inplace=True)\r\n if debug:\r\n assert (df_prev['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()\r\n assert (df_next['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()\r\n for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:\r\n logger.info(\"Processing {col}\".format(col=col))\r\n prev_nums = df_prev.groupby(by=col).last()\r\n ####################\r\n # Cumulative count of transactions and DSEligible:\r\n # Cumulative count of 
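# Hedged sketch (generalization of the pattern above, names are assumptions): the
# shelve-backed caching used for geocoder responses works for any expensive lookup;
# only the standard-library shelve module is assumed.
import shelve

def cached_lookup(key, compute, path="lookup_cache.shelf"):
    # Return the cached value for `key`, computing and storing it on a cache miss.
    with shelve.open(path) as shelf:
        if key not in shelf:
            shelf[key] = compute(key)
        return shelf[key]

value = cached_lookup("90210", lambda zip_code: {"zip": zip_code})  # computed once, then read from disk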
transactions (yes including current).\r\n df_next[col+'_numTransactions'] = df_next[[col]].groupby(by=col).cumcount().astype(int) + 1\r\n df_next[col+'_numTransactions'].fillna(value=1, inplace=True)\r\n df_next[col+'_numTransactions'] += df_next[col].map(prev_nums[col+'_numTransactions']).fillna(value=0)\r\n # Cumulative count of transations that were DealShield-eligible (yes including current).\r\n df_next[col+'_numDSEligible1'] = df_next[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)\r\n df_next[col+'_numDSEligible1'].fillna(value=0, inplace=True)\r\n df_next[col+'_numDSEligible1'] += df_next[col].map(prev_nums[col+'_numDSEligible1']).fillna(value=0)\r\n # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).\r\n df_next[col+'_fracDSEligible1DivTransactions'] = df_next[col+'_numDSEligible1']/df_next[col+'_numTransactions']\r\n df_next[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)\r\n ####################\r\n # DSEligible and Returned\r\n # Note:\r\n # * DealShield-purchased ==> Returned != -1 (not null)\r\n # * below requires\r\n # DSEligible == 0 ==> Returned == -1 (is null)\r\n # Returned != -1 (not null) ==> DSEligible == 1\r\n if debug:\r\n assert (df_prev.loc[df_prev['DSEligible']==0, 'Returned'] == -1).all()\r\n assert (df_prev.loc[df_prev['Returned']!=-1, 'DSEligible'] == 1).all()\r\n # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.\r\n df_next[col+'_numReturnedNotNull'] = df_next[col].map(prev_nums[col+'_numReturnedNotNull']).fillna(value=0)\r\n # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).\r\n df_next[col+'_fracReturnedNotNullDivDSEligible1'] = df_next[col+'_numReturnedNotNull']/df_next[col+'_numDSEligible1']\r\n df_next[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.\r\n df_next[col+'_numReturned1'] = df_next[col].map(prev_nums[col+'_numReturned1']).fillna(value=0)\r\n # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).\r\n # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.\r\n df_next[col+'_fracReturned1DivReturnedNotNull'] = df_next[col+'_numReturned1']/df_next[col+'_numReturnedNotNull']\r\n df_next[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)\r\n # Check that weighted average of return rate equals overall return rate.\r\n # Note: Requires groups sorted by date, ascending.\r\n if debug:\r\n assert np.isclose(\r\n (df_prev[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df_prev[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],\r\n sum(df_prev['Returned']==1)/sum(df_prev['Returned'] != -1),\r\n equal_nan=True)\r\n ####################\r\n # DSEligible and Returned_asm\r\n # NOTE:\r\n # * Below requires\r\n # DSEligible == 0 ==> Returned_asm == 1\r\n # Returned_asm == 0 ==> DSEligible == 1\r\n if debug:\r\n assert (df_prev.loc[df_prev['DSEligible']==0, 'Returned_asm'] == 1).all()\r\n assert (df_prev.loc[df_prev['Returned_asm']==0, 'DSEligible'] == 1).all()\r\n assert (df_next.loc[df_next['DSEligible']==0, 'Returned_asm'] == 1).all()\r\n assert (df_next.loc[df_next['Returned_asm']==0, 'DSEligible'] == 1).all()\r\n # Cumulative number of 
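# Toy reproduction (added for clarity) of the cumulative per-buyer counters built above:
# cumcount for the transaction count, cumsum for the eligible count, then their ratio.
import pandas as pd

toy = pd.DataFrame({"BuyerID": ["b1", "b1", "b2", "b1"], "DSEligible": [1, 0, 1, 1]})
toy["numTransactions"] = toy.groupby("BuyerID").cumcount() + 1
toy["numDSEligible1"] = toy.groupby("BuyerID")["DSEligible"].cumsum()
toy["fracDSEligible1DivTransactions"] = toy["numDSEligible1"] / toy["numTransactions"]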
transactions that were assumed to be returned.\r\n # Note: For new data, 'Returned_asm' may be a float.\r\n df_tmp = df_next[[col, 'Returned_asm']].copy()\r\n df_tmp['Returnedasm1'] = df_tmp['Returned_asm']\r\n df_next[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum()\r\n df_next[col+'_numReturnedasm1'].fillna(value=0, inplace=True)\r\n df_next[col+'_numReturnedasm1'] += df_next[col].map(prev_nums[col+'_numReturnedasm1']).fillna(value=0)\r\n del df_tmp\r\n # Cumulative ratio of transactions that were assumed to be returned (0=mode).\r\n df_next[col+'_fracReturnedasm1DivTransactions'] = df_next[col+'_numReturnedasm1']/df_next[col+'_numTransactions']\r\n df_next[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)\r\n # Check that weighted average of assumed return rate equals overall assumed return rate.\r\n if debug:\r\n assert np.isclose(\r\n (df_prev[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df_prev[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],\r\n sum(df_prev['Returned_asm']==1)/sum(df_prev['Returned_asm'] != -1),\r\n equal_nan=True)\r\n # Note:\r\n # * Number of transactions that were DealShield-eligible and assumed to be returned ==\r\n # number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned\r\n # (numReturned1)\r\n return df_next", "def process_data(self):\n logging.debug('process_data called')\n\n pd_time_series = pd.read_csv(f'{self.out_dir}docs/downloaded/'\n f'{self.filename}')\n\n pd_time_series = pd_time_series.drop('Lat', axis=1)\n pd_time_series = pd_time_series.drop('Long', axis=1)\n no_of_dates = len(pd_time_series.columns) - 2\n dateindex = pd.date_range(start='1-22-2020',\n periods=no_of_dates,\n freq='D').strftime('%d-%m')\n\n new_cols = ['Province/State', 'Country/Region']\n for index in dateindex:\n new_cols.append(index)\n pd_time_series.columns = new_cols\n\n pd_time_series = pd_time_series.drop('Province/State', axis=1)\n pd_edit_series = pd_time_series.set_index('Country/Region')\n\n pd_edit_series = pd_edit_series.T\n\n return pd_edit_series", "def add_technical_indicator(df, tic):\n\n df['date'] = df.index\n df = df.reset_index(drop=True)\n cols = ['date'] + [col for col in df if col != 'date']\n df = df[cols]\n\n # drop duplicates\n df = df.drop_duplicates()\n\n # convert Date column to datetime\n df['date'] = pd.to_datetime(df['date'], format = '%Y-%m-%d')\n # df['date'] = pd.to_datetime(df['date'])\n\n # sort by datetime\n df.sort_values(by = 'date', inplace = True, ascending = True)\n\n stock = Sdf.retype(df.copy())\n\n temp_macd = stock['macd']\n temp_macds = stock['macds']\n temp_macdh = stock['macdh']\n macd = pd.DataFrame(temp_macd)\n macds = pd.DataFrame(temp_macds)\n macdh = pd.DataFrame(temp_macdh)\n\n temp_rsi = stock['rsi_6']\n rsi = pd.DataFrame(temp_rsi)\n\n temp_cci = stock['cci']\n cci = pd.DataFrame(temp_cci)\n\n temp_adx = stock['adx']\n adx = pd.DataFrame(temp_adx)\n\n temp_pdi = stock['pdi']\n temp_mdi = stock['mdi']\n pdi = pd.DataFrame(temp_pdi)\n mdi = pd.DataFrame(temp_mdi)\n\n df.insert(len(df.columns), \"daydate\",0)\n df.insert(len(df.columns), \"tic\",tic)\n\n df.insert(len(df.columns), \"macd\",0)\n df.insert(len(df.columns), \"macd_signal_line\",0)\n df.insert(len(df.columns), \"macd_hist\",0)\n\n df.insert(len(df.columns), \"rsi\",0)\n\n df.insert(len(df.columns), \"cci\",0)\n\n df.insert(len(df.columns), 
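# Toy reproduction (added for clarity) of the "%d-%m" column labels that process_data
# builds above, reduced to three columns.
import pandas as pd

dateindex = pd.date_range(start="1-22-2020", periods=3, freq="D").strftime("%d-%m")
print(list(dateindex))  # ['22-01', '23-01', '24-01']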
\"adx\",0)\n\n df.insert(len(df.columns), \"+DI\",0)\n df.insert(len(df.columns), \"-DI\",0)\n\n len_df = len(df)\n\n # CD Comment change the for by optimized solution\n for i in range(0,len_df,1):\n\n df.loc[i,\"daydate\"] = str(df.iloc[i][\"date\"])[0:10]\n\n df.loc[i,\"macd\"] = macd.iloc[i][0]\n df.loc[i,\"macd_signal_line\"] = macds.iloc[i][0]\n df.loc[i,\"macd_hist\"] = macdh.iloc[i][0]\n\n df.loc[i,\"rsi\"] = rsi.iloc[i][0]\n\n df.loc[i,\"cci\"] = cci.iloc[i][0]\n\n df.loc[i,\"adx\"] = adx.iloc[i][0]\n\n df.loc[i,\"+DI\"] = pdi.iloc[i][0]\n df.loc[i,\"-DI\"] = mdi.iloc[i][0]\n\n df['daydate'] = pd.to_datetime(df['daydate'], format = '%Y-%m-%d')\n\n\n cols = ['daydate'] + ['date'] + ['tic'] + [col for col in df if ((col != 'date') and (col != 'daydate') and (col != 'tic'))]\n df = df[cols]\n\n #df = df.replace([np.inf, -np.inf], np.nan).dropna(axis=1)\n df = df.replace([np.inf, -np.inf], np.nan).dropna()\n\n df = df.reset_index(drop=True)\n\n return df", "def featurize_kof(*args):\n\n df = args[0]\n\n # initial cleaning & changes to df\n df = df.drop(columns='Unnamed: 0')\n df = df.rename(columns = {'Code_Style':'code_style', 'Name':'name', 'Brand':'brand', 'Date':'date',\n 'Retail_Price': 'retail_price', 'Colorway':'colorway', 'Story':'story',\n 'KOF_Wants':'kof_wants', 'Avg_Resale':'avg_resale_stockx'})\n df['retail_price'] = df['retail_price'].astype(int)\n df['avg_resale_stockx'] = df['avg_resale_stockx'].str.replace('[^\\w\\s]','')\n df['avg_resale_stockx'] = df['avg_resale_stockx'].astype(int)\n\n # feature 1: merge silhouettes\n all_silhouettes = eval(args[1])\n # strip out brand name from silhouette name\n temp = []\n for i in range(len(all_silhouettes)):\n silhouette = all_silhouettes[i]\n silhouette = silhouette.replace(\"Nike \", \"\")\n silhouette = silhouette.replace(\"Adidas \", \"\")\n silhouette = silhouette.replace(\"adidas \", \"\")\n temp.append(silhouette)\n all_silhouettes = temp\n all_silhouettes.append('Air Jordan 1')\n all_silhouettes = list(set(all_silhouettes))\n df['silhouette'] = df['name'].apply(lambda x: silhouette_generator(x, all_silhouettes))\n\n #feature 2: profitable\n #create columns to calculate net profit\n df['price_diff'] = df['avg_resale_stockx'] - df['retail_price']\n df['commission_fee'] = abs((df['avg_resale_stockx']) * (9.5/100))\n df['seller_fee'] = 5\n df['total_credit'] = df['price_diff'] - df['commission_fee'] - df['seller_fee']\n df['cashout_fee'] = abs((df['total_credit']) * (2.9/100))\n df['net_profit'] = df['total_credit'] - df['cashout_fee']\n #create purchase feature if the net profit is greater than 0\n df['profitable'] = np.where(df['net_profit'] > 0, 1, 0)\n #drop columns used to calculate net profit\n df.drop(['commission_fee', 'seller_fee', 'total_credit', 'cashout_fee'], axis=1, inplace=True)\n\n #feature 3: brand code\n brand_code = df.groupby('brand').ngroup()\n df = pd.concat([df, brand_code], axis=1).rename(columns={0:'brand_code'})\n\n #feature 4: word2vec colors\n nlp = spacy.load('en_vectors_web_lg')\n df['black'] = df['colorway'].apply(lambda x: color_word2vec(x, \"black\", nlp))\n df['white'] = df['colorway'].apply(lambda x: color_word2vec(x, \"white\", nlp))\n df['brown'] = df['colorway'].apply(lambda x: color_word2vec(x, \"brown\", nlp))\n df['red'] = df['colorway'].apply(lambda x: color_word2vec(x, \"red\", nlp))\n df['blue'] = df['colorway'].apply(lambda x: color_word2vec(x, \"blue\", nlp))\n df['yellow'] = df['colorway'].apply(lambda x: color_word2vec(x, \"yellow\", nlp))\n df['orange'] = 
df['colorway'].apply(lambda x: color_word2vec(x, \"orange\", nlp))\n df['green'] = df['colorway'].apply(lambda x: color_word2vec(x, \"green\", nlp))\n df['purple'] = df['colorway'].apply(lambda x: color_word2vec(x, \"purple\", nlp))\n df['multi_color'] = df['colorway'].apply(lambda x: color_word2vec(x, \"multi color\", nlp))\n df['main_color'] = df[['black', 'white', 'brown', 'red', 'blue', 'yellow',\n 'orange', 'green', 'purple', 'multi_color']].idxmax(axis=1)\n df['main_color_id'] = df.groupby('main_color').ngroup()\n\n #boolean features\n df['womens'] = df['name'].apply(lambda x: label_womens(x))\n\n df['bcollab'] = df['name'].apply(lambda x: label_bcollab(x))\n\n df['og'] = df['name'].apply(lambda x: label_og(x))\n\n df['sp'] = df['name'].apply(lambda x: label_sp(x))\n\n df['qs'] = df['name'].apply(lambda x: label_qs(x))\n\n df['sb'] = df['name'].apply(lambda x: label_sb(x))\n\n df['ls'] = df['name'].apply(lambda x: label_ls(x))\n\n df['nrg'] = df['name'].apply(lambda x: label_nrg(x))\n\n df['prm'] = df['name'].apply(lambda x: label_prm(x))\n\n df['nsw'] = df['name'].apply(lambda x: label_nsw(x))\n\n df['retro'] = df['name'].apply(lambda x: label_retro(x))\n\n df['se'] = df['name'].apply(lambda x: label_se(x))\n\n df['pe'] = df['name'].apply(lambda x: label_pe(x))\n\n df['gs'] = df['name'].apply(lambda x: label_gs(x))\n\n df['hs'] = df['name'].apply(lambda x: label_hs(x))\n\n return df", "def submission(self):\n\n\t\tprobas = self.y_pred / self.count_models\n\n\t\tsub = pd.DataFrame({'id':self.X_test.PostId, 'OpenStatus':probas}).set_index('id')\n\t\tsub.to_csv('sub.csv')", "def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! 
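# Condensed sketch (function name is an assumption) of the resale-profit chain in
# featurize_kof above; the 9.5% commission, $5 seller fee and 2.9% cash-out fee are
# the constants hard-coded there.
def net_profit(avg_resale: float, retail_price: float) -> float:
    price_diff = avg_resale - retail_price
    total_credit = price_diff - abs(avg_resale) * 0.095 - 5
    return total_credit - abs(total_credit) * 0.029

print(net_profit(200.0, 100.0))  # positive -> the listing would be flagged "profitable"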
Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return", "def make_submission_file(w, unused_features, filename=\"prediction.csv\"):\n\n # load test datasets\n print_banner(\"7. Read test dataset from higgs-data/test.csv\") \n test_y, test_x, ind = load_csv_data('higgs-data/test.csv')\n\n # Construct Matrix Output with values of one\n y_pred = np.ones(len(test_y))\n\n # Split test dataset based\n print_banner(\"8. Split the test dataset into 8 subsets\") \n test_sets_x, _, indices = create_subsets(test_x, test_y)\n\n # Remove features of test datasets based on PRI_JET_NUM and DER_MASS_MMC\n print_banner(\"9. Remove features in each test subset based on PRI_JET_NUM and DER_MASS_MMC\")\n test_sets_x = remove_features(test_sets_x, unused_features) \n\n # Iterate through the test subsets with their models accordingly\n print_banner(\"10. Predict each test subset using their corresponding model\") \n for x, w, index in zip(test_sets_x, w, indices):\n\n # Perform z-score standardization and expand matrix features with logarithmic & polynomial & cross_term & square root basis function\n stand_x = generate_features(x, 2, True, with_log=True, with_sqrt=True, cross_terms=True)\n\n # Get the prediction\n y_pred[index] = predict_labels(w, stand_x)\n\n print_banner(\" Predicting subset: DONE\") \n \n # Creating submission file\n print_banner(\"11. 
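# Hedged sketch (toy data, not the source's exact pipeline): the impute-then-standardize
# steps in collect_data can be bundled into a scikit-learn Pipeline; SimpleImputer is the
# current replacement for the older Imputer class used above.
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X_raw = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]])
X = make_pipeline(SimpleImputer(strategy="mean"), StandardScaler()).fit_transform(X_raw)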
Making final submission file with csv format\") \n create_csv_submission(ind, y_pred, filename)", "def pull_usafacts_data(base_url: str, metric: str, logger: Logger, cache: str=None) -> pd.DataFrame:\n # Read data\n df = fetch(base_url.format(metric=metric), cache)\n date_cols = [i for i in df.columns if i.startswith(\"2\")]\n logger.info(\"data retrieved from source\",\n metric=metric,\n num_rows=df.shape[0],\n num_cols=df.shape[1],\n min_date=min(date_cols),\n max_date=max(date_cols),\n checksum=hashlib.sha256(pd.util.hash_pandas_object(df).values).hexdigest())\n df.columns = [i.lower() for i in df.columns]\n # Clean commas in count fields in case the input file included them\n df[df.columns[4:]] = df[df.columns[4:]].applymap(\n lambda x: int(x.replace(\",\", \"\")) if isinstance(x, str) else x)\n # Check missing FIPS\n null_mask = pd.isnull(df[\"countyfips\"])\n assert null_mask.sum() == 0\n\n unexpected_columns = [x for x in df.columns if \"Unnamed\" in x]\n unexpected_columns.extend(DROP_COLUMNS)\n\n # Assign Grand Princess Cruise Ship a special FIPS 90000\n # df.loc[df[\"FIPS\"] == 6000, \"FIPS\"] = 90000\n # df.loc[df[\"FIPS\"] == 6000, \"stateFIPS\"] = 90\n\n # Ignore Grand Princess Cruise Ship and Wade Hampton Census Area in AK\n df = df[\n (df[\"countyfips\"] != 6000)\n & (df[\"countyfips\"] != 2270)\n ]\n\n # Change FIPS from 0 to XX000 for statewise unallocated cases/deaths\n unassigned_index = (df[\"countyfips\"] == 0)\n df.loc[unassigned_index, \"countyfips\"] = df[\"statefips\"].loc[unassigned_index].values * 1000\n\n # Conform FIPS\n df[\"fips\"] = df[\"countyfips\"].apply(lambda x: f\"{int(x):05d}\")\n\n\n\n # Drop unnecessary columns (state is pre-encoded in fips)\n try:\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n except KeyError as e:\n raise ValueError(\n \"Tried to drop non-existent columns. The dataset \"\n \"schema may have changed. Please investigate and \"\n \"amend DROP_COLUMNS.\"\n ) from e\n # Check that columns are either FIPS or dates\n try:\n columns = list(df.columns)\n columns.remove(\"fips\")\n # Detects whether there is a non-date string column -- not perfect\n # USAFacts has used both / and -, so account for both cases.\n _ = [int(x.replace(\"/\", \"\").replace(\"-\", \"\")) for x in columns]\n except ValueError as e:\n raise ValueError(\n \"Detected unexpected column(s) \"\n \"after dropping DROP_COLUMNS. The dataset \"\n \"schema may have changed. 
Please investigate and \"\n \"amend DROP_COLUMNS.\"\n ) from e\n # Reshape dataframe\n df = df.melt(\n id_vars=[\"fips\"],\n var_name=\"timestamp\",\n value_name=\"cumulative_counts\",\n )\n # timestamp: str -> datetime\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"])\n # Add a dummy first row here on day before first day\n min_ts = min(df[\"timestamp\"])\n df_dummy = df.loc[df[\"timestamp\"] == min_ts].copy()\n df_dummy.loc[:, \"timestamp\"] = min_ts - pd.Timedelta(days=1)\n df_dummy.loc[:, \"cumulative_counts\"] = 0\n df = pd.concat([df_dummy, df])\n # Obtain new_counts\n df.sort_values([\"fips\", \"timestamp\"], inplace=True)\n df[\"new_counts\"] = df[\"cumulative_counts\"].diff() # 1st discrete difference\n # Handle edge cases where we diffed across fips\n mask = df[\"fips\"] != df[\"fips\"].shift(1)\n df.loc[mask, \"new_counts\"] = np.nan\n df.reset_index(inplace=True, drop=True)\n\n # Final sanity checks\n days_by_fips = df.groupby(\"fips\").count()[\"cumulative_counts\"].unique()\n unique_days = df[\"timestamp\"].unique()\n # each FIPS has same number of rows\n if (len(days_by_fips) > 1) or (days_by_fips[0] != len(unique_days)):\n raise ValueError(\"Differing number of days by fips\")\n min_timestamp = min(unique_days)\n max_timestamp = max(unique_days)\n n_days = (max_timestamp - min_timestamp) / np.timedelta64(1, \"D\") + 1\n if n_days != len(unique_days):\n raise ValueError(\n f\"Not every day between {min_timestamp} and \"\n \"{max_timestamp} is represented.\"\n )\n return df.loc[\n df[\"timestamp\"] >= min_ts,\n [ # Reorder\n \"fips\",\n \"timestamp\",\n \"new_counts\",\n \"cumulative_counts\",\n ],\n ]", "def createFeatureFrame(mode):\r\n \r\n text = textFeature(mode)\r\n sentiment = clfFeature('sentiment', mode)\r\n actors = clfFeature('actors', mode)\r\n directors = clfFeature('directors', mode)\r\n genre = clfFeature('genre', mode)\r\n titles = clfFeature('titles', mode)\r\n featureframe = pd.concat([text, sentiment, actors, directors, genre, titles], axis=1)\r\n \r\n return featureframe", "def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above 
procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature", "def generate_dataframe(forecast, observed):\n dataframe = pd.DataFrame(columns=COLUMNS, dtype=str)\n\n # Write cumulative forecasts.\n forecast_date_str = FORECAST_DATE.strftime(\"%Y-%m-%d\")\n for cum_week in sorted(forecast.keys()):\n target_end_date = FIRST_WEEK + ((cum_week - 1) * datetime.timedelta(7)) \n target_end_date_str = target_end_date.strftime(\"%Y-%m-%d\")\n # Terminate the loop after 8 weeks of forecasts.\n if cum_week >= 8:\n break\n \n # Skip forecasts before the forecast date.\n if target_end_date <= FORECAST_DATE:\n continue\n\n # Write a row for \"weeks ahead\" if forecast end day is a Saturday.\n if target_end_date >= FIRST_WEEK and target_end_date.weekday() == 5:\n target = str(cum_week) + \" wk ahead cum death\"\n for state_id in forecast[cum_week].keys():\n for quantile in forecast[cum_week][state_id].keys():\n val = observed[(FORECAST_DATE - datetime.timedelta(1)).strftime(\"%Y-%m-%d\")][state_id]\n for i in range(1, cum_week + 1):\n val += forecast[i][state_id][quantile]\n if quantile == \"point\":\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n type=\"point\",\n quantile=\"NA\",\n value=val\n ), ignore_index=True)\n else:\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n type=\"quantile\",\n quantile=quantile,\n value=val\n ), ignore_index=True)\n \n # Write incident forecasts.\n forecast_date_str = FORECAST_DATE.strftime(\"%Y-%m-%d\")\n for cum_week in sorted(forecast.keys()):\n target_end_date = FIRST_WEEK + ((cum_week - 1) * datetime.timedelta(7)) \n target_end_date_str = target_end_date.strftime(\"%Y-%m-%d\")\n # Terminate the loop after 8 weeks of forecasts.\n if cum_week >= 8:\n break\n \n # Skip forecasts before the forecast date.\n if target_end_date <= FORECAST_DATE:\n continue\n\n if target_end_date >= FIRST_WEEK and target_end_date.weekday() == 5:\n target = str(cum_week) + \" wk ahead inc death\"\n for state_id in forecast[cum_week].keys():\n for quantile in forecast[cum_week][state_id].keys():\n if quantile == \"point\":\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n type=\"point\",\n quantile=\"NA\",\n value=forecast[cum_week][state_id][quantile]\n ), ignore_index=True)\n else:\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n 
type=\"quantile\",\n quantile=quantile,\n value=forecast[cum_week][state_id][quantile]\n ), ignore_index=True)\n \n return dataframe", "def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def make_df_from_json(json_files, out_file):\n table = [[\"name\", \n \"cik\", \n \"city\",\n \"state\",\n \"street1\",\n \"street2\",\n \"zip_code\",\n \"year_of_incorp\", \n \"min_inv\", \n \"tot_off\", \n \"tot_sold\", \n \"tot_rem\", \n \"ind_group_type\", \n \"has_non_accred\", \n \"num_non_accred\", \n \"tot_num_inv\"\n ]] \n\n for json_dict in json_files:\n\n with open(json_dict, \"rb\") as f:\n data = json.load(f)\n print(json_dict)\n\n for i, key in enumerate(data):\n # if i % 1000 == 0:\n # print(i)\n entry = data[key] \n if entry == {}:\n #print(\"missing entry {0}\".format(i))\n continue\n row = []\n\n primary_issuer = entry[\"Primary Issuer\"]\n cik = primary_issuer[\"cik\"]\n name = primary_issuer[\"entity_name\"]\n phone = primary_issuer[\"phone\"]\n year_of_incorp = primary_issuer[\"year_of_incorp\"]\n address = primary_issuer[\"address\"]\n city = address[\"city\"]\n state = address[\"state\"]\n street1 = address[\"street1\"]\n street2 = address[\"street2\"]\n zip_code = address[\"zip_code\"]\n\n secondary_issuers = entry[\"Secondary Issuers\"]\n related_people = entry[\"Related People\"]\n \n offering_data = entry[\"Offering Data\"]\n min_inv = offering_data[\"min_investment_accepted\"]\n tot_off = offering_data[\"total_offering_amount\"]\n tot_sold = offering_data[\"total_amount_sold\"]\n tot_rem = offering_data[\"total_remaining\"]\n ind_group_type = offering_data[\"ind_group_type\"]\n has_non_accred = offering_data[\"has_non_accred\"]\n num_non_accred = offering_data[\"num_non_accred\"]\n tot_num_inv = offering_data[\"tot_num_inv\"] \n\n row = [name, \n cik, \n city,\n state,\n street1,\n street2,\n zip_code,\n year_of_incorp,\n min_inv,\n tot_off,\n tot_sold,\n tot_rem,\n ind_group_type,\n has_non_accred,\n num_non_accred,\n tot_num_inv\n ]\n\n table.append(row)\n\n df = pd.DataFrame(table)\n df.to_csv(out_file)\n\n return 0", "def FS1Year(inputFolderPath = './Formatted Files Without Missing', outputFolderPath = './Feature Selection'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t# print files\n\t\t\t# call([\"java\",\"-jar\",\"MINE.jar\",\"./New Formatted Files/\"+files[0],str(targetList[i]+1),\"cv=0.5\"])\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\t# dataset = np.loadtxt('./New Formatted Files/'+files[0], delimiter=\",\", skiprows=1, usecols=tuple(range(1,3240)))\n\t\t\t# dataset = np.genfromtxt('./New Formatted Files/'+files[0], delimiter=\",\", names=True, autostrip=True, max_rows=10, missing_values=np.nan, usecols=tuple(range(1,30)))\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = 
csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if item.startswith(str(targetList[i]).zfill(4))]\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\tX = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t# print tuple(range(1,3240))\n\t\t\t# print dataset.dtype.names[0]\n\t\t\t# print dataset.dtype.names[-1]\n\t\t\t# print dataset[0]\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 for x1 in y if not np.isnan(x1)])\n\t\t\t# print dataset[0]\n\t\t\t# print (imputedX.shape, y.shape)\n\t\t\t# print (imputedX.shape, deleteMissingY.shape)\n\t\t\t# print (np.any(np.isnan(imputedX)), np.all(np.isfinite(imputedX)))\n\t\t\t# imputedX_new = SelectKBest(chi2, k=10).fit_transform(imputedX, y)\n\t\t\tk = 30\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t# print (len(selection.get_support()), len(header[1:target+1]+header[target+2:]))\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[1:target+1]+header[target+2:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t# for sf in selectedFeatures:\n\t\t\t# \tprint sf\n\t\t\t# print selection.scores_\n\t\t\t# print selection.get_support()\n\t\t\t# print (imputedX_new.shape, y.shape)\n\t\t\t# print (imputedX_new.shape, deleteMissingY.shape)\n\t\t\t# print imputedX[0,1994]\n\t\t\t# print dataset['3137_Estimates_and_projections_of_the_total_population_by_sex_age_and_rural__urban_areasSexTotal_10year_age_bands__2534_Geographical_coverage__National_Thousands_Persons__ILO']\n\t\t\t# print dataset\n\t\t\t# separate the data from the target attributes\n\t\t\t# X = np.concatenate((imputedDataset[:,0:7],imputedDataset[:,0:7]),axis=1)\n\t\t\t# y = imputedDataset[:,8]\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\t# print 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('Indicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()", "def add_date_features(data):\n data['member_day'] = data.became_member_on.dt.day\n data['member_weekday'] = data.became_member_on.dt.weekday\n data['member_year'] = data.became_member_on.dt.year\n data['member_month'] = data.became_member_on.dt.month\n\n return data", "def format_tf_dataframe(self, file_path):\n xls = pd.ExcelFile(file_path)\n df = pd.read_excel(xls, \"2021 Prices\")\n df.replace(\"\", np.nan, inplace=True)\n # Make all online versions end in ' online'\n df[\"Journal 
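# Minimal, self-contained sketch (random toy data, added for clarity) of the SelectKBest
# bookkeeping in FS1Year above: scores_, pvalues_ and get_support map selected columns
# back to their names/indices.
import numpy as np
from sklearn.feature_selection import SelectKBest, f_regression

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 5))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=50)
selector = SelectKBest(f_regression, k=2).fit(X, y)
kept_columns = [i for i, keep in enumerate(selector.get_support()) if keep]
scores = selector.scores_[selector.get_support()]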
Name \"] = df[\"Journal Name \"].str.replace(\n \"( online| Online| \\WOnline\\W | \\Wonline\\W)\", \" online\", regex=True\n )\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df[df[\"Journal Name \"].notna()]\n # Filter out non-online versions when an online version is present, otherwise keep the print version\n vals = df.loc[\n df[\"Journal Name \"].str.contains(\" online\"), \"Journal Name \"\n ].str.replace(\" online\", \"\")\n df = df[~df[\"Journal Name \"].isin(vals)]\n self.df = df", "def format_tf_dataframe(self, file_path):\n xls = pd.ExcelFile(file_path)\n df = pd.read_excel(xls, \"2021 Prices\")\n df.replace(\"\", np.nan, inplace=True)\n # Make all online versions end in ' online'\n df[\"Journal Name \"] = df[\"Journal Name \"].str.replace(\n \"( online| Online| \\WOnline\\W | \\Wonline\\W)\", \" online\", regex=True\n )\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df[df[\"Journal Name \"].notna()]\n # Filter out non-online versions when an online version is present, otherwise keep the print version\n vals = df.loc[\n df[\"Journal Name \"].str.contains(\" online\"), \"Journal Name \"\n ].str.replace(\" online\", \"\")\n df = df[~df[\"Journal Name \"].isin(vals)]\n self.df = df", "def submission(test_ids, pred_test, file_name):\n pred_test[pred_test < 0] = 0\n\n val_pred_df = pd.DataFrame(data={'fullVisitorId': test_ids,\n 'predictedRevenue': pred_test})\n\n val_pred_df = val_pred_df.groupby('fullVisitorId').sum().reset_index()\n\n val_pred_df.columns = ['fullVIsitorId', 'predictedLogRevenue']\n val_pred_df['predictedLogRevenue'] = val_pred_df['predictedLogRevenue']\n val_pred_df.to_csv('submission/'+file_name, index=False)", "def add_features(df_in, rolling_win_size=15):\n\n sensor_cols = []\n index = df_in.columns.get_loc('TTF')\n for i in df_in.columns[2:index]:\n sensor_cols.append(i)\n\n sensor_av_cols = [nm+'_av' for nm in sensor_cols]\n sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]\n\n df_out = pd.DataFrame()\n\n ws = rolling_win_size\n\n #calculate rolling stats for each engine id\n\n for m_id in pd.unique(df_in.Turbine_ID):\n\n # get a subset for each engine sensors\n df_engine = df_in[df_in['Turbine_ID'] == m_id]\n df_sub = df_engine[sensor_cols]\n\n # get rolling mean for the subset\n av = df_sub.rolling(ws, min_periods=1).mean()\n av.columns = sensor_av_cols\n\n # get the rolling standard deviation for the subset\n sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)\n sd.columns = sensor_sd_cols\n\n # combine the two new subset dataframes columns to the engine subset\n new_ftrs = pd.concat([df_engine,av,sd], axis=1)\n\n # add the new features rows to the output dataframe\n df_out = pd.concat([df_out,new_ftrs])\n df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )\n return df_out", "def create_dataframe(euctr_cond):\n def f(x):\n d = {}\n d['number_of_countries'] = x.eudract_number_with_country.nunique()\n d['min_end_date'] = x.date_of_the_global_end_of_the_trial.min()\n d['max_end_date'] = x.date_of_the_global_end_of_the_trial.max()\n d['comp_date'] = np.where(pd.notnull(x.date_of_the_global_end_of_the_trial),1,0).sum()\n d['has_results'] = x.trial_results.sum()\n d['includes_pip'] = x.trial_is_part_of_a_paediatric_investigation_plan.sum()\n d['single_blind'] = x.trial_single_blind.sum()\n d['not_single_blind'] = x.not_single_blind.sum()\n d['rare_disease'] = x.trial_condition_being_studied_is_a_rare_disease.sum()\n d['not_rare_disease'] = x.not_rare_disease.sum()\n d['rare_disease_blank'] = x.rare_disease_blank.sum()\n 
d['completed'] = np.where(x.end_of_trial_status == 'Completed', 1, 0).sum()\n d['ongoing'] = np.where((x.end_of_trial_status == 'Ongoing') | (x.end_of_trial_status == 'Restarted'), 1, 0).sum()\n d['terminated'] = np.where(x.end_of_trial_status == 'Prematurely Ended', 1, 0).sum()\n d['suspended'] = np.where((x.end_of_trial_status == 'Temporarily Halted') | (x.end_of_trial_status == 'Suspended by CA'), 1, 0).sum()\n d['other_status'] = np.where((x.end_of_trial_status == 'Not Authorised') | (x.end_of_trial_status == 'Prohibited by CA'), 1, 0).sum()\n d['no_status'] = np.where(pd.isnull(x.end_of_trial_status),1,0).sum()\n d['phase_1'] = x.trial_human_pharmacology_phase_i.sum()\n d['phase_2'] = x.trial_therapeutic_exploratory_phase_ii.sum()\n d['phase_3'] = x.trial_therapeutic_confirmatory_phase_iii.sum()\n d['phase_4'] = x.trial_therapeutic_use_phase_iv.sum()\n d['bioequivalence'] = x.trial_bioequivalence_study.sum()\n d['not_bioequivalence'] = x.not_bioequivalence_study.sum()\n d['healthy_volunteers'] = x.subject_healthy_volunteers.sum()\n d['not_healthy_volunteers'] = x.not_healthy_volunteers.sum()\n d['full_title'] = x.full_title.astype('str').min()\n d['abbreviated_title'] = x.abbreviated_title.astype('str').max()\n d['non_eu'] = x.non_eu.sum()\n return pd.Series(d)\n\n return euctr_cond.groupby('eudract_number').apply(f).reset_index()", "def set_features(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData', 'strData', 'strData', 'strData', 'strData']\n col_headers = ['model_name', 'name', 'variable_type', 'data_type', 'feature_strategy', 'strategy_args']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n # Add the feature definitions to the model\n self.model.features_df = self.request_df\n self.model.features_df.set_index(\"name\", drop=False, inplace=True)\n # Store a copy of the features_df that will remain untouched in later calls\n self.model.original_features_df = self.model.features_df.copy()\n\n # Ensure there is at most one feature with variable_type identifier\n if len(self.model.features_df.loc[self.model.features_df[\"variable_type\"] == \"identifier\"]) > 1:\n err = \"Invalid feature definitions. Detected more than one feature with variable_type set to identifier. 
You can only pass one unique identifier.\"\n raise Exception(err)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n message = [[self.model.name, 'Feature definitions successfully saved to model',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp))]]\n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"setup\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def transform_and_create_new_features(df):\n # 'GENDER' FEATURE MANAGEMENT\n # Transform 'Gender' feature (categorical) to numerical one\n df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # 'EMBARKED' FEATURE MANAGEMENT\n # 1st approach: df['Port'] = df['Embarked'].map({'C': 1, 'S': 2, 'Q': 3}).astype(int)\n # Extract from 'pycon UK Tutorial':\n # \"Replacing {C, S, Q} by {1, 2, 3} would seem to imply the ordering C < S < Q when in fact they are simply arranged\n # alphabetically. To avoid this problem, we create dummy variables. Essentially this involves creating new columns\n # to represent whether the passenger embarked at C with the value 1 if true, 0 otherwise.\"\n dummies_embarked = pd.get_dummies(df['Embarked'], prefix='Embarked')\n df = pd.concat([df, dummies_embarked], axis=1)\n\n # 'AGE' & 'FARE' FEATURES MANAGEMENT\n df = _transform_age_feature(df)\n df = _transform_fare_feature(df)\n\n # CREATION OF A NEW FEATURE: Family size + Alone or not ?\n df['Family'] = df['SibSp'] + df['Parch']\n df['Alone'] = 0\n df.loc[df['Family'] == 0, 'Alone'] = 1\n\n # Drop all columns that are now useless\n df = df.drop(['Sex', 'Age', 'Fare', 'Embarked', 'SibSp', 'Parch'], axis=1)\n print(df.head(10))\n\n return df", "def add_all_features(df):\n df.reset_index(drop=True, inplace=True)\n df = target_indicators(df)\n df = momentum_indicators(df)\n df = trend_indicators(df)\n df = volatility_indicators(df)\n df = volume_indicators(df)\n df = special_indicators(df)\n return df", "def _extract_features(self, row):\n ncep_data = self.ncep_data\n ncep_sfc_data = self.ncep_sfc_data\n date = row['date']\n features = dict(row)\n #reduce the dimensions of ncep_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_data = ncep_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['level','time'])\n #reduce the dimensions of ncep_sfc_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_sfc_data = ncep_sfc_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['time'])\n\n for level in self.levels:\n #features at different pressure level\n point = ncep_data.loc[level]\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_data_vars:\n features[\"{0}_0w_lvl_{1}\".format(data_var,level)] 
= v0w[data_var]\n features[\"{0}_1w_lvl_{1}\".format(data_var,level)] = v1w[data_var]\n features[\"{0}_2w_lvl_{1}\".format(data_var,level)] = v2w[data_var]\n features[\"{0}_3w_lvl_{1}\".format(data_var,level)] = v3w[data_var]\n #features at surface level\n point = ncep_sfc_data\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_sfc_data_vars:\n features[\"{0}_0w\".format(data_var)] = v0w[data_var]\n features[\"{0}_1w\".format(data_var)] = v1w[data_var]\n features[\"{0}_2w\".format(data_var)] = v2w[data_var]\n features[\"{0}_3w\".format(data_var)] = v3w[data_var] \n\n return features", "def feature_Engineering_rf(data):\n convert_to_categorical = ['Occupation','Product_Category_1','Product_Category_2','Marital_Status','Age','Gender','Stay_In_Current_City_Years','City_Category']\n for col in convert_to_categorical:\n data[col] = data[col].astype(str)\n data = data.drop('Product_Category_3',axis =1)\n data = pd.get_dummies(data,prefix_sep = '_',\n columns = [col for col in data.columns if col not in ['User_ID','Product_ID']])\n data = data.drop(['User_ID','Product_ID'],axis =1)\n \n return data", "def featMatGenerator(dirName, trajfile, trajFilter):\n \n #load the data and extract feature vectors for each trajectory and plate summary for each chunk\n featMatTraj = {}\n featMatPlate = pd.DataFrame()\n try:\n if len(trajfile.split('_'))<10:\n fshort = '_'.join(trajfile.split('_')[0:-2:6])\n else:\n fshort = '_'.join(trajfile.split('_')[0:-1:7])\n featMatPlate = pd.DataFrame()\n with pd.HDFStore(os.path.join(dirName, trajfile), 'r') as fid:\n nChunks = list(fid.keys())\n for chunk in nChunks:\n chunkno = [int(s) for s in chunk.split('_') if s.isdigit()]\n chunkno = chunkno[0]\n\n featMatTraj[chunkno] = pd.DataFrame()\n nWorms = np.unique(fid[chunk]['worm_index'])\n for w in nWorms:\n if fid[chunk][fid[chunk]['worm_index']==w].shape[0]>=trajFilter:\n featMatTraj[chunkno] = featMatTraj[chunkno].append(\\\n fid[chunk][fid[chunk]['worm_index']==w].mean(),ignore_index=True)\n \n featMatTraj[chunkno].reset_index(drop=True)\n \n temp = featMatTraj[chunkno].median()\n temp = temp.drop(['worm_index', 'timestamp']).rename(lambda x: x +'_med').to_frame().transpose()\n \n temp2 = featMatTraj[chunkno].quantile(0.75) - featMatTraj[chunkno].quantile(0.25)\n temp2 = temp2.drop(['worm_index', 'timestamp']).rename(lambda x: x + '_iqr').to_frame().transpose()\n \n tempfinal = pd.concat([temp, temp2], axis = 1)\n tempfinal ['exp'] = fshort\n tempfinal['Chunk'] = chunk\n tempfinal ['drug'] = fshort.split('_')[0]\n \n featMatPlate = featMatPlate.append(tempfinal, ignore_index=True)\n del temp, temp2, tempfinal\n del nWorms\n del nChunks\n \n featMatPlate.reset_index(drop=True) \n featMatPlate.drop(featMatPlate.columns[np.sum(featMatPlate.isna()>featMatPlate.shape[0]/2)], \\\n axis=1, inplace = True)\n except OSError:\n print (trajfile + 'is invalid file format') \n\n #write the featMatPlate to a .csv file\n featMatPlate.to_csv(os.path.join(os.path.dirname(dirName), fshort + '_FeatMatPlate.csv'))\n\n #save the featMatTraj to an excel file\n writer = pd.ExcelWriter(os.path.join(os.path.dirname(dirName), fshort + '_FatMatTraj.xlsx'))\n for chunk in featMatTraj.keys():\n featMatTraj[chunk].to_excel(writer, sheet_name = str(chunk))\n writer.save()\n \n return featMatTraj, featMatPlate", "def 
save(self):\r\n\r\n for video_name, video_data in self.data.items():\r\n save_path = os.path.join(\r\n self.features_dir, video_name + \".\" + self.file_type\r\n )\r\n write_df(\r\n df=video_data.fillna(0), file_type=self.file_type, save_path=save_path\r\n )\r\n print(\"Created additional ROI features for {}...\".format(video_name))\r\n self.timer.stop_timer()\r\n stdout_success(\r\n msg=\"Created additional ROI features for files within the project_folder/csv/features_extracted directory\",\r\n elapsed_time=self.timer.elapsed_time_str,\r\n )", "def extract_open_smile_features(args):\n\n #set up \n level = args.level \n temp_dir = os.path.join(args.output_dir, 'segments')\n output_dir = os.path.join(args.output_dir, level, args.call_type) \n \n #get segment metadata \n metadata_df = pd.read_csv(args.metadata_path)\n metadata_df['call_datetime'] = pd.to_datetime(metadata_df['call_datetime'])\n metadata_df['call_date'] = metadata_df['call_datetime'].dt.date\n metadata_df['day_id'] = metadata_df['subject_id'].apply(str) + '_' + metadata_df['call_date'].apply(str) \n\n #filter call type (personal, assessment, all) \n if metadata_df['is_assessment'].dtypes == 'bool':\n metadata_df.loc[metadata_df['is_assessment'] == True, 'is_assessment'] = 't'\n metadata_df.loc[metadata_df['is_assessment'] == False, 'is_assessment'] = 'f'\n if args.call_type == 'personal':\n metadata_df = metadata_df.loc[metadata_df['is_assessment'] == 'f', :]\n elif args.call_type == 'assessment':\n metadata_df = metadata_df.loc[metadata_df['is_assessment'] == 't', :]\n elif args.call_type != 'all':\n print('Invalid call_type: ' + str(args.call_type))\n return \n\n #get call_ids in chunk\n chunk = int(args.job_num)\n call_ids = sorted(metadata_df['call_id'].unique()) \n call_ids = list(chunks(call_ids,100))[chunk-1] \n\n #aggregation level \n if level == 'call': \n idx_vals = metadata_df['call_id'].sort_values().unique().tolist()\n idx_col = 'call_id'\n elif level == 'day': \n idx_vals = metadata_df['day_id'].drop_duplicates().values.tolist() \n idx_col = 'day_id' \n idx_vals = list(chunks(idx_vals,100))[chunk-1] \n\n #make output directories \n if not os.path.exists(temp_dir): \n os.makedirs(temp_dir) \n if not os.path.exists(output_dir): \n os.makedirs(output_dir) \n\n #COMPUTE OPENSMILE FEATURES \n #***********************************************************\n #***********************************************************\n for idx in idx_vals: #iterate over calls or days\n seg_ids = metadata_df.loc[metadata_df[idx_col] == idx, 'segment_id'].unique() \n\n #Compute features for all segment in idx \n feats_df = pd.DataFrame() \n for seg_id in seg_ids: #iterate over segments\n seg_input_path = os.path.join(args.segments_dir, str(seg_id) + '.wav')\n seg_output_path = os.path.join(temp_dir, str(seg_id) + '.csv')\n \n #compute OpenSmile features for segment \n if not os.path.exists(seg_output_path): \n # if we use the option -D instead of -csvoutput \n # we can get frame-level outputs\n runprocess = opensmile_path + ' -C ' + args.config_path + ' -I ' + seg_input_path + ' -csvoutput ' + seg_output_path \n os.system(runprocess) \n\n #add segment features to feature dataframe \n seg_feats_df = pd.read_csv(seg_output_path, sep=';') \n seg_feats_df['segment_id'] = seg_id \n feats_df = feats_df.append(seg_feats_df) \n \n #compute mean and std over segment OpenSmile features \n s = pd.Series()\n for col in feats_df.columns: \n if col in ['name', 'frameTime', 'segment_id']:\n continue \n s[col+'_mean'] = feats_df[col].mean() \n 
s[col+'_std'] = feats_df[col].std() \n \n #save feature stats for index to file \n output_path = os.path.join(output_dir, str(idx) + '.csv')\n s.to_csv(output_path)", "def generate_tpx_features():\n\n\tlabels = get_tpx_labels()\n\tlabels_abs = get_tpx_labels_abs()\n\tlabels_rel = get_tpx_labels_rel()\n\tlabels_prop = get_tpx_labels_prop()\n\tlabels_special = get_tpx_labels_special()\n\t\n\tlabels.append(\"num_words\")\n\n\t# read existing metadata\n\tmd_table = pd.DataFrame.from_csv(wdir + md_csv, header=0)\n\tidnos = md_table.idno\n\n\t# create new data frame\n\tht_fr = pd.DataFrame(columns=labels, index=idnos)\n\t \n\t# XPath expressions for TimeML requests\n\tnamespaces = {'tei':'http://www.tei-c.org/ns/1.0'}\n\n\txpaths = get_tpx_xpaths()\n\n\t# loop through files to get HeidelTime results, first step: absolute values\n\t# subsequent steps build on absolute values\n\tfor file in glob.glob(ht_inpath):\n\t\t\n\t\tidno = os.path.basename(file)[0:6]\n\t\txml = etree.parse(file)\n\t\t\n\t\tresult = 0\n\t\t# calculate absolute feature values\n\t\tfor label in labels_abs + labels_special:\n\t\t\t\n\t\t\tif label in xpaths:\n\t\t\t\t# apply xpaths if present\n\t\t\t\txpath = xpaths[label]\n\t\t\t\tresult = xml.xpath(xpath, namespaces=namespaces)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t# calculate features which cannot be determined directly with XPath\n\t\t\t\txpath_dates = \"//TIMEX3[@type='DATE']/@value\"\n\t\t\t\tdates = xml.xpath(xpath_dates, namespaces=namespaces)\n\t\t\t\t\n\t\t\t\t# temporal distance between mentioned years and publication year of the novel\n\t\t\t\tif (label == \"temp_dist\"):\n\t\t\t\t\t# get all date expressions with a year\n\t\t\t\t\tyears = []\n\t\t\t\t\tfor date in dates:\n\t\t\t\t\t\tif re.match(r\"^\\d{4}-\\d{2}-\\d{2}\", date): # only year: bad results\n\t\t\t\t\t\t\tyears.append(date.split(\"-\")[0])\n\t\t\t\t\t# get the median of the years mentioned in the text\n\t\t\t\t\tif years:\n\t\t\t\t\t\tyears = np.array(years).astype(np.float)\n\t\t\t\t\t\n\t\t\t\t\t\tmed = np.median(years) #median\n\t\t\t\t\t\t# get publication year\n\t\t\t\t\t\tpub_year = md_table.loc[idno,\"year\"]\n\t\t\t\t\t\t# calculate the difference\n\t\t\t\t\t\tresult = round(pub_year - med)\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult = float(\"NaN\")\n\t\t\t\t\t\n\t\t\t\t# counts related to chapters\n\t\t\t\telif (label == \"tpx_date_any_chapter_first_abs\" or label == \"tpx_date_any_chapter_other_mean_abs\" or label == \"tpx_date_any_chapter_other_abs\"):\n\t\t\t\t\tdates_ch = []\n\t\t\t\t\txpaths_chapter = {\"tpx_date_any_chapter_first_abs\" : \"//TIMEX3[@type='DATE'][substring(ancestor::tei:div/@xml:id,(string-length(ancestor::tei:div/@xml:id) - 1),2) ='d1']/@value\",\n\t\t\t\t\t\t\t\t\t\t\"tpx_date_any_chapter_other_abs\" : \"//TIMEX3[@type='DATE'][substring(ancestor::tei:div/@xml:id,(string-length(ancestor::tei:div/@xml:id) - 1),2) !='d1']/@value\",\n\t\t\t\t\t\t\t\t\t\t\"tpx_date_any_chapter_other_mean_abs\" : \"//TIMEX3[@type='DATE'][substring(ancestor::tei:div/@xml:id,(string-length(ancestor::tei:div/@xml:id) - 1),2) !='d1']/@value\",\n\t\t\t\t\t\t\t\t\t\t\"chapters\" : \"//wrapper\"\n\t\t\t\t\t}\n\t\t\t\t\tchapter_dates = []\n\t\t\t\t\tchapter_dates = xml.xpath(xpaths_chapter[label], namespaces=namespaces)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t# filter: just \"any-dates\"\n\t\t\t\t\tfor date in chapter_dates:\n\t\t\t\t\t\tif re.match(r\"^\\d{2,4}\", date) or re.match(r\"^.{2,4}-\\d{2}\", date) or re.match(r\"^.{2,4}-.{2}-\\d{2}\", date):\n\t\t\t\t\t\t\tdates_ch.append(date)\n\t\t\t\t\t\n\t\t\t\t\tif 
(label == \"tpx_date_any_chapter_first_abs\" or label == \"tpx_date_any_chapter_other_abs\"):\n\t\t\t\t\t\t# return all the dates from the first / other chapters\n\t\t\t\t\t\tresult = len(dates_ch)\n\t\t\t\t\telif label == \"tpx_date_any_chapter_other_mean_abs\":\n\t\t\t\t\t\t# calculate the mean of the other chapters\n\t\t\t\t\t\tchapters = xml.xpath(xpaths_chapter[\"chapters\"])\n\t\t\t\t\t\t\n\t\t\t\t\t\tif len(chapters) <= 1:\n\t\t\t\t\t\t\traise ValueError(\"The novel \" + idno + \" has less than 2 chapters!\")\n\t\t\t\t\t\tresult = len(dates_ch) / (len(chapters) - 1)\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t# remaining temporal expression features\t\n\t\t\t\telse:\n\t\t\t\t\tdate_counts = []\n\t\t\t\t\tfor date in dates:\n\t\t\t\t\t\tif (label == \"tpx_date_none_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\D+$\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_year_abs\"):\n\t\t\t\t\t\t\t#if re.match(r\"^\\d{2,4}\", date): für alle Jahre geändert\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_year_month_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_month_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^.{4}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_day_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^.{4}-.{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_month_day_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^.{4}-\\d{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_any_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}\", date) or re.match(r\"^.{4}-\\d{2}\", date) or re.match(r\"^.{4}-.{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_full_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}-\\d{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\n\t\t\t\t\tresult = len(date_counts)\n\t\t\t\t\t\n\t\t\t\n\t\t\t# check the results of XPath\n\t\t\t\"\"\"\n\t\t\tif math.isnan(result):\n\t\t\t\tresult = \"is not a number\"\n\t\t\t\"\"\"\n\t\t\t\n\t\t\t# Write the result into the data frame\n\t\t\tht_fr.loc[idno,label] = result\n\t\t\t\n\t\t\t\n\t# second step: relative values (relative to the total number of words in the text)\n\tfor file in glob.glob(ht_inpath):\n\t\t\n\t\tidno = os.path.basename(file)[0:6]\n\t\t\n\t\t# calculate total number of words in the text\n\t\tnum_words = 0\n\t\txml = etree.parse(file)\n\t\t# get XML snippets chapterwise\n\t\twrappers = xml.xpath(\"//wrapper//text()\")\n\t\tfor wrap in wrappers:\n\t\t\t\n\t\t\t# tokenize and count\n\t\t\twords = re.split(r\"[\\s\\n]+\", wrap)\n\t\t\tnum_words += len(words)\n\t\t\n\t\tht_fr.loc[idno,\"num_words\"] = num_words\n\t\t\n\t\t\n\t\tfor label in labels_rel:\n\t\t\t# set corresponding absolute value label\n\t\t\tlabel_abs = label[:-3] + \"abs\"\n\t\t\t\n\t\t\t# fetch absolute value\n\t\t\tabs_val = ht_fr.loc[idno,label_abs]\n\t\t\t\n\t\t\t# check data type\n\t\t\tif math.isnan(abs_val):\n\t\t\t\tresult = abs_val\n\t\t\telse:\n\t\t\t\t# calculate relative value\n\t\t\t\tresult = abs_val / num_words\n\t\t\t\n\t\t\t\n\t\t\t# Write the result into the data frame\n\t\t\tht_fr.loc[idno,label] = result\n\t\t\t\n\n\t# third step: calculate proportions\n\tfor file in glob.glob(ht_inpath):\n\t\t\n\t\tidno = os.path.basename(file)[0:6]\n\t\ttpx_all = 
ht_fr.loc[idno,\"tpx_all_abs\"]\n\t\ttpx_all_one = tpx_all / 100\n\t\t\n\t\tfor label in labels_prop:\n\t\t\t# set corresponding absolute value label\n\t\t\tlabel_abs = label[:-4] + \"abs\"\n\t\t\t\n\t\t\t# fetch absolute value\n\t\t\tabs_val = ht_fr.loc[idno,label_abs]\n\t\t\t\n\t\t\t# check data type\n\t\t\tif math.isnan(abs_val):\n\t\t\t\tresult = abs_val\n\t\t\telse:\n\t\t\t\t# calculate proportion\n\t\t\t\tresult = abs_val / tpx_all_one\n\t\t\t\n\t\t\t# Write the result into the data frame\n\t\t\tht_fr.loc[idno,label] = result\n\t\t\n\t# für FJR: absolute Werte weglassen\n\tfor label in labels_abs:\n\t\tht_fr = ht_fr.drop(label, axis=1)\n\tht_fr = ht_fr.drop(\"temp_dist\", axis=1)\n\tht_fr = ht_fr.drop(\"num_words\", axis=1)\n\t\t\n\tht_fr.to_csv(wdir + \"tpx-corpus-counts.csv\", sep=\",\", header=True)\n\n\tprint(\"Done: generate tpx features\")", "def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all", "def buildExposureTable(exposures, fields, instruments):\n name = []\n ra = []\n dec= []\n field= []\n inst = []\n airmass = []\n mjd = []\n exptime = []\n epoch = []\n apcorr = []\n index = 0\n for k,e in exposures.items():\n name.append(e.name)\n ra.append(getDegree(e.coords.ra))\n dec.append(getDegree(e.coords.dec))\n field.append(fields[e.field].index)\n if e.instrument in specialInstruments:\n inst.append(specialInstruments[e.instrument])\n else:\n inst.append(instruments[e.instrument].index)\n e.index = index\n index += 1\n\n airmass.append(e.airmass)\n mjd.append(e.mjd)\n exptime.append(e.exptime)\n epoch.append(e.epoch)\n apcorr.append(e.apcorr)\n hdu = pf.BinTableHDU.from_columns(\\\n pf.ColDefs( [pf.Column(name='NAME',format=py_to_fits(name),array=name),\n pf.Column(name='RA',format=py_to_fits(ra),array=ra),\n pf.Column(name='DEC',format=py_to_fits(dec),array=dec),\n pf.Column(name='FIELDNUMBER',format=py_to_fits(field),array=field),\n pf.Column(name='INSTRUMENTNUMBER',format=py_to_fits(inst),\\\n array=inst),\n 
pf.Column(name=\"MJD\",format=py_to_fits(mjd),array=mjd),\n pf.Column(name=\"AIRMASS\",format=py_to_fits(airmass),array=airmass),\n pf.Column(name=\"EXPTIME\",format=py_to_fits(exptime),array=exptime),\n pf.Column(name=\"EPOCH\",format=py_to_fits(epoch),array=epoch),\n pf.Column(name=\"APCORR\",format=py_to_fits(apcorr),array=apcorr)] ),\n name = 'Exposures')\n # hdu.header['EXTNAME'] = 'Exposures'\n return hdu", "def FE_create_time_series_features(dft, ts_column, ts_adds_in=[]):\r\n dtf = copy.deepcopy(dft)\r\n reset_index = False\r\n try:\r\n # ts_column = None assumes that that index is the time series index\r\n reset_index = False\r\n if ts_column is None:\r\n reset_index = True\r\n ts_column = dtf.index.name\r\n dtf = dtf.reset_index()\r\n\r\n ### In some extreme cases, date time vars are not processed yet and hence we must fill missing values here!\r\n null_nums = dtf[ts_column].isnull().sum()\r\n if null_nums > 0:\r\n # missing_flag = True\r\n new_missing_col = ts_column + '_Missing_Flag'\r\n dtf[new_missing_col] = 0\r\n dtf.loc[dtf[ts_column].isnull(),new_missing_col]=1\r\n dtf[ts_column].fillna(method='ffill', inplace=True)\r\n print(' adding %s column due to missing values in data' %new_missing_col)\r\n if dtf[dtf[ts_column].isnull()].shape[0] > 0:\r\n dtf[ts_column].fillna(method='bfill', inplace=True)\r\n\r\n if dtf[ts_column].dtype == float:\r\n dtf[ts_column] = dtf[ts_column].astype(int)\r\n\r\n ### if we have already found that it was a date time var, then leave it as it is. Thats good enough!\r\n items = dtf[ts_column].apply(str).apply(len).values\r\n #### In some extreme cases,\r\n if all(items[0] == item for item in items):\r\n if items[0] == 4:\r\n ### If it is just a year variable alone, you should leave it as just a year!\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column],format='%Y')\r\n ts_adds = []\r\n else:\r\n ### if it is not a year alone, then convert it into a date time variable\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column], infer_datetime_format=True)\r\n ### this is where you create the time series features #####\r\n dtf, ts_adds = _create_ts_features(df=dtf, tscol=ts_column)\r\n else:\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column], infer_datetime_format=True)\r\n ### this is where you create the time series features #####\r\n dtf, ts_adds = _create_ts_features(df=dtf, tscol=ts_column)\r\n if not ts_adds_in:\r\n ts_adds_copy = dtf[ts_adds].select_dtypes(include='number').columns.tolist()\r\n ### drop those columns where all rows are same i.e. zero variance ####\r\n for col in ts_adds_copy:\r\n if dtf[col].std() == 0:\r\n dtf.drop(col, axis=1, inplace=True)\r\n print(' dropping column due to zero variance in %s column' %col)\r\n ts_adds.remove(col)\r\n else:\r\n rem_cols = left_subtract(dtf.columns.tolist(), ts_adds_in)\r\n dtf = dtf[rem_cols+ts_adds_in]\r\n\r\n # If you had reset the index earlier, set it back before returning\r\n # to make it consistent with the dataframe that was sent as input\r\n if reset_index:\r\n dtf = dtf.set_index(ts_column)\r\n elif ts_column in dtf.columns:\r\n dtf.drop(ts_column, axis=1, inplace=True)\r\n else:\r\n pass\r\n except Exception as e:\r\n print(e)\r\n print('Error in Processing %s column for date time features. Continuing...' 
%ts_column)\r\n return dtf, ts_adds", "def main():\n raw_data = pd.read_csv('data/raw_hospital_data.csv')\n\n fe_data = new_features(raw_data)\n fe_data = compressing_admission_type(data)\n fe_data = age_to_cat(fe_data)\n fe_data = compressing_careunit(fe_data)\n fe_data = compressing_curr_serv(fe_data)\n fe_data = compressing_ethnicity(fe_data)\n fe_data = compressing_marital_status(fe_data)\n fe_data = compressing_religion(fe_data)\n fe_data = compressing_admit_location(fe_data)\n fe_data = compress_icd9_codes(fe_data)\n\n fe_data.to_csv('data/feature_engineering_data.csv')", "def create_FEMA_P58_bldg_injury_db(\n source_file,\n target_data_file='bldg_injury_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_injury_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n\n f'DS {DS_i}, Potential non-collapse casualty?',\n f'DS {DS_i} - Casualty Affected Area',\n f'DS {DS_i} Serious Injury Rate - Median',\n f'DS {DS_i} Serious Injury Rate - Dispersion',\n f'DS {DS_i} Loss of Life Rate - Median',\n f'DS {DS_i} Loss of Life Rate - Dispersion',\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-AffectedArea\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['S1', 'S2']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'Severity'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component 
information is complete\n incomplete_S1 = False\n incomplete_S2 = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'S1'), 'DV-Unit'] = \"persons\"\n df_db.loc[(cmp.Index, 'S2'), 'DV-Unit'] = \"persons\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n inj_data = {}\n ds_tot = 0\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n casualty_model = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_model is True:\n\n inj_data.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'DS_{DS_i}___Casualty_Affected_Area'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Dispersion'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Dispersion')\n ])})\n ds_tot += 1\n\n elif casualty_model is False:\n ds_tot += 1\n\n # only continue if there is injury data\n if len(inj_data) == 0:\n continue\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = ds_tot\n ds_count = 2 ** (sim_ds_count) - 1\n\n # Here we take advantage of knowing that for every component with\n # simultaneous damage states, only one of the DSs has injury\n # consequences.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n ds_trig = list(inj_data.keys())[0]\n inj_data = inj_data[ds_trig]\n ds_trig = int(ds_trig[2:])\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, f'0{sim_ds_count}b')\n\n if ds_map[-ds_trig] == '1':\n\n # store the consequence data\n for severity in ('S1', 'S2'):\n\n A_affected = inj_data[0]\n\n if severity == 'S1':\n theta_0 = inj_data[1]\n theta_1 = inj_data[2]\n elif severity == 'S2':\n theta_0 = inj_data[3]\n theta_1 = 
inj_data[4]\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n # store the metadata\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. \" +\n cmp_meta[\n f\"DS_{ds_pure_id}_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo)\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n casualty_flag = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_flag is True:\n\n A_affected = getattr(cmp,\n f'DS_{DS_i}___Casualty_Affected_Area')\n\n for severity in ('S1', 'S2'):\n\n if severity == 'S1':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Dispersion')\n elif severity == 'S2':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n if (pd.isna(theta_0) or pd.isna(\n theta_1) or pd.isna(A_affected)):\n\n if severity == 'S1':\n incomplete_S1 = True\n else:\n incomplete_S2 = True\n\n if ~np.isnan(casualty_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[(cmp.Index, 'S1'), 'Incomplete'] = int(incomplete_S1)\n df_db.loc[(cmp.Index, 'S2'), 'Incomplete'] = int(incomplete_S2)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 16):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from FEMA \"\n \"P58\")" ]
[ "0.6386668", "0.6208587", "0.62022436", "0.60953605", "0.60796374", "0.60608006", "0.6022764", "0.5993953", "0.5985361", "0.59586924", "0.5820426", "0.5809583", "0.58089864", "0.58070034", "0.5787359", "0.575435", "0.57058054", "0.56917906", "0.56418794", "0.56213087", "0.5602896", "0.5580139", "0.55740535", "0.5564288", "0.5540875", "0.5534619", "0.55277455", "0.5516493", "0.5511482", "0.5505087", "0.55026233", "0.5502202", "0.54997367", "0.54858565", "0.5466443", "0.5447422", "0.5447013", "0.54439944", "0.5423971", "0.54135305", "0.5403702", "0.5403122", "0.5399725", "0.53929937", "0.5373217", "0.53691083", "0.53682834", "0.53673345", "0.53673345", "0.5351283", "0.53451896", "0.5344939", "0.5336556", "0.5336044", "0.5299803", "0.52839696", "0.5282822", "0.5277767", "0.5267129", "0.5254324", "0.52523226", "0.5247459", "0.52424306", "0.52421415", "0.5230049", "0.52284074", "0.52115774", "0.5181202", "0.5174715", "0.51730055", "0.51681864", "0.5167898", "0.5167494", "0.5164826", "0.51624846", "0.5159174", "0.51583225", "0.5155829", "0.5145844", "0.5144421", "0.5143173", "0.51382595", "0.51382595", "0.5136519", "0.5121942", "0.5120617", "0.51124537", "0.50998855", "0.50939786", "0.5091604", "0.50894004", "0.5086267", "0.508566", "0.50798935", "0.507707", "0.50730884", "0.50726074", "0.50690264", "0.5066334", "0.50626016" ]
0.62781596
1
Builds RFM scores for each customer and encodes the scores. When this method is called during the data_model build step, the dataframe holding the new RFM features is dumped into a file.
def data_transform_rfm(self) :
    is_built_step = False
    if self._encoder_rfm is None:
        is_built_step = True

    #-------------------------------------------------------------------------
    # RFM feature is built
    #-------------------------------------------------------------------------
    ser_invoice_date = self._df_invoice_line.InvoiceDate

    self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \
    = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\
    , df_RFM_threshold=self.df_RFM_quantiles)

    self._df_invoice_line.InvoiceDate = ser_invoice_date

    #-------------------------------------------------------------------------
    # RFM score is added to dataframe
    #-------------------------------------------------------------------------
    df_merged = pd.merge(self.df_invoice_line\
    , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])

    self._df_invoice_line \
    = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\
    , columns=df_merged.columns)

    #self._df_invoice_line \
    #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\
    #,join='inner')

    #-------------------------------------------------------------------------
    # RFM encoding
    #-------------------------------------------------------------------------
    self._encoder_rfm, df_RFM_encoded \
    = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)

    #-------------------------------------------------------------------------
    # Encoded RFM features are renamed
    #-------------------------------------------------------------------------
    df_customers_rfm, list_col_unchanged \
    = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\
    , 'w_rfm_')

    self.strprint("df_customers_rfm =" +str(df_customers_rfm.shape))

    #-------------------------------------------------------------------------
    # dataframe with RFM encoded values per customer is dumped
    #-------------------------------------------------------------------------
    if is_built_step is True:
        p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)
    else :
        self._df_customers_rfm = df_customers_rfm.copy()

    return
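The query/document pair above centers on RFM (recency, frequency, monetary) scoring per customer followed by one-hot encoding of the resulting RFM code; since the record only references project-specific p5_util helpers, the following is a minimal, self-contained sketch of the same idea with pandas and scikit-learn. The column names (InvoiceNo, Total), the quartile scoring scheme, and the OneHotEncoder settings are illustrative assumptions, not the behavior of p5_util.

import pandas as pd
from sklearn.preprocessing import OneHotEncoder

def build_rfm(df_invoice_line, day_now):
    # Aggregate invoice lines per customer into recency, frequency, monetary value.
    # Column names 'InvoiceNo' and 'Total' are assumed for illustration.
    rfm = df_invoice_line.groupby('CustomerID').agg(
        recency=('InvoiceDate', lambda s: (day_now - s.max()).days),
        frequency=('InvoiceNo', 'nunique'),
        monetary=('Total', 'sum'),
    )
    # Score each dimension 1-4 from quartiles (lower recency = better score),
    # then concatenate the three digits into a categorical RFM code.
    rfm['R'] = pd.qcut(rfm['recency'], 4, labels=[4, 3, 2, 1]).astype(int)
    rfm['F'] = pd.qcut(rfm['frequency'].rank(method='first'), 4, labels=[1, 2, 3, 4]).astype(int)
    rfm['M'] = pd.qcut(rfm['monetary'], 4, labels=[1, 2, 3, 4]).astype(int)
    rfm['RFM'] = rfm['R'].astype(str) + rfm['F'].astype(str) + rfm['M'].astype(str)
    return rfm

def one_hot_rfm(rfm, encoder=None):
    # Encode the categorical RFM code into w_rfm_* indicator columns.
    # sparse_output requires scikit-learn >= 1.2 (older releases use sparse=False).
    if encoder is None:
        encoder = OneHotEncoder(sparse_output=False, handle_unknown='ignore')
        encoded = encoder.fit_transform(rfm[['RFM']])
    else:
        encoded = encoder.transform(rfm[['RFM']])
    cols = ['w_rfm_' + c for c in encoder.categories_[0]]
    return encoder, pd.DataFrame(encoded, index=rfm.index, columns=cols)

# Example wiring: rfm = build_rfm(invoice_lines, pd.Timestamp.today()); enc, df = one_hot_rfm(rfm)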
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n #----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return", "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return", "def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n 
reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set 
predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def build(self):\n list_of_mafs = []\n maf_generator = self.get_dataframe()\n\n for maf_as_dict in maf_generator:\n list_of_mafs.extend(maf_as_dict)\n\n reporting_path = os.path.join(app.config.get('REPORTING_ROOT_PATH'), app.config.get('REPORTING_PATH'), 'global')\n combined_maf = None\n try:\n combined_maf = pandas.DataFrame(list_of_mafs)\n except Exception as e:\n logger.error(f'Problem creating dataframe from list of dicts: {str(e)}')\n try:\n combined_maf.to_csv(\n os.path.join(reporting_path, f'{self.method}_combined_maf.tsv'),\n sep=\"\\t\",\n encoding='utf-8',\n index='false'\n )\n except Exception as e:\n # bad practice here catching base exception, but the pandas documentation did not reveal what errors or\n # exceptions to expect\n logger.error(f'Problem writing the combined maf file to csv:{str(e)}')\n abort(500)", "def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n #----------------------------------------------------------------------\n self._df_customers_timeFeature = 
df_customers_timeFeature.copy()\n return", "def __init__(self, sc, dataset_path):\n\n logger.info(\"Starting up the Recommendation Engine: \")\n\n self.sc = sc\n\n\t#Load cusomer data for later use\n\t\n logger.info(\"Loading Customer data...\")\n customer_file_path = os.path.join(dataset_path, 'tpo_customer.csv')\n customer_raw_RDD = self.sc.textFile(customer_file_path)\n customer_raw_data_header = customer_raw_RDD.take(1)[0]\n self.customer_RDD = customer_raw_RDD.filter(lambda line: line!=customer_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]))).cache()\n\tlogger.info(\"Loading Customer data success...\")\n\t#CUSTOMCUSTOMER_NAME,CUSTOMER_ADDRESS1,CUSTOMER_ADDRESS2,CUSTOMER_CITY,CUSTOMER_STATE,CUSTOMER_COUNTRY,CUSTOMER_ZIPCODE,CREATED_BY,CREATION_DATE,LAST_UPDATED_BY,LAST_UPDATE_DATE\n \n\n\n\t\n\t#Load turbine data for later use\t\n logger.info(\"Loading Turbine data...\")\n turbine_file_path = os.path.join(dataset_path, 'test_tpo_unit_config.csv')\n turbine_raw_RDD = self.sc.textFile(turbine_file_path)\n turbine_raw_data_header = turbine_raw_RDD.take(1)[0]\n self.turbine_RDD = turbine_raw_RDD.filter(lambda line: line!=turbine_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[5]),(tokens[34]),(tokens[51]),(tokens[35]))).cache()\n\tlogger.info(\"Loading Turbine data success...\")\n \n\t\n\t\n\t\n\t#Load site data for later use\t\n logger.info(\"Loading Site data...\")\n site_file_path = os.path.join(dataset_path, 'tpo_site.csv')\n site_raw_RDD = self.sc.textFile(site_file_path)\n site_raw_data_header = site_raw_RDD.take(1)[0]\n self.site_RDD = site_raw_RDD.filter(lambda line: line!=site_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]),(tokens[16]))).cache()\n\tlogger.info(\"Loading Site data success...\")\n\t\n\n\n\n\t# Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n ratings_file_path = os.path.join(dataset_path, 'ratings.csv')\n ratings_raw_RDD = self.sc.textFile(ratings_file_path)\n ratings_raw_data_header = ratings_raw_RDD.take(1)[0]\n self.ratings_RDD = ratings_raw_RDD.filter(lambda line: line!=ratings_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()\n # Load movies data for later use\n logger.info(\"Loading Movies data...\")\n movies_file_path = os.path.join(dataset_path, 'movies.csv')\n movies_raw_RDD = self.sc.textFile(movies_file_path)\n movies_raw_data_header = movies_raw_RDD.take(1)[0]\n self.movies_RDD = movies_raw_RDD.filter(lambda line: line!=movies_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache()\n self.movies_titles_RDD = self.movies_RDD.map(lambda x: (int(x[0]),x[1])).cache()\n # Pre-calculate movies ratings counts\n self.__count_and_average_ratings()\n\n # Train the model\n self.rank = 8\n self.seed = 5L\n self.iterations = 10\n self.regularization_parameter = 0.1\n self.__train_model()", "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = 
np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def make_submission_file(w, unused_features, filename=\"prediction.csv\"):\n\n # load test datasets\n print_banner(\"7. Read test dataset from higgs-data/test.csv\") \n test_y, test_x, ind = load_csv_data('higgs-data/test.csv')\n\n # Construct Matrix Output with values of one\n y_pred = np.ones(len(test_y))\n\n # Split test dataset based\n print_banner(\"8. Split the test dataset into 8 subsets\") \n test_sets_x, _, indices = create_subsets(test_x, test_y)\n\n # Remove features of test datasets based on PRI_JET_NUM and DER_MASS_MMC\n print_banner(\"9. Remove features in each test subset based on PRI_JET_NUM and DER_MASS_MMC\")\n test_sets_x = remove_features(test_sets_x, unused_features) \n\n # Iterate through the test subsets with their models accordingly\n print_banner(\"10. Predict each test subset using their corresponding model\") \n for x, w, index in zip(test_sets_x, w, indices):\n\n # Perform z-score standardization and expand matrix features with logarithmic & polynomial & cross_term & square root basis function\n stand_x = generate_features(x, 2, True, with_log=True, with_sqrt=True, cross_terms=True)\n\n # Get the prediction\n y_pred[index] = predict_labels(w, stand_x)\n\n print_banner(\" Predicting subset: DONE\") \n \n # Creating submission file\n print_banner(\"11. Making final submission file with csv format\") \n create_csv_submission(ind, y_pred, filename)", "def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)", "def pre_processing_(data_df , serialized_objects):\n max_recency_acc_dig = serialized_objects['max_recency_acc_dig'] # These values are taken from trained model values\n max_recency_dig_2yr = serialized_objects['max_recency_dig_2yr'] # These values are taken from trained model values\n max_acc_recency_mf = serialized_objects['max_acc_recency_mf'] #These are values imported in training dataset. 
Same values need to be used to impute missing values in unseen data\n\n    data_df = data_df.na.fill({\n        'recency_acc_dig' : max_recency_acc_dig, # Filling missing values\n        'recency_dig_2yr' : max_recency_dig_2yr,\n        'acc_recency_mf' : max_acc_recency_mf\n    })\n\n    freq_acc_upg_2yrs_split = [-float('inf'), 0, 1, 2, float('inf')]\n    bucketizer_freq_acc_upg_2yrs = Bucketizer(splits=freq_acc_upg_2yrs_split, inputCol='freq_acc_upg_acc_2yrs', outputCol='freq_acc_upg_acc_2yrs_bkt')\n    data_df = bucketizer_freq_acc_upg_2yrs.setHandleInvalid('keep').transform(data_df) # Binning the freq_acc_upg_acc_2yrs column\n\n    tot_purchase_split = [-float('inf'), 0, 1, 2, 3, float('inf')]\n    bucketizer_tot_purchase = Bucketizer(splits=tot_purchase_split, inputCol='tot_accsry_purchse', outputCol='tot_accsry_purchse_bkt')\n    data_df = bucketizer_tot_purchase.setHandleInvalid('keep').transform(data_df) # Binning the tot_accsry_purchse column\n\n    del_cols_new = ['freq_acc_upg_acc_2yrs', 'tot_accsry_purchse']\n    data_df = data_df.drop(*del_cols_new) # Dropping the older continuous columns\n    return data_df", "def generateMatrix(self):\n        if self.tokenWeights and self.extraFeatures:\n            nFeatures = self.wordId + self.wordId2 + len(self.EXTRA_WEIGHTS_LABELS)\n            logging.info('Exporting TOKEN WEIGHTS AND EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n            mtrx = np.zeros((self.docId, nFeatures))\n            \n            for docId, doc in self.documents.iteritems():\n                # iterate through 1st sentence\n                for wId, val in doc['s1'].iteritems():\n                    mtrx[docId, wId] = val\n                # then iterate thru 2nd sentence, store on 2ND PARTITION\n                for wId, val in doc['s2'].iteritems():\n                    mtrx[docId, self.wordId + wId] = val\n                # finally extra features values stored at the end of the vector\n                for label, val in doc['extraFeatures'].iteritems():\n                    mtrx[docId, self.wordId + self.wordId2 + self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n\n        elif self.tokenWeights and not self.extraFeatures:\n            nFeatures = self.wordId + self.wordId2\n            logging.info('Exporting TOKEN WEIGHTS %dx%d'%(self.docId, nFeatures))\n            mtrx = np.zeros((self.docId, nFeatures))\n            \n            for docId, doc in self.documents.iteritems():\n                # iterate through 1st sentence\n                for wId, val in doc['s1'].iteritems():\n                    mtrx[docId, wId] = val\n                # then iterate thru 2nd sentence, store on 2ND PARTITION\n                for wId, val in doc['s2'].iteritems():\n                    mtrx[docId, self.wordId + wId] = val\n        else:\n            nFeatures = len(self.EXTRA_WEIGHTS_LABELS)\n            logging.info('Exporting EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n            mtrx = np.zeros((self.docId, nFeatures))\n            \n            for docId, doc in self.documents.iteritems():\n                for label, val in doc['extraFeatures'].iteritems():\n                    mtrx[docId, self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n        logging.info('Matrix generated')\n        logging.info(mtrx.shape)\n        return mtrx", "def make_dataset(interim_file_path, processed_file_path, weights, version):\n    qws_wsrf, qws_complete_numpy_array = src.dataset.compute_wsrf.compute_wsrf(interim_file_path, weights)\n    # qws_complete_numpy_array_temp = np.append(qws_complete_numpy_array, qws_wsrf[:, np.newaxis], axis=1)\n    qws_wsrf_level = np.array([])\n    for score in qws_wsrf:\n        if(score > 0.78):\n            level = 1\n        elif(score > 0.7):\n            level = 2\n        elif(score > 0.65):\n            level = 3\n        else:\n            level = 4\n        score = np.append(score, level)\n        qws_wsrf_level = np.append(qws_wsrf_level, score)\n    qws_wsrf_level = qws_wsrf_level.reshape(qws_wsrf.shape[0], 2)\n    if(version == 1):\n        qws_complete_numpy_array[:, 9:11] = qws_wsrf_level\n    elif(version == 2):\n        qws_complete_numpy_array = np.hstack((qws_complete_numpy_array, 
np.zeros((qws_wsrf.shape[0], 2))))\n qws_complete_numpy_array[:, 11:13] = qws_complete_numpy_array[:, 9:11]\n qws_complete_numpy_array[:, 9:11] = qws_wsrf_level\n else:\n print(\"Version has to be either 1 or 2\")\n\n qws_complete_dataframe_new = pd.DataFrame(qws_complete_numpy_array)\n qws_complete_dataframe_new = qws_complete_dataframe_new.astype({10: int})\n qws_complete_dataframe_new.to_csv(processed_file_path, header=False, index=False)", "def create_weka_mfcc_13():\n global ARGS\n\n ## ten thu muc can trich chon vector dac trung (RLS, LMS, NLMS, Kalman, Non)\n name = '';\n fout = open('weka/MFCC78_TUNNING_{}_dataset.arff'.format(name), 'w')\n fout.write('@RELATION {}_dataset\\n\\n'.format(name))\n\n fout.write('@ATTRIBUTE MEAN_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE 
STD_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE class \t{'+ARGS.labels+'}\\n\\n')\n \n fout.write('@DATA\\n')\n\n ## cua so\n windowing = Windowing(type='hamming',\n size=1104,\n zeroPhase=False)\n \n ## quang pho\n spectrum = Spectrum(size=1104)\n\n ##khoi tao MFCC\n mfcc = MFCC(highFrequencyBound=4000, ## gioi han tren cua tan so\n inputSize=201, \t\t\t ## kich thuoc pho dau vao\n lowFrequencyBound=0,\t ## gioi han duoi cua tan so\n numberBands=40,\t\t\t ## so luong cac dai Mels trong bo loc\n numberCoefficients=13, ## so luong dau ra cac he so Mel\n sampleRate=16000)\t\t ## tan so lay mau\n\n for label in ARGS.labels.split(','): ## duyet cac thu muc giong voi ten nhan\n\n ## dia chi thu muc\n dir = os.path.join(ARGS.dir, label)\n\n logging.info('Access folder <{}>'.format(dir))\n\n for file in sorted(os.listdir(dir)):\n\n \t## duyet cac file .wav\n if file.endswith('.wav'):\n logging.info('Process <{}>'.format(file))\n path = os.path.join(dir, file)\n \n ## doc file am thanh\n loader = MonoLoader(filename=path, sampleRate=ARGS.sampleRate)\n audio = loader()\n cnt = 0\n\n for window in FrameGenerator(audio, \n frameSize=ARGS.window_length*ARGS.sampleRate/1000, \n hopSize=ARGS.window_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n mfccs = []\n for frame in FrameGenerator(window, \n frameSize=ARGS.frame_length*ARGS.sampleRate/1000, \n hopSize=ARGS.frame_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n s = spectrum(windowing(frame))\n\n _, m = mfcc(s)\n\n m_delta = librosa.feature.delta(m, order=1) ## dao ham bac 1\n m_delta_delta = librosa.feature.delta(m, order=2) ## dao ham bac 2\n\n m_all = np.concatenate((m, m_delta, m_delta_delta), axis=0) ## them vao chuoi\n mfccs.append(m_all)\n mfccs = np.array(mfccs)\n mfccs_mean = np.mean(mfccs, axis=0)\n mfccs_std = np.std(mfccs, axis=0)\n feat = np.concatenate((mfccs_mean, mfccs_std), axis=0).tolist()\n str_feat = [str(x) for x in feat]\n line = ','.join(str_feat)+','+label\n fout.write(line+'\\n')\n cnt = cnt+1\n logging.info('{} samples'.format(cnt))", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n 
self.check_hdf5_files(self.train_database)\n\n        if self.valid_database:\n            self.valid_database = self.check_hdf5_files(\n                self.valid_database)\n\n        if self.test_database:\n            self.test_database = self.check_hdf5_files(\n                self.test_database)\n\n        # create the indexing system\n        # allows to associate each mol to an index\n        # and get fname and mol name from the index\n        self.create_index_molecules()\n\n        # get the actual feature name\n        if self.mapfly:\n            self.get_raw_feature_name()\n        else:\n            self.get_mapped_feature_name()\n\n        # get the pairing\n        self.get_pairing_feature()\n\n        # get grid shape\n        self.get_grid_shape()\n\n        # get the input shape\n        self.get_input_shape()\n\n        # get renormalization factor\n        if self.normalize_features or self.normalize_targets or self.clip_features:\n            if self.mapfly:\n                self.compute_norm()\n            else:\n                self.get_norm()\n\n        logger.info('\\n')\n        logger.info(\"    Data Set Info:\")\n        logger.info(\n            f'   Augmentation       : {self.use_rotation} rotations')\n        logger.info(\n            f'   Training set       : {self.ntrain} conformations')\n        logger.info(\n            f'   Validation set     : {self.nvalid} conformations')\n        logger.info(\n            f'   Test set           : {self.ntest} conformations')\n        logger.info(f'   Number of channels : {self.input_shape[0]}')\n        logger.info(f'   Grid Size          : {self.data_shape[1]}, '\n                    f'{self.data_shape[2]}, {self.data_shape[3]}')\n        sys.stdout.flush()", "def CCF_toExcel(self, data_set, ccf_inputs):\n        file_name = self.file_path(target_filename=\"LEICode_CCF_ModelID_EndOfObservationPeriod_versionNumber.xlsx\")\n        oxl = openpyxl.load_workbook(file_name)\n\n        # Information missing from test results:\n        start_date\t = datetime.date(2007, 1, 1)\n        end_date\t = datetime.date(2015, 1, 1)\n        nb_customer = len(data_set.id.unique())\n        grade_nb = data_set.Bin_CCF.unique()\n        grade_name = []\n        grade_counts = []\n        avCCFE_perGrade = []\n        avCCFR_perGrade = []\n        minCCFR_perGrade = []\n        maxCCFR_perGrade = []\n        q5CCFR_perGrade = []\n        q10CCFR_perGrade = []\n        q25CCFR_perGrade = []\n        q50CCFR_perGrade = []\n        q75CCFR_perGrade = []\n        q90CCFR_perGrade = []\n        q95CCFR_perGrade = []\n        for g in range(1, len(grade_nb) + 1):\n            grade_name.append( self.grade_mapping(grade_num = g) )\n            grade_counts.append( data_set[data_set.Default_Binary == 1][\"Bin_CCF\"].value_counts()[g] )\n            avCCFE_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF.mean()[g] )\n            avCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.mean()[g] )\n            minCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.min()[g])\n            maxCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.max()[g])\n            q5CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.05)[g])\n            q10CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.10)[g])\n            q25CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.25)[g])\n            q50CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.50)[g])\n            q75CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.75)[g])\n            q90CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.90)[g])\n            q95CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.95)[g])\n\n        bcktesting_ccf_ptf = [\"N/A\", #Name of facility grade/pool or segment\n                              len(data_set.id.unique()), # Number of facilities (R)\n                              data_set.CCF.mean(), # Average estimated CCF (CCF^E)\n                              data_set.CCF_realised.mean(), # Average realised CCF (CCF^R)\n                              0.0, # Floor used (if applicable)\n                              0.0, # Number of CCF realisations floored\n                              data_set.CCF_realised.min(), # Minimum 
CCF^R\n data_set.CCF_realised.quantile(0.05), # Quantiles\n data_set.CCF_realised.quantile(0.10), #\n data_set.CCF_realised.quantile(0.25), #\n data_set.CCF_realised.quantile(0.50), #\n data_set.CCF_realised.quantile(0.75), #\n data_set.CCF_realised.quantile(0.90), #\n data_set.CCF_realised.quantile(0.95), #\n data_set.CCF_realised.max(), # Maximum CCF^R\n 0 # Exposure-weighted average of CCF^R (to be created)\n ]\n\n # Predictive ability\n ## CCF back-testing using a t-test (§ 2.9.3.1) - sheet 3.1\n wbk31 = oxl.get_sheet_by_name(\"3.1\")\n # Grade Lvl\n self.array_toExcel(wb=wbk31, stat_array = grade_name, row_pos=10, col_pos=4, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = grade_counts, row_pos=10, col_pos=5, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFE_perGrade, row_pos=10, col_pos=6, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFR_perGrade, row_pos=10, col_pos=7, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=8, row_wise=True) # Floor used (if applicable)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=9, row_wise=True) # Number of CCF realisations floored\n self.array_toExcel(wb=wbk31, stat_array= minCCFR_perGrade, row_pos=10, col_pos=10, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= maxCCFR_perGrade, row_pos=10, col_pos=18, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=19, row_wise=True) # Exposure-weighted average of CCF^R (to be created)\n self.array_toExcel(wb=wbk31, stat_array= q5CCFR_perGrade, row_pos=10, col_pos=11, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q10CCFR_perGrade, row_pos=10, col_pos=12, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q25CCFR_perGrade, row_pos=10, col_pos=13, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q50CCFR_perGrade, row_pos=10, col_pos=14, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q75CCFR_perGrade, row_pos=10, col_pos=15, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q90CCFR_perGrade, row_pos=10, col_pos=16, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q95CCFR_perGrade, row_pos=10, col_pos=17, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= [0] * 7, row_pos=10, col_pos=23, row_wise=True) # Number of facilities excluded due to outlier handling (set to zero)\n\n # Ptf Lvl\n self.df_toExcel(wb=wbk31, df = pd.DataFrame(ccf_inputs[\"predictive_ability\"][1]).T, row_pos=10, col_pos=20)\n self.array_toExcel(wb=wbk31, stat_array=ccf_inputs[\"predictive_ability\"][0], row_pos=8, col_pos=20, row_wise=False)\n self.array_toExcel(wb=wbk31, stat_array=bcktesting_ccf_ptf, row_pos=8, col_pos=4, row_wise=False)\n wbk31.cell(row=8, column=23).value = 0 # Number of facilities excluded due to outlier handling\n\n # Discriminatory Power\n ## Current gAUC vs gAUC at initial validation/development (§ 2.9.3.1) - sheet 4.0\n wbk40 = oxl.get_sheet_by_name(\"4.0\")\n self.array_toExcel(wb=wbk40, stat_array=ccf_inputs[\"AUC\"][:-1], row_pos=7, col_pos=4, row_wise=False)\n wbk40.cell(row= 7, column= 10).value = start_date # start date\n wbk40.cell(row=7, column=11).value = end_date # end date\n wbk40.cell(row=7, column=12).value = nb_customer # nb of customers\n wbk40.cell(row=7, column=13).value = ccf_inputs[\"AUC\"][-1] # Variance (gAUC_init)\n\n # Save file\n oxl.save(file_name)\n oxl.close()\n return \"CCF results saved to Excel.\"", "def submission(self):\n\n\t\tprobas = self.y_pred / self.count_models\n\n\t\tsub = 
pd.DataFrame({'id':self.X_test.PostId, 'OpenStatus':probas}).set_index('id')\n\t\tsub.to_csv('sub.csv')", "def write_to_csv(self, name_suffix = ''):\n f_path = os.path.join(self.root_dir, 'res' + name_suffix + '.csv')\n field_names = [] # the first field in CSV is 'obj_val'\n\n # put the keys in the cost, prim_var_change, dual_var_change and fea_conditions as field names if any\n for key in self.cost.keys():\n field_names.append(key)\n for key in self.cost_change.keys():\n field_names.append(key)\n for key in self.prim_var_change.keys():\n field_names.append(key)\n for key in self.dual_var_change.keys():\n field_names.append(key)\n for key in self.fea_conditions.keys():\n field_names.append(key)\n\n\tprint f_path\n\n with open(f_path, mode = 'wb') as csv_file: # open the file, if not exist, create it\n writer = csv.DictWriter(csv_file, fieldnames = field_names) # create a writer which maps the dictionaries onto output rows in CSV\n writer.writeheader() # write the field names to the header\n temp_dict = {} # create a temporary dict used to output rows\n row_max = self.get_iter_num() # get the max iters which indicates the number of rows in CSV\n print ('number of rows: ' + str(row_max))\n #print (field_names)\n for row in range(row_max + 1):\n temp_dict.clear() # clear all items\n start_idx = 0\n for i in range(len(self.cost)):\n field = field_names[start_idx + i]\n\t\t if row > len(self.cost[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n\t\t else: temp_dict[field] = self.get_cost_val(field, row)\n\n start_idx = start_idx + len(self.cost) # the start pos of fields in field_names for prim_var_change\n for i in range(len(self.cost_change)): # for each cost_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n elif row > len(self.cost_change[field]) - 1:\n\t\t\t temp_dict[field] = ''\n\t\t else:\n temp_dict[field] = self.get_cost_change_value(field, row - 1)\n\n\n start_idx = start_idx + len(self.cost_change)\n for i in range(len(self.prim_var_change)): # for each prim_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n\t\t elif row > len(self.prim_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else:\n temp_dict[field] = self.get_prim_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.prim_var_change) # go to the start pos of fields in field_names for dual_var_change\n for i in range(len(self.dual_var_change)): # for each dual_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of dual variables\n temp_dict[field] = '/'\n elif row > len(self.dual_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = '' \n\t\t else:\n temp_dict[field] = self.get_dual_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.dual_var_change) # go the the start pos of fields in field_names for fea_conditions\n for i in range(len(self.fea_conditions)): # for each fea_condition\n field = field_names[start_idx + i]\n\t\t if row > len(self.fea_conditions[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else: temp_dict[field] = self.get_fea_condition_value(field, row)\n\n writer.writerow(temp_dict)\n\n # we also save the value of primal values if not saved\n if not self.pdv_to_csv:\n self.save_last_prims()", "def produce_init(filename):\n training_dataset = pd.read_csv(f'../Modified Data/{filename}')\n test_dataset = 
pd.read_csv(f'../Raw Data/test.csv')\n features = list(training_dataset.columns)\n features.remove('SalePrice')\n predict_feature = ['SalePrice']\n\n # Produce Test Data\n test_X = test_dataset.loc[:, features]\n ids_test = test_dataset.loc[:, 'Id']\n\n for column in features:\n if str(training_dataset.loc[:, column].dtype) == 'object':\n # Initialize encoder\n labelencoder = LabelEncoder()\n # Encode Train Data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna('Missing')\n training_dataset.loc[:, column] = pd.Series(labelencoder.fit_transform(training_dataset.loc[:, column]))\n # Encode Test Data\n test_X.loc[:, column] = test_X.loc[:, column].fillna('Missing')\n test_X.loc[:, column] = pd.Series(labelencoder.fit_transform(test_X.loc[:, column]))\n else:\n # Fix missing values for train data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna(int(training_dataset.loc[:, column].mean()))\n # Fix missing values for test data\n test_X.loc[:, column] = test_X.loc[:, column].fillna(int(test_X.loc[:, column].mean()))\n\n return training_dataset, test_X, ids_test", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def create_feables(matches, fifa_stats, bookkeepers, verbose=True):\n\n if verbose:\n print(\"Generating match features...\")\n start = time()\n\n # Get match features for all matches (apply to each row)\n match_stats = matches.apply(lambda match: get_match_features(match, matches), axis=1)\n\n # Create dummies for league ID feature\n # deleting this as i am only looking at EPL\n # dummies = 
pd.get_dummies(match_stats['league_id']).rename(columns=lambda x: 'League_' + str(x))\n    # match_stats = pd.concat([match_stats, dummies], axis=1)\n    match_stats.drop(['league_id'], inplace=True, axis=1)\n\n    end = time()\n    if verbose:\n        print(\"Match features generated in {:.1f} minutes\".format((end - start) / 60))\n\n    if verbose:\n        print(\"Generating match labels...\")\n        start = time()\n\n    # Create match labels\n    labels = matches.apply(get_match_label, axis=1)\n    end = time()\n    if verbose:\n        print(\"Match labels generated in {:.1f} minutes\".format((end - start) / 60))\n\n    # if verbose == True:\n    #     print(\"Generating bookkeeper data...\")\n    #     start = time()\n    # Get bookkeeper quotas for all matches\n    # bk_data = get_bookkeeper_data(matches, bookkeepers, horizontal=True)\n    # bk_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']\n    # end = time()\n    # if verbose == True:\n    #     print(\"Bookkeeper data generated in {:.1f} minutes\".format((end - start) / 60))\n\n    # Merges features and labels into one frame\n    features = pd.merge(match_stats, fifa_stats, on='match_api_id', how='left')\n    # features = pd.merge(features, bk_data, on='match_api_id', how='left')\n    # features = match_stats\n    feables = pd.merge(features, labels, on='match_api_id', how='left')\n\n    # Drop NA values\n    feables.dropna(inplace=True)\n\n    # Return preprocessed data\n    return feables", "def build_enru_custom_ft(self):\n    eval_data_file = self.data_dir + '/' + enru_newscomm\n    eval_data = tf.data.experimental.CsvDataset(\n        [eval_data_file],\n        record_defaults=[tf.string, tf.string],\n        compression_type='GZIP',\n        field_delim='\\t',\n        use_quote_delim=False)\n    train_data = eval_data.skip(3000).take(6000)\n    eval_data = eval_data.take(3000)\n\n    eval_data = eval_data.cache()\n    train_data = train_data.cache()\n    def to_features_dict(eng, rus):\n      return {'inputs': eng, 'targets': rus}\n\n    train_data = train_data.map(to_features_dict)\n    eval_data = eval_data.map(to_features_dict)\n\n    self.default_builder_obj = None\n\n    return train_data, eval_data", "def create_matrix(ratings_df, jokes_df):\r\n    \"\"\" note: empty entries are populated with zeros \"\"\"\r\n\r\n    matrix_handler = matrix_object()\r\n\r\n    num_joke_features = 5\r\n\r\n    ''' add all joke features '''\r\n    for row_idx in range(0, jokes_df.shape[0]):\r\n        joke_idx = int(jokes_df.iloc[row_idx][\"Idx\"])\r\n        isAggressive = jokes_df.iloc[row_idx][\"isAggressive\"]\r\n        isIncongruence = jokes_df.iloc[row_idx][\"isIncongruence\"]\r\n        generation = jokes_df.iloc[row_idx][\"Generation\"]\r\n        isMillenial = (generation == \"Millenial\")\r\n        isGenX = (generation == \"Gen X\")\r\n        isGenZ = (generation == \"Gen Z\")\r\n\r\n        if(int(isMillenial) == 1.0 and int(isGenX) == 1.0):\r\n            raise ValueError()\r\n\r\n        matrix_handler.add_value(joke_idx - 1, 0, int(isAggressive))\r\n        matrix_handler.add_value(joke_idx - 1, 1, int(isIncongruence))\r\n        matrix_handler.add_value(joke_idx - 1, 2, int(isMillenial))\r\n        matrix_handler.add_value(joke_idx - 1, 3, int(isGenX))\r\n        matrix_handler.add_value(joke_idx - 1, 4, int(isGenZ))\r\n\r\n    ''' add all ratings '''\r\n    for row_idx in range(0, ratings_df.shape[0]):\r\n        for joke_idx in range(1, 122):\r\n            col_name = \"joke\" + str(joke_idx)\r\n            matrix_handler.add_value(joke_idx - 1, row_idx + num_joke_features, ratings_df.iloc[row_idx][col_name])\r\n\r\n    matrix = matrix_handler.compile_matrix()\r\n    new_df = matrix_handler.to_df(matrix)\r\n\r\n    return matrix, new_df", "def main():\n\n    logger.info('Process initiated - Building dataset')\n\n    if os.path.isfile(train_path) and 
os.path.isfile(test_path):\n logger.info('Loading pickled data')\n return pd.read_pickle(train_path), pd.read_pickle(test_path)\n\n logger.info('Reading COSMIC Cancer Gene Census')\n gene_census = cancer_gene_census()\n gene_census.extend(civic_cancer_genes())\n\n gene_census = set(gene_census)\n\n training_data = pd.DataFrame()\n testing_data = pd.DataFrame()\n\n for cancer_type in cancer_types:\n data_file_name = cancer_type + \".meth.by_mean.data.txt\"\n data_file_location = os.path.join(data_location, data_file_name)\n\n logger.info('Reading Methylation data for {}'.format(cancer_type))\n\n methyl_data = pd.read_csv(data_file_location, delimiter='\\t', skiprows=[1], index_col=0)\n\n logger.info(\n 'Number of Genes: {0} | Number of Patients: {1}'.format(methyl_data.shape[0], methyl_data.shape[1]))\n logger.info('Preprocessing Methylation data')\n\n methyl_data = genes_feature_selection(methyl_data, gene_census)\n\n logger.info('Number of Genes after processing: {0}\\n'.format(methyl_data.shape[0]))\n\n methyl_data = add_classification_label(methyl_data)\n methyl_data = methyl_data.transpose()\n\n normal_cases = methyl_data[methyl_data['Tumor'] == 0]\n logger.info(normal_cases.shape)\n train_normal_cases = normal_cases.sample(frac=0.7, random_state=200)\n logger.info(train_normal_cases.shape)\n test_normal_cases = normal_cases.drop(train_normal_cases.index)\n logger.info(train_normal_cases.shape)\n\n tumor_cases = methyl_data[methyl_data['Tumor'] != 0]\n logger.info(tumor_cases.shape)\n train_tumor_cases = tumor_cases.sample(frac=0.7, random_state=200)\n logger.info(train_tumor_cases.shape)\n\n test_tumor_cases = tumor_cases.drop(train_tumor_cases.index)\n logger.info(test_tumor_cases.shape)\n\n training_data = training_data.append(train_normal_cases)\n training_data = training_data.append(train_tumor_cases)\n\n testing_data = testing_data.append(test_normal_cases)\n testing_data = testing_data.append(test_tumor_cases)\n\n training_data = training_data.sample(frac=1)\n testing_data = testing_data.sample(frac=1)\n\n logger.info('Pickling training and testing data')\n training_data.to_pickle(train_path)\n testing_data.to_pickle(test_path)\n\n logger.info('Processing completed!')\n visualize_data(training_data)\n\n return training_data, testing_data", "def main():\n\n if os.path.exists(os.path.join(PROCESSED_PATH,\n 'all_posts_data.csv')):\n print(\"-- all_posts_data.csv found locally - delete interm files if rerun needed\")\n total_df = pd.read_csv(PROCESSED_PATH / 'all_posts_data.csv')\n else:\n training_post_filenames = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'training',\n 'posts', '*.xml'))\n dev_post_filenames = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'testing',\n 'posts', '*.xml'))\n\n new_posts2017 = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test',\n 'posts', '*.xml'))\n\n training_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'training',\n 'labels.tsv')\n dev_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'testing',\n 'labels.tsv')\n\n training_df = create_posts_df(training_post_filenames)\n dev_df = create_posts_df(dev_post_filenames)\n new_df = create_posts_df(new_posts2017)\n\n training_df['corpus_source'] = '2016train_2017train'\n dev_df['corpus_source'] = '2016test_2017train'\n new_df['corpus_source'] = '2017test'\n\n training_df = merge_post_labels(training_df, training_labels)\n dev_df = merge_post_labels(dev_df, dev_labels)\n\n training_df = 
merge_author_ranks(training_df)\n dev_df = merge_author_ranks(dev_df)\n new_df = merge_author_ranks(new_df)\n\n total_df = pd.concat([training_df, dev_df, new_df])\n\n test_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test',\n 'test_ids.tsv')\n total_df.reset_index(inplace=True)\n total_df = merge_test_ids(total_df, test_labels)\n label_file = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test-labels.tsv')\n merge_ground_truth(total_df, label_file)\n output_path = PROCESSED_PATH / 'all_posts_data.csv'\n \n\n # clean body of text\n total_df['cleaned_body'], total_df['contained_quote'] = zip(*total_df['body'].apply(process_body))\n total_df['images'] = total_df['body'].apply(process_images)\n\n print('--Writing data to {}--'.format(output_path))\n total_df.to_csv(output_path, index=False)\n\n sentences_df = total_df.loc[:, ['post_id', 'cleaned_body', 'label', 'predict_me']]\n # the following will split posts into sentences and write out to a separate csv\n split_to_sentences(sentences_df)", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = 
_add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def build_wmt_ft(self):\n train_files = [self.data_dir + '/' + wmt_train]\n eval_files = [self.data_dir + '/' + wmt_test]\n\n train_data = tf.data.experimental.CsvDataset(\n train_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n eval_data = tf.data.experimental.CsvDataset(\n eval_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.cache()\n train_data = train_data.cache() # only read once\n\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n return train_data, eval_data", "def prepare_class_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n test_tweets = dataframe.iloc[:, [0, 1, 2]]\r\n\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n feature_X_user = pd.DataFrame\r\n emo_X_test_dict = {}\r\n\r\n\r\n for emotion, model_prop in model_dict.items():\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n print(emotion + 'TRAIN', train_vect.shape)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.concat([test_vect_df, feature_X_user], axis=1)\r\n emo_X_test_dict[emotion] = X_test\r\n print(emotion + 'TEST', test_vect_df.shape, X_test.shape)\r\n return emo_X_test_dict", "def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += 
[(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df", "def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)", "def store_sorted_features(self) -> None:\n\n makedirs(dirname(self.model_weights_path_template_), exist_ok=True)\n\n # Generate feature weights files and a README.json providing\n # the parameters corresponding to each set of feature weights\n params_dict = {}\n for learner_name in self.cv_learners_:\n\n # Skip MiniBatchKMeans models\n if learner_name == 'MiniBatchKMeans':\n logdebug('Skipping MiniBatchKMeans learner instances since '\n 'coefficients can not be extracted from them.')\n continue\n\n for i, estimator in enumerate(self.cv_learners_[learner_name]):\n\n # Get dataframe of the features/coefficients\n try:\n ex.print_model_weights(estimator,\n learner_name,\n self.data_.classes,\n self.cfg_.games,\n self.vec_,\n self.model_weights_path_template_\n .format(learner_name, i + 1))\n params_dict.setdefault(learner_name, {})\n params_dict[learner_name][i] = estimator.get_params()\n except ValueError:\n logerr('Could not generate features/feature coefficients '\n 'dataframe for {0}...'.format(learner_name))\n\n # Save parameters file also\n if params_dict:\n dump(params_dict,\n open(join(dirname(self.model_weights_path_template_),\n 'model_params_readme.json'), 'w'),\n indent=4)", "def apply(self):\n\n sc = SparkContext(appName=\"Model Applier\")\n sqlContext = SQLContext(sc)\n\n # Add model and supporting files to SparkContext\n for item in self.model_location_dict.items():\n ModelApplier.add_files_to_context(item[1], sc)\n\n partition_processor = self.get_partition_processor()\n infile = sc.textFile(self.input_location)\n header_line = infile.first()\n infile = infile.filter(lambda x: x != header_line)\n\n result = infile.mapPartitions(partition_processor).flatMap(lambda x: x)\n print('result.class', result.__class__)\n\n result = result.map(lambda (x, a, y, segment, model_version):\n (int(x), float(a), float(y), segment, model_version))\n sqlContext.createDataFrame(result).saveAsParquetFile(self.output_location)", "def 
output_rule_feature_matrices():\n with open(config.data_path + config.sentiment_seed, 'rb') as input_file:\n sentiment_dict = pickle.load(input_file)\n seed_sentiments = set(sentiment_dict.keys())\n \n for i in range(len(config.file_names)):\n if i is 5:\n print('processing ', config.file_names[i])\n fname = config.file_names[i]\n feature_x, feature_y, opinion_x, opinion_y = text_to_matrix(\n fname, seed_sentiments)\n feature_x = np.transpose(feature_x)\n opinion_x = np.transpose(opinion_x)\n with open('../results/' + fname + '_rule_feature_matrix.pickle', 'wb') as f:\n pickle.dump(feature_x, f)\n with open('../results/' + fname + '_rule_opinion_matrix.pickle', 'wb') as f:\n pickle.dump(opinion_x, f)\n\n with open('../results/' + fname + '_feature_label.pickle', 'wb') as f:\n pickle.dump(feature_y.ravel(), f)\n with open('../results/' + fname + '_opinion_label.pickle', 'wb') as f:\n pickle.dump(opinion_y.ravel(), f)", "def computeSoftwareMLModels(df,data_label,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model,rank_features=False,compute_null=False,n_splits=10,n_repeats=10,n_jobs=1):\n software_list = df[data_label].unique()\n print('Running ML classifer on {} {}'.format(len(software_list),data_label))\n scores_concat_df = pd.DataFrame()\n feature_rank_concat_df = pd.DataFrame()\n external_scores_concat_df = pd.DataFrame()\n\n perf_pval_dict = {}\n for pipe in software_list:\n ml_df = df[df[data_label]==pipe]\n print('{} {}'.format(data_label, pipe))\n\n #cross_val_score\n scores_df, null_df, pvalue, feature_rank_df = getMLModelPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,model_type,ml_model,rank_features,compute_null,n_splits,n_repeats,n_jobs) \n scores_df[data_label] = np.tile(pipe,len(scores_df))\n scores_concat_df = scores_concat_df.append(scores_df)\n \n if compute_null:\n null_df[data_label] = np.tile('null',len(null_df))\n scores_concat_df = scores_concat_df.append(null_df)\n perf_pval_dict[pipe] = pvalue\n\n # RFECV\n if rank_features:\n feature_rank_df[data_label] = np.tile(pipe,len(feature_rank_df))\n feature_rank_concat_df = feature_rank_concat_df.append(feature_rank_df)\n\n # explicit CV for internal vs external perfomance\n if group_col:\n external_scores_df = getIndependentTestSetPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model)\n external_scores_df[data_label] = np.tile(pipe,len(external_scores_df))\n external_scores_concat_df = external_scores_concat_df.append(external_scores_df) \n\n return scores_concat_df, perf_pval_dict, feature_rank_concat_df, external_scores_concat_df", "def __init__(self,\n feature_selection=True,\n clinical_path='data/tidy/train_cli.csv',\n proteomic_path='data/tidy/train_pro.csv',\n rna_path='data/tidy/train_rna.csv',\n mismatch_path='data/tidy/sum_tab_1.csv',\n test_proteomic_path='data/raw/test_pro.tsv',\n test_clinical_path='data/raw/test_cli.tsv',\n train_rna_path='data/raw/train_rna.tsv',\n test_rna_path='data/raw/test_rna.tsv',\n mislabel_path='data/tidy/sum_tab_2.csv'):\n self.clinical = pd.read_csv(clinical_path, index_col=0)\n self.proteomic = self.preprocess(\n pd.read_csv(proteomic_path, index_col=0)\n )\n self.rna = self.preprocess(\n pd.read_csv(rna_path, index_col=0)\n )\n self.mismatch = pd.read_csv(mismatch_path, index_col=0)\n self.test_proteomic = self.preprocess(\n pd.read_csv(test_proteomic_path, index_col=0, sep='\\t').T\n )\n self.test_rna = self.preprocess(\n pd.read_csv(test_rna_path, index_col=0, sep='\\t').T\n )\n 
self.test_clinical = pd.read_csv(test_clinical_path, index_col=0, sep='\\t')\n self.train_rna = pd.read_csv(train_rna_path, index_col=0, sep='\\t').T\n self.train_pro_rna = self.train_rna.merge(self.proteomic, how='outer', left_index=True, right_index=True)\n self.test_pro_rna = self.test_rna.merge(self.test_proteomic, how='outer', left_index=True, right_index=True)\n self.train_all = self.train_pro_rna.merge(self.clinical, how='outer', left_index=True, right_index=True)\n self.train_all = self.train_all.replace(['Female', 'Male','MSI-Low/MSS', 'MSI-High'], [0, 1, 0, 1])\n self.test_all = self.test_pro_rna.merge(self.test_clinical, how='outer', left_index=True, right_index=True)\n self.test_all = self.test_all.replace(['Female', 'Male', 'MSI-Low/MSS', 'MSI-High'], [0, 1, 0, 1])\n self.mislabel = pd.read_csv(mislabel_path, index_col=0)\n\n if feature_selection:\n self.select_features()\n\n # create training labels for if a sample has been mislabeled\n self.mislabel_labels = []\n for i in range(0, len(self.mislabel.index)):\n if self.mislabel.iloc[i, 0] == self.mislabel.iloc[i, 1] and self.mislabel.iloc[i, 1] == self.mislabel.iloc[i, 2]:\n self.mislabel_labels.append(0)\n else:\n self.mislabel_labels.append(1)", "def create_models(self):\r\n self.all_ratings = AllRatingsWithCommon(\r\n experts=self.users,\r\n objects=self.videos,\r\n output_features=self.features,\r\n name=\"prod\",\r\n )\r\n\r\n print_memory(stage=\"DPLF:ratings_nodata_created\")\r\n\r\n # creating models\r\n self.user_to_model = {\r\n user: FeaturelessPreferenceLearningModel(\r\n expert=user, all_ratings=self.all_ratings\r\n )\r\n for user in self.users\r\n }\r\n\r\n print_memory(stage=\"DPLF:models_created\")\r\n\r\n # before creating the aggregator, filling models with data\r\n self.user_to_size = {\r\n user: self.fill_model_data(self.user_to_model[user], user)\r\n for user in tqdmem(self.users, desc=\"fill_data\")\r\n }\r\n\r\n # virtual 'common' data\r\n fplm_common = FeaturelessPreferenceLearningModel(\r\n expert=AllRatingsWithCommon.COMMON_EXPERT, all_ratings=self.all_ratings\r\n )\r\n fplm_common.on_dataset_end()\r\n\r\n print_memory(stage=\"DPLF:data_filled\")\r\n\r\n # resetting the model given the data\r\n self.all_ratings.reset_model()\r\n\r\n print_memory(stage=\"DPLF:model_reset_ok\")\r\n\r\n # aggregating models\r\n self.aggregator = FeaturelessMedianPreferenceAverageRegularizationAggregator(\r\n models=[self.user_to_model[u] for u in self.users]\r\n )\r\n self.aggregator.certification_status = self.user_certified\r\n\r\n print_memory(stage=\"DPLF:aggregator_created\")", "def save(self):\r\n\r\n for video_name, video_data in self.data.items():\r\n save_path = os.path.join(\r\n self.features_dir, video_name + \".\" + self.file_type\r\n )\r\n write_df(\r\n df=video_data.fillna(0), file_type=self.file_type, save_path=save_path\r\n )\r\n print(\"Created additional ROI features for {}...\".format(video_name))\r\n self.timer.stop_timer()\r\n stdout_success(\r\n msg=\"Created additional ROI features for files within the project_folder/csv/features_extracted directory\",\r\n elapsed_time=self.timer.elapsed_time_str,\r\n )", "def main(input, output):\n # turn csv to dataframe\n df = pd.read_csv(f\"./data/{input}\", index_col=0)\n\n # split training and test\n X = df.drop(columns=['DEFAULT_NEXT_MONTH'])\n y = df['DEFAULT_NEXT_MONTH']\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=122)\n\n # We use robust scalar as most of our data is not normally distributed and we have a 
high amount of outliers.\n columns = ['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_1', 'PAY_2',\n 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2',\n 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',\n 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']\n scaler = preprocessing.RobustScaler()\n X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=columns)\n X_test = pd.DataFrame(scaler.transform(X_test), columns=columns)\n\n # Use RFE to identify the most identify the most useful predictors.\n # Then we will drop those columns that are deemed as less useful.\n logreg = LogisticRegression(solver=\"lbfgs\")\n\n get_report(X_train, y_train, X_test, y_test, logreg, 'results_baseline')\n rfe = RFE(logreg, 7)\n rfe = rfe.fit(X_train, y_train.values.ravel())\n columns_to_drop = list()\n for i in range(len(rfe.support_)):\n include = rfe.support_[i]\n if include == False:\n columns_to_drop.append(columns[i])\n X_train = X_train.drop(columns_to_drop, axis=1)\n X_test = X_test.drop(columns_to_drop, axis=1)\n get_report(X_train, y_train, X_test, y_test, logreg, output)\n\n # write compounded dataframe with results of two get_report calls\n accuracies_output_df.to_csv(f'./{output}/accuracies.csv', index=False)", "def build_cc_data(iterations, original_scores):\n cc_dataset = pd.DataFrame(iterations)\n for k in cc_dataset.loc[:, \"precision\":].columns:\n cc_dataset[k] -= original_scores[k]\n\n melt_vars = list(set(iterations[0].keys()) - set(original_scores.keys()))\n cc_dataset = cc_dataset.melt(id_vars=melt_vars,\n var_name=\"metric\",\n value_name=\"value\")\n\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"hamming_loss\", \"Hamming Loss\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"precision_Backend\", \"Backend (Precision)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"recall_Backend\", \"Backend (Recall)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"f1_Backend\", \"Backend (F1)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"precision_Frontend\", \"Frontend (Precision)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"recall_Frontend\", \"Frontend (Recall)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"f1_Frontend\", \"Frontend (F1)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"precision_Mobile\", \"Mobile (Precision)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"recall_Mobile\", \"Mobile (Recall)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"f1_Mobile\", \"Mobile (F1)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"precision_DevOps\", \"DevOps (Precision)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"recall_DevOps\", \"DevOps (Recall)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"f1_DevOps\", \"DevOps (F1)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"precision_DataScientist\", \"DataScientist (Precision)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"recall_DataScientist\", \"DataScientist (Recall)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\n \"f1_DataScientist\", \"DataScientist (F1)\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\"precision\", \"Precision\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\"recall\", \"Recall\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\"f1\", \"F1\")\n cc_dataset.metric = cc_dataset.metric.str.replace(\"auc\", \"AUC\")\n cc_dataset.metric = 
cc_dataset.metric.str.replace(\"jaccard\", \"Jaccard\")\n\n cc_dataset.metric = cc_dataset.metric.astype(\n CategoricalDtype(cc_dataset.metric.unique(), ordered=True))\n\n return cc_dataset", "def produce_all_term_data(self):\n # remove cold start records if requested\n test = self.test.copy()\n test = self.handle_cold_start(test)\n\n outputs = self.output()\n trainf, testf = outputs['train'], outputs['test']\n with trainf.open('w') as ftrain, testf.open('w') as ftest:\n self.write_libfm_data(ftrain, ftest, self.train, test)\n\n # Write the term-to-id guide\n test = test.sort(('termnum'))\n test['rownum'] = np.arange(len(test))\n guide = test.groupby('termnum').max()['rownum']\n with self.output()['guide'].open('w') as f:\n guide.to_csv(f, index_label='termnum', header=True)", "def build_enru_custom(self):\n train_data_file = self.data_dir + '/' + enru_paracrawl\n eval_data_file = self.data_dir + '/' + enru_newscomm\n train_data = tf.data.experimental.CsvDataset(\n [train_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n train_data = train_data.cache() # only read once\n eval_data = tf.data.experimental.CsvDataset(\n [eval_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.take(3000)\n eval_data = eval_data.cache()\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n\n return train_data, eval_data", "def main():\n\n preprocessed_file = preprocess_clinical_trials()\n\n preprocessed_file.to_csv(PREPROCESSED_CLINICAL_TRIALS_FILE_PATH, index=False)", "def pipeline_rfe():\n\n\n\n #cols = [c for c in bank_df if bank_df[c].dtype == 'int64' or 'float64']\n #X_train = bank_df[cols].drop(columns = ['primary_merchant_name'], axis = 1)\n #y_train = bank_df['primary_merchant_name']\n #X_test = bank_df[cols].drop(columns = ['primary_merchant_name'], axis = 1)\n #y_test = bank_df['primary_merchant_name']\n\n #build a logistic regression and use recursive feature elimination to exclude trivial features\n log_reg = LogisticRegression(C = 1.0, max_iter = 2000)\n # create the RFE model and select most striking attributes\n rfe = RFE(estimator = log_reg, n_features_to_select = 8, step = 1)\n rfe = rfe.fit(X_train, y_train)\n #selected attributes\n print('Selected features: %s' % list(X_train.columns[rfe.support_]))\n print(rfe.ranking_)\n #following df contains only significant features\n X_train_rfe = X_train[X_train.columns[rfe.support_]]\n X_test_rfe = X_test[X_test.columns[rfe.support_]]\n #log_reg_param = rfe.set_params(C = 0.01, max_iter = 200, tol = 0.001)\n return X_train_rfe, X_test_rfe", "def gen_main_df(add_list: list):\r\n # 由Bert 计算得来的 sentiment信息\r\n if 'sentiment' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sentiment')\r\n sentiment = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'daily_svm_sentiment_6class' , 'csv')[0],\r\n 'date', ['0'], 'sentiment') # 'daily_svm_sentiment_2class' '0', '1', '2', '3', '4', '5'\r\n data_manipulator.add_column(sentiment)\r\n # 中国CPI指数\r\n if 'cpi' in add_list and 'cpi' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('cpi')\r\n cpi = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 
'CPI', 'csv')[0],\r\n '日期', ['最新值', '涨跌幅', '近3月涨跌幅'], 'CPI')\r\n data_manipulator.add_column(cpi)\r\n # 上海银行间同业拆放利率\r\n if 'shibor' in add_list and 'shibor' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shibor')\r\n shibor = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'shibor', 'csv')[0],\r\n 'date', ['on', '1w', '2w', '1m', '3m'], 'Shibor')\r\n data_manipulator.add_column(shibor)\r\n # 上证综指\r\n if 'shangzheng' in add_list and 'shangzheng' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shangzheng')\r\n shangzheng = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng', 'csv')[0],\r\n 'trade_date', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount',\r\n 'total_mv', 'float_mv', 'total_share', 'float_share',\r\n 'free_share', 'turnover_rate', 'turnover_rate_f', 'pe',\r\n 'pe_ttm', 'pb'],\r\n 'ShangZheng')\r\n data_manipulator.add_column(shangzheng)\r\n data_manipulator.shift_columns(['ShangZheng_pct_chg'], (-1,),\r\n add=True) # name has changed to shift-1_ShangZheng_pct_chg\r\n data_manipulator.rank_df_column(['shift-1_ShangZheng_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n shangzheng_30min = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng_index_30min', 'csv')[0],\r\n 'trade_time', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount'],\r\n 'ShangZheng_30min')\r\n data_manipulator.news_df_add_column(shangzheng_30min)\r\n data_manipulator.shift_minute_columns(['ShangZheng_30min_pct_chg'], (-1,),\r\n add=True)\r\n data_manipulator.rank_minute_df_columns(['shift-1_ShangZheng_30min_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n\r\n # M2 广义货币量\r\n if 'm2' in add_list and 'm2' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('m2')\r\n m2 = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'M2', 'csv')[0],\r\n '月份', ['M2数量(亿元)', 'M2同比增长', 'M2环比增长'], 'M2')\r\n m2 = data_manipulator.complement_df(m2, 'date')\r\n data_manipulator.add_column(m2)\r\n\r\n # 人民币美元汇率\r\n if 'rmb_usd' in add_list and 'rmb_usd' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('rmb_usd')\r\n rmb_usd = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'RMB_USD', 'csv')[0],\r\n 'trade_date',\r\n ['bid_open', 'bid_close', 'bid_high', 'bid_low', 'ask_open',\r\n 'ask_close', 'ask_high', 'ask_low', 'tick_qty'], 'exchange')\r\n data_manipulator.add_column(rmb_usd)\r\n\r\n # 沪港通 沪深通 到岸 离岸资金流\r\n if 'fund_flow' in add_list and 'fund_flow' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('fund_flow')\r\n fund_flow = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'fund_flow', 'csv')[0],\r\n 'trade_date', ['north_money', 'south_money'], 'fund_flow')\r\n data_manipulator.add_column(fund_flow)\r\n\r\n # 债券回购日行情\r\n if 'repo' in add_list and 'repo' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('repo')\r\n repo = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'repo', 'csv')[0],\r\n 'trade_date', ['repo_maturity', 'open', 'high', 'low', 'close',\r\n 'amount'], 'repo', 
data_manipulator.cut_time_string,\r\n (0, 10,))\r\n repo = data_manipulator.select_col_group_by(repo, 'repo_repo_maturity', ['GC001', 'GC007', 'GC014', 'GC028'],\r\n 'date')\r\n data_manipulator.add_column(repo)\r\n\r\n # 新浪新闻\r\n if 'sina_news' in add_list and 'sina_news' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sina_news')\r\n columns_type = {'create_time': str, 'text': str}\r\n sina_news = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'sina', 'csv')[0],\r\n 'create_time', ['text', ], 'sina', dtypes=columns_type)\r\n data_manipulator.add_change_news('sina', (7, 9), columns_type, sina_news, time_col_name='create_time')\r\n data_manipulator.add_minute_change_news('sina', columns_type, sina_news, time_col_name='create_time')\r\n if 'scale' in add_list:\r\n data_manipulator.scaling_col()\r\n if 'clear' in add_list:\r\n data_manipulator.clear()", "def classification_report(self):\n print('Classification Report ...')\n cr = classification_report(self.y_test, self.y_pred, output_dict=True)\n df = pd.DataFrame(cr)\n df.to_csv('csv/cr/' + self.model_name + '_' + self.label + '_cr.csv')\n print(cr)", "def run_job(self):\n \n \n utilities.print_checkpoint(self.to_string() + \"\\n\")\n utilities.print_checkpoint(\"Assign Barcode to Features\")\n utilities.print_checkpoint(\"Start\")\n \n # generate MERFISH dataset object\n dataSet = dataset.MERFISHDataSet(\n self.dataSetName)\n \n # change to work directory\n os.chdir(dataSet.analysisPath)\n \n # create output folder\n os.makedirs(os.path.dirname(self.outputName),\n exist_ok=True)\n\n # read features\n features = geo.read_file(\n self.exportedFeaturesName)\n \n # expand or shrink the feature\n features.geometry = \\\n features.geometry.buffer(self.bufferSize)\n \n # assign barcode to feature for current FOV\n barcodes = assign_barcodes_per_fov(\n self.exportedBarcodesName, features, self.fov)\n \n # get rid of unassigned barcodes\n barcodes = barcodes[\n barcodes.feature_name != \"NA\"]\n \n # export assigned barcodes\n barcodes.to_hdf(\n self.outputName,\n key = \"fov_%d\" % self.fov)\n\n utilities.print_checkpoint(\"Done\")", "def write_data(self, file_path, success_cutoff):\n agg_df = pd.DataFrame(columns=tf.Move)\n for game in self.game_list:\n agg_df = agg_df.add(game, fill_value = 0)\n agg_df.to_csv(file_path)\n pass", "def create_train_feats():\n features = read_process_labelled(AUDIO_DIR, debug=True)\n df = pd.DataFrame(features)\n p = './Features/dataset_features/data_features.csv'\n df.to_csv(p, index=False)\n return p", "def train_loop(train_per_list, cut_off_list, C_list,\n factors, non_factors, data_path, executable_path, \n trial_factors_list=None): \n if trial_factors_list is None:\n trial_factors_list=[factors]\n sql_table = 'aggregated_ctr' #Data table\n # remove cross terms\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n# factors+=['campaign_id','ad_account_id','pub_account_id', \n# 'campaign_id*site', 'ad*pub_account_id']\n con_dict_dse={'host':'db.lqm.io','db':'dse',\n 'user':'dse','passwd':'dSe@lQm'}\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n \n rtb_flag=[0,1]\n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n results = []\n for 
train_per in train_per_list:\n test_per = ( add_hour(train_per[1], 1), add_hour(train_per[1], 3))\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n train_df=mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, train_per, sql_features, rtb_flag)\n train_df=mysql_lqm.add_features( train_df)\n test_df= mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, test_per, sql_features, rtb_flag)\n test_df = mysql_lqm.add_features(test_df)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for trial_factors in trial_factors_list:\n trial_factors=trial_factors[:] # copy\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n \n pCTR = libLinear_functions.predict(executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss = log_loss_weighted(pCTR, amounts)\n results.append([train_per[:],trial_factors[:],\n cut_off,C,amounts.clicks.sum(),amounts.no_clicks.sum(), mean_log_loss])\n results_df=pd.DataFrame(results,columns=['date','features','cutoff','C','clicks','no_clicks','lloss'])\n results_df.to_csv(data_path+'resultsX.txt',index=False, sep='|')\n # what to do if ERROR?\n return results_df, weighted_log_loss", "def _generate_features(self, feature_extractors):\n results = [pd.DataFrame()]\n n_ext = len(feature_extractors)\n\n for i, extractor in enumerate(feature_extractors):\n log.info(\"generating: '%s' (%d/%d)\", extractor.name, i + 1, n_ext)\n cached_extractor = self._cache[extractor.name]\n if extractor.same(cached_extractor):\n log.info('pulling from cache')\n extractor = cached_extractor\n else:\n log.info('running...')\n extractor.extract()\n results.append(extractor.result)\n if self.cache_path:\n self._cache[extractor.name] = extractor\n\n if self.cache_path:\n with open(self.cache_path, 'wb') as f:\n pickle.dump(self._cache, f)\n\n return pd.concat(results, axis=1)", "def make_features(self, x_hits, y_hits, dow, lagged_hits, pf_age, pf_si, pf_network, pf_gender, page_ix, pf_price_cat,\n page_popularity, quarter_autocorr):\n # Split day of week to train and test\n x_dow, y_dow = tf.split(dow, [self.train_window, self.predict_window], axis=0)\n\n # Normalize hits\n mean = tf.reduce_mean(x_hits)\n std = tf.sqrt(tf.reduce_mean(tf.squared_difference(x_hits, mean)))\n norm_x_hits = (x_hits - mean) / std\n norm_y_hits = (y_hits - mean) / std\n norm_lagged_hits = (lagged_hits - mean) / std\n\n # Split lagged hits to train and test\n x_lagged, y_lagged = 
tf.split(norm_lagged_hits, [self.train_window, self.predict_window], axis=0)\n\n # Combine all page features into single tensor\n stacked_features = tf.stack([page_popularity, quarter_autocorr])\n flat_ucdoc_features = tf.concat([pf_age, pf_si, pf_network, pf_gender, pf_price_cat, stacked_features], axis=0) #pf_region\n ucdoc_features = tf.expand_dims(flat_ucdoc_features, 0)\n\n # Train features\n x_features = tf.concat([\n # [n_days] -> [n_days, 1]\n tf.expand_dims(norm_x_hits, -1),\n x_dow,\n x_lagged,\n # Stretch ucdoc_features to all training days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, [self.train_window, 1])\n ], axis=1)\n\n # Test features\n y_features = tf.concat([\n # [n_days] -> [n_days, 1]\n y_dow,\n y_lagged,\n # Stretch ucdoc_features to all testing days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, [self.predict_window, 1])\n ], axis=1)\n\n return x_hits, x_features, norm_x_hits, x_lagged, y_hits, y_features, norm_y_hits, mean, std, flat_ucdoc_features, page_ix", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! 
')", "def make_data(input_filepath, output_filepath):\n\n df_train = pd.read_csv(input_filepath+'train_u6lujuX_CVtuZ9i.csv', index_col=0)\n df_test = pd.read_csv(input_filepath+'test_Y3wMUE5_7gLdaTN.csv', index_col=0)\n print('Sizes', df_train.shape, df_test.shape)\n print(\"Outcome dispersion:\\n\", df_train['Loan_Status'].value_counts())\n\n\n # recode and save outcome vector\n y = df_train['Loan_Status'].map({'N': 0, 'Y': 1})\n\n del df_train['Loan_Status']\n\n # all in one dataframe\n df = pd.concat([df_train, df_test])\n print(df.shape)\n\n from src.features.build_features import make_features\n df = make_features(df)\n\n # Divide data on train and test again and save\n data_train = df[df.index.isin(df_train.index)]\n data_test = df[df.index.isin(df_test.index)]\n print(data_train.shape, data_test.shape)\n\n data_tmp = data_train.copy()\n data_tmp['y'] = y\n\n\n data_tmp.to_csv(output_filepath + 'train_ready.csv', index=False)\n data_test.to_csv(output_filepath + 'test_ready.csv', index=False)\n id_test = pd.DataFrame(data=df_test.index, columns=['Loan_ID'])\n id_test.to_csv(output_filepath + 'id_test.csv', index=False)", "def Classify_Data(self):\n\n lem = lemmatization()\n\n # Get Mongo Client\n client = MongoClient()\n db = client['allMovies']\n collection = db['Movies']\n\n # Path to folder containing the training model files\n path = self.path\n\n # Get the list of doc ids trained\n trained_docs = []\n\n # Mongo queries to retrieve Horror, Romance and Crime movies\n qr1 = self.collection.find({\"content.genres.name\": \"Horror\"})\n qr2 = self.collection.find({\"content.genres.name\": \"Romance\"})\n qr3 = self.collection.find({\"content.genres.name\": \"Crime\"})\n qr4 = self.collection.find({\"content.genres.name\": \"Comedy\"})\n print(\"111\")\n print(qr3)\n\n myfile = open('doc_ids.pkl', 'rb')\n trained_docs = pickle.load(myfile)\n # Get 100 Horror, Romance and Crime movies each, which are not in the trained data set\n\n horr = []\n i = 0\n for rec in qr1:\n if rec['_id'] not in trained_docs:\n i = i + 1\n horr.append(rec)\n\n if i >= 333:\n break\n rom = []\n i = 0\n for rec in qr2:\n if rec['_id'] not in trained_docs:\n i = i + 1\n rom.append(rec)\n\n if i >= 333:\n break\n\n crime = []\n i = 0\n for rec in qr3:\n if rec['_id'] not in trained_docs:\n i = i + 1\n crime.append(rec)\n\n if i >= 334:\n break\n comedy = []\n i = 0\n for rec in qr4:\n if rec['_id'] not in trained_docs:\n i = i + 1\n comedy.append(rec)\n\n if i >= 334:\n break\n\n # Combine the query results\n query_results = []\n for rec in horr:\n query_results.append(rec)\n for rec in rom:\n query_results.append(rec)\n for rec in crime:\n query_results.append(rec)\n print(query_results)\n # Data to be classified\n test_data = []\n\n # Genres of records to be classified\n categories = []\n a = 0\n for movie in query_results:\n test_data.append(movie['content']['overview'])\n for genre in movie['content']['genres']:\n a = a + 1\n if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or (\n genre['name'] == 'Comedy') and a <= 80):\n categories.append(genre['name'])\n\n # Lists of training models and vectorizers\n models = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With Weights\"]\n\n vectorizers = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n # Load dictionary containing terms appearing in genres\n dictionary = joblib.load(path + 
\"_Genre_Dictionary\")\n\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n vec_list = [vec_1, vec_2]\n\n # List to store the classification stats for each model\n stats = []\n # Generate results\n for i in range(0, len(models)):\n for j in range(0, len(vectorizers)):\n time0 = time.process_time()\n model = joblib.load(path + models[i] + \"_\" + vectorizers[j].replace('-', '') + \".pkl\")\n vec = vec_list[j]\n Y = vec.fit_transform(test_data).toarray()\n print(\"y\", Y)\n predicted_genres = model.predict(Y)\n\n k = 0\n horror = 0\n romance = 0\n crime = 0\n\n # Keeps track of correct predictions\n y_correct = []\n\n # Keeps track of incorrect predictions\n y_predicted = []\n for pred in predicted_genres:\n if (categories[k] == \"Horror\"):\n if (pred == \"Horror\"):\n horror += 1\n y_predicted.append(0)\n elif (pred == \"Romance\"):\n y_predicted.append(1)\n else:\n y_predicted.append(2)\n y_correct.append(0)\n elif (categories[k] == \"Romance\"):\n if (pred == \"Romance\"):\n romance += 1\n y_predicted.append(1)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(2)\n y_correct.append(1)\n elif (categories[k] == \"Crime\"):\n if (pred == \"Crime\"):\n crime += 1\n y_predicted.append(2)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(1)\n y_correct.append(2)\n k = k + 1\n\n # Print results\n score = precision_recall_fscore_support(y_correct, y_predicted, average='weighted')\n # print(\"Number of records classified per second = %d\" % (round((1000/(time.process_time()-time0)),3)))\n print(\"________SCORES__________\")\n print(\"MODEL : \" + models[i])\n print(\"VECTORIZER : \" + vectorizers[j])\n print(\"Horror : %d/333\" % (horror))\n print(\"Romance : %d/333\" % (romance))\n print(\"Crime : %d/334\" % (crime))\n print(\"Precision : %.5f\" % (score[0]))\n print(\"Recall : %.5f\" % (score[1]))\n print(\"F(1) Score : %.5f\" % ((score[1] * score[0] / (score[1] + score[0])) * 2))\n print(\"F(W) Score : %.5f\" % (score[2]))\n print(\"Accuracy : %.5f\" % accuracy_score(y_correct, y_predicted))\n # print(confusion_matrix(y_correct, y_predicted))\n\n dic = {}\n dic['model'] = models[i].title()\n dic['vectorizer'] = vectorizers[j][:-11]\n dic['horror'] = str(horror) + '/' + '333'\n dic['romance'] = str(romance) + '/' + '333'\n dic['crime'] = str(crime) + '/' + '334'\n dic['precision'] = round(score[0], 3)\n dic['Recall'] = round(score[1], 3)\n dic['F(1) Score'] = round(((score[1] * score[0] / (score[1] + score[0])) * 2), 3)\n dic['F(W) Score'] = round(score[2], 3)\n dic['accuracy'] = round(accuracy_score(y_correct, y_predicted), 3)\n stats.append(dic)\n # Store stats in file\n joblib.dump(stats, path + \"classification_results.txt\")\n\n print(\"Done\")\n return stats", "def set_features(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData', 'strData', 'strData', 'strData', 'strData']\n col_headers = ['model_name', 'name', 'variable_type', 'data_type', 'feature_strategy', 'strategy_args']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug 
information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n # Add the feature definitions to the model\n self.model.features_df = self.request_df\n self.model.features_df.set_index(\"name\", drop=False, inplace=True)\n # Store a copy of the features_df that will remain untouched in later calls\n self.model.original_features_df = self.model.features_df.copy()\n\n # Ensure there is at most one feature with variable_type identifier\n if len(self.model.features_df.loc[self.model.features_df[\"variable_type\"] == \"identifier\"]) > 1:\n err = \"Invalid feature definitions. Detected more than one feature with variable_type set to identifier. You can only pass one unique identifier.\"\n raise Exception(err)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n message = [[self.model.name, 'Feature definitions successfully saved to model',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp))]]\n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"setup\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def create_model_csv(self):\n\n self.model_df.to_csv(self.model_output_file)", "def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df", "def _extract_features(self, ti, tf):\n makedir(self.featdir)\n\n # number of windows in feature request\n Nw = int(np.floor(((tf-ti)/self.dt)/(self.iw-self.io)))\n\n # features to compute\n cfp = ComprehensiveFCParameters()\n if self.compute_only_features:\n cfp = dict([(k, cfp[k]) for k in cfp.keys() if k in self.compute_only_features])\n else:\n # drop features if relevant\n _ = [cfp.pop(df) for df in self.drop_features if df in list(cfp.keys())]\n\n # check if feature matrix already exists and what it contains\n if os.path.isfile(self.featfile):\n t = pd.to_datetime(pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], usecols=['time'], 
infer_datetime_format=True).index.values)\n ti0,tf0 = t[0],t[-1]\n Nw0 = len(t)\n hds = pd.read_csv(self.featfile, index_col=0, nrows=1)\n hds = list(set([hd.split('__')[1] for hd in hds]))\n\n # option 1, expand rows\n pad_left = int((ti0-ti)/self.dto)# if ti < ti0 else 0\n pad_right = int(((ti+(Nw-1)*self.dto)-tf0)/self.dto)# if tf > tf0 else 0\n i0 = abs(pad_left) if pad_left<0 else 0\n i1 = Nw0 + max([pad_left,0]) + pad_right\n \n # option 2, expand columns\n existing_cols = set(hds) # these features already calculated, in file\n new_cols = set(cfp.keys()) - existing_cols # these features to be added\n more_cols = bool(new_cols)\n all_cols = existing_cols|new_cols\n cfp = ComprehensiveFCParameters()\n cfp = dict([(k, cfp[k]) for k in cfp.keys() if k in all_cols])\n\n # option 3, expand both\n if any([more_cols, pad_left > 0, pad_right > 0]) and self.update_feature_matrix:\n fm = pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], infer_datetime_format=True)\n if more_cols:\n # expand columns now\n df0, wd = self._construct_windows(Nw0, ti0)\n cfp0 = ComprehensiveFCParameters()\n cfp0 = dict([(k, cfp0[k]) for k in cfp0.keys() if k in new_cols])\n fm2 = extract_features(df0, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp0, impute_function=impute)\n fm2.index = pd.Series(wd)\n \n fm = pd.concat([fm,fm2], axis=1, sort=False)\n\n # check if updates required because training period expanded\n # expanded earlier\n if pad_left > 0:\n df, wd = self._construct_windows(Nw, ti, i1=pad_left)\n fm2 = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm2.index = pd.Series(wd)\n fm = pd.concat([fm2,fm], sort=False)\n # expanded later\n if pad_right > 0:\n df, wd = self._construct_windows(Nw, ti, i0=Nw - pad_right)\n fm2 = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm2.index = pd.Series(wd)\n fm = pd.concat([fm,fm2], sort=False)\n \n # write updated file output\n fm.to_csv(self.featfile, index=True, index_label='time')\n # trim output\n fm = fm.iloc[i0:i1] \n else:\n # read relevant part of matrix\n fm = pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], infer_datetime_format=True, header=0, skiprows=range(1,i0+1), nrows=i1-i0)\n else:\n # create feature matrix from scratch \n df, wd = self._construct_windows(Nw, ti)\n fm = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm.index = pd.Series(wd)\n fm.to_csv(self.featfile, index=True, index_label='time')\n \n ys = pd.DataFrame(self._get_label(fm.index.values), columns=['label'], index=fm.index)\n return fm, ys", "def prepare_reg_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n\r\n feature_X_user,affect_dataframe, affect_index_dataframe = df, df, df\r\n emo_X_test_dict = {}\r\n affect_index_dict ={}\r\n\r\n for emotion, model_prop in model_dict.items():\r\n #Get the data with the emotion class\r\n if user_keyword == 'validation':\r\n affect_dataframe = dataframe[dataframe['Affect Dimension'] == 1]\r\n affect_index_list = dataframe.index[dataframe['Affect Dimension'] == 1].tolist()\r\n else:\r\n affect_dataframe = dataframe[dataframe[emotion] == 1]\r\n affect_index_list = dataframe.index[dataframe[emotion] == 1].tolist()\r\n test_tweets = affect_dataframe.iloc[:, [0, 1, 2]]\r\n\r\n #Perform 
preprocessing, feature extraction and transformation for the tweets to be predicted\r\n print(emotion, test_tweets.shape)\r\n if test_tweets.empty == False:\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_r_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_r_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n print(emotion, 'train-shape', train_vect_df.shape, sep='\\n')\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.DataFrame(pd.concat([test_vect_df, feature_X_user], axis=1)) #####?\r\n emo_X_test_dict[emotion] = X_test\r\n affect_index_dict[emotion] = affect_index_list\r\n else:\r\n emo_X_test_dict[emotion] = pd.DataFrame\r\n affect_index_dict[emotion] = []\r\n\r\n return emo_X_test_dict, affect_index_dict", "def to_learn(trxfile, cardfile, custfile, trainfile, testfile):\n feature_df = to_feature(trxfile, cardfile, custfile)\n feature_df.loc[:] = preprocessing.scale(feature_df)\n #feature_df.loc[:] = preprocessing.normalize(feature_df, norm='l2')\n \n # card_no, label\n train_df = pandas.read_csv(trainfile, header=None)\n # card_no\n test_df = pandas.read_csv(testfile, header=None)\n\n train_data = feature_df.loc[train_df.loc[:, 0]]\n train_label = train_df.loc[:, 1]\n test_data = feature_df.loc[test_df.loc[:, 0]]\n\n return (train_data.values, train_label.values, test_data.values)", "def write(self, recs, out_handle):\n self._write_header(out_handle)\n for rec in recs:\n self._write_annotations(rec.annotations, rec.id, out_handle)\n for sf in rec.features:\n self._write_feature(sf, rec.id, out_handle)", "def __init__(self, spark_session, dataset_folder_path):\n self.df = [None for i in range(10)]\n self.model = [None for i in range(10)]\n\n logger.info(\"Starting up the Recommendation Engine: \")\n self.spark_session = spark_session\n\n # Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n self.file_index = 0\n while True:\n filename = 'result' + str(self.file_index) + '.txt'\n dataset_file_path = os.path.join(dataset_folder_path,filename)\n exist_file = os.path.isfile(dataset_file_path)\n\n if exist_file:\n logger.info(self.file_index)\n self.df[self.file_index] = spark_session.read.csv(dataset_file_path,header=None, inferSchema=True)\n self.df[self.file_index] = self.df[self.file_index].selectExpr(\"_c1 as Title\" , \"_c3 as Userscore\", \"_c5 as Username\")\n self.df[self.file_index] = self.df[self.file_index].select(self.df[self.file_index].Username,self.df[self.file_index].Title,self.df[self.file_index].Userscore)\n\n logger.info(\"can load data\")\n\n stringindexer = 
StringIndexer(inputCol='Username',outputCol='UserId')\n stringindexer.setHandleInvalid(\"keep\")\n model = stringindexer.fit(self.df[self.file_index])\n indexed = model.transform(self.df[self.file_index]) \n\n stringindexer_item = StringIndexer(inputCol='Title',outputCol='GameId')\n stringindexer_item.setHandleInvalid(\"keep\") \n model = stringindexer_item.fit(indexed)\n indexed = model.transform(indexed)\n\n logger.info(\"sucess conver\")\n\n self.df[self.file_index] = indexed.select(indexed.Username,indexed.UserId,indexed.Title,indexed.GameId,indexed.Userscore.cast(\"int\"))\n logger.info(\"get data frame\")\n\n \n self.df[self.file_index].show()\n self.file_index+=1\n else:\n break\n\n self.__train_model()", "def run(self):\n\t\tdf_iter = self.file_to_df(50000)\n\t\tdf_airport = self.airport_file_to_df()\n\t\tfor df in df_iter: # type: pd.DataFrame\n\t\t\tdf.drop_duplicates(inplace=True)\n\t\t\tdf = self.transform(df, df_airport)\n\n\t\t\tdf_result = self.get_only_new_records(\n\t\t\t\tdf=df,\n\t\t\t\tdf_columns=self.join_columns,\n\t\t\t\ttable_columns=self.join_columns\n\t\t\t)\n\n\t\t\tif len(df_result) > 0:\n\t\t\t\t# df_result.drop(self.table_columns, axis=1)\n\n\t\t\t\tself.save(\n\t\t\t\t\tdf=df_result,\n\t\t\t\t\ttable_name=\"travel_dimension\",\n\t\t\t\t\tdf_columns=self.table_columns,\n\t\t\t\t\ttable_colums=self.table_columns\n\t\t\t\t)", "def predictAndSubmit(train, features, predCols):\n realTest = pd.read_csv('Data\\\\test.csv')\n realTest['Id'] = (realTest['Store'].map(str) + '_' +\n realTest['Dept'].map(str) + '_' +\n realTest['Date'].map(str))\n\n realTest = extractFeatures(realTest, features)\n realTestX = realTest[predCols]\n\n pipe = Pipeline([('scal', StandardScaler()),\n ('clf', xgb.XGBRegressor(learning_rate=0.07, max_depth=6,\n n_estimators=100))])\n trainX = train[predCols]\n trainY = train['Weekly_Sales']\n pipe.fit(trainX, trainY)\n prediction = pipe.predict(realTestX)\n realTest['Weekly_Sales'] = prediction\n realTest[['Id', 'Weekly_Sales']].to_csv('Output\\\\XGBSubmission.csv',\n index=False)\n\n pipe = Pipeline([('scal', StandardScaler()),\n ('clf', SGDRegressor())])\n pipe.fit(trainX, trainY)\n prediction = pipe.predict(realTestX)\n realTest['Weekly_Sales'] = prediction\n realTest[['Id', 'Weekly_Sales']].to_csv('Output\\\\SGDSubmission.csv',\n index=False)\n\n testDates = pd.to_datetime(realTest.Date)\n testDates = pd.DatetimeIndex(testDates.unique())\n storeDepts = extractStoreDeptCombos(realTest)\n noNotFit = 0\n allPred = pd.DataFrame()\n for store, dept in storeDepts.itertuples(index=False):\n trainThis = train[train['Store'] == store]\n trainThis = trainThis[trainThis['Dept'] == dept]\n if len(trainThis.index) > 142:\n # Only fit if all dates available\n trainFB = pd.DataFrame()\n trainFB['ds'] = trainThis['Date'].astype(str)\n trainFB['y'] = trainThis['Weekly_Sales']\n m = Prophet()\n m.fit(trainFB)\n realTestFBx = pd.DataFrame()\n realTestFBx['ds'] = testDates\n prediction = m.predict(realTestFBx)\n predRows = pd.DataFrame({'Store': store, 'Dept': dept,\n 'Date': testDates, 'y': prediction.yhat})\n else:\n print(\"Not enough Data\")\n noNotFit += 1\n print(\"Store: {} Dept: {}\".format(store, dept))\n allPred = allPred.append(predRows, ignore_index=True)\n\n print(\"{} store-date combos not fit\".format(noNotFit))\n\n allPred.drop_duplicates(inplace=True)\n realSub = pd.merge(realTest[['Store', 'Date', 'Dept', 'Id']], allPred,\n on=['Store', 'Date', 'Dept'], how='left')\n\n # Fill all NaNs wit the total mean. 
This could be more advanced obviously,\n # but this will do for the time being\n realSub['y'].fillna((realSub['y'].mean()), inplace=True)\n realSub = realSub[['Id', 'y']]\n realSub.columns = ['Id', 'Weekly_Sales']\n realSub.to_csv('Output\\\\FBProphetSubmission.csv', index=False)\n\n # XGBScore = 7972.37008\n # FBProphet score = 5357.68674\n\n FBSub = pd.read_csv('Output\\\\FBProphetSubmission.csv')\n XGSub = pd.read_csv('Output\\\\XGBSubmission.csv')\n ensembleSub = pd.DataFrame((FBSub['Weekly_Sales']*3/4\n + XGSub['Weekly_Sales']*1/4))\n ensembleSub['Id'] = FBSub['Id']\n ensembleSub[['Id', 'Weekly_Sales']].to_csv(\n 'Output\\\\EnsembleSubmission.csv', index=False)", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def __init__(self):\n # File settings and locations.\n self.DATA_DIR = 'data'\n self.DATA_COL_DIR = 'data_collated'\n\n self.FIRE_DATABASE = 'FPA_FOD_20170508.sqlite'\n self.CLIMATE_DATA = 'GlobalLandTemperaturesByCity.csv'\n self.STOCK_DATA = 'historical_stock_prices.csv'\n self.COMBINED_DATA = 'combined_data.db'\n\n self.MODEL_PATH = 'models/dnn_wildfires.ckpt'\n\n # Setting to use reduced data for prototyping purposes.\n self.prototyping = False\n self.sample_size = 80000\n\n # Start date of data\n self.start = pd.to_datetime('1992-01-01')\n\n # Stocks in stock data to keep for analysis.\n self.stocks = ['MSFT', 'AAPL', 'GE', 'JNJ', 'JPM', 
'PG']\n\n # Settings for validation and test set partitioning.\n self.val_set_ratio = 0.15\n self.test_set_ratio = 0.15\n\n # Separation of features for pipeline preparation \n self.cat_attribs = ['STATE', 'FIRE_SIZE_CLASS', 'OWNER_CODE', 'City']\n self.num_attribs = ['FIRE_YEAR', 'LATITUDE', 'LONGITUDE', 'FIRE_SIZE', \n 'FIRE_LENGTH', 'DIST_TO_MAJOR_CITY', 'AverageTemperature',\n 'AverageTemperatureUncertainty', 'AAPL', 'GE', 'JNJ', \n 'JPM', 'MSFT', 'PG']\n self.cycle_cols = ['DISC_MONTH', 'DISC_DAY_OF_WEEK', 'DISCOVERY_TIME', \n 'DISCOVERY_DOY', 'CONT_MONTH', 'CONT_DAY_OF_WEEK',\n 'CONT_TIME']\n\n # Define the ranges of the cycles in cycle_cols and whether any offset for\n # zero-indexing is needed (i.e., 'DISC_MONTH' cycles over a 12 month period\n # and the months need an offset of one to start the indicies at 0 for Jan.).\n self.cycle_ranges = [12, 7, 2400, 365, 12, 7, 2400]\n self.cycle_offsets = [1, 0, 0, 1, 1, 0, 0]\n\n # Parameters for deep learning model determined from randomized \n # hyperparameter search.\n self.n_hidden_layers = 4\n self.n_neurons = 200\n self.batch_size = 500\n self.batch_norm_momentum = 0.999\n self.dropout_rate = 0.4\n self.learning_rate = 0.01\n self.activation = tf.nn.elu\n\n # Hyperparameter settings .\n self.hp_search = False", "def build_cost_predictor(file_name, output_file, save_to_local=True):\n\n # read in the data file,\n # WS = pd.read_excel(os.path.join('saved_models', file_name),\n # sheet_name='sheet1').drop(columns=['File'])\n #\n # WS_np = WS.to_numpy().astype('float')\n WS_np = np.loadtxt(file_name, delimiter=',')\n\n X = []\n y = []\n\n for i in range(WS_np.shape[0]):\n for j in range(4, 14):\n X.append([WS_np[i][0], WS_np[i][1], WS_np[i][2], j - 4])\n y.append(WS_np[i][j])\n\n X.append([WS_np[i][0], WS_np[i][1], WS_np[i][2], j - 3])\n y.append(np.mean(y[-10:]))\n\n X = np.asarray(X)\n\n nb_classes = 11\n data = X[:, 3].astype(int)\n\n # build embeddings\n w = indices_to_one_hot(data, nb_classes)\n # p = indices_to_one_hot([2,3], 11)\n\n X = np.concatenate((X[:, [0, 1, 2]], w), axis=1)\n # this is currently a hardcoded string\n indices = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n X = X[:, indices]\n\n r2 = []\n mse = []\n pearson = []\n spearman = []\n\n # fix for 10 fold random CV now\n for i in range(10):\n X_train, X_test, y_train, y_test = train_test_split(X[:, ], y,\n test_size=0.1)\n clf = RandomForestRegressor(n_estimators=100)\n\n clf.fit(X_train, y_train)\n test_pred = clf.predict(X_test)\n test_pred[test_pred < 0] = 0\n\n r2.append([r2_score(y_test, test_pred)])\n mse.append([mean_squared_error(y_test, test_pred)])\n pearson.append(pearsonr(y_test, test_pred)[0])\n spearman.append(spearmanr(y_test, test_pred)[0])\n\n print('Spearman Rank', np.mean(spearman))\n\n clf.fit(X, y)\n\n if save_to_local:\n # save to the local\n dump(clf, os.path.join(\"saved_models\", output_file))", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n 
unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def generate_data():\n player_df = get_players_df(2018)\n stats_df = construct(2018, player_df[\"PlayerID\"])\n stats_df['NAME'] = player_df['FirstName'] + \" \" + player_df['LastName']\n stats_df[\"MPG\"] = pd.to_numeric(stats_df[\"MPG\"])\n stats_df.drop(stats_df[stats_df[\"MPG\"] < 15].index, inplace=True)\n stats_df.to_csv(\"data.csv\", index=False)", "def produce_next_term_data(self):\n outputs = self.output()\n for termnum in self.term_range: # includes (end term + 1)\n test = self.test[self.test.termnum == termnum]\n\n # remove cold start recordsif requested\n test = self.handle_cold_start(test)\n\n term_outputs = outputs[termnum]\n trainf, testf = term_outputs['train'], term_outputs['test']\n with trainf.open('w') as ftrain, testf.open('w') as ftest:\n self.write_libfm_data(ftrain, ftest, self.train, test)\n\n self.transfer_term(termnum) # modify train/test sets in place\n # intentionally skip writing the last time this is run\n\n # TODO: this code converts the same records to libFM format multiple\n # times. Each subsequent train set contains records in the last train\n # set. These could be cached to avoid all of the string format\n # conversion overhead.", "def dataset(self, file, latent_dim = 4, pivot = 0.2):\n data_df = pd.read_csv(file, sep=\"::\", engine='python',\n names=['UserId', 'MovieId', 'Rating', 'Timestamp'])\n print(len(data_df))\n data_df['avg_score'] = data_df.groupby(by='UserId')['Rating'].transform('mean')\n # feature columns\n user_num, item_num = data_df['UserId'].max() + 1, data_df['MovieId'].max() + 1\n feature_columns = [[self.denseFeature('avg_score')],\n [self.sparseFeature('user_id', user_num, latent_dim),\n self.sparseFeature('item_id', item_num, latent_dim)]]\n # split train dataset and test dataset\n watch_count = data_df.groupby(by='UserId')['MovieId'].agg('count')\n print(\"分割后\"+str(pivot*100)+\"%作为数据集\\n\")\n test_df = pd.concat([data_df[data_df.UserId == i].iloc[int((1 - pivot) * watch_count[i]):] for i in (watch_count.index)], axis=0)\n print(test_df.head())\n test_df = test_df.reset_index()\n train_df = data_df.drop(labels=test_df['index'])\n # 删除非需求列\n train_df = train_df.drop(['Timestamp'], axis=1).sample(frac=1.).reset_index(drop=True)\n test_df = test_df.drop(['index', 'Timestamp'], axis=1).sample(frac=1.).reset_index(drop=True)\n train_X = [train_df['avg_score'].values, train_df[['UserId', 'MovieId']].values]\n train_y = train_df['Rating'].values.astype('int32')\n test_X = [test_df['avg_score'].values, test_df[['UserId', 'MovieId']].values]\n test_y = test_df['Rating'].values.astype('int32')\n return feature_columns, (train_X, train_y), (test_X, test_y)", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('creating dataset for modelling')\n\n df_bkg = read_csv(input_filepath, 'booking_log.csv')\n df_pnt = read_csv(input_filepath, 'participant_log.csv')\n\n grp_bkg = df_bkg.groupby('order_id')\n\n dataset = create_dataset(df_bkg, df_pnt, grp_bkg)\n\n logger.info('dataset created and saving file')\n save_csv(dataset, output_filepath, 'dataset.csv')", "def ModelRegression():\n \n fs=125\n win_len = 10\n 
win_shift = 2\n \n # load the data file\n data_fls, ref_fls = LoadTroikaDataset()\n targets, features, sigs, subs = [], [], [], []\n for data_fl, ref_fl in (zip(data_fls, ref_fls)):\n \n # load the signal\n sig = LoadTroikaDataFile(data_fl)\n ref = LoadTroikaRefFile(ref_fl)\n ref = np.array([x[0] for x in ref])\n subject_name = os.path.basename(data_fl).split('.')[0] \n start_indxs, end_indxs = get_indxs(sig.shape[1], len(ref), fs, win_len,win_shift)\n for i, s in enumerate(start_indxs):\n start_i = start_indxs[i]\n end_i = end_indxs[i]\n\n ppg = sig[0, start_i:end_i] \n accx = sig[1, start_i:end_i]\n accy = sig[2, start_i:end_i]\n accz = sig[3, start_i:end_i]\n \n #band pass the channels\n ppg = BandpassFilter(ppg)\n accx = BandpassFilter(accx)\n accy = BandpassFilter(accy)\n accz = BandpassFilter(accz)\n \n # creates the features\n feature, ppg, accx, accy, accz = FeatureExtraction(ppg, accx, accy, accz)\n\n sigs.append([ppg, accx, accy, accz])\n targets.append(ref[i])\n features.append(feature)\n subs.append(subject_name)\n \n targets = np.array(targets)\n features = np.array(features)\n \n # set a Random Forest Regressor model\n #classifier = RandomForestClassifier(n_estimators=100,\n # max_depth=10,\n # random_state=42,\n # class_weight='balanced')\n \n regression = RandomForestRegressor(n_estimators=200,max_depth=10)\n \n lf = KFold(n_splits=5)\n splits = lf.split(features,targets,subs)\n \n # split the data and fit the model\n for i, (train_idx, test_idx) in enumerate(splits):\n X_train, y_train = features[train_idx], targets[train_idx]\n X_test, y_test = features[test_idx], targets[test_idx]\n regression.fit(X_train, y_train)\n \n return regression", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def submission(test_ids, pred_test, file_name):\n pred_test[pred_test < 0] = 0\n\n val_pred_df = pd.DataFrame(data={'fullVisitorId': test_ids,\n 'predictedRevenue': pred_test})\n\n val_pred_df = val_pred_df.groupby('fullVisitorId').sum().reset_index()\n\n val_pred_df.columns = ['fullVIsitorId', 'predictedLogRevenue']\n val_pred_df['predictedLogRevenue'] = val_pred_df['predictedLogRevenue']\n val_pred_df.to_csv('submission/'+file_name, index=False)", "def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + 
'{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin,\n primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy", "def preprocessing(self):\n print(\"This may take a while, please grab a coffee. Average wait time: 2 - 6 mins.\")\n print(\"Loading data... \")\n df = ExternalDataRetrieval().get_data()\n\n print(\"Preprocessing data... \")\n\n amino_df = pd.DataFrame()\n # Set column names for zinc content dataframe\n zcolumns = ['value', 'group']\n # Set column names for food groups dataframe\n fcolumns = ['ID', 'food', 'group', 'manufacturer']\n # Declare zinc content dataframe\n zinc_df = pd.DataFrame(columns=zcolumns)\n # Declare food group dataframe\n food_df = pd.DataFrame(columns=fcolumns)\n # Doing this one amino acids type at a time.\n for n in AMINO_LIST:\n food = []\n # nutrients components of the food type is further nested in 'nutrients', which its components are further\n # nested\n for i, items in enumerate(df['nutrients']):\n # Iterate through the nutrient type to obtain necessary info.\n # For this project, there are many redundant data in there.\n f_flag = False\n # Only need to set the flag to activate the zinc check for one amino acid loop\n if n == AMINO_LIST[0]:\n z_flag = False\n for item in items:\n # Check to see if this nutrient type is one of the amino acids\n if item.get(\"name\") == n and item.get(\"value\") > 0:\n # If so, add the food type to the amino acid type array\n food.append(df['name'][i]['long'])\n f_flag = True\n # Check to see if this nutrient type is Zinc, only need to do this for one amino acid loop.\n if item.get(\"name\") == Z and n == AMINO_LIST[0]:\n # If so, gets its zinc content value and the food group it is in.\n zinc_df.loc[i] = [item.get(\"value\"), df['group'][i]]\n z_flag = True\n if f_flag and z_flag:\n break\n\n # Build the food group data dataframe one food at a time, only need to do this for one amino acid loop.\n if n == AMINO_LIST[0]:\n food_df.loc[i] = [df['meta'][i]['ndb_no'], df['name']\n [i]['long'], df['group'][i], df['manufacturer'][i]]\n\n # Assemble the amino acid type array in to nutrient dataframe\n fd = pd.DataFrame({n: food})\n # Since the length of each columns varies (amino acid food types appearance in food types varies),\n # there are many NaN in the dataframe as a result. We need to drop the NaN\n fd = fd.dropna()\n amino_df = pd.concat([amino_df, fd], axis=1, ignore_index=True)\n # Add column names to the nutrient dataframe\n amino_df.columns = AMINO_LIST\n print(\"Good news, preprocessing completed successfully! 
\")\n return amino_df, zinc_df, food_df", "def generate_data(filename_in, filename_out):\n file_in = open(filename_in, 'r')\n file_out = open(filename_out, 'w+')\n\n df = pd.read_csv(file_in, header=None, sep=' ', quoting=csv.QUOTE_NONE)\n x = df.iloc[:, 0].values\n y_class = df.iloc[:, -1].values\n file_in.close()\n\n y_class = np.where(y_class == 'O', 0, 1)\n\n x_features = []\n size_x = len(x)\n for i in range(3, size_x):\n if i % 5000 == 0:\n print(i, \"/\", size_x)\n x_features.append(features(x[i-2], x[i-1], x[i], y_class[i]))\n\n df_write = pd.DataFrame(x_features)\n\n tab = [x for x in range(1, NUMBER_OF_FEATURE + 2)]\n df_write.columns = tab\n write_csv(df_write, file_out)\n file_out.close()", "def main(self):\n\n assault_mech_df = self.get_mech_df(url=self.assault_url)\n heavy_mech_df = self.get_mech_df(url=self.heavy_url)\n medium_mech_df = self.get_mech_df(url=self.medium_url)\n light_mech_df = self.get_mech_df(url=self.light_url)\n all_weights_df = pd.concat([assault_mech_df, heavy_mech_df, medium_mech_df, \n light_mech_df])\n\n self.save_data(assault_mech_df, \"assault\")\n self.save_data(heavy_mech_df, \"heavy\")\n self.save_data(medium_mech_df, \"medium\")\n self.save_data(light_mech_df, \"light\")\n self.save_data(all_weights_df, \"all_weights\")\n #get maximum new columns needed for splitting variants\n max_cols = all_weights_df.variants.apply(lambda x: len(x)).max()\n melt_cols = []\n\n for i in range(max_cols):\n all_weights_df[\"var_\"+str(i)] = \"\"\n melt_cols.append(\"var_\"+str(i))\n\n variant_weights_df = pd.DataFrame()\n for index, row in all_weights_df.iterrows():\n for i in range(len(row[\"variants\"])):\n #add each variant to variant weights as a row with mech, tonnage, variant\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n for i in range(len(row[\"hero_chassis\"])):\n new_row_dict = {\n \"mech_name\":row[\"hero_names\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"hero_chassis\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n\n for i in range(len(row[\"special_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"special_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df]) \n\n #add champion variants by matching on \n for i in range(len(row[\"champion_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"champion_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n #remove duplicate rows \n variant_weights_df = variant_weights_df[variant_weights_df.duplicated(keep=\"first\")==False]\n self.save_data(variant_weights_df, \"variant_weights\")", "def __init__(self, customer_vendor_full, valid_rating_mean):\r\n super(User_CF, self).__init__()\r\n self.customer_vendor_full = customer_vendor_full\r\n self.customer_vendor_ratings = self.select_features()\r\n self.customer_vendor_matrix = self.customer_vendor_ratings.pivot(\r\n index='customer_id', columns='vendor_id', values='mean_rating') # (26779, 100)\r\n self.rating_matrix = 
self.customer_vendor_matrix.fillna(0).values.astype(np.float32)\r\n self.valid_rating_mean = valid_rating_mean\r\n self.vendor2rating = self.get_vendors_mean()\r\n self.customer_similarity, = self.get_similarity()", "def json_df_builder(self, df, marketID, RFM=None):\n \n #-------------------------------------------------------------------------\n # Extract from dataframe content to be returned\n #-------------------------------------------------------------------------\n str_customerID = str(df.CustomerID.unique()[0])\n \n invoice_count = len(df.InvoiceNo.unique())\n item_count = df.Quantity.sum()\n invl_count = df.shape[0]\n \n ser_incomes = df.UnitPrice * df.Quantity\n incomes = ser_incomes.sum()\n str_incomes = \"{0:1.2F}\".format(incomes)\n \n mean_unit_price = incomes/item_count\n str_mean_unit_price = \"{0:1.2F}\".format(mean_unit_price)\n \n serInvoiceDate = df.InvoiceDate\n str_old_date = serInvoiceDate.map(str).min()\n str_new_date = serInvoiceDate.map(str).max()\n \n #-------------------------------------------------------------------------\n # Build JSON structure form content\n #-------------------------------------------------------------------------\n json_result = '{\\n'\n json_result += '\\t \"_results\":[\\n'\n json_result += \"{\\n\"\n json_result += \"\\t\\t\"+\" \\\"customerID\\\":\"+str_customerID+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"marketID\\\":\"+str(marketID)+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"invoice_count\\\":\"+str(invoice_count)+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"item_count\\\":\"+str(item_count)+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"invl_count\\\":\"+str(invl_count)+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"mean_unit_price\\\":\"+str_mean_unit_price+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"incomes\\\":\"+str_incomes+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"old_date\\\":\"+str_old_date+\"\\n\"\n json_result += \"\\t\\t\"+\",\\\"new_date\\\":\"+str_new_date+\"\\n\"\n \n if RFM is not None:\n json_result += \"\\t\\t\"+\",\\\"RFM\\\":\"+RFM+\"\\n\"\n else:\n pass\n\n json_result += \"}\\n\"\n json_result += '\\n\\t]\\n}'\n return json_result", "def train(self, df, feature, max_range, extra=False, defender=False):\n\n df2 = self._train_preprocess(df, feature, extra)\n\n # No need for names anymore\n if defender:\n df2 = df2.drop([\"Player Id\"], axis=1)\n\n # Instantiate the models\n self.rfrg = RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=69420)\n\n if not defender:\n self.gbrg = LGBMRegressor(n_estimators=1000, learning_rate=0.01)\n\n # Then, perform regression -> This is to see how it performs over weeks\n mean_error1 = []\n mean_error2 = []\n\n for week in range(max_range - 5, max_range):\n train = df2[df2['week'] < week]\n val = df2[df2['week'] == week]\n\n x_train, x_test = train.drop([feature], axis=1), val.drop([feature], axis=1)\n y_train, y_test = train[feature].values, val[feature].values\n\n self.rfrg.fit(x_train, y_train)\n preds1 = self.rfrg.predict(x_test)\n error1 = rmsle(y_test, preds1)\n print('Week %d - Error for Random Forest %.5f' % (week, error1))\n\n mean_error1.append(error1)\n if not defender:\n self.gbrg.fit(x_train, np.log1p(y_train))\n preds2 = np.expm1(self.gbrg.predict(x_test))\n error2 = rmsle(y_test, preds2)\n print('Week %d - Error for Gradient Boosting %.5f' % (week, error2))\n mean_error2.append(error2)\n\n print()\n print()\n print(\"Feature statistics:\")\n print(f\"Min value for feature {feature}: {df[feature].min()}\")\n print(f\"Max value for feature {feature}: 
{df[feature].max()}\")\n print(f\"Mean value for feature {feature}: {df[feature].mean()}\")\n print(f\"Standard deviation for feature {feature}: {df[feature].std()}\")\n print()\n print(\"Results\")\n print('Mean Error for Random Forest = %.5f' % np.mean(mean_error1))\n\n # Note: the final model is trained on every week and stored in self.model!\n final_xtrain = df2.drop([feature], axis=1)\n final_ytrain = df2[feature].values\n self.rfrg.fit(final_xtrain, final_ytrain)\n\n if not defender:\n print('Mean Error for Gradient Boosting = %.5f' % np.mean(mean_error2))\n self.gbrg.fit(final_xtrain, np.log1p(final_ytrain))", "def engineer_features(self, data):\n\n # Compute quality points for each record.\n data['qpts'] = data['chrs'] * data['grdpts']\n\n # Compute total quality points per term.\n data['term_qpts'] = data.groupby(['sid', 'termnum'])\\\n [['qpts']].transform('sum')\n tmp = data[['sid', 'termnum', 'term_qpts']]\\\n .drop_duplicates(['sid', 'termnum'])\\\n .sort(['sid', 'termnum'])\n tmp['total_qpts'] = tmp.groupby('sid')[['term_qpts']]\\\n .transform('cumsum')\n del tmp['term_qpts']\n data = data.merge(tmp, how='left', on=['sid', 'termnum'])\n\n # Next compute total hours earned each term and across terms.\n data['term_chrs'] = data.groupby(['sid', 'termnum'])\\\n [['chrs']].transform('sum')\n tmp = data[['sid', 'termnum', 'term_chrs']]\\\n .drop_duplicates(['sid', 'termnum'])\\\n .sort(['sid', 'termnum'])\n tmp['total_chrs'] = tmp.groupby('sid')[['term_chrs']]\\\n .transform('cumsum')\n del tmp['term_chrs']\n data = data.merge(tmp, how='left', on=['sid', 'termnum'])\n\n # Now we can compute term gpa...\n data['term_gpa'] = data['term_qpts'] / data['term_chrs']\n\n # and the running gpa for each student.\n data['cum_gpa'] = data['total_qpts'] / data['total_chrs']\n\n # Finally, shift several attributes forward so the feature vectors\n # include information from the last term to use for predicting values in\n # the current term. Leave out quality points because gpa is a summary.\n merge_on = ['sid', 'termnum']\n tmp = data.drop_duplicates(merge_on).sort(merge_on)\n cols = ['term_gpa', 'term_chrs', 'cum_gpa', 'total_chrs']\n shifted = tmp.groupby('sid')[cols].shift(1)\n keep = ['lterm_gpa', 'lterm_chrs', 'lterm_cum_gpa', 'lterm_total_chrs']\n shifted.columns = keep\n keep += merge_on\n tmp = tmp.merge(shifted, how='left', right_index=True, left_index=True)\n tmp = tmp[keep]\n data = data.merge(tmp, how='left', on=merge_on)\n\n # Now we're done with student GPA features. 
Let's move on to course GPA,\n # AKA course difficulty as evidenced by student grdpts over time.\n\n # First, we add total # students enrolled at each term and across them.\n data['num_enrolled'] = data.groupby(['cid', 'termnum'])['cid']\\\n .transform('count')\n\n # Add total number of students enrolled so far at each term.\n tmp = data[['cid', 'termnum', 'num_enrolled']]\\\n .drop_duplicates(['cid', 'termnum'])\\\n .sort(['cid', 'termnum'])\n tmp['total_enrolled'] = tmp.groupby('cid')[['num_enrolled']]\\\n .transform('cumsum')\n del tmp['num_enrolled']\n data = data.merge(tmp, how='left', on=['cid', 'termnum'])\n\n # Now sum grdpts together for each term.\n data['term_grdpts_sum'] = data.groupby(['cid', 'termnum'])\\\n [['grdpts']].transform('sum')\n tmp = data[['cid', 'termnum', 'term_grdpts_sum']]\\\n .drop_duplicates(['cid', 'termnum'])\\\n .sort(['cid', 'termnum'])\n tmp['total_grdpts_sum'] = tmp.groupby('cid')[['term_grdpts_sum']]\\\n .transform('cumsum')\n del tmp['term_grdpts_sum']\n data = data.merge(tmp, how='left', on=['cid', 'termnum'])\n\n # Now we can compute course avg. gpa at each term...\n data['term_cgpa'] = data['term_grdpts_sum'] / data['num_enrolled']\n\n # and the running avg course gpa.\n data['cum_cgpa'] = data['total_grdpts_sum'] / data['total_enrolled']\n\n # Finally, shift some feature values forward one to make the previous\n # term's values accessible for prediction in the current term.\n merge_on = ['cid', 'termnum']\n tmp = data.drop_duplicates(merge_on).sort(merge_on)\n cols = ['term_cgpa', 'cum_cgpa', 'num_enrolled', 'total_enrolled']\n shifted = tmp.groupby('cid')[cols].shift(1)\n keep = ['lterm_cgpa', 'lterm_cum_cgpa', 'lterm_num_enrolled',\n 'lterm_total_enrolled']\n shifted.columns = keep\n keep += merge_on\n tmp = tmp.merge(shifted, how='left', right_index=True, left_index=True)\n tmp = tmp[keep]\n data = data.merge(tmp, how='left', on=merge_on)\n\n # Add student term (sterm).\n cols = ['sid', 'termnum']\n tmp = data.drop_duplicates(cols)[cols].sort(cols)\n tmp['tmp'] = 1\n tmp['sterm'] = tmp.groupby('sid').transform('cumsum')['tmp']\n del tmp['tmp']\n data = data.merge(tmp, how='left', on=cols)\n\n return data", "def __train_model(self):\n for i in range(self.file_index):\n logger.info(\"Training the ALS model dataset \" + str(i))\n self.als = ALS(maxIter=5, regParam=0.01, userCol=\"UserId\", itemCol=\"GameId\", ratingCol=\"Userscore\",\n coldStartStrategy=\"drop\")\n self.model[i] = self.als.fit(self.df[i])\n logger.info(\"ALS model built!\")", "def ERCC_Stat(self):\n self.l_ERCC_name = []\n# self.l_RGCs_name = []\n self.l_mRNA_name = []\n self.l_ERCC_FPKM = {}\n# self.l_RGCs_FPKM = {}\n self.l_mRNA_FPKM = {}\n\n self.l_ERCC_UMI = {}\n self.l_mRNA_UMI = {}\n\n# self.l_cirRNA_FPKM={}\n \n self.l_ERCC_HTSname = []\n# self.l_RGCs_HTSname = []\n self.l_mRNA_HTSname = []\n self.l_ERCC_RPKM = {}\n# self.l_RGCs_RPKM = {}\n self.l_mRNA_RPKM = {}\n \n \n self.l_ERCC_MOLs = {}\n# self.l_RGCs_MOLs = {}\n self.l_mRNA_MOLs = {}\n self.l_cirRNA_MOLs={}\n self.l_mRNA_MOLs_HTSname = {}\n \n self.regression = {}\n \n self.__load_FPKM()\n self.__load_MOLs() # ERCC RGC mols\n self.__get_mRNA_MOLs() # get mRNA mols using ERCC_FPKM, ERCC_MOLs and mRNA_FPKM\n# self.__load_Count()\n self.__load_umi()\n \n out_file = \"%s/02.%s.ERCC_Mols.xls\" % (self.dir_StatInfo, self.s_idx)\n f_out_file = open( out_file,\"w\" )\n \n l_info = [\n \"Sample\", \"ERCC_MOLs\", \"mRNA_MOLs\",\n \"RefSeq_mRNA_MOLs\", \"Regression_R\",\n \"Regression_P\", \"RefSeq_mRNA_TPM>0\", \"ERCC_UMI\", 
\"RefSeq_mRNA_UMI\"\n ] \n print >>f_out_file, \"\\t\".join(l_info)\n \n for samp in self.samInfo_pd_RNA['sample']:\n idx = (self.samInfo_pd_RNA['sample'] == samp)\n brief_name = self.samInfo_pd_RNA[idx]['brief_name'].values[0]\n rename = self.samInfo_pd_RNA[idx]['rename'].values[0]\n\n ERCC_MOLs = sum( self.l_ERCC_MOLs[brief_name])\n# RGC_MOLs = sum( self.l_RGCs_MOLs[brief_name])\n mRNA_MOLs = np.sum(self.l_mRNA_MOLs[brief_name])\n RefSeq_mRNA_MOLs = \\\n np.sum(self.l_mRNA_MOLs[brief_name][self.mRNA_refSeq_index])\n \n RefSeq_mRNA_lFPKM = \\\n np.array(self.l_mRNA_FPKM[brief_name],dtype=float)\n \n RefSeq_mRNA_lFPKM = RefSeq_mRNA_lFPKM[ self.mRNA_refSeq_index ]\n RefSeq_mRNA_Exps = \\\n np.shape(RefSeq_mRNA_lFPKM[RefSeq_mRNA_lFPKM > 0])[0]\n\n regression_R = self.regression[brief_name]['r_value']\n regression_P = self.regression[brief_name]['p_value']\n \n RefSeq_mRNA_lUMI = \\\n np.array(self.l_mRNA_UMI[brief_name],dtype=int)\n\n RefSeq_mRNA_UMI_count = np.sum( RefSeq_mRNA_lUMI )\n\n ERCC_lUMI = \\\n np.array(self.l_ERCC_UMI[brief_name],dtype=int)\n\n ERCC_UMI_count= np.sum( ERCC_lUMI )\n \n l_out = [\n rename,\n ERCC_MOLs, mRNA_MOLs, RefSeq_mRNA_MOLs,\n regression_R, regression_P, RefSeq_mRNA_Exps, ERCC_UMI_count, RefSeq_mRNA_UMI_count\n ]\n l_out = [str(i) for i in l_out]\n print >>f_out_file, \"\\t\".join(l_out)\n f_out_file.close()", "def FeaturesGen(ChopChopresults, outputDir, sgRNA_type):\n \n #make output Directory if it does not already exist\n if not os.path.isdir(outputDir):\n os.makedirs(outputDir)\n \n #list the directory contents \n for i,j,k in os.walk(ChopChopresults): #use walk to go through and find all directories\n \n if j == []: #no subdirectories\n saveDF = pd.DataFrame() #initiate dataframe\n for target in k: #loop through to find the sgRNA sequences\n if target.endswith('.offtargets'):\n with open(os.path.join(i,target), 'r+') as f:\n guide = f.readlines()\n #add them to a dataframe\n temp = pd.Series()\n temp['guideNo'] = target.split('.')[0] + sgRNA_type\n temp['guideSeq'] = guide.pop(0).rstrip()\n \n saveDF = saveDF.append(temp.to_frame().transpose())\n saveDF['type'] = 'sgRNA'\n \n if sgRNA_type == 'General' or sgRNA_type == None:\n saveDF['fwd'] = 'pink'\n saveDF['rev'] = 'green'\n elif sgRNA_type == 'GG':\n saveDF['fwd'] = 'yellow'\n saveDF['rev'] = 'plum'\n elif sgRNA_type == 'GA':\n saveDF['fwd'] = 'cyan'\n saveDF['rev'] = 'cornflower blue'\n \n \n #save to txt file with tab delimiter\n saveDF.to_csv(os.path.join(outputDir, os.path.basename(i) + '_features.txt'),\\\n index = False, header = False, sep = '\\t')\n \n del saveDF", "def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = 
int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def customerReport(self):\n self._setFormat()\n for cust in self.getCustomerAccountData():\n self.output.write(self.form_line(cust))", "def __init__(self, features, nonfeature_columns, output_path):\n\n super(WritePredictionsHandler).__init__()\n\n self.needs_base_pred = False\n self._results = []\n self._samples = []\n self._NA_samples = []\n self._column_names = nonfeature_columns + features\n self._output_path = output_path\n self._output_handle = open(output_path, 'w+')\n self._output_handle.write(\"{0}\\n\".format(\n '\\t'.join(self._column_names)))", "def Preprocess_MR(path=\"datasets/raw/rt10662\"):\n\n output_path = \"datasets/preprocessed/MR_Data\"\n\n # load positive and negative data\n with io.open(os.path.join(path, \"rt-polarity.pos\"), encoding='latin-1') as f:\n pos_data = f.readlines()\n pos_data = [sentence.strip() for sentence in pos_data]\n with io.open(os.path.join(path, \"rt-polarity.neg\"), encoding='latin-1') as f:\n neg_data = f.readlines()\n neg_data = [sentence.strip() for sentence in neg_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def mistakes_dataframe(filename, outfilename):\n counter = 0\n data_list = []\n list_of_lists = []\n feature_dict = dict()\n mistake_counter = 0\n \n #The crf file only makes use of the token and assigns a label. 
\n #For the mistakes file, we are using the features of the gold file.\n #The features of the gold file are used together with the labels of the \n #crf file to provide the reader with a better understanding of the mistakes.\n if filename == crf_file:\n file_rows = []\n for system, gold in zip(file_to_listrows(crf_file), file_to_listrows(dev_file)):\n system_label = [system[-1]]\n line = gold + system_label\n file_rows.append(line)\n else: \n #The baseline and SVM classifier have a file with all the features \n #present, for that reason we just apply the file_to_listrows-function.\n file_rows = file_to_listrows(filename)\n \n for features in file_rows[1:]:\n counter += 1\n mistake_counter += 1\n feature_dict = {\n 'IndexInDataset': counter+1, #The number from the original \n #dataset is inserted so that the \n #tokens are easy to find.\n 'Mistake-type': None,\n 'Token': features[0],\n 'lemma': features[1],\n 'UPOS': features[2],\n 'XPOS': features[3],\n 'DepRel': features[4],\n 'head': features[5],\n 'PrevTok': features[6],\n 'PrevPOS': features[7],\n 'NextTok': features[8],\n 'NextPOS': features[9],\n 'NegPrefix': features[10],\n 'NegPostfix': features[11],\n 'NegExpList': features[12],\n 'GoldLabel': features[13],\n 'SystemLabel': features[14] #This is the label that the system gave to the token\n }\n if features[13] == 'O' and features[14] =='NEG':\n feature_dict['Mistake-type'] = 'FalsePositive'\n elif features[13] == 'NEG' and features[14] == 'O':\n feature_dict['Mistake-type'] = 'FalseNegative'\n \n data_list.append(feature_dict)\n if counter == 13567: #The last line of every classifier is empty, to prevent the code from breaking,this if-statement is inserted.\n break\n \n filename = filename.replace('-out.conll', '')\n mistakes = f'This system ({filename}) made {mistake_counter} mistakes' #The function shows the amount of mistakes the system made\n df = pd.DataFrame(data_list)\n df.to_csv(outfilename, sep='\\t')\n return data_list, df, mistakes #The list of dictionaries, together with the dataframe and the mistakes are returned", "def preprocess():\n\n # Read Training and Test Data\n train_df = pd.read_csv(configs.TRAIN_DATA_PATH)\n test_df = pd.read_csv(configs.TEST_DATA_PATH)\n\n # Data Exploration and Preprocessing\n user_ids = train_df['userId']\n movie_ids = train_df['movieId']\n ratings = train_df['rating']\n\n # Create the mapping and save it for later usage in testing\n all_users = create_mapping(pd.concat([train_df['userId'], test_df['userId']], axis = 0))\n all_movies = create_mapping(pd.concat([train_df['movieId'], test_df['movieId']], axis = 0))\n\n\n # Save the mapping arrays for users and movies\n with open(configs.USER_IDS_PATH, \"wb\") as f:\n np.save(f, all_users)\n\n with open(configs.MOVIE_IDS_PATH, \"wb\") as f:\n np.save(f, all_movies)\n\n\n # Resetting the ids of training data to [0-n]\n user_ids = map_ids(user_ids, users = True)\n movie_ids = map_ids(movie_ids, users = False)\n\n\n # Resetting the ids of test data\n test_user_ids = map_ids(test_df['userId'], users = True)\n test_movie_ids = map_ids(test_df['movieId'], users = False)\n test_ratings = test_df['rating']\n\n # Statistics of training data\n # Number of users and movies can be extracted from the array of mapped ids\n n_users = np.max(user_ids) + 1\n n_movies = np.max(movie_ids) + 1\n\n # Statistics of test data\n test_n_users = np.max(test_user_ids) + 1\n test_n_movies = np.max(test_movie_ids) + 1\n\n # Returning the indices back can be done using unmap_ids function\n # Example\n # 
unmap_ids(movie_ids, users = False)\n # unmap_ids(user_ids, users = True)\n\n # Define the training rating matrix as sparse matrix\n\n # Sparse rating matrix from training data\n R = sparse.coo_matrix(\n (ratings, (user_ids, movie_ids)),\n shape=(n_users, n_movies),\n dtype=np.float\n )\n\n\n\n # Save the rating matrix for training data\n sparse.save_npz(configs.R_TRAIN_MATRIX_PATH, R)\n\n # Sparse rating matrix from test data\n R2 = sparse.coo_matrix(\n (test_ratings, (test_user_ids, test_movie_ids)),\n shape=(test_n_users, test_n_movies),\n dtype=np.float\n )\n\n # Save the rating matrix for test data\n sparse.save_npz(configs.R_TEST_MATRIX_PATH, R2)\n\n # return user_ids, movie_ids, ratings, n_users, n_movies, R, R2\n return R, R2", "def add_to_tfrecord():\n\n record_path = os.path.join(FLAGS.dataset_dir, \"records\")\n if not os.path.isdir(record_path):\n os.makedirs(record_path)\n\n ####### Path Definition #######\n img_path = \"data/traindata/ccmpred/\"\n ss3_path = \"data/traindata/ss3/\"\n pdb_path = \"data/traindata/pdb/\"\n fasta_path = \"data/traindata/fasta/\"\n\n with open('data/train.list') as fin:\n names = [line.rstrip() for line in fin]\n\n num_shards = int(len(names) / 1000)\n num_per_shard = int(math.ceil(len(names) / float(num_shards)))\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n for shard_id in range(num_shards):\n record_filename = os.path.join(record_path, \"new_train_ss3_%d.tfrecord\" %shard_id)\n options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)\n with tf.python_io.TFRecordWriter(record_filename) as tfrecord_writer:\n start_ndx = shard_id * num_per_shard\n end_ndx = min((shard_id + 1) * num_per_shard, len(names))\n print \"processing data from %d to %d...\" %(start_ndx, end_ndx)\n for i in range(start_ndx, end_ndx):\n name = names[i]\n img = np.loadtxt(os.path.join(img_path, name+\".ccmpred\"))\n height, width = img.shape\n # load ss3 info\n predss3 = np.loadtxt(os.path.join(ss3_path, name+\".ss3\"))\n if predss3.shape[0] != height:\n continue\n Index = np.mgrid[0: height, 0: width]\n i, j = Index[0], Index[1]\n ss3mat = np.concatenate([predss3[i], predss3[j]], axis=-1)\n # combine ccmpred and ss3\n img = np.concatenate([img[..., np.newaxis], ss3mat], axis=-1)\n\n # box and mask info\n pdb = os.path.join(pdb_path, name+\".pdb\")\n fasta = os.path.join(fasta_path, name+\".fasta\")\n L, gt_boxes, masks, ss3 = LoadNativeBox(name, pdb, fasta)\n gt_boxes = np.array(gt_boxes).astype(np.float32)\n masks = np.array(masks).astype(np.uint8) ### Important ###\n \n # combine all masks in one mask\n mask = np.zeros(shape=(height, width), dtype=np.int8)\n for m in masks:\n mask += m\n\n img = img.astype(np.float64)\n #assert img.size == width * height * 3, '%s' % str(name)\n \n #if gt_boxes.shape[0] > 0:\n example = to_tfexample_raw(\n name, img.tostring(), mask.tostring(),\n height, width, \n gt_boxes.shape[0], gt_boxes.tostring(), masks.tostring())\n \n tfrecord_writer.write(example.SerializeToString())", "def prepare_data(self):\n if not os.path.exists(self.hparams.data_cache_dir):\n os.mkdir(self.hparams.data_cache_dir)\n for mode, filepath in zip(['train', 'val', 'test'],\n [self.hparams.train_path, self.hparams.val_path, self.hparams.test_path]):\n if mode == 'train':\n label_mode = self.hparams.label_mode\n else:\n label_mode = 'major'\n cached_features_file = self._feature_file(mode, label_mode)\n\n 
if not os.path.exists(cached_features_file):\n logger.info('Creating features from dataset file at %s', filepath)\n examples = read_examples_from_file(filepath, mode, label_mode)\n features = convert_examples_to_features(\n examples,\n self.labels,\n self.hparams.max_seq_length,\n self.tokenizer,\n cls_token_at_end=bool(self.hparams.model_type in ['xlnet']),\n cls_token=self.tokenizer.cls_token,\n cls_token_segment_id=2 if self.hparams.model_type in ['xlnet'] else 0,\n sep_token=self.tokenizer.sep_token,\n sep_token_extra=bool(self.hparams.model_type in ['roberta']),\n pad_on_left=bool(self.hparams.model_type in ['xlnet']),\n pad_token=self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0],\n pad_token_segment_id=4 if self.hparams.model_type in ['xlnet'] else 0,\n pad_token_label_id=self.pad_token_label_id,\n )\n logger.info('Saving features into cached file %s', cached_features_file)\n torch.save(features, cached_features_file)", "def transform_data(data_df, target_df = None):\n rec_idx, rec_col, rec_data = create_recency_feature(data_df)\n freq_idx, freq_col, freq_data = create_frequency_feature(data_df)\n norm_idx, norm_col, norm_data = create_norm_feature(data_df)\n\n # with hstack function we are concatinating a sparse matrix and a dense matirx :)\n feat_df = hstack((rec_data, freq_data, norm_data))\n print('Final feature matrix shape:', feat_df.shape)\n \n # merge all the feature names\n feat_names = list(rec_col) + list(freq_col) + list(norm_col)\n \n if isinstance(target_df, pd.core.frame.DataFrame):\n # get +ve & -ve indices\n one_idx = target_df[target_df['outcome_flag'] == 1]['id'].index.tolist()\n zero_idx = target_df[target_df['outcome_flag'] == 0]['id'].index.tolist()\n \n # calculate fitness values of features\n rcdf = create_fitness_stats(rec_data, rec_col, one_idx, zero_idx, nans = True)\n fqdf = create_fitness_stats(freq_data, freq_col, one_idx, zero_idx, nans = False)\n nrdf = create_fitness_stats(norm_data, norm_col, one_idx, zero_idx, nans=False)\n fit_df = rcdf.append(fqdf).append(nrdf)\n fit_df.reset_index(drop=1)\n return feat_df, feat_names, fit_df\n \n return feat_df, feat_names", "def fit_data(self, matrix, user_features=None, item_features=None):\r\n matrix.sort_index(inplace=True)\r\n matrix.sort_index(inplace=True, axis=1)\r\n dataset = Dataset()\r\n dataset.fit((x for x in matrix.index),\r\n (x for x in matrix.columns))\r\n interactions = pd.melt(matrix.replace(0, np.nan).reset_index(),\r\n id_vars='index',\r\n value_vars=list(matrix.columns[1:]),\r\n var_name='plu_id',\r\n value_name='rating').dropna().sort_values('index')\r\n interactions.columns = ['crd_no', 'plu_id', 'rating']\r\n self.interactions, self.weights = dataset.build_interactions([tuple(x) for x in interactions.values])\r\n\r\n if user_features is not None:\r\n user_features.sort_index(inplace=True)\r\n dataset.fit_partial(users=user_features.index,\r\n user_features=user_features)\r\n self.user_features = dataset.build_user_features(\r\n ((index, dict(row)) for index, row in user_features.iterrows()))\r\n else:\r\n self.user_features = None\r\n if item_features is not None:\r\n item_features.sort_index(inplace=True)\r\n dataset.fit_partial(items=item_features.index,\r\n item_features=item_features)\r\n self.item_features = dataset.build_item_features(\r\n ((index, dict(row)) for index, row in item_features.iterrows()))\r\n else:\r\n self.item_features = None", "def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n 
num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)" ]
[ "0.70747465", "0.7067084", "0.62919354", "0.6007114", "0.58264214", "0.5718458", "0.560313", "0.56008935", "0.55872756", "0.55600125", "0.5532584", "0.5520967", "0.55154175", "0.5512787", "0.54379237", "0.53890693", "0.53649795", "0.5364341", "0.53604376", "0.53556436", "0.53533244", "0.53514385", "0.5339159", "0.53381556", "0.53314143", "0.5309661", "0.53057206", "0.52984893", "0.5298287", "0.52958536", "0.529474", "0.52876973", "0.5280449", "0.5279213", "0.5240862", "0.52352065", "0.523428", "0.52298284", "0.5222971", "0.521646", "0.5214852", "0.52064604", "0.519766", "0.5189067", "0.5187969", "0.517607", "0.51737356", "0.51722974", "0.5156273", "0.5156197", "0.51548654", "0.51425356", "0.51414245", "0.51408017", "0.51402116", "0.51363224", "0.51352334", "0.5135139", "0.513245", "0.51287156", "0.5127615", "0.5124145", "0.5123402", "0.5119103", "0.5118403", "0.51157886", "0.5114989", "0.5113043", "0.511074", "0.5098449", "0.5095837", "0.50854844", "0.50795", "0.5075091", "0.5072214", "0.5072214", "0.50692534", "0.5066732", "0.5065407", "0.50625986", "0.5061752", "0.5053939", "0.5051114", "0.50509703", "0.50505567", "0.5048939", "0.5048796", "0.50386137", "0.50258726", "0.5025765", "0.5024844", "0.5019144", "0.5018553", "0.501422", "0.50123316", "0.5006545", "0.5000819", "0.5000352", "0.49959943", "0.49955606" ]
0.724281
0
Creates new features from the Description feature using NLTK, an NLP package. The NLP features are collected into a dataframe, and a PCA reduction is applied to this dataframe. Features from the dataframe are renamed with the root name w_nlp. When this method is called during the data_model building step, the dataframe holding the new NLP features is dumped into a file.
def data_transform_nlp(self): df_invoice_line = None is_build_step = False if self._vectorizer_nlp is None: is_build_step = True list_no_words=['SET','PACK'] df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \ = p5_util.nlp_process(self.df_invoice_line\ , 'Description' , vectorizer= self._vectorizer_nlp\ , list_no_words=list_no_words, is_verbose= self.is_verbose) if df_invoice_line is None: self.strprint("***ERROR : NLP process interrupted!") return #------------------------------------------------------------------------- # NLP weights are cumulated (sumerized) per customer #------------------------------------------------------------------------- if csr_matrix_weights is None: csr_matrix_weights \ = p5_util.object_load('./data/matrix_weights_NLP.dump') else: pass self.strprint("df_invoice_line : "+str(df_invoice_line.shape)) self.dbg_df = df_invoice_line.copy() root_name = 'w_nlp_' self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\ , csr_matrix_weights, root_name) del(csr_matrix_weights) #------------------------------------------------------------------------- # Dimension reduction thanks to PCA #------------------------------------------------------------------------- self.strprint("self._df_w_nlp : "+str(self._df_w_nlp.shape)) root_name_pca = 'nlp_pca_' n_dim = self._nlp_pca_ndim df_customers_pca_nlp, self._pca_nlp \ = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\ , p_is_scale=False, pca=self._pca_nlp) self.strprint("df_customers_pca_nlp : " +str(df_customers_pca_nlp.shape)) #------------------------------------------------------------------------- # Backup of NLP features per customer #------------------------------------------------------------------------- if is_build_step is True: p5_util.object_dump(df_customers_pca_nlp\ , self._df_customers_nlp_fileName) else: self._df_customers_pca_nlp = df_customers_pca_nlp.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_description_nlp(self):\n \n #-------------------------------------------------------------------------\n # Returned dataframe is aggregated with weights from self.vectorizer\n #-------------------------------------------------------------------------\n list_no_words=['SET','PACK']\n self.df_invoice_line, vectorizer, matrix_weights \\\n = p5_util.nlp_process(self.df_invoice_line,'Description'\\\n , vectorizer=self.vectorizer, list_no_words=list_no_words)\n\n #-------------------------------------------------------------------------\n # Each vectorized column 'x' is renamed w_nlp_i\n #-------------------------------------------------------------------------\n dict_matching_name = dict()\n for col in self.df_invoice_line.columns:\n if str(col).isdigit() is True:\n new_col_name = \"w_nlp_\"+str(col)\n dict_matching_name[col] = new_col_name\n \n self.df_invoice_line.rename(columns=dict_matching_name,inplace=True)\n #-------------------------------------------------------------------------\n # Description is droped from columns\n #-------------------------------------------------------------------------\n del(self.df_invoice_line['Description'])", "def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))", "def generate_features(\n df: pd.DataFrame, spacy_model: str, language: str\n) -> pd.DataFrame:\n logging.info(\"Loading Spacy model...\")\n nlp = spacy.load(spacy_model)\n\n # Makes all tokens lowercase\n logging.info(\"Lowercase\")\n df[\"token_lower\"] = df[\"token\"].str.lower()\n\n logging.info(\"Lemma, pos\")\n spacy_pipe = nlp.pipe(df[\"token_lower\"].values, disable=[\"ner\", \"parser\"])\n features_gen = ((doc[0].lemma_, doc[0].pos_) for doc in spacy_pipe)\n df[\"lemma\"], df[\"pos\"] = zip(*features_gen)\n\n # Prepare stemmers\n logging.info(\"Loading Snowball Stemmer...\")\n snow = SnowballStemmer(language=language)\n\n logging.info(\"Snowball stemmer\")\n df[\"snowballStemmer\"] = df.apply(lambda row: snow.stem(row[\"token_lower\"]), axis=1)\n\n logging.info(\"Loading Porter Stemmer...\")\n port = PorterStemmer()\n\n logging.info(\"Porter stemmer\")\n df[\"porterStemmer\"] = df.apply(lambda row: port.stem(row[\"token_lower\"]), axis=1)\n\n # Adds columns with a binary if the word contains a possible negation prefix or suffix\n logging.info(\"Prefix\")\n df[\"possible_prefix\"] = df.apply(\n lambda row: possible_negation_prefix(row[\"token_lower\"]), axis=1\n )\n\n logging.info(\"Suffix\")\n df[\"possible_suffix\"] = df.apply(\n lambda row: possible_negation_suffix(row[\"token_lower\"]), axis=1\n )\n\n # Adds new columns for the 
previous and next lemma and pos-tag\n logging.info(\"Add prev/next shifts\")\n df[\"prev_Lemma\"] = df[\"lemma\"].shift(periods=1)\n df[\"next_Lemma\"] = df[\"lemma\"].shift(periods=-1)\n df[\"prev_pos\"] = df[\"pos\"].shift(periods=1)\n df[\"next_pos\"] = df[\"pos\"].shift(periods=-1)\n return df", "def construct_NLP_model(self, df=None):\n import review_processing as rp\n # get words\n if df is not None:\n nitems = df.shape[0]\n col_names = df.columns.values\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names:\n sys.exit('construct_NL_model: The name {0}/{1} cannot be found'.\n format(self.review_col_name, self.sentiment_col_name))\n review_list = df[self.review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n # Get training sentiment values\n self.sentiment = df[self.sentiment_col_name].values\n\n else:\n if self.training_file_name is None:\n sys.exit('construct_NLP_model: traning file name does not '\n 'exist')\n else:\n suffix = os.path.splitext(self.training_file_name)[1][1:]\n if suffix == 'csv':\n df = pd.read_csv(self.training_file_name)\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names::\n sys.exit('construct_NL_model: The name {0}/{1} cannot '\n ' be found'.format(self.review_col_name,\n self.sentiment_col_name))\n nitems = df.shape[0]\n review_list = df[review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n elif suffix == 'json':\n data_dict_list = rp.load_data(self.training_file_name)\n if self.review_col_name not in data_dict_list.keys():\n sys.exit('construct_NL_model: The name {0} cannot be '\n 'found'.format(review_col_name))\n review_list = map(lambda x: x[review_col_name],\n data_dict_list)\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n else:\n sys.exit('construct_NLP_model: file type not supported '\n 'yet!')\n\n # Training process of Bag of Worlds\n if self.NLP_model == 'BagofWords':\n print('construct_NLP_model: Creating bag of words...')\n self.vectorizer = CountVectorizer(analyzer='word',\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n max_features=self.maxfeature)\n self.train_data_features = vectorizer.fit_transform(\n meaningful_words)\n self.train_data_features = train_data_features.toarray()\n\n # vocab = vectorizer.get_feature_names()\n # dist = np.sum(train_data_features, axis=0)\n # for tag, count in zip(vocab, dist):\n # print(count, tag)\n\n else:\n sys.exit('construct_NLP_model: NLP_model type not supported yet!')", "def df_lda_preprocessing(df, col_name, remove_stopwords=True, add_features=False):\n df['text'] = df[col_name] # Create a copy of the input col_name: text\n \n # df_clean_sting(df, 'text') # Clean the text from col_name # TEST FJERN RENGØRING\n\n # Test other way of handling strings\n df_simple_clean_string(df, 'text')\n\n if add_features:\n df_make_features_from_string(df, 'text') # Add features\n\n # This is a hack soly for the scope of this project to concat ThreadSubject\n # When the message is initiated by the Member\n if col_name == 'SignalMessageBodyClean':\n df_aka = df.copy(deep=True)\n # df_aka['text_1'] = df_aka['ThreadSubject']\n # df_clean_sting(df_aka, 'ThreadTopic')\n df_simple_clean_string(df_aka, 'ThreadTopic')\n\n df['text'] = (df['text'] +' '+df_aka['ThreadTopic']).where(df['IsFirstMessageInthread']==1,df['text'])\n\n df_get_tokens(df, 'text') # Returns col: tokenized_text\n\n # df_stem_words(df, 
'tokenized_text') # Returns col: stemmed_text\n\n df_bigrams(df, 'tokenized_text') # Returns bigrams\n df_trigrams(df, 'tokenized_text') # Returns trigrams\n\n df['ngrams'] = df['tokenized_text'] + df['bigrams'] + df['trigrams']\n\n if remove_stopwords:\n df_remove_stopwords(df, 'ngrams') # returns stopwords_removed", "def preprocess(self, df, maxlen = 169):\n \n vocabs = self.tk.word_index.keys()\n \n df1 = self.treat_na(df)\n df2 = self.remove_punc_sw(df1)\n df3 = self.remove_numbers(df2)\n df4 = self.lemma_pos(df3)\n df5 = self.bigram(df4)\n df6 = self.combine_bigrams(df5)\n \n new_docs = []\n \n for word_list in df6:\n \n if len(word_list) == 2 and word_list[0].lower() == 'noinfo' and word_list[1].lower() == 'noinfo':\n new_docs.append(list(np.zeros(maxlen)))\n \n else:\n new_word_list = []\n for word in word_list:\n if word not in vocabs:\n word = 'UNKNOWN_TOKEN'\n new_word_list.append(word)\n \n sequence = \" \".join(new_word_list)\n vectors = self.tk.texts_to_sequences([sequence])\n padded_vectors = pad_sequences(vectors, maxlen=maxlen, padding='post', truncating='post')\n \n new_docs.append(list(padded_vectors[0]))\n \n return new_docs", "def preprocess_feature(df):", "def generate_trajectories_feature(self):\n if self.df_feature is not None:\n return self.df_feature\n trajs_feature = [traj.get_basic_feature() for traj in self.trajectories]\n self.df_feature = pd.DataFrame(trajs_feature)\n self.df_feature[\"LABEL\"] = self.df[\"LABEL\"]\n return self.df_feature", "def create_new_features(self):\n train = self.train\n \n train['is_context'] = train['context_type'].isin(CONTEXT_TYPE_TEST)\n train['is_context_flow'] = train['listen_type'] * train['is_context']\n \n train['is_listened_context'] = train['is_listened'] * train['is_context']\n train['is_listened_flow'] = train['is_listened'] * train['listen_type']\n train['is_listened_context_flow'] = train['is_listened'] * train['is_context_flow']\n \n for feature in self.categorize_features:\n gby_feat = train.groupby(feature)\n new_features(train, gby_feat, feature, feature in self.listen_type_features, self.context_features, self.flow_features, self.fillna)\n \n # Variable combinations\n for feat1 in self.combo_features1:\n for feat2 in self.combo_features2:\n gby_feat = train.groupby([feat1, feat2])\n name = feat1 + '_' + feat2\n new_features(train, gby_feat, name, feat1 in self.listen_type_features, self.context_features, self.flow_features, self.fillna)", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = 
pandas.DataFrame(features)\n return features", "def __init__(self, df):\n # Creating content\n df['content_str'] = df['content'].map(lambda x: self.__word_joiner(x))\n text = df['content_str'].str.cat(sep=' ')\n df.drop('content_str', axis=1, inplace=True)\n\n self._generateBigrams(text)\n self._generateUnigrams(text)\n self.corpussize=len(Utilities.CVTokeniser(text))\n print(\"Feature_PMI: Corpus size:\",self.corpussize)", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n numeric = HEADER.as_feature_indices(\n [\"review_count\", \"lat\", \"lng\", \"lat2\", \"lng2\"]\n )\n\n # These features contain a relatively small number of unique items.\n categorical = HEADER.as_feature_indices(\n [\"distance\", \"price_level\", \"review_count\", \"Sp1\", \"type\"]\n )\n\n # These features can be parsed as natural language.\n text = HEADER.as_feature_indices(\n [\n \"slug\", \"menu\", \"slug.1\", \"categories\", \"name\", \"url\", \"homeurl\",\n \"resource_id1\", \"resource_id2\"\n ]\n )\n\n numeric_processors = Pipeline(steps=[(\"robustimputer\", RobustImputer())])\n\n categorical_processors = Pipeline(\n steps=[\n (\"thresholdonehotencoder\", ThresholdOneHotEncoder(threshold=162))\n ]\n )\n\n text_processors = Pipeline(\n steps=[\n (\n \"multicolumntfidfvectorizer\",\n MultiColumnTfidfVectorizer(\n max_df=0.9977,\n min_df=0.0003137465824032988,\n analyzer=\"word\",\n max_features=10000\n )\n )\n ]\n )\n\n column_transformer = ColumnTransformer(\n transformers=[\n (\"numeric_processing\", numeric_processors, numeric\n ), (\"categorical_processing\", categorical_processors,\n categorical), (\"text_processing\", text_processors, text)\n ]\n )\n\n return Pipeline(\n steps=[\n (\"column_transformer\",\n column_transformer), (\"robustpca\", RobustPCA(n_components=88)),\n (\"robuststandardscaler\", RobustStandardScaler())\n ]\n )", "def transform_and_create_new_features(df):\n # 'GENDER' FEATURE MANAGEMENT\n # Transform 'Gender' feature (categorical) to numerical one\n df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # 'EMBARKED' FEATURE MANAGEMENT\n # 1st approach: df['Port'] = df['Embarked'].map({'C': 1, 'S': 2, 'Q': 3}).astype(int)\n # Extract from 'pycon UK Tutorial':\n # \"Replacing {C, S, Q} by {1, 2, 3} would seem to imply the ordering C < S < Q when in fact they are simply arranged\n # alphabetically. To avoid this problem, we create dummy variables. 
Essentially this involves creating new columns\n # to represent whether the passenger embarked at C with the value 1 if true, 0 otherwise.\"\n dummies_embarked = pd.get_dummies(df['Embarked'], prefix='Embarked')\n df = pd.concat([df, dummies_embarked], axis=1)\n\n # 'AGE' & 'FARE' FEATURES MANAGEMENT\n df = _transform_age_feature(df)\n df = _transform_fare_feature(df)\n\n # CREATION OF A NEW FEATURE: Family size + Alone or not ?\n df['Family'] = df['SibSp'] + df['Parch']\n df['Alone'] = 0\n df.loc[df['Family'] == 0, 'Alone'] = 1\n\n # Drop all columns that are now useless\n df = df.drop(['Sex', 'Age', 'Fare', 'Embarked', 'SibSp', 'Parch'], axis=1)\n print(df.head(10))\n\n return df", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. 
Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def clfFeature(feature, mode):\r\n \r\n feature_path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\features\\\\' + feature + '.txt'\r\n classlist = ['negative', 'positive']\r\n features = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n file = open(review, 'r', encoding='utf8').read().lower()\r\n wordlist = []\r\n featreader = csv.reader(open(feature_path, 'r'), delimiter= '\\n')\r\n for word in featreader:\r\n if word[0] in file:\r\n wordlist.append(word[0])\r\n df = pd.DataFrame({'File': [title], feature.capitalize(): [', '.join(wordlist)]}).set_index('File')\r\n features = features.append(df)\r\n \r\n return features", "def propername_featurize(input_data,N, MinFreq,model_choice =\"NGram\"):\n def to_lowercase(text):\n return text.lower()\n\n def remove_URL(text):\n return re.sub(r\"http\\S+\", \"\", text)\n def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words\n\n def tokenize(text):\n return text.split()\n def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stop_word:\n new_words.append(word)\n return new_words\n def detokenize_words(words):\n separator = ' '\n return separator.join(words)\n def preprocess_text(df):\n df['text'] = df['text'].apply(to_lowercase)\n df['text'] = df['text'].apply(remove_URL)\n df['text'] = df['text'].apply(tokenize)\n df['text'] = df['text'].apply(remove_non_ascii)\n df['text'] = df['text'].apply(detokenize_words) \n return df\n def character_ngram(text_matrix, N, MinFreq): #array of non-tokenized text\n #tokenize\n all_tokenized_text = []\n #build all token\n flatten_tokenized_text = []\n for j in text_matrix:\n cur_text = \"\".join(j.split())\n cur_feature = []\n \n for i in range(N[0]-1,N[1]): \n \n for l in range(len(cur_text) - i):\n cur_feature.append(cur_text[l:l+i+1])\n \n all_tokenized_text.append(cur_feature)\n flatten_tokenized_text.extend(cur_feature)\n charfreq = {}\n for i in flatten_tokenized_text:\n if i not in charfreq.keys():\n charfreq[i] = 1\n else:\n charfreq[i] += 1\n selected_feature = []\n for i, item in charfreq.items():\n if item >= MinFreq:\n selected_feature.append(i)\n dim = len(selected_feature)\n encoded_matrix = []\n selected_feature = np.array(selected_feature)\n for i in all_tokenized_text:\n cur_text = np.array(i)\n cur_encoded = np.zeros(dim)\n cur_idx = []\n for j in range(len(cur_text)):\n idx = np.where(selected_feature == cur_text[j]) \n if len(idx[0]) != 0: \n cur_idx.append(idx[0][0])\n #binary character presence \n cur_encoded[cur_idx] = 1\n\n encoded_matrix.append(cur_encoded)\n encoded_matrix = np.array(encoded_matrix)\n\n return encoded_matrix, selected_feature\n def task_specific_featurize(feature_value):\n feature_dic = {\"contain_numerics\":[], \"contain_special_punc\":[],\"contain_inc\":[],\"Small_token_length\":[]}\n special_pun = \"&\\?-:%\"\n company_col = [\"co.\",\"inc.\"]\n def 
hasNumbers(string):\n return any(char.isdigit() for char in string)\n for i in text_feature:\n if hasNumbers(i):\n feature_dic[\"contain_numerics\"].append(1)\n else:\n feature_dic[\"contain_numerics\"].append(0)\n Spec_Punc = False\n for l in special_pun:\n if i.find(l) != -1:\n feature_dic[\"contain_special_punc\"].append(1)\n Spec_Punc = True\n break\n if Spec_Punc == False:\n feature_dic[\"contain_special_punc\"].append(0)\n Contain_Com = False\n for l in company_col:\n if i.find(l) != -1:\n feature_dic[\"contain_inc\"].append(1)\n Contain_Com = True\n break\n if Contain_Com == False:\n feature_dic[\"contain_inc\"].append(0)\n token_length = len(i.split())\n if token_length <= 1:\n feature_dic[\"Small_token_length\"].append(1)\n else:\n feature_dic[\"Small_token_length\"].append(0)\n\n encoded_matrix = pd.DataFrame(feature_dic).values\n selected_feature = list(feature_dic.keys()) \n return encoded_matrix, selected_feature\n # TODO: Implement featurization of input.\n matrix_processed = preprocess_text(input_data)\n text_feature = matrix_processed[[\"text\"]].values.flatten() \n if model_choice == \"NGram\":\n \n encoded_matrix, selected_feature = character_ngram(text_feature, N, MinFreq)\n elif model_choice == \"TS\":\n encoded_matrix, selected_feature = task_specific_featurize(text_feature)\n elif model_choice == \"Combined\":\n\n encoded_matrix_specific, selected_feature_specific = task_specific_featurize(text_feature) \n encoded_matrix_bow, selected_feature_bow = character_ngram(text_feature, N, MinFreq)\n encoded_matrix = np.hstack((encoded_matrix_bow,encoded_matrix_specific))\n selected_feature = list(selected_feature_bow)\n selected_feature.extend(selected_feature_specific)\n \n return encoded_matrix,selected_feature", "def text_feature_extract(df):\n return df", "def construct_df_topics(self, n_words=20):\n\n self.check_model()\n topic_keywords = []\n keywords = array(self.vectorizer.get_feature_names())\n\n for topic_weights in self.model.components_:\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\n topic_keywords.append(keywords.take(top_keyword_locs))\n\n self.df_topic_keywords = pd.DataFrame(topic_keywords)\n self.df_topic_keywords.columns = ['Word ' + str(i) for i in range(self.df_topic_keywords.shape[1])]\n self.df_topic_keywords.index = ['Topic ' + str(i) for i in range(self.df_topic_keywords.shape[0])]", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = 
self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def generate_features(self, df):\n df = df.reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return extract_features(df, column_id=\"id\", impute_function=impute,\n default_fc_parameters=self.extraction_settings)", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += 
str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def preprocess_training_text(text, accented_chars=True, \n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True):\n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n \n doc = nlp(text) #tokenise text\n\n\n clean_text = []\n for token in doc:\n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n # append tokens edited and not removed to list \n if edit != \"\" and flag == True:\n clean_text.append(edit)\n \n # Convert back to string:\n new_text = ' '.join(clean_text)\n regex = re.compile('[^a-zA-Z]')\n new_text = regex.sub(' ', new_text)\n words = re.findall(r'\\w+.', new_text)\n return ' '.join(words)", "def topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df", "def create_tdm(cls):\n X = cls.vectorizer.fit_transform(cls.processed_documents) # Convert the X as transposed matrix\n X = X.T.toarray() # Create a DataFrame and set the vocabulary as the index\n cls.df_tdm = pd.DataFrame(X, 
index=cls.vectorizer.get_feature_names())", "def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def preprocess(old_df, label_name, category_features, non_category_features):\n old_df['fraud'] = old_df[label_name].apply(lambda x: x[0] == 'f')\n\n # Creating a new dataframe with a subset of features.\n new_df = old_df[['fraud'] + non_category_features]\n\n # For categorical features, we make dummy variables,\n # and merge them into new_df.\n for feature in category_features:\n dummy_df = pd.get_dummies(old_df[feature], prefix=feature,\n dummy_na=True)\n # Since dummy_na=True, the last column will be for null values.\n dummy_df.drop(dummy_df.columns[-1], axis=1, inplace=True)\n new_df = pd.concat([new_df, dummy_df], axis=1)\n return new_df", "def textFeature(mode):\r\n \r\n classlist = ['negative', 'positive']\r\n data = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n df1 = pd.DataFrame()\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n text = open(review, 'r', encoding='utf8').read()\r\n df = pd.DataFrame({'File': [title], 'Text': [text], 'Label': [label]}).set_index('File')\r\n df1 = df1.append(df)\r\n data = 
data.append(df1)\r\n \r\n return data", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def preprocess(tmp_df, preprocess=False):\n\n # all in one go in order to just have to tokenize once\n if preprocess:\n tmp_df[\"description\"] = tmp_df[\"description\"].apply(\n clean_stop_punct_digit_n_lower)\n # words = tmp_df['description'] \\\n # .str.split(expand=True).stack().value_counts()\n # ratio = tmp_df['description'].apply(remove_duplicate)\\\n # .str.split(expand=True).stack().value_counts() \\\n # / tmp_df.shape[0]\n # words.to_csv('freq_words.csv')\n # ratio.to_csv(\"ratio.csv\")\n\n return tmp_df", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0):\n\ttrain_data = pd.DataFrame(columns=['text', 'response'])\n\n\tprep_0 = [strip_non_alphanum(line) for line in text]\n\tprep_1 = [line for line in prep_0 if 
line.rstrip()]\n\tprep_2 = [strip_multiple_whitespaces(line) for line in prep_1]\n\tprep_3 = [line.lower() for line in prep_2]\n\n\tif to_tfidf == 1:\n\t\t#when using tf_idf, removes single character words given that they are ignored by sklearn's TfidfVectorizer\n\t\tprep_3 = [' '.join([word for word in line.split() if len(word) > 1]) for line in prep_3]\n\n\tif tokenization == 1:\n\t\tprep_3 = [line.split(' ') for line in prep_3]\n\t\t#removes whitespaces from the list\n\t\tprep_3 = [list(filter(None, line)) for line in prep_3]\n\telse:\n\t\tprep_3 = [line[:-1] if line[-1] == \" \" else line for line in prep_3]\n\n\tif numbers_to_text == 1 and tokenization == 1:\n\t\t#convert all numbers to integers and convert these numbers to its written form\n\t\ttemp_prep = []\n\t\tfor sentence in prep_3:\n\t\t\ttemporary_sentence = []\n\t\t\tfor word in sentence:\n\t\t\t\tif str(word).isdigit():\n\t\t\t\t\tconverted_words = num2words(int(word), to='cardinal', lang='pt').split(' ')\n\t\t\t\t\tif to_tfidf == 1 and rm_stopwords == 0:\n\t\t\t\t\t\tconverted_words = [word for word in converted_words if word != 'e']\n\t\t\t\t\ttemporary_sentence.extend(converted_words)\n\t\t\t\telse:\n\t\t\t\t\ttemporary_sentence.append(word)\n\t\t\ttemp_prep.append(temporary_sentence)\n\n\t\tprep_3 = temp_prep\n\telif numbers_to_text == 1 and tokenization == 0:\n\t\t#convert all numbers to integers and convert these numbers to its written form\n\t\ttemp_prep = []\n\t\tfor sentence in prep_3:\n\t\t\ttemporary_sentence = []\n\t\t\tfor word in sentence.split(' '):\n\t\t\t\tif str(word).isdigit():\n\t\t\t\t\tconverted_words = num2words(int(word), to='cardinal', lang='pt').split(' ')\n\t\t\t\t\tif to_tfidf == 1 and rm_stopwords == 0:\n\t\t\t\t\t\tconverted_words = [word for word in converted_words if word != 'e']\n\t\t\t\t\ttemporary_sentence.extend(converted_words)\n\t\t\t\telse:\n\t\t\t\t\ttemporary_sentence.append(word)\n\t\t\ttemporary_sentence = ' '.join(temporary_sentence)\n\t\t\ttemp_prep.append(temporary_sentence)\n\t\tprep_3 = temp_prep\n\n\tif rm_stopwords == 1:\n\t\tstp = set(stopwords.words('portuguese') + list(punctuation))\n\t\tif tokenization == 1:\n\t\t\tprep_3 = [[word for word in sentence if word not in stp] for sentence in prep_3]\n\t\telif tokenization == 0:\n\t\t\tprep_3 = [' '.join([word for word in sentence.split(' ') if word not in stp]) for sentence in prep_3]\n\n\ttmp = pd.DataFrame({'text':prep_3[::2], 'response':prep_3[1::2]})\n\ttrain_data = train_data.append(tmp[['text', 'response']], ignore_index=True)\n\n\treturn train_data", "def _analyse_and_overwrite_existing_vocabulary(self, preprocessed_content: List[str]) -> None:\n\n vectoriser = sklearn_text.TfidfVectorizer()\n vectoriser.fit(preprocessed_content)\n\n # Extract all of the unique words found\n words = vectoriser.get_feature_names()\n new_vocabulary = pd.DataFrame(data={'word': words, 'feature_matrix_index': range(len(words))})\n\n # Replace existing data\n self._db_connection.execute_database_operation(\"TRUNCATE TABLE encoded_articles.tfidf_vocabulary;\")\n\n self._db_connection.upload_dataframe(\n dataframe=new_vocabulary,\n table_name='tfidf_vocabulary',\n schema='encoded_articles',\n if_exists='append',\n index=False,\n )", "def create_train_feats():\n features = read_process_labelled(AUDIO_DIR, debug=True)\n df = pd.DataFrame(features)\n p = './Features/dataset_features/data_features.csv'\n df.to_csv(p, index=False)\n return p", "def createFeatureFrame(mode):\r\n \r\n text = textFeature(mode)\r\n sentiment = clfFeature('sentiment', 
mode)\r\n actors = clfFeature('actors', mode)\r\n directors = clfFeature('directors', mode)\r\n genre = clfFeature('genre', mode)\r\n titles = clfFeature('titles', mode)\r\n featureframe = pd.concat([text, sentiment, actors, directors, genre, titles], axis=1)\r\n \r\n return featureframe", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def preprocess(data_df, remove_stopwords=False):\n data_cp = data_df.copy()\n for i, row in tqdm(data_cp.iterrows(), total=len(data_cp), desc='Preprocessing dataframe contents'):\n\n article_content = _clean(row.article_content, remove_stopwords)\n row.article_content = _tokenize_stem_lem_join(article_content)\n\n _clean_claim = _clean(row.claim, remove_stopwords)\n row.claim = _tokenize_stem_lem_join(_clean_claim)\n\n data_cp.loc[i] = row\n\n return data_cp", "def executeFeatures(dfIn, train = True):\n\n if train == True:\n dfOut = dfIn['TARGET'] #update this with numerical columns that don't need cleaning\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = cleanNames(dfOut)\n dfOut = createPolyFeatures(dfOut)\n else:\n dfOut = dfIn['SK_ID_CURR'] ## tags from test set\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = dfOut.drop('CODE_GENDER', axis = 1) ## Need to fix this\n #print(dfOut.columns)\n dfOut = cleanNamesTest(dfOut)\n dfOut = createPolyFeatures(dfOut)\n\n return dfOut", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = 
energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def convert_examples_to_features(self):\n features = []\n max_label_len = 0\n # find ou the max label length\n labels_list = []\n for ex_index, example in enumerate(self.examples):\n processor = example.processor\n label_ids = self.tokenizer.text_to_ids(processor.label2string(example.label)) + [self.tokenizer.eos_id]\n max_label_len = max(len(label_ids), max_label_len)\n labels_list.append(label_ids)\n if self.max_seq_length_decoder is None:\n self.max_seq_length_decoder = max_label_len\n else:\n self.max_seq_length_decoder = max(\n self.max_seq_length_decoder, max_label_len\n ) # take the max of the two to be conservative\n for ex_index, example in enumerate(self.examples):\n taskname = example.taskname\n taskname_ids = self.tokenizer.text_to_ids(taskname)\n processor = example.processor\n if ex_index % 10000 == 0:\n logging.info(f\"Writing example {ex_index} of {len(self.examples)}\")\n label_ids = labels_list[ex_index]\n enc_query = processor.get_ptune_query(\n example.content,\n self.pseudo_token_id,\n self.max_seq_length - self.max_seq_length_decoder + 1,\n self.templates,\n self.tokenizer,\n )\n input_ids = enc_query + label_ids[:-1]\n labels = [SMALL_NUM for i in range(len(enc_query) - 1)] + label_ids\n features.append([input_ids, labels, enc_query, taskname_ids])\n return features", "def _preprocess_data(df, use_preprocessdata=False, save_path=None):\n data = _load_data(df, use_preprocessdata, save_path)\n X = []\n X2 = []\n X3 = []\n X4 = []\n for i, (words, indexes) in enumerate(data):\n X.append(\n _vectorise_bag_of_pos_with_position(words, indexes, DEFAULT_WINDOW_SIZE,\n targets=[df['Pronoun'][i], df['A'][i], df['B'][i]]))\n X2.append(_vectorise_bag_of_pos_with_dependency(words, indexes))\n X3.append(_get_dependency_labels(words, indexes, targets=[df['Pronoun'][i], df['A'][i], df['B'][i]]))\n X4.append(_get_gpt2_likelihood(words, indexes))\n\n X5 = _bert_attentions(df, data)\n X5 = np.array(X5)\n\n X = np.array(X)\n X2 = np.array(X2)\n featur_len = int(X.shape[1] / 3)\n featur_len2 = int(X2.shape[1] / 3)\n X_pr = X[:, 0:featur_len]\n X_a = X[:, featur_len:featur_len*2]\n X_b = 
X[:, featur_len*2:featur_len*3]\n X2_pr = X2[:, 0:featur_len2]\n X2_a = X2[:, featur_len2:featur_len2*2]\n X2_b = X2[:, featur_len2*2:featur_len2*3]\n X = np.concatenate((\n X_pr - X_a,\n X_pr - X_b,\n X_pr * X_a,\n X_pr * X_b,\n X2_pr - X2_a,\n X2_pr - X2_b,\n X2_pr * X2_a,\n X2_pr * X2_b,\n X3,\n X5,\n (df['Pronoun-offset'] - df['A-offset']).values.reshape(len(X), 1),\n (df['Pronoun-offset'] - df['B-offset']).values.reshape(len(X), 1)\n ), axis=1)\n Y = _get_classify_labels(df)\n return X, Y", "def ETL(df, col_name= 'headline', class_col_name='category', tok_col_name='tok'):\n # Primer paso: Tokenizacion, stopwords y stemming\n df_tok_clean = tokenization_stopwords_stemming(df, col_name=col_name, tok_col_name=tok_col_name)\n \n # Segundo paso: Crear texto liquido del dataframe limpio\n df_liquid_text = to_liquid_text(df_tok_clean, class_col_name=class_col_name, tok_col_name=tok_col_name)\n \n return df_tok_clean,df_liquid_text", "def generate_graph_feature(self):\n traj_graph_feature = [traj.get_graph_feature() for traj in self.trajectories]\n self.df_graph_feature = pd.DataFrame(traj_graph_feature)\n self.df_graph_feature[\"LABEL\"] = self.df[\"LABEL\"]\n return self.df_graph_feature", "def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n 
#----------------------------------------------------------------------\n self._df_customers_timeFeature = df_customers_timeFeature.copy()\n return", "def create_training_data_file(list_of_word_lines, language):\r\n # To store each feature vector\r\n feature_vector = []\r\n\r\n # To store the entire dataset\r\n data = []\r\n\r\n for sentence in list_of_word_lines:\r\n\r\n # Contains Q\r\n CONTAINS_Q = 'N'\r\n\r\n # Contains Q\r\n CONTAINS_X = 'N'\r\n\r\n # Contains more than 1 vowel\r\n VOWELS = 'N'\r\n\r\n # Contains common dutch substrings\r\n DUTCH_SUBSTRING = 'N'\r\n\r\n # Contains is-was\r\n ISWAS = 'N'\r\n\r\n # Contains come\r\n COME = 'N'\r\n\r\n # Contains common english words\r\n COMMON_ENGLISH_WORDS = 'N'\r\n\r\n # Contains common dutch words\r\n DUTCH_WORDS = 'N'\r\n\r\n # Contains dutch ij\r\n IJ = 'N'\r\n\r\n # Contains and\r\n AND = 'N'\r\n\r\n # Contains they, he, she\r\n COLLECTIVES = 'N'\r\n\r\n for word in sentence:\r\n\r\n if re.match('[0-9]*', word):\r\n word = re.sub('[0-9]*', '', word)\r\n\r\n if re.match('[!?~`@#$%&)(_=+/.,\"»;«-]', word):\r\n word = re.sub('[!?~`@#$%&)(_=+/.,\"»;«-]', '', word)\r\n\r\n word = word.lower()\r\n if \"de\" == word or \"het\" == word or \"dat\" == word or \"en\" == word or \"een\" == word or \"voor\" == word or \"van\" == word or \"welke\" == word \\\r\n or \"te\" == word or \"hij\" == word or \"zij\" == word or \"op\" == word or \"ik\" == word or \"bij\" == word:\r\n DUTCH_WORDS = 'Y'\r\n\r\n if \"ij\" in word:\r\n IJ = 'Y'\r\n\r\n if \"the\" == word or \"but\" == word or \"for\" == word or \"which\" == word or \"that\" == word or \"and\" == word or \"not\" == word \\\r\n or \"to\" == word or \"in\" == word:\r\n COMMON_ENGLISH_WORDS = 'Y'\r\n\r\n if \"q\" in word:\r\n CONTAINS_Q = 'Y'\r\n\r\n if \"x\" in word:\r\n CONTAINS_X = 'Y'\r\n\r\n if \"aa\" in word or \"ee\" in word or \"ii\" in word or \"uu\" in word:\r\n VOWELS = 'Y'\r\n\r\n if \"ijk\" in word or \"sch\" in word or \"ijn\" in word:\r\n DUTCH_SUBSTRING = 'Y'\r\n\r\n if \"is\" == word or \"of\" == word or \"was\" == word or \"all\" in word:\r\n ISWAS = 'Y'\r\n\r\n if \"come\" == word or \"a\" == word:\r\n COME = 'Y'\r\n\r\n if \"and\" == word:\r\n AND = 'Y'\r\n\r\n if \"he\" == word or \"she\" == word or \"it\" == word or \"they\" == word:\r\n COLLECTIVES = 'Y'\r\n\r\n feature_vector.append([DUTCH_WORDS, IJ, COMMON_ENGLISH_WORDS, CONTAINS_Q, CONTAINS_X,\r\n VOWELS, DUTCH_SUBSTRING, ISWAS,\r\n COME, AND, COLLECTIVES, language])\r\n\r\n data.append(feature_vector)\r\n feature_vector = []\r\n return data", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg):\n # English stopwords from nltk\n stopwords = set(nltk.corpus.stopwords.words('english'))\n \n # Determine a list of words that will be used as features. 
\n # This list should have the following properties:\n # (1) Contains no stop words\n # (2) Is in at least 1% of the positive texts or 1% of the negative texts\n # (3) Is in at least twice as many postive texts as negative texts, or vice-versa.\n # YOUR CODE HERE\n\n pos_unique_words = []\n neg_unique_words = []\n intermediate_vec = []\n feature_vec = []\n\n for line in train_pos:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n pos_unique_words.append(word)\n\n for line in train_neg:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n neg_unique_words.append(word)\n\n\n pos_word_dict = collections.Counter(pos_unique_words)\n neg_word_dict = collections.Counter(neg_unique_words)\n\n unique_words = list(set(pos_word_dict.keys()).intersection(set(neg_word_dict.keys())))\n\n for word in unique_words:\n if(pos_word_dict[word] >= 0.01*len(train_pos) or neg_word_dict[word] >= 0.01*len(train_neg)):\n intermediate_vec.append(word)\n\n for word in intermediate_vec:\n if (int(pos_word_dict[word]) >= 2*int(neg_word_dict[word])or neg_word_dict[word] >= 2*pos_word_dict[word]):\n feature_vec.append(word)\n\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n # Using the above words as features, construct binary vectors for each text in the training and test set.\n # These should be python lists containing 0 and 1 integers.\n # YOUR CODE HERE\n for line in train_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_pos_vec.append(lst)\n\n for line in train_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_neg_vec.append(lst)\n\n for line in test_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_pos_vec.append(lst)\n\n for line in test_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_neg_vec.append(lst)\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def generate_tpx_features():\n\n\tlabels = get_tpx_labels()\n\tlabels_abs = get_tpx_labels_abs()\n\tlabels_rel = get_tpx_labels_rel()\n\tlabels_prop = get_tpx_labels_prop()\n\tlabels_special = get_tpx_labels_special()\n\t\n\tlabels.append(\"num_words\")\n\n\t# read existing metadata\n\tmd_table = pd.DataFrame.from_csv(wdir + md_csv, header=0)\n\tidnos = md_table.idno\n\n\t# create new data frame\n\tht_fr = pd.DataFrame(columns=labels, index=idnos)\n\t \n\t# XPath expressions for TimeML requests\n\tnamespaces = {'tei':'http://www.tei-c.org/ns/1.0'}\n\n\txpaths = get_tpx_xpaths()\n\n\t# loop through files to get HeidelTime results, first step: absolute values\n\t# subsequent steps build on absolute values\n\tfor file in glob.glob(ht_inpath):\n\t\t\n\t\tidno = os.path.basename(file)[0:6]\n\t\txml = etree.parse(file)\n\t\t\n\t\tresult = 0\n\t\t# calculate absolute feature values\n\t\tfor label in labels_abs + labels_special:\n\t\t\t\n\t\t\tif label in xpaths:\n\t\t\t\t# apply xpaths if present\n\t\t\t\txpath = xpaths[label]\n\t\t\t\tresult = xml.xpath(xpath, namespaces=namespaces)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t# calculate features which cannot be determined directly with XPath\n\t\t\t\txpath_dates = \"//TIMEX3[@type='DATE']/@value\"\n\t\t\t\tdates = xml.xpath(xpath_dates, namespaces=namespaces)\n\t\t\t\t\n\t\t\t\t# temporal distance between mentioned years and publication year of the 
novel\n\t\t\t\tif (label == \"temp_dist\"):\n\t\t\t\t\t# get all date expressions with a year\n\t\t\t\t\tyears = []\n\t\t\t\t\tfor date in dates:\n\t\t\t\t\t\tif re.match(r\"^\\d{4}-\\d{2}-\\d{2}\", date): # only year: bad results\n\t\t\t\t\t\t\tyears.append(date.split(\"-\")[0])\n\t\t\t\t\t# get the median of the years mentioned in the text\n\t\t\t\t\tif years:\n\t\t\t\t\t\tyears = np.array(years).astype(np.float)\n\t\t\t\t\t\n\t\t\t\t\t\tmed = np.median(years) #median\n\t\t\t\t\t\t# get publication year\n\t\t\t\t\t\tpub_year = md_table.loc[idno,\"year\"]\n\t\t\t\t\t\t# calculate the difference\n\t\t\t\t\t\tresult = round(pub_year - med)\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult = float(\"NaN\")\n\t\t\t\t\t\n\t\t\t\t# counts related to chapters\n\t\t\t\telif (label == \"tpx_date_any_chapter_first_abs\" or label == \"tpx_date_any_chapter_other_mean_abs\" or label == \"tpx_date_any_chapter_other_abs\"):\n\t\t\t\t\tdates_ch = []\n\t\t\t\t\txpaths_chapter = {\"tpx_date_any_chapter_first_abs\" : \"//TIMEX3[@type='DATE'][substring(ancestor::tei:div/@xml:id,(string-length(ancestor::tei:div/@xml:id) - 1),2) ='d1']/@value\",\n\t\t\t\t\t\t\t\t\t\t\"tpx_date_any_chapter_other_abs\" : \"//TIMEX3[@type='DATE'][substring(ancestor::tei:div/@xml:id,(string-length(ancestor::tei:div/@xml:id) - 1),2) !='d1']/@value\",\n\t\t\t\t\t\t\t\t\t\t\"tpx_date_any_chapter_other_mean_abs\" : \"//TIMEX3[@type='DATE'][substring(ancestor::tei:div/@xml:id,(string-length(ancestor::tei:div/@xml:id) - 1),2) !='d1']/@value\",\n\t\t\t\t\t\t\t\t\t\t\"chapters\" : \"//wrapper\"\n\t\t\t\t\t}\n\t\t\t\t\tchapter_dates = []\n\t\t\t\t\tchapter_dates = xml.xpath(xpaths_chapter[label], namespaces=namespaces)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t# filter: just \"any-dates\"\n\t\t\t\t\tfor date in chapter_dates:\n\t\t\t\t\t\tif re.match(r\"^\\d{2,4}\", date) or re.match(r\"^.{2,4}-\\d{2}\", date) or re.match(r\"^.{2,4}-.{2}-\\d{2}\", date):\n\t\t\t\t\t\t\tdates_ch.append(date)\n\t\t\t\t\t\n\t\t\t\t\tif (label == \"tpx_date_any_chapter_first_abs\" or label == \"tpx_date_any_chapter_other_abs\"):\n\t\t\t\t\t\t# return all the dates from the first / other chapters\n\t\t\t\t\t\tresult = len(dates_ch)\n\t\t\t\t\telif label == \"tpx_date_any_chapter_other_mean_abs\":\n\t\t\t\t\t\t# calculate the mean of the other chapters\n\t\t\t\t\t\tchapters = xml.xpath(xpaths_chapter[\"chapters\"])\n\t\t\t\t\t\t\n\t\t\t\t\t\tif len(chapters) <= 1:\n\t\t\t\t\t\t\traise ValueError(\"The novel \" + idno + \" has less than 2 chapters!\")\n\t\t\t\t\t\tresult = len(dates_ch) / (len(chapters) - 1)\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t# remaining temporal expression features\t\n\t\t\t\telse:\n\t\t\t\t\tdate_counts = []\n\t\t\t\t\tfor date in dates:\n\t\t\t\t\t\tif (label == \"tpx_date_none_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\D+$\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_year_abs\"):\n\t\t\t\t\t\t\t#if re.match(r\"^\\d{2,4}\", date): für alle Jahre geändert\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_year_month_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_month_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^.{4}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_day_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^.{4}-.{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == 
\"tpx_date_month_day_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^.{4}-\\d{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_any_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}\", date) or re.match(r\"^.{4}-\\d{2}\", date) or re.match(r\"^.{4}-.{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\t\tif (label == \"tpx_date_full_abs\"):\n\t\t\t\t\t\t\tif re.match(r\"^\\d{4}-\\d{2}-\\d{2}\", date):\n\t\t\t\t\t\t\t\tdate_counts.append(date)\n\t\t\t\t\n\t\t\t\t\tresult = len(date_counts)\n\t\t\t\t\t\n\t\t\t\n\t\t\t# check the results of XPath\n\t\t\t\"\"\"\n\t\t\tif math.isnan(result):\n\t\t\t\tresult = \"is not a number\"\n\t\t\t\"\"\"\n\t\t\t\n\t\t\t# Write the result into the data frame\n\t\t\tht_fr.loc[idno,label] = result\n\t\t\t\n\t\t\t\n\t# second step: relative values (relative to the total number of words in the text)\n\tfor file in glob.glob(ht_inpath):\n\t\t\n\t\tidno = os.path.basename(file)[0:6]\n\t\t\n\t\t# calculate total number of words in the text\n\t\tnum_words = 0\n\t\txml = etree.parse(file)\n\t\t# get XML snippets chapterwise\n\t\twrappers = xml.xpath(\"//wrapper//text()\")\n\t\tfor wrap in wrappers:\n\t\t\t\n\t\t\t# tokenize and count\n\t\t\twords = re.split(r\"[\\s\\n]+\", wrap)\n\t\t\tnum_words += len(words)\n\t\t\n\t\tht_fr.loc[idno,\"num_words\"] = num_words\n\t\t\n\t\t\n\t\tfor label in labels_rel:\n\t\t\t# set corresponding absolute value label\n\t\t\tlabel_abs = label[:-3] + \"abs\"\n\t\t\t\n\t\t\t# fetch absolute value\n\t\t\tabs_val = ht_fr.loc[idno,label_abs]\n\t\t\t\n\t\t\t# check data type\n\t\t\tif math.isnan(abs_val):\n\t\t\t\tresult = abs_val\n\t\t\telse:\n\t\t\t\t# calculate relative value\n\t\t\t\tresult = abs_val / num_words\n\t\t\t\n\t\t\t\n\t\t\t# Write the result into the data frame\n\t\t\tht_fr.loc[idno,label] = result\n\t\t\t\n\n\t# third step: calculate proportions\n\tfor file in glob.glob(ht_inpath):\n\t\t\n\t\tidno = os.path.basename(file)[0:6]\n\t\ttpx_all = ht_fr.loc[idno,\"tpx_all_abs\"]\n\t\ttpx_all_one = tpx_all / 100\n\t\t\n\t\tfor label in labels_prop:\n\t\t\t# set corresponding absolute value label\n\t\t\tlabel_abs = label[:-4] + \"abs\"\n\t\t\t\n\t\t\t# fetch absolute value\n\t\t\tabs_val = ht_fr.loc[idno,label_abs]\n\t\t\t\n\t\t\t# check data type\n\t\t\tif math.isnan(abs_val):\n\t\t\t\tresult = abs_val\n\t\t\telse:\n\t\t\t\t# calculate proportion\n\t\t\t\tresult = abs_val / tpx_all_one\n\t\t\t\n\t\t\t# Write the result into the data frame\n\t\t\tht_fr.loc[idno,label] = result\n\t\t\n\t# für FJR: absolute Werte weglassen\n\tfor label in labels_abs:\n\t\tht_fr = ht_fr.drop(label, axis=1)\n\tht_fr = ht_fr.drop(\"temp_dist\", axis=1)\n\tht_fr = ht_fr.drop(\"num_words\", axis=1)\n\t\t\n\tht_fr.to_csv(wdir + \"tpx-corpus-counts.csv\", sep=\",\", header=True)\n\n\tprint(\"Done: generate tpx features\")", "def createMLData(self):\n\n if self._verbose:\n print('\\nCreate ML Data')\n\n # Minimum required number of input data for ML training under the\n # current implemented setup.\n if len(self._input_data.index) < 60:\n raise NotEnoughDataForMachineLearningTraining(\n len(self._input_data.index), 60)\n\n # Add features column\n for indicator, feature in zip(self._indicators_set, self._ti_features):\n feature_data = indicator.getTiData()\n # Because there are some inf values\n feature_data = feature_data.replace([np.inf, -np.inf], np.nan)\n if self._verbose:\n print('- adding feature: ', feature['ti'], ', columns: ',\n str([feature['ti'] + '_' + c\n for c in feature_data.columns]), 
sep='')\n\n for c in feature_data.columns:\n self._ml_data[feature['ti'] + '_' + c] = feature_data[[c]]\n\n if self._include_close_feature:\n self._ml_data['close'] = self._input_data[['close']]\n\n if self._include_volume_feature:\n self._ml_data['volume'] = self._input_data[['volume']]\n\n # Add label column\n self._ml_data['label'] = np.roll(\n a=self._input_data['close'].values, shift=-1, axis=0\n ) - self._input_data['close'].values\n\n self._ml_data.loc[\n self._ml_data.label > 0, 'label'] = ML_CLASSES['UP']\n self._ml_data.loc[\n self._ml_data.label <= 0, 'label'] = ML_CLASSES['DOWN']\n\n self._ml_data['label'] = self._ml_data['label'].apply(lambda x: int(x))\n\n # Remove last row, since it cannot include a label. Future value is not\n # known\n self._ml_data = self._ml_data.iloc[:-1, :]\n\n # Fill missing values\n self._ml_data = fillMissingValues(input_data=self._ml_data)\n\n return self._ml_data", "def enhance_metadata(metadata, features='all'):\n\n # available options\n ortographic_features = ['w_length','n_vowels','n_consonants']\n lexical_features = ['uni_freq', 'bi_freq', 'func_word','count']\n position_features = ['position','position_end','is_first_word','is_last_word']\n\n # make list of features\n if features == 'all': features = ortographic_features +lexical_features + position_features \n\n # use ws clean to lower case\n words = [word.lower() for word in metadata['word'].values]\n\n # itereate features and fill metadata\n for feature in features:\n # ORTOGRAPHIC ##############################\n if feature == 'w_length': \n metadata[feature] = w_length(words)\n if feature == 'n_consonants':\n metadata[feature] = n_consonants(words)\n if feature == 'n_vowels':\n metadata[feature] = n_vowels(words)\n\n # LEXICAL ###################################\n if feature == 'uni_freq':\n metadata[feature] = unigram(words)\n if feature == 'bi_freq':\n metadata[feature] = bigram(words)\n if feature == 'func_word':\n metadata[feature] = function_word(words)\n if feature == 'count':\n metadata[feature] = count(words)\n\n # POSITION ###################################\n if feature == 'position':\n metadata[feature] = position(words)\n if feature == 'position_end':\n metadata[feature] = position_end(words)\n if feature == 'is_first_word':\n metadata[feature] = first_word(words)\n if feature == 'is_last_word':\n metadata[feature] = last_word(words)\n\n return metadata", "def word2features(self,sent, i):\n word = sent[i][0]\n #postag = sent[i][1]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 'word.isdigit()': word.isdigit(),\n 'word.shape()':self.shape(word),\n 'word.isalnum()':word.isalnum(),\n 'word.isalpha()':word.isalpha(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = sent[i - 1][0]\n #postag1 = sent[i - 1][1]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isdigit()': word1.isdigit(),\n '-1:word.isalnum()':word1.isalnum(),\n '-1:word.isalpha()':word1.isalpha(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i > 1:\n word2 = sent[i - 2][0]\n #postag2 = sent[i - 2][1]\n features.update({\n '-2:word.lower()': word2.lower(),\n '-2:word.istitle()': word2.istitle(),\n '-2:word.isupper()': word2.isupper(),\n '-2:word.isdigit()': word2.isdigit(),\n '-2:word.isalnum()': word2.isalnum(),\n '-2:word.isalpha()': 
word2.isalpha(),\n # '-2:postag': postag2,\n # '-2:postag[:2]': postag2[:2],\n })\n else:\n features['BOS1'] = True\n if i > 2:\n word3 = sent[i - 3][0]\n #postag3 = sent[i - 3][1]\n features.update({\n '-3:word.lower()': word3.lower(),\n '-3:word.istitle()': word3.istitle(),\n '-3:word.isupper()': word3.isupper(),\n '-3:word.isdigit()': word3.isdigit(),\n '-3:word.isalnum()': word3.isalnum(),\n '-3:word.isalpha()': word3.isalpha(),\n # '-3:postag': postag3,\n # '-3:postag[:2]': postag3[:2],\n })\n else:\n features['BOS2'] = True\n\n if i > 3:\n word4 = sent[i - 4][0]\n #postag4 = sent[i - 4][1]\n features.update({\n '-4:word.lower()': word4.lower(),\n '-4:word.istitle()': word4.istitle(),\n '-4:word.isupper()': word4.isupper(),\n '-4:word.isdigit()': word4.isdigit(),\n '-4:word.isalnum()': word4.isalnum(),\n '-4:word.isalpha()': word4.isalpha(),\n # '-4:postag': postag4,\n # '-4:postag[:2]': postag4[:2],\n })\n else:\n features['BOS2'] = True\n\n if i < len(sent) - 1:\n word1 = sent[i + 1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isdigit()': word1.isdigit(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.isalpha()': word1.isalpha(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n if i < len(sent) - 2:\n word12 = sent[i + 2][0]\n #postag12 = sent[i + 2][1]\n features.update({\n '+2:word.lower()': word12.lower(),\n '+2:word.istitle()': word12.istitle(),\n '+2:word.isupper()': word12.isupper(),\n '+2:word.isdigit()': word12.isdigit(),\n '+2:word.isalnum()': word12.isalnum(),\n '+2:word.isalpha()': word12.isalpha(),\n # '+2:postag': postag12,\n # '+2:postag[:2]': postag12[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 3:\n word13 = sent[i + 3][0]\n #postag13 = sent[i + 3][1]\n features.update({\n '+3:word.lower()': word13.lower(),\n '+3:word.istitle()': word13.istitle(),\n '+3:word.isupper()': word13.isupper(),\n '+3:word.isdigit()': word13.isdigit(),\n '+3:word.isalnum()': word13.isalnum(),\n '+3:word.isalpha()': word13.isalpha(),\n # '+3:postag': postag13,\n # '+3:postag[:2]': postag13[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 4:\n word14 = sent[i + 4][0]\n #postag14 = sent[i + 4][1]\n features.update({\n '+4:word.lower()': word14.lower(),\n '+4:word.istitle()': word14.istitle(),\n '+4:word.isupper()': word14.isupper(),\n '+4:word.isdigit()': word14.isdigit(),\n '+4:word.isalnum()': word14.isalnum(),\n '+4:word.isalpha()': word14.isalpha(),\n # '+4:postag': postag14,\n # '+4:postag[:2]': postag14[:2],\n })\n else:\n features['EOS2'] = True\n return features", "def make_training_data(feature_funcs, annotations):\n extractor = FeatureExtractor(feature_funcs)\n \n training_instances = []\n \n for sent_str, anns in annotations:\n tree = parser.raw_parse(sent_str).next()\n tree = convert_brackets(tree)\n # print tree\n # some preprocessing, align the positions and \n # also use the sentence string given the parse tree\n anns = align_annotation_with_sentence(sent_str, ' '.join(tree.leaves()), anns)\n sent_str = ' '.join(tree.leaves())\n for ann in anns:\n frame_name = ann.frame_name\n start, end = ann.target.start, ann.target.end\n frame = Frame(start, end, frame_name)\n frame_node = find_node_by_positions(tree, start, end)\n\n # TODO: bug here\n if frame_node is None: \n sys.stderr.write(\"Warning: %r does not correspond to any tree node in sentence \\\"%s\\\"\\nSkip it\\n \" %(frame, 
sent_str))\n continue\n \n for node, (node_start_pos, node_end_pos) in collect_nodes(tree):\n node_pos = NodePosition(node_start_pos, node_end_pos)\n context = Context(sent_str, tree, frame, node_pos)\n\n feature_values = extractor.extract(node, context)\n \n # try to see the it has some semantic role\n found_matching_node = False\n for fe in ann.FE:\n other_node = find_node_by_positions(tree, fe.start, fe.end)\n if node == other_node:\n training_instances.append((feature_values, fe.name))\n found_matching_node = True\n break\n\n # semantic role => NULL\n if not found_matching_node:\n training_instances.append((feature_values, 'NULL'))\n\n return training_instances", "def build_matrix(file, feature_mode):\n\n nlp = spacy.load('de_core_news_sm')\n\n conn = sql.connect(file)\n\n sql_select = \"\"\"SELECT COMP, ISCOMP, SENTENCE FROM sentences WHERE ISCOMP!=-1\"\"\"\n\n c = conn.cursor()\n c.execute(sql_select)\n\n rows = c.fetchall()\n\n nltk_data = list()\n\n for r in rows:\n comp = r[0]\n label = r[1]\n sentence = r[2]\n\n sentence = sentence.replace('<comp>', '')\n sentence = sentence.replace('</comp>', '')\n doc = nlp(sentence)\n\n features = process_row(doc, comp, feature_mode)\n\n nltk_tuple = (features, label, sentence)\n nltk_data.append(nltk_tuple)\n\n return nltk_data", "def text_features_df(spark):\n # Replaces formatted text that has already been processed\n FILLER = ''\n # Parser helper column\n COLNAME = 'processed_text'\n COL = col(COLNAME)\n \n # Data loading\n post_history_df = spark.read.parquet(\"/user/***REMOVED***/StackOverflow/PostHistory.parquet\") \\\n .select(['_PostId', '_Text', '_PostHistoryTypeId']) \\\n .filter(col('_PostHistoryTypeId') == 2) \\\n .drop('_PostHistoryTypeId')\n post_df = spark.read.parquet('/user/***REMOVED***/StackOverflow/Posts.parquet') \\\n .select(['_Id', '_PostTypeId']) \\\n .filter(col('_PostTypeId') == 1) \\\n .drop(\"_PostTypeId\")\n df = post_history_df.join(post_df, post_df['_Id'] == post_history_df['_PostId'])\n\n # Remove code snippets from the Markdown formatted text\n df = df.withColumn(COLNAME, regexp_replace(col('_Text'), regex.CODE_BLOCK_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.HTML_BLOCK_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.FENCED_CODE_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.ESCAPE_RE, FILLER)) \\\n .withColumn(COLNAME, regexp_replace(COL, regex.HTML_RE, FILLER))\n\n # Calculate features\n df = df.withColumn('#characters', length(COL)) \\\n .withColumn('#punctuation_characters', size(split(COL, r'[-\\[\\]{}()*+?.,\\\\^$|#]')) - 1) \\\n .withColumn('punctuation_ratio', col('#punctuation_characters') / col('#characters')) \\\n .withColumn('#lines', size(split(COL, r'\\n'))) \\\n .withColumn('average_line_length', col('#characters') / col('#lines')) \\\n .withColumn('#words', size(split(COL, r'\\s+'))) \\\n .withColumn('average_word_length', col('#characters') / col('#words'))\n\n # Remove unnecessary columns, including parser helper column\n df = df.drop('_Text', '_PostHistoryTypeId', '_PostId', COLNAME)\n return df", "def prepare_length_features(text_counts, custom_vec, length_processed_flora_data_frame):\n vocab = custom_vec.get_feature_names() # https://stackoverflow.com/questions/39121104/how-to-add-another-feature\n # -length-of-text-to-current-bag-of-words-classificati\n\n length_model_data_frame = pd.DataFrame(text_counts.toarray(), columns=vocab)\n length_model_data_frame = pd.concat(\n [length_model_data_frame, 
length_processed_flora_data_frame['length'].reset_index(drop=True)], axis=1)\n\n length_model_data_frame_values = length_model_data_frame.values.astype(np.float64)\n length_model_sparse = sparse.csr_matrix(length_model_data_frame_values)\n\n assert length_model_sparse.shape > text_counts.shape, 'Length model should have one more column of data than BOW ' \\\n 'model '\n return length_model_sparse", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def pre_process_dataset(self):\n sentences = []\n idx = 1\n # Iterates of dataframe to collect sentences and labels\n for index, row in self.df.iterrows():\n # Normalizing and separate words of each sentence\n norm_sentence = self.norm_text(row['comment_text'])\n word_sentences = re.sub(\"[^\\w]\", \" \", norm_sentence).split()\n sentences.append(word_sentences)\n # Creating a word dictionary\n for word in word_sentences:\n if word not in self.word_2_idx:\n self.word_2_idx[word] = idx\n idx += 1\n # Getting all labels and creates a one-hot vector\n row_label = row[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].values\n self.labels.append(row_label)\n\n # Collect 
word indexes from prepared word dictionary\n for words_sentence in sentences:\n self.input_data.append([self.word_2_idx[w] for w in words_sentence])", "def text_to_corpus(text, accented_chars=True,\n convert_num=True, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True): \n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n # add a period to the end of the text:\n if len(text) > 0 and text[-1] != '.':\n text += '.'\n \n doc = nlp(text) #tokenise text \n clean_text = []\n \n for token in doc:\n \n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n \n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n \n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT' and not token.tag_ == '.') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n \n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n \n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n \n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n \n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n \n # convert all closing punctuation ('.', '!', '?', '...' to periods)\n if token.tag_ == '.' 
and flag == True:\n clean_text.append('.')\n \n # add text lemmas to the clean text:\n elif edit != \"\" and flag == True:\n clean_text.append(edit)\n \n return ' '.join(clean_text)", "def create_pos_dep_lemma(self, dataframe, col):\n pos_dict = {}\n dep_dict = {}\n lem_dict = {}\n p = []\n d = []\n l = []\n for i, val in enumerate(dataframe[col]):\n s = sp(''.join(val).replace(',', ''))\n for word in s:\n p.append(word.pos_)\n d.append(word.dep_)\n l.append(word.lemma_)\n pos_dict[i] = ', '.join(str(s) for s in p)\n dep_dict[i] = ', '.join(str(s) for s in d)\n lem_dict[i] = ', '.join(str(s) for s in l)\n p = []\n d = []\n l = []\n \n\n colname1 = col + '_pos' if col in ['e1', 'e2'] else 'pos'\n colname2 = col + '_dep' if col in ['e1', 'e2'] else 'dep'\n colname3 = col + '_lem' if col in ['e1', 'e2'] else 'lem'\n pos_dataframe = self.create_dataframe(pos_dict, [colname1])\n dep_dataframe = self.create_dataframe(dep_dict, [colname2])\n lem_dataframe = self.create_dataframe(lem_dict, [colname3])\n\n dataframe[colname1] = pos_dataframe[colname1]\n dataframe[colname2] = dep_dataframe[colname2]\n dataframe[colname3] = lem_dataframe[colname3]\n return dataframe", "def transform(self, X, y=None):\n if not isinstance(X, pd.DataFrame):\n X = pd.DataFrame(X)\n if self._features is None or len(self._features) == 0:\n return X\n\n text_columns = self._get_text_columns(X)\n es = self._make_entity_set(X, text_columns)\n X_nlp_primitives = self._ft.calculate_feature_matrix(features=self._features, entityset=es)\n if X_nlp_primitives.isnull().any().any():\n X_nlp_primitives.fillna(0, inplace=True)\n\n X_lsa = self._lsa.transform(X[text_columns])\n\n return pd.concat([X.drop(text_columns, axis=1), X_nlp_primitives, X_lsa], axis=1)", "def parse_tree_features(df):\n \n nlp = spacy.load(SPACY_MODEL, disable=['ner'])\n nlp.add_pipe(BeneparComponent(\"benepar_en_small\"))\n \n # parse text\n df['B_Tokens'] = df['Text'].apply(lambda x: nlp(x))\n \n # get features\n df['NP_per_sent'], df['VP_per_sent'], df['PP_per_sent'], \\\n df['SBAR_per_sent'], df['SBARQ_per_sent'], df['avg_NP_size'], \\\n df['avg_VP_size'], df['avg_PP_size'], df['avg_parse_tree'] = zip(*df['B_Tokens'].map(_get_parse_tree_features))\n \n # remove B_Tokens\n df.drop(columns=[\"B_Tokens\"], inplace=True)\n \n return df", "def extract_features(self, docs_train, docs_test, word_ngram_range=(1, 3), dim_reduce=False):\n\n\t\t# Build a vectorizer that splits strings into sequences of i to j words\n\t\tword_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='word', ngram_range=word_ngram_range,\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\t\t# Build a vectorizer that splits strings into sequences of 3 to 5 characters\n\t\tchar_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='char', ngram_range=(3, 5),\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\n\t\t# Build a transformer (vectorizer) pipeline using the previous analyzers\n\t\t# *FeatureUnion* concatenates results of multiple transformer objects\n\t\tself.ngrams_vectorizer = Pipeline([('feats', FeatureUnion([('word_ngram', word_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ('char_ngram', char_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ])),\n\t\t\t\t\t\t\t\t # ('clff', LinearSVC(random_state=42))\n\t\t\t\t\t\t\t\t ])\n\n\t\t# Fit (learn vocabulary and IDF) and transform (transform documents to the TF-IDF matrix) the training set\n\t\tX_train_ngrams_tfidf = 
self.ngrams_vectorizer.fit_transform(docs_train)\n\t\t'''\n\t\t↳ Check the following attributes of each of the transformers (analyzers)—*word_vectorizer* and *char_vectorizer*:\n\t\tvocabulary_ : dict. A mapping of terms to feature indices.\n\t\tstop_words_ : set. Terms that were ignored\n\t\t'''\n\t\tprint(\"%.2f seconds: Finished fit_transforming the training dataset\" % time.process_time())\n\t\tprint(\"Training set word & character ngrams .shape = \", X_train_ngrams_tfidf.shape)\n\n\t\tfeature_names_ngrams = [word_vectorizer.vocabulary_, char_vectorizer.vocabulary_]\n\n\t\t'''\n\t\tExtract the features of the test set (transform test documents to the TF-IDF matrix)\n\t\tOnly transform is called on the transformer (vectorizer), because it has already been fit to the training set.\n\t\t'''\n\t\tX_test_ngrams_tfidf = self.ngrams_vectorizer.transform(docs_test)\n\t\tprint(\"%.2f seconds: Finished transforming the test dataset\" % time.process_time())\n\t\tprint(\"Test set word & character ngrams .shape = \", X_test_ngrams_tfidf.shape)\n\n\t\t# • Dimensionality reduction using truncated SVD (aka LSA)\n\t\tif dim_reduce:\n\t\t\t# Build a truncated SVD (LSA) transformer object\n\t\t\tself.svd_reducer = TruncatedSVD(n_components=300, random_state=43)\n\t\t\t# Fit the LSI model and perform dimensionality reduction\n\t\t\tX_train_ngrams_tfidf_reduced = self.svd_reducer.fit_transform(X_train_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the training dataset\", time.process_time())\n\t\t\tX_test_ngrams_tfidf_reduced = self.svd_reducer.transform(X_test_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the test dataset\", time.process_time())\n\n\t\t\tX_train = X_train_ngrams_tfidf_reduced\n\t\t\tX_test = X_test_ngrams_tfidf_reduced\n\t\telse:\n\t\t\tX_train = X_train_ngrams_tfidf\n\t\t\tX_test = X_test_ngrams_tfidf\n\n\t\treturn X_train, X_test, feature_names_ngrams", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n 
fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def generateFeatures(self, data):\n pass", "def extract_features(docs_train, docs_test, perform_dimensionality_reduction):\n word_ngram_range = (1, 4)\n char_ngram_range = (2, 5)\n\n '''\n Build an n grams vectorizer with word_n_gram_range and char_n_gram_range\n '''\n\n ngrams_vectorizer = create_n_grams_vectorizer(\n word_ngram_range, char_ngram_range)\n\n # use the n_gram vectorizer to form the train and test dataset\n # it will take a lot of time... 
i think\n X_train = ngrams_vectorizer.fit_transform(docs_train)\n X_test = ngrams_vectorizer.transform(docs_test)\n print(\"Performed fitting of data\")\n\n ############ dimensionality reduction ################\n\n if(perform_dimensionality_reduction == True):\n X_train, X_test = perform_dimensionality_reduction(X_train, X_test)\n\n # print(docs_train[0])\n return X_train, X_test", "def construct_features(seq_df, paaclamb=6, paacw=0.5):\n seq_df = insert_aac(seq_df)\n seq_df = insert_ngrams(seq_df, n=2)\n seq_df = insert_cksaagp(seq_df, gap=3) # As the maximum motif length = 5.\n seq_df = insert_paac(seq_df, lamb=paaclamb, w=paacw)\n seq_df = insert_phycs(seq_df)\n\n return seq_df", "def preprocessed(self, data_frame):\n dp = DocumentPreprocessor()\n word_tokens_document = []\n\n for row_index, row in data_frame.iterrows():\n processed_document = dp.remove_punctuation_and_multi_spaces_document(\n row[1])\n processed_document = dp.remove_numbers(processed_document)\n word_tokens_document.append(\n dp.word_length_filter(processed_document, 3))\n data_frame['word_tokens_document'] = word_tokens_document\n return data_frame", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def preprocess_pipeline(X_df, y_df):\n \n print(\"Context Numerical Analysis\")\n # Update X_df with number variable analysis\n numerical_features(X_df)\n \n print(\"Hash Vectorizing...\")\n # Tokenize the Context column into a sparse matrix\n vectorizer = HashingVectorizer(tokenizer = spacy_tokenizer, ngram_range=(1,1))\n# vectorizer = CountVectorizer(tokenizer = spacy_tokenizer, ngram_range=(1,1))\n sparse = vectorizer.fit_transform(X_df['Context']) \n \n print(\"Encoding cities...\")\n #encode the \"cities\" feature\n city_frequency = encode_cities_mean_frequency(X_df)\n \n print(\"Transforming sparse matrix...\")\n # transform sparse CV matrix such that each dimension is given its own column\n # drop context and join X_df with sparse (dataframe)\n X_df = X_df.join(pd.DataFrame(sparse.todense())).drop(['Context'], axis=1)\n \n print(\"Encoding the labels...\")\n #encode the labels\n encode_label(y_df)\n \n return X_df, y_df, city_frequency", "def features_to_db(db, training_dir, test_dir, label_file):\n logger.debug(\"Getting Labels\")\n labels = get_labels(label_file)\n logger.debug(\"Extracting training features\")\n 
train_features = extract_from_dir(training_dir)\n logger.debug(\"Saving training features in db\")\n data_source = []\n with db.atomic():\n for name, features in train_features:\n features[\"name\"] = name + \"_train\"\n features[\"label\"] = labels[name]\n data_source.append(features)\n if len(data_source) == 1000:\n Feature.insert_many(data_source).execute()\n data_source = []\n if len(data_source):\n Feature.insert_many(data_source).execute()\n\n logger.debug(\"Extracting test features\")\n test_features = extract_from_dir(test_dir)\n logger.debug(\"Saving test features in db\")\n data_source = []\n with db.atomic():\n for name, features in test_features:\n features[\"name\"] = name\n data_source.append(features)\n if len(data_source) == 1000:\n Feature.insert_many(data_source).execute()\n data_source = []\n if len(data_source):\n Feature.insert_many(data_source).execute()", "def generate_features(self):\n content_input = self.content_image * 255\n style_input = self.style_image * 255\n preprocessed_content = tf.keras.applications.vgg19.preprocess_input(\n content_input)\n preprocessed_style = tf.keras.applications.vgg19.preprocess_input(\n style_input)\n outputs_content = self.model(preprocessed_content)\n outputs_style = self.model(preprocessed_style)\n\n num_style_layers = tf.size(self.style_layers)\n style_outputs, content_outputs = (\n outputs_style[:num_style_layers],\n outputs_content[num_style_layers:])\n\n style_outputs = [self.gram_matrix(\n style_output)for style_output in style_outputs]\n self.gram_style_features = style_outputs\n self.content_feature = content_outputs", "def generate_and_save_train_features(train_input, train_output, bag_of_words, tfidf):\n df_train = get_df(train_input)\n train_words = np.array(df_train.text.str.lower().values)\n\n bag_of_words.fit(train_words)\n\n train_words_binary_matrix = bag_of_words.transform(train_words)\n feature_names = bag_of_words.get_feature_names_out()\n\n tfidf.fit(train_words_binary_matrix)\n train_words_tfidf_matrix = tfidf.transform(train_words_binary_matrix)\n\n save_matrix(df_train, train_words_tfidf_matrix, feature_names, train_output)", "def newsgroup_featurize(data_list):\n # TODO: Implement featurization of input.\n all_text = data_list[\"train\"][\"input\"] + data_list[\"test\"][\"input\"] + data_list[\"dev\"][\"input\"]\n word_dict = word_count(all_text)\n bow_noun_features = bow_noun(word_dict) # 11,925 features\n train_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"train\"][\"input\"]])\n dev_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"dev\"][\"input\"]])\n test_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"test\"][\"input\"]])\n return train_input, dev_input, test_input", "def cleaninto_df(frame:pd) -> pd:\n # remove repeated characters EXAMPLE: DIMPLLLLEEEEE -> DIMPLE\n # nopunc = word_tokenize(nopunc) this might not work. 
find something else\n\n stop = stopwords.words('english')\n newStopWords = ['get', 'http','there','and','i','t','it','d']\n stop.extend(newStopWords)\n lemmatizer = WordNetLemmatizer()\n clean = []\n new_col = []\n frame['Cleaned'] = None\n for tweet in frame.content:\n if 'RT' in tweet:\n if tweet.index('RT')>5:\n tweet = tweet[:tweet.index('RT')]\n else:\n tweet = tweet[2:]\n # WHAT ARE WE TRYING TO CLEAN HERE?\n # cleaning with preprocessor library https://pypi.org/project/tweet-preprocessor/\n tweet = ' '.join(re.sub(\"(@\\w+)|([^A-Za-z]+)|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n # changes #November1 -> November: need to remove full hashtag?\n # changes @poetweatherford: -> poetweatherford\n # changes don’t -> don t, children's -> children s\n print(\"after regex:\" + str(tweet))\n clean.append(tweet.lower())\n for clean_tweet in clean:\n word_tokens = word_tokenize(clean_tweet)\n clean_tokens = [word for word in word_tokens if word not in stop]\n stems = []\n for item in clean_tokens:\n stems.append(lemmatizer.lemmatize(item))\n new_sentence = ' '.join(stems)\n new_col.append(new_sentence.lower())\n frame['Cleaned'] = new_col\n return frame", "def toLingDataToken(token):\n\n t = Token()\n\n t.set(\n id=token.i,\n word=token.orth_,\n lemma=token.lemma_,\n POS=token.tag_,\n SPOS=token.pos_,\n depID=token.dep,\n depStr=token.dep_,\n NE=token.ent_type_,\n foreign=token.is_oov\n )\n\n # setting features\n '''\n t.features = {}\n #print(t.POS)\n featureStr = translate(t.POS)\n # save string form of feature translation\n t.features['str'] = featureStr\n\n featureArr = featureStr.split(\"+\")\n #print(featureArr)\n # find the first feature\n i = 0\n while len(featureArr[i]) < 1:\n i += 1\n\n t.features['type'] = featureArr[i]\n if t.features['type'] in [\"N\"]:\n # look for number\n i += 1\n while i < len(featureArr):\n # this means it's probably a number declaration\n if len(featureArr[i]) < 4:\n t.features['number'] = featureArr[i]\n # and next feature could be type of noun\n if i + 1 < len(featureArr):\n t.features['isProper'] = featureArr[i + 1]\n break\n i += 1\n\n if t.features['type'] in [\"V\"]:\n # look for person and number\n i += 1\n while i < len(featureArr):\n # this means it's probably a person declaration\n if len(featureArr[i]) < 4:\n t.features['person'] = featureArr[i]\n # and next feature could be number\n if i + 1 < len(featureArr):\n t.features['number'] = featureArr[i + 1]\n break\n else:\n # probably a tense\n t.features['tense'] = featureArr[i]\n t.features['isParticiple'] = (\"Part\" in featureArr[i])\n\n i += 1\n #print(t.features)\n '''\n\n # setting wordType\n if token.tag_ == \"BES\": # copula\n t.set(wordType=4)\n elif token.pos_ == \"VERB\":\n t.set(wordType=1)\n elif token.pos_ == \"NOUN\" or token.pos_ == \"PROPN\":\n t.set(wordType=2)\n elif token.pos_ == \"PRON\":\n t.set(wordType=3)\n else:\n t.set(wordType=5)\n\n # spaCy does not have coreferencing...\n\n return t", "def run_pipeline() -> pd.DataFrame:\n\n print('Loading data...')\n data = load_data()\n print('Stage one processing...')\n text = data.text\n text_ = stage_one_preprocessing(text)\n data_ = data.copy()\n data_.text = text_\n #print('Splitting by sentences...')\n #data_ = split_by_sentences(data_)\n print('Stage two processing...')\n text_ = stage_two_preprocessing(data_.text)\n print('Stage three processing...')\n text_ = stage_three_preprocessing(text_)\n data_.text = text_\n print('Saving file...')\n data_.to_csv(r'./data/stage_three_text.csv')\n return data_", "def 
compute_sklearn_features():\n text_dir = 'text_model'\n emb_dir = 'embedding_weights'\n filename = 'glove.6B.50d.txt'\n emb_name = 'glove'\n emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']\n post_size = 200\n df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)\n\n X = np.stack(df_all['text_list'])\n y = df_all['search_query'].values\n\n id_to_word = {i: k for k, i in word_to_id.iteritems()}\n config = {'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'batch_size': 128,\n 'vocab_size': len(word_to_id),\n 'embedding_dim': embedding.shape[1],\n 'post_size': post_size,\n 'fc1_size': 16,\n 'nb_emotions': len(emotions),\n 'dropout': 1.0, # Proba to keep neurons\n 'max_grad_norm': 5.0, # Maximum norm of gradient\n 'init_scale': 0.1, # Weights initialization scale\n 'initial_lr': 1e-3,\n 'lr_decay': 0.5,\n 'max_epoch_no_decay': 2, # Number of epochs without decaying learning rate\n 'nb_epochs': 10} # Maximum number of epochs\n \n tf.reset_default_graph()\n with tf.Session() as sess:\n print('Computing sklearn features:')\n init_scale = config['init_scale']\n initializer = tf.random_uniform_initializer(-init_scale, init_scale) \n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n config['nb_epochs'] = 1\n m_train = WordModel(config)\n sess.run(tf.global_variables_initializer())\n sess.run(m_train.embedding_init, feed_dict={m_train.embedding_placeholder: embedding})\n\n batch_size = m_train.config['batch_size']\n initial_lr = m_train.config['initial_lr']\n \n nb_batches = X.shape[0] / batch_size\n dropout_param = 1.0\n ops = m_train.h1\n \n sess.run(tf.assign(m_train.learning_rate, initial_lr))\n\n X, y = _shuffling(X, y)\n X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))\n y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))\n h1_list = []\n for i in range(nb_batches):\n curr_input = X_reshaped[i, :, :]\n curr_target = y_reshaped[i, :]\n h1_features = sess.run(ops, feed_dict={m_train.input_data: curr_input, \n m_train.target: curr_target,\n m_train.keep_prob: dropout_param})\n h1_list.append(h1_features)\n\n X_sklearn = np.vstack(h1_list)\n y_sklearn = y_reshaped.reshape((-1))\n print('Finished')\n return X_sklearn, y_sklearn", "def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = 
new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature", "def preprocess(document, max_features=150, max_sentence_len=300):\n\n def lemmatize(token, tag):\n \"\"\"\n Converts the tag to a WordNet POS tag, then uses that\n tag to perform an accurate WordNet lemmatization.\n \"\"\"\n tag = {\n 'N': wn.NOUN,\n 'V': wn.VERB,\n 'R': wn.ADV,\n 'J': wn.ADJ\n }.get(tag[0], wn.NOUN)\n\n return WordNetLemmatizer().lemmatize(token, tag)\n\n def vectorize(doc, max_features, max_sentence_len):\n \"\"\"\n Converts a document into a sequence of indices of length max_sentence_len retaining only max_features unique words\n \"\"\"\n tokenizer = Tokenizer(num_words=max_features)\n tokenizer.fit_on_texts(doc)\n doc = tokenizer.texts_to_sequences(doc)\n doc_pad = pad_sequences(doc, padding='pre', truncating='pre', maxlen=max_sentence_len)\n return np.squeeze(doc_pad), tokenizer.word_index\n\n cleaned_document = []\n vocab = []\n\n # Break the document into sentences\n for sent in document:\n\n # Clean the text using a few regular expressions\n sent = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", sent)\n sent = re.sub(r\"what's\", \"what is \", sent)\n sent = re.sub(r\"\\'\", \" \", sent)\n sent = re.sub(r\"@\", \" \", sent)\n sent = re.sub(r\"\\'ve\", \" have \", sent)\n sent = re.sub(r\"can't\", \"cannot \", sent)\n sent = re.sub(r\"n't\", \" not \", sent)\n sent = re.sub(r\"i'm\", \"i am \", sent)\n sent = re.sub(r\"\\'re\", \" are \", sent)\n sent = re.sub(r\"\\'d\", \" would \", sent)\n sent = re.sub(r\"\\'ll\", \" will \", sent)\n sent = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", sent)\n sent = sent.replace(\"\\n\", \" \")\n\n lemmatized_tokens = []\n\n # Break the sentence into part of speech tagged tokens\n for token, tag in pos_tag(wordpunct_tokenize(sent)):\n\n # Apply preprocessing to the tokens\n token = token.lower()\n token = token.strip()\n token = token.strip('_')\n token = token.strip('*')\n\n # If punctuation ignore token and continue\n if all(char in set(string.punctuation) for char in token) or token in set(sw.words('english')):\n continue\n\n # Lemmatize the token\n lemma = lemmatize(token, tag)\n lemmatized_tokens.append(lemma)\n vocab.append(lemma)\n\n cleaned_document.append(lemmatized_tokens)\n\n vocab = sorted(list(set(vocab)))\n\n return cleaned_document, vocab", "def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n data = data[data.language == 'English'] # type: ignore\n data = data[['premise', 'hypothesis', 'label']] # type: ignore\n 
return data", "def tf_idf(data_frame, description):\n text = list(data_frame['article'])\n vectorizer = TfidfVectorizer(stop_words='english') # create the transform\n vectorizer.fit(text) # tokenize and build vocab\n # save tf_idf vectorizer as pickle\n with open('resources/tf-idf_encoder_' + description + '.pkl', 'wb') as f:\n pickle.dump(vectorizer.vocabulary_, f)\n f.close()\n data_frame['tf-idf'] = data_frame['article'].apply(lambda x: vectorizer.transform([x]))\n return data_frame", "def _load_data(self):\n\n path_data_x = \\\n '/workspace/base-ml/data/tadpole/adni_one_baseline_feature_data' \\\n '.csv'\n path_data_y = \\\n '/workspace/base-ml/data/tadpole/adni_one_baseline_label_data' \\\n '.csv'\n path_meta = '/workspace/base-ml/data/tadpole' \\\n '/adni_one_baseline_meta_data' \\\n '.csv'\n read_data_x = pd.read_csv(path_data_x)\n read_data_y = pd.read_csv(path_data_y) # 0 NL, 1, MCI, 2 Dementia\n read_data_meta = pd.read_csv(path_meta)[['AGE', 'PTGENDER', 'APOE4']]\n\n # Replace gender to numeric\n read_data_meta.PTGENDER = read_data_meta.PTGENDER.replace('Male', 0)\n read_data_meta.PTGENDER = read_data_meta.PTGENDER.replace('Female', 1)\n\n new_data_x = np.array(read_data_x).astype(np.float32)\n new_data_y = np.array(read_data_y).astype(np.float32)\n new_data_meta = np.array(read_data_meta).astype(np.float32)\n\n # Concat meta-information with feature vector input\n concat_meta = pd.DataFrame(new_data_meta)\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(0, 'zero')\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(1, 'one')\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(2, 'two')\n concat_meta = concat_meta.to_numpy()\n new_data_x = np.concatenate([concat_meta, new_data_x], 1)\n print(new_data_x.shape, new_data_y.shape, new_data_meta.shape)\n\n self.orig_column_names = ['Age', 'Gender', 'APOE4'] + \\\n list(read_data_x.columns)\n self.data_x = new_data_x\n self.data_y = self.to_one_hot_encoding(new_data_y)\n self.numerical_idx = np.arange(new_data_x.shape[-1])\n self.numerical_idx = np.delete(self.numerical_idx, [2]) # Remove APOE column idx\n self.non_num_idx = np.array([2])\n self.all_non_numerical_idx = None\n\n # self.numerical_idx = np.arange(self.data_x.shape[-1])\n # self.non_num_idx = None\n # self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = new_data_meta.astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def __init__(self, data_filename):\n with open(data_filename, 'rb') as data_file:\n loaded_features = pickle.load(data_file)\n self.title_nlp_tfidf_features = loaded_features['title_NLP_TFIDF_features']\n self.other_features = loaded_features['other_features']\n self.category1_features = loaded_features['category1_features']\n self.category2_features = loaded_features['category2_features']\n self.category3_features = loaded_features['category3_features']\n self.material_features = loaded_features['material_features']\n self.who_made_features = loaded_features['whoMade_features']\n self.when_made_features = loaded_features['whenMade_features']\n self.style1_features = loaded_features['style1_features']\n self.style2_features = loaded_features['style2_features']\n self.feature_labels = loaded_features['feature_labels']", "def process_lab_prn(self):\n self.gen_hts_lab_fts()\n self.gen_hts_lab_full()", "def w2f(sents,i,j,filename,freq):\n w = sents[i][j][0] #current word\n pos = sents[i][j][1] #POS of current word\n f = [ \n 'bias', #non-contextual feature \n 'w=' + w, #current 
word \n 'w.istitle=%s' % w.istitle(), #first letter - capitalized\n 'pos=' + pos, # POS tag\n 'w.intitle=%s' % contained_in_title(w, filename), # w matches title\n 'w.lowtitle=%s' % lower_in_title(w, filename), # w lower matches title\n 'w.freq=%s' % frequency(w, freq), # freq of w \n 'w.stopword=%s' % stop_word(w), # # stop word\n ]\n \n # previous word features\n if j>0:\n pw = sents[i][j-1][0] #previous word\n ppos = sents[i][j-1][1] #POS of previous word\n f.extend([ \n 'pw=' + pw, # previous word \n 'pw.istitle=%s' % pw.istitle(), #first letter - capitalized\n 'ppos=' + ppos, # POS tag\n 'pw.intitle=%s' % contained_in_title(pw, filename), # w matches title\n 'pw.lowtitle=%s' % lower_in_title(pw,filename), # w lower matches title\n 'pw.freq=%s' % frequency(pw, freq), # freq of w\n 'pw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('BOS') #first word of a sentence\n\n # next word features\n if j<len(sents[i])-1:\n nw = sents[i][j+1][0] #next word\n npos = sents[i][j+1][1] #POS of next word\n f.extend([ \n 'nw=' + nw, # previous word\n 'nw.istitle=%s' % nw.istitle(), #first letter - capitalized\n 'npos=' + npos, #POS tag\n 'nw.intitle=%s' % contained_in_title(nw, filename), # w matches title\n 'nw.lowtitle=%s' % lower_in_title(nw,filename), # w lower matches title\n 'nw.freq=%s' % frequency(nw, freq), # freq of w\n 'nw.stopword=%s' % stop_word(w), # # stop word\n ])\n else: \n f.append('EOS') # last word of a sentence\n\n #if j>1: ...\n #if j<len(sents[i])-2: ...\n #if j>0 and j<len(sents[i])-1: ...\n return f", "def createPredictionData(self):\n\n if self._verbose:\n print('\\nCreate Prediction (Input Features) Data')\n\n # Minimum required number of input data for ML prediction under the\n # current implemented setup.\n if len(self._input_data.index) < 60:\n raise NotEnoughDataForMachineLearningPrediction(\n len(self._input_data.index), 60)\n\n # Add features column\n for indicator, feature in zip(self._indicators_set, self._ti_features):\n feature_data = indicator.getTiData()\n # Because there are some inf values\n feature_data = feature_data.replace([np.inf, -np.inf], np.nan)\n if self._verbose:\n print('- adding feature: ', feature['ti'], ', columns: ',\n str([feature['ti'] + '_' + c\n for c in feature_data.columns]), sep='')\n\n for c in feature_data.columns:\n self._prediction_data[\n feature['ti'] + '_' + c] = feature_data[[c]]\n\n if self._include_close_feature:\n self._prediction_data['close'] = self._input_data[['close']]\n\n if self._include_volume_feature:\n self._prediction_data['volume'] = self._input_data[['volume']]\n\n # Fill missing values\n self._prediction_data = fillMissingValues(\n input_data=self._prediction_data)\n\n return self._prediction_data", "def create_text_sequence_feature(fl, sentence, sentence_len, vocab):\n sentence_transformed = transform_sentence(sentence, vocab)\n for word_id in sentence_transformed:\n fl.feature.add().int64_list.value.extend([word_id])\n return fl", "def features(upstream, product):\n data = pd.read_parquet(str(upstream[\"get\"]))\n ft = data[\"1\"] * data[\"2\"]\n df = pd.DataFrame({\"feature\": ft, \"another\": ft**2})\n df.to_parquet(str(product))", "def fit_transform(self, X_seq):\n print('\\n====== Transformed data summary ======')\n\n # Instantiate the vectorizer object\n vectorizer = TfidfVectorizer(analyzer= self.analyzer)\n\n # Create tokens from all dataset matrix\n count_wm = vectorizer.fit_transform(X_seq)\n count_tokens = vectorizer.get_feature_names()\n\n # DF: [100000 rows x 801 columns] (for 
dataset1 = 100K)\n # [each row = 1 log message] , [each column = 1 word]\n df_countvect = pd.DataFrame(data=count_wm.toarray(), columns=count_tokens)\n\n print(\".Count Vectorizer results.\\n\")\n print(df_countvect)\n\n # Print the vector representation for a log message (print 1 row from df)\n print(\"DEBUG_0 : \" ,df_countvect.loc[[20500]])\n\n # Get the first position of the maximum value for each word\n m = df_countvect.ne(0).idxmax()\n df = pd.DataFrame(dict(pos=m, val=df_countvect.lookup(m, m.index)))\n print(df)\n\n print('All data shape: {}-by-{}\\n'.format(df_countvect.shape[0], df_countvect.shape[1]))\n\n X_new = df_countvect\n return X_new", "def create_features_offer(portfolio, transcript_training):\n\n # create avg/min/max duration view\n portfolio_duration = create_features_using_groupby(transcript_training, \\\n \t'portfolio', 'duration_view')\n\n # create view rate (average of label)\n portfolio_view_rate = create_features_using_groupby(transcript_training, \\\n \t'portfolio', 'label', minimum=False, maximum=False)\n portfolio_view_rate.columns=['view_rate_portfolio']\n\n portfolio_feat = pd.concat([portfolio_view_rate, portfolio_duration], axis=1)\n\n assert portfolio_feat.shape[0] == portfolio.shape[0], \\\n \"rows do not match with original data (portfolio)\"\n\n portfolio = portfolio.join(portfolio_feat)\n\n # remove constant and highly correlated features\n portfolio.drop(columns=['min_duration_view_portfolio', 'difficulty', \\\n \t'mobile', 'view_rate_portfolio', 'avg_duration_view_portfolio'], inplace=True)\n\n u.save_dataframe_to_sql(portfolio, 'portfolio')\n\n return portfolio", "def featurize(self, run, clone, gen):\n\n # Find the gro file structure template, only if it's a new RUN.\n # Otherwise we can just use the old one\n if run != self.run:\n self.gro_file = self.find_gro(self.setupdir, run)\n self.run = run\n\n # Get the xtc file trajectory for this RUN, CLONE, GEN\n xtc_filename, tpr_filename = self.find_traj()\n\n # Remove the periodic boundary conditions in a new temporary file\n xtc = self.rm_periodic_boundary_cond(xtc_filename, tpr_filename)\n\n ### DO THE FEATURIZATION HERE ###\n pass\n\n # Close the HDF5 handle to the DataFrame\n self.store.close()\n\n # Close the file handle to the pbc-processed xtc\n xtc.close()", "def extract_features(data, stopwords=STOPWORDS):\n tags = set()\n docs = []\n for document in data:\n doc_data = dict()\n doc_data['pmid'] = document['sourceid']\n text = document['text']\n\n # Insert PubTator annotations inside abstract\n denotations = document['denotations']\n sorted_denotations = []\n for denotation in denotations:\n begin = denotation['span']['begin']\n end = denotation['span']['end']\n obj = denotation['obj']\n for c in punctuation:\n obj = obj.replace(c, '')\n tags.add(obj)\n doc_data[obj] = doc_data.get(obj,0)+1\n sorted_denotations.append([begin,end,obj])\n sorted_denotations.sort()\n sorted_denotations.reverse()\n for begin, end, obj in sorted_denotations:\n text = text[:begin] + obj + ' ' + text[end:]\n\n doc_data['text'] = clean_text(text, stopwords)\n docs.append(doc_data)\n\n return docs", "def _featurize(self, predictions: SequenceSample) -> List[np.ndarray]:\n feature_vectors: List[np.ndarray] = []\n source = predictions.origin_words\n\n char_nn_scores = self.char_nn_lm_score(predictions.paths)\n word_nn_scores = self.word_nn_lm_score(predictions.paths)\n\n for i, (score, hypothesis) in enumerate(zip(predictions.scores, predictions.paths)):\n obss = list(zip(hypothesis, source))\n length = len(source)\n 
feature_vector = np.array([\n 1.,\n length,\n self.language_model.score(hypothesis) / length,\n char_nn_scores[i],\n word_nn_scores[i],\n score / length,\n sum(w in self.language_model for w in hypothesis) / length,\n sum(h[:self.prefix_size] == s[:self.prefix_size] for h, s in obss) / length,\n sum(h[-self.suffix_size:] == s[-self.prefix_size:] for h, s in obss) / length,\n self.language_model.score(hypothesis) * score / length,\n np.mean([editdistance.eval(h, s) for h, s in obss]),\n np.mean([float(obs in self.train_set_uniq) for obs in obss]),\n np.mean([self.train_counter.get(obs, self.discount) for obs in obss]),\n ])\n feature_vectors.append(feature_vector)\n return feature_vectors", "def preprocessKNN(self):\n\n feature_list = []\n\n for index, row in self.all_data.iterrows():\n chans = cv2.split(row['image'])\n\n features = []\n for chan in chans:\n hist = cv2.calcHist(chan, [0], None, [64], [0,256])\n features.extend(hist)\n\n features = np.array(features).flatten()\n feature_list.append(features)\n\n df = self.all_data[['name', 'genre']].copy()\n\n feature_df = pd.DataFrame(feature_list)\n\n df = df.join(feature_df)\n\n return df", "def make_features(\n dataframe: pd.DataFrame,\n feature_params: FeatureParams,\n processing_params: ProcessingParams,\n handle_target: bool = True\n) -> Tuple[pd.DataFrame, pd.Series]:\n transformer = FeaturesTransformer(feature_params, processing_params)\n\n if handle_target:\n features = dataframe.drop([feature_params.target_col], axis=1)\n target = dataframe[feature_params.target_col]\n\n if feature_params.use_log_trick:\n target = pd.Series(np.log(target.to_numpy()))\n\n features = transformer.transform(features)\n else:\n target = None\n features = dataframe.copy()\n\n return pd.DataFrame(features), target", "def make_data(input_filepath, output_filepath):\n\n df_train = pd.read_csv(input_filepath+'train_u6lujuX_CVtuZ9i.csv', index_col=0)\n df_test = pd.read_csv(input_filepath+'test_Y3wMUE5_7gLdaTN.csv', index_col=0)\n print('Sizes', df_train.shape, df_test.shape)\n print(\"Outcome dispersion:\\n\", df_train['Loan_Status'].value_counts())\n\n\n # recode and save outcome vector\n y = df_train['Loan_Status'].map({'N': 0, 'Y': 1})\n\n del df_train['Loan_Status']\n\n # all in one dataframe\n df = pd.concat([df_train, df_test])\n print(df.shape)\n\n from src.features.build_features import make_features\n df = make_features(df)\n\n # Divide data on train and test again and save\n data_train = df[df.index.isin(df_train.index)]\n data_test = df[df.index.isin(df_test.index)]\n print(data_train.shape, data_test.shape)\n\n data_tmp = data_train.copy()\n data_tmp['y'] = y\n\n\n data_tmp.to_csv(output_filepath + 'train_ready.csv', index=False)\n data_test.to_csv(output_filepath + 'test_ready.csv', index=False)\n id_test = pd.DataFrame(data=df_test.index, columns=['Loan_ID'])\n id_test.to_csv(output_filepath + 'id_test.csv', index=False)", "def convert_xnli_examples_to_features(self):\n features = self.features\n lang_filtered_features = []\n for ex_index, example in enumerate(self.examples):\n language = example.guid.split('-')[1]\n if language in self.lang_list:\n lang_filtered_features.append(features[ex_index] + [language])\n return lang_filtered_features", "def _clone_and_prepare_features(feature_config):\n output_objects = []\n\n table_configs = {}\n\n for config in tf.nest.flatten(feature_config):\n # There should be a one-to-one mapping between new TableConfig objects and\n # old ones (as each TableConfig can be thought of as a table).\n 
table_configs[config.table] = table_configs.get(\n config.table,\n tf.tpu.experimental.embedding.TableConfig(\n vocabulary_size=config.table.vocabulary_size,\n dim=config.table.dim,\n initializer=config.table.initializer,\n optimizer=config.table.optimizer,\n quantization_config=config.table.quantization_config,\n combiner=config.table.combiner,\n name=config.table.name,\n ),\n )\n\n output_objects.append(\n tf.tpu.experimental.embedding.FeatureConfig(\n table=table_configs[config.table],\n max_sequence_length=config.max_sequence_length,\n output_shape=config.output_shape,\n validate_weights_and_indices=config.validate_weights_and_indices,\n name=config.name))\n\n # Fix up the optimizers.\n for _, new_table in table_configs.items():\n if new_table.optimizer is not None:\n new_table.optimizer = _normalize_and_prepare_optimizer(\n new_table.optimizer)\n\n return (tf.nest.pack_sequence_as(feature_config,\n output_objects), list(table_configs.items()))", "def prepare_label_feature(self, label2id: dict):\n text, wp_text, label, wp_label, wp_mark = [], [], [], [], []\n sorted_labels = sorted(label2id.items(), key=lambda x: x[1])\n for label_name, label_id in sorted_labels:\n if label_name == '[PAD]':\n continue\n tmp_text = self.convert_label_name(label_name)\n tmp_wp_text = self.tokenizer.tokenize(' '.join(tmp_text))\n text.extend(tmp_text)\n wp_text.extend(tmp_wp_text)\n label.extend(['O'] * len(tmp_text))\n wp_label.extend(['O'] * len(tmp_wp_text))\n wp_mark.extend([0] + [1] * (len(tmp_wp_text) - 1))\n label_item = self.data_item2feature_item(DataItem(text, label, wp_text, wp_label, wp_mark), 0)\n label_input = self.get_test_model_input(label_item)\n return label_input, label_item" ]
[ "0.7509502", "0.63895476", "0.63655084", "0.6190819", "0.61304533", "0.60974437", "0.6039494", "0.6013443", "0.6001541", "0.59564126", "0.59309864", "0.584246", "0.58098847", "0.5809157", "0.5786593", "0.57630605", "0.57536405", "0.57465136", "0.57437116", "0.5743342", "0.5741203", "0.5738111", "0.567976", "0.567601", "0.5647194", "0.5631507", "0.5618429", "0.56146353", "0.5601197", "0.5601117", "0.5598824", "0.5591592", "0.5590262", "0.5583075", "0.5571061", "0.5570108", "0.55684656", "0.5550955", "0.55436033", "0.55341804", "0.5515731", "0.5515271", "0.5513218", "0.5512744", "0.5494603", "0.5473308", "0.5458289", "0.54526746", "0.54501015", "0.54482186", "0.5444966", "0.54132986", "0.5395671", "0.53937733", "0.5393722", "0.5392576", "0.5380708", "0.5355716", "0.53551495", "0.5347548", "0.5342243", "0.53420997", "0.5339668", "0.5337192", "0.53352547", "0.530768", "0.5306031", "0.5287448", "0.5283325", "0.52815783", "0.5280874", "0.5278519", "0.526616", "0.5264625", "0.52638817", "0.52566886", "0.5256129", "0.52560085", "0.52475643", "0.5239649", "0.52371955", "0.5235402", "0.52270484", "0.52253544", "0.52207655", "0.5216535", "0.52143717", "0.5195818", "0.5176387", "0.5175946", "0.5171062", "0.5168513", "0.5163271", "0.51577127", "0.51547194", "0.5143079", "0.5142876", "0.5139985", "0.5136353", "0.51335084" ]
0.6648716
1
Proceed to data transformation in order to deliver a computable data model.
def data_transform(self, df) :

    #-------------------------------------------------------------------------
    # Copy of given dataframe to be transformed
    #-------------------------------------------------------------------------
    self.df_invoice_line = df

    #-------------------------------------------------------------------------
    # Features issued from InvoiceDate are created
    #-------------------------------------------------------------------------
    if self.is_transform_timeFeature is True:
        self.strprint("\n*** Time features transformation ***")
        self.data_transform_timeFeature()

    #-------------------------------------------------------------------------
    # RFM is computed and encoded
    #-------------------------------------------------------------------------
    if self.is_transform_rfm is True:
        self.strprint("\n*** RFM transformation ***")
        self.data_transform_rfm()

    #-------------------------------------------------------------------------
    # NLP features issued from Description are created
    #-------------------------------------------------------------------------
    if self.is_transform_nlp is True:
        self.strprint("\n*** NLP transformation ***")
        self.data_transform_nlp()

    return self.df_invoice_line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_transform(self):\n pass", "def transform(self, data):", "def _transform(self, dataset):\n raise NotImplementedError()", "def _transform_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def _process(self):\n # choose the correct transform model before processing TI data\n self._select_transform()\n\n # process type first, fail early\n self._process_type()\n\n # process type specific data\n if isinstance(self.transform, GroupTransformModel):\n self._process_group()\n elif isinstance(self.transform, IndicatorTransformModel):\n self._process_indicator()\n\n # self.process_associations(self.transform.associations)\n self._process_associated_group(self.transform.associated_groups)\n self._process_attributes(self.transform.attributes or [])\n self._process_security_labels(self.transform.security_labels or [])\n self._process_tags(self.transform.tags or [])\n\n # date added\n self._process_metadata_datetime('dateAdded', self.transform.date_added)\n\n # last modified\n self._process_metadata_datetime('lastModified', self.transform.last_modified)\n\n # xid\n self._process_metadata('xid', self.transform.xid)", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def transform():\n pass", "def transform(self):", "def process(self, data):\n return self.transformer.transform(data)", "def transform( request, data, finishing=False ):", "def transform( request, data, finishing=False ):", "def transform_thread(self):\n self.transform_result = Transformer.transform_all(self.extract_list, SelectableLabel.selected_hotel, self.hotel_address)", "def transform(self, data):\n return self.fit_transform(data, fitting=False)", "def transform():", "def __call__(\n self,\n data: TypeTransformInput,\n ) -> TypeTransformInput:\n if torch.rand(1).item() > self.probability:\n return data\n\n # Some transforms such as Compose should not modify the input data\n if self.parse_input:\n data_parser = DataParser(\n data,\n keys=self.include,\n label_keys=self.label_keys,\n )\n subject = data_parser.get_subject()\n else:\n subject = data\n\n if self.keep is not None:\n images_to_keep = {}\n for name, new_name in self.keep.items():\n images_to_keep[new_name] = copy.copy(subject[name])\n if self.copy:\n subject = copy.copy(subject)\n with np.errstate(all='raise', under='ignore'):\n transformed = self.apply_transform(subject)\n if self.keep is not None:\n for name, image in images_to_keep.items():\n transformed.add_image(image, name)\n\n if self.parse_input:\n self.add_transform_to_subject_history(transformed)\n for image in transformed.get_images(intensity_only=False):\n ndim = image.data.ndim\n assert ndim == 4, f'Output of {self.name} is {ndim}D'\n output = data_parser.get_output(transformed)\n else:\n output = transformed\n\n return output", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def transform(self, X):\n ...", "def __call__(self):\n self.tree = etree.parse(self.src)\n\n agent = transformer_factory(self.tree, self.options)\n self.tree = agent.transform()\n\n # Write out the finished product\n file = self._targetFile()\n self.tree.write(file, pretty_print=False)\n print 'wrote transformed channel:', file.name", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def do_transform(self):\r\n if not self.transform:\r\n return\r\n try:\r\n 
self.latest_value = utils.Transform(\r\n expr=self.transform, value=self.latest_value,\r\n timedelta=self.time_between_updates().total_seconds()).result()\r\n except (TypeError, ValueError):\r\n logger.warn(\"Invalid transformation '%s' for metric %s\",\r\n self.transfrom, self.pk)\r\n self.transform = ''", "def transform(self, data):\n if data:\n assert isinstance(data, dict), 'Step {}, \"data\" argument in the \"transform()\" method must be dict, ' \\\n 'got {} instead.'.format(self.name, type(data))\n logger.info('Step {}, working in \"{}\" mode'.format(self.name, self._mode))\n\n if self.output_is_cached:\n logger.info('Step {} using cached output'.format(self.name))\n step_output_data = self.output\n elif self.output_is_persisted and self.load_persisted_output:\n logger.info('Step {} loading persisted output from {}'.format(self.name,\n self.experiment_directory_output_step))\n step_output_data = self._load_output(self.experiment_directory_output_step)\n else:\n step_inputs = {}\n if self.input_data is not None:\n for input_data_part in self.input_data:\n step_inputs[input_data_part] = data[input_data_part]\n\n for input_step in self.input_steps:\n input_step._mode = self._mode\n step_inputs[input_step.name] = input_step.transform(data)\n\n if self.adapter:\n step_inputs = self._adapt(step_inputs)\n else:\n step_inputs = self._unpack(step_inputs)\n step_output_data = self._transform_operation(step_inputs)\n logger.info('Step {}, transform completed'.format(self.name))\n return step_output_data", "def fit_transform(self, data):\n self.fit(data)\n return self.transform(data)", "def fit_transform(self, data):\n return self.transform(data)", "def _preprocess(self, data):\n transformed_data = _copy(data)\n for name, step in self._transformers[:-1]:\n transformed_data = step.fit_transform(transformed_data)\n if type(transformed_data) != _tc.SFrame:\n raise RuntimeError(\"The transform function in step '%s' did not\"\n \" return an SFrame (got %s instead).\" % (name,\n type(transformed_data).__name__))\n return transformed_data", "def transform(self, data, input_content_type, output_content_type):\n return self.transform_fn(data, input_content_type, output_content_type)", "def Transform(self, record):\n pass", "def transform(self, *args, **kwargs):\n raise NotImplementedError", "def transform(self, data: pd.DataFrame):\n raise NotImplementedError", "def compute(self):\n if self.loaded():\n debug(\"Skipping {} computation due to transform data already loaded.\" .format(self.name))\n return\n\n # sanity checks\n error(\"Got transform dimension of {} but input dimension is {}.\".format(self.dimension, self.input_dimension), self.input_dimension < self.dimension)\n\n # output containers\n self.term_components = []\n\n # compute\n info(\"Applying {} {}-dimensional transform on the raw representation.\".\n format(self.base_name, self.dimension))\n\n # train\n train_data = self.input_vectors[self.train_index, :]\n\n info(\"Transforming training input data shape: {}\".format(train_data.shape))\n if self.is_supervised:\n ground_truth = np.reshape(match_labels_to_instances(self.train_epi, self.train_labels), (len(train_data), ))\n self.vectors = self.process_func_train(train_data, ground_truth)\n else:\n self.vectors = self.process_func_train(train_data)\n self.output_roles = (roles.train,)\n\n if self.test_index.size > 0:\n # make zero output matrix\n output_data = np.zeros((len(self.input_vectors), self.dimension), np.float32)\n output_data[self.train_index, :] = self.vectors\n\n test_data = 
self.input_vectors[self.test_index, :]\n info(\"Transforming test input data shape: {}\".format(test_data.shape))\n vecs = self.process_func_test(test_data)\n output_data[self.test_index, :] = vecs\n self.vectors = output_data\n\n self.output_roles = (roles.train, roles.test)\n else:\n info(f\"Skipping empty test indexes.\")\n\n self.term_components = self.get_term_representations()\n self.verify_transformed(self.vectors)\n info(f\"Output shape: {self.vectors.shape}\")\n # write the output data\n write_pickled(self.serialization_path_preprocessed, self.get_all_preprocessed())\n # write the trained transformer model\n self.save_model()", "def __call__(self, data, keep):\n return self.transform(data, keep)", "def _transform(self, document):\n pass", "def transform(self, data, **kwargs):\n if self._pipe:\n X, _, _ = self.optimum_pipe.transform(data, y=None, **kwargs)\n return X", "def apply(self):\n\n sc = SparkContext(appName=\"Model Applier\")\n sqlContext = SQLContext(sc)\n\n # Add model and supporting files to SparkContext\n for item in self.model_location_dict.items():\n ModelApplier.add_files_to_context(item[1], sc)\n\n partition_processor = self.get_partition_processor()\n infile = sc.textFile(self.input_location)\n header_line = infile.first()\n infile = infile.filter(lambda x: x != header_line)\n\n result = infile.mapPartitions(partition_processor).flatMap(lambda x: x)\n print('result.class', result.__class__)\n\n result = result.map(lambda (x, a, y, segment, model_version):\n (int(x), float(a), float(y), segment, model_version))\n sqlContext.createDataFrame(result).saveAsParquetFile(self.output_location)", "def fit_transform(self, *args, **kwargs):\n self.fit(*args, **kwargs)\n return self.transform(*args, **kwargs)", "def doTransform(self, context, manifest, package_type, xformdata):\n xformdata['filedata'] = {}\n self.getFileData(context, manifest, xformdata['filedata'])\n self.getNavigationData(context, manifest, xformdata['filedata'])\n xformdata['manifest'] = context.performTransform(manifest, package_type)", "def transform(self, X):\n raise NotImplementedError()", "def _transform(self, X, X2=None):\n raise NotImplementedError", "def _transform(self, X, X2=None):\n raise NotImplementedError", "def transform(self, inputs: list, stage: str) -> datapack.DataPack:", "def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)", "def _preprocess(self, data):\n\n # pipeline: first call the previous statistics:\n if self.previous_statistics is not None:\n data = self.previous_statistics.statistics(data)\n # the first of the statistics need to take list as input, in order to match the API. Then actually the\n # transformations work on np.arrays. In fact the first statistic transforms the list to array. Therefore, the\n # following code needs to be called only if the self statistic is the first, i.e. 
it does not have a\n # previous_statistic element.\n else:\n data = self._check_and_transform_input(data)\n\n return data", "def fit_transform(self, data):\n if not self._transformers:\n return self._preprocess(data)\n\n transformed_data = self._preprocess(data)\n final_step = self._transformers[-1]\n return final_step[1].fit_transform(transformed_data)", "def fit_transform(self, data):\n if data:\n assert isinstance(data, dict), 'Step {}, \"data\" argument in the \"fit_transform()\" method must be dict, ' \\\n 'got {} instead.'.format(self.name, type(data))\n logger.info('Step {}, working in \"{}\" mode'.format(self.name, self._mode))\n\n if self._mode == 'inference':\n ValueError('Step {}, you are in \"{}\" mode, where you cannot run \"fit\".'\n 'Please change mode to \"train\" to enable fitting.'\n 'Use: \"step.set_mode_train()\" then \"step.fit_transform()\"'.format(self.name, self._mode))\n\n if self.output_is_cached and not self.force_fitting:\n logger.info('Step {} using cached output'.format(self.name))\n step_output_data = self.output\n elif self.output_is_persisted and self.load_persisted_output and not self.force_fitting:\n logger.info('Step {} loading persisted output from {}'.format(self.name,\n self.experiment_directory_output_step))\n step_output_data = self._load_output(self.experiment_directory_output_step)\n else:\n step_inputs = {}\n if self.input_data is not None:\n for input_data_part in self.input_data:\n step_inputs[input_data_part] = data[input_data_part]\n\n for input_step in self.input_steps:\n step_inputs[input_step.name] = input_step.fit_transform(data)\n\n if self.adapter:\n step_inputs = self._adapt(step_inputs)\n else:\n step_inputs = self._unpack(step_inputs)\n step_output_data = self._fit_transform_operation(step_inputs)\n logger.info('Step {}, fit and transform completed'.format(self.name))\n return step_output_data", "def apply(self, transform_func):\n #input_shapes = transform_func.input_shapes\n #input_types = transform_func.input_types\n #data_shapes = transform_func.data_shapes\n #data_types = transform_func.data_types\n #assert input_shapes == self._data_shapes\n #assert input_types = self._data_types\n ret_gen = transform_func(self.generator)\n ret = type(self).from_generator_func(ret_gen)\n if self.name is not None:\n ret.name = self.name\n #ret.data_shapes = data_shapes\n #ret.data_types = data_types\n return ret", "def _fit_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> ('BaseStep', DataContainer):\n new_self, (new_data_inputs, new_expected_outputs) = \\\n self.fit_transform((data_container.data_inputs, data_container.expected_outputs), None)\n\n data_container.set_data_inputs(new_data_inputs)\n data_container.set_expected_outputs(new_expected_outputs)\n\n return new_self, data_container", "def _process_group(self):\n if not isinstance(self.transform, GroupTransformModel):\n return\n\n self._process_name()\n\n if self.transformed_item['type'] == 'Campaign':\n self._process_metadata_datetime('firstSeen', self.transform.first_seen)\n\n if self.transformed_item['type'] == 'Document':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('malware', self.transform.malware)\n self._process_metadata('password', self.transform.password)\n\n if self.transformed_item['type'] == 'Email':\n self._process_metadata('body', self.transform.body)\n self._process_metadata('from', self.transform.from_addr)\n self._process_metadata('header', self.transform.header)\n 
self._process_metadata('subject', self.transform.subject)\n self._process_metadata('to', self.transform.to_addr)\n\n if self.transformed_item['type'] in ('Event', 'Incident'):\n self._process_metadata_datetime('eventDate', self.transform.event_date)\n self._process_metadata('status', self.transform.status)\n\n if self.transformed_item['type'] == 'Report':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata_datetime('publishDate', self.transform.publish_date)\n\n # Handle sig specific fields here\n if self.transformed_item['type'] == 'Signature':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('fileType', self.transform.file_type)\n self._process_metadata('fileText', self.transform.file_text)", "def _apply_transform(self, w2w_transform):\n raise NotImplementedError", "def postprocess(self, data, pagination):\n self.inject_data_hook(data)\n # Serialize ``data`` to python data structures\n python_data = self.serialize_to_python(data)\n # finalize any pending data processing\n self.finalize_pending(data)\n # Package the python_data to a dictionary\n return self.package(python_data, pagination)", "def transform(self, data):\n transformed_data = _copy(data)\n for name, step in self._transformers:\n transformed_data = step.transform(transformed_data)\n if type(transformed_data) != _tc.SFrame:\n raise TypeError(\"The transform function in step '%s' did not return\"\n \" an SFrame.\" % name)\n return transformed_data", "def preProcess(self, datum):\n pass", "def _transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:\n di_eo = (data_container.data_inputs, data_container.expected_outputs)\n new_data_inputs, new_expected_outputs = self.transform(di_eo)\n\n data_container.set_data_inputs(new_data_inputs)\n data_container.set_expected_outputs(new_expected_outputs)\n\n return data_container", "def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n 
m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict", "def _TransformInputs(self, _):\n raise NotImplementedError()", "def _default_transform_fn(self, model, input_data, content_type, accept, context=None):\n # pylint: disable=unused-argument\n data = self._run_handler_function(self._input_fn, *(input_data, content_type))\n prediction = self._run_handler_function(self._predict_fn, *(data, model))\n result = self._run_handler_function(self._output_fn, *(prediction, accept))\n return result", "def _fit_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> (BaseStep, DataContainer):\n self.wrapped, new_expected_outputs_data_container = self.wrapped.handle_fit_transform(\n DataContainer(data_inputs=data_container.expected_outputs, current_ids=data_container.current_ids,\n expected_outputs=None),\n context\n )\n data_container.set_expected_outputs(new_expected_outputs_data_container.data_inputs)\n\n return self, data_container", "def transform_model(self, data_inputs):\n inference_output_name = self._get_inference_output_name()\n\n feed_dict = {\n self['data_inputs']: data_inputs\n }\n\n results = self.session.run([self[inference_output_name], self['loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n\n return results[0]", "def transform(self, X):\n\n t0 = time.perf_counter()\n check_is_fitted(self)\n self.check_external_components_modified()#[WARN] in d3m, primitives can \"restore\" private class variables...\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], reset=False)\n t1 = time.perf_counter()\n\n if X.shape[1] != self.components_af_.shape[1]:\n raise ValueError(\n \"Impossible to perform projection:\"\n \"X at fit stage had a different number of features. 
\"\n \"(%s != %s)\" % (X.shape[1], self.components_af_.shape[1])\n )\n\n #X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)\n #import pdb; pdb.set_trace()\n X_af = af.interop.from_ndarray(X).as_type(self.components_af_.dtype())\n X_new = af.matmulNT(X_af, self.components_af_)\n X_new = X_new.to_ndarray()\n t2 = time.perf_counter()\n return X_new", "def transform(self, original_input):\n raise NotImplementedError()", "def transform(self, data_inputs):\n steps_left_to_do, data_inputs = self.read_checkpoint(data_inputs)\n for step_name, step in steps_left_to_do:\n data_inputs = step.transform(data_inputs)\n\n return data_inputs", "def fit_transform(self, data: pd.DataFrame):\n return self.fit(data).transform(data)", "def preprocess(self):", "def transform(train_data, test_data, working_dir):\n\n options = PipelineOptions()\n options.view_as(StandardOptions).runner = 'DirectRunner'\n with beam.Pipeline(options=options) as pipeline:\n _ = (pipeline | 'ReadTrainData' >> beam.Create(train_data) |\n 'EncodeTrainData' >> beam.Map(lambda data: to_example(data)) |\n 'WriteTrainData' >> beam.io.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE)))\n\n _ = (pipeline | 'ReadTestData' >> beam.Create(test_data) |\n 'EncodeTestData' >> beam.Map(lambda data: to_example(data)) |\n 'WriteTestData' >> beam.io.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))", "def fit_transform(self, data):\n data = self._common_preprocess(data)\n\n # drop cols with only zeroes\n data = data.loc[:, (data != 0).any(axis = 0)]\n # common preprocessed columns become new include cols for transformers\n self.include_cols.extend(data.columns)\n\n X = data.drop('ckd', axis = 1)\n y = data['ckd']\n\n return X, y", "def fit_transform ( self, X ):\n self.fit ( X )\n return self.transform ( X )\n # End fit_transform()", "def run(self, ct_data, state):\n transformed_input = copy.deepcopy(ct_data)\n output = self.model.run(ct_data, state)\n result = {\"output\": output, \"transformed input\": transformed_input}\n return result", "def buildModel( self, transformer, classifier ):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , self.modeldump )", "def transform(self):\n\n # Gets a serialized dict representation of the model, containing all its\n # layers, their connections and configuration. This is the main structure\n # which is used to understand model structure, and also manipulate it.\n #\n # config = {\n # 'input_layers': [ ... ],\n # 'layers': [{\n # 'inbound_nodes': [INPUT CONFIG OF LAYER],\n # 'name': 'LAYER_NAME',\n # 'config': { LAYER_CONFIG }\n # }, {\n # ...\n # }],\n # 'output_layers': [ ... 
],\n # 'name': 'MODEL_NAME',\n #\n\n # Ensures old Keras serialization format\n self.model.use_legacy_config = True\n self._config = self.model.get_config()\n\n # Stores map of Transform -> List of layer names matched by transform.\n # Same transform should not match+replace the same layer more than once\n # to prevent infinite loops.\n self._transform_matched_layers_map = {}\n self._layer_weights_map = {}\n self._layer_names_and_weights_map = {}\n\n for layer in self.model.layers:\n self._layer_weights_map[layer.name] = self._get_keras_layer_weights(layer)\n self._layer_names_and_weights_map[\n layer.name] = self._get_keras_layer_names_and_weights(layer)\n\n # Maintains a current mutable copy of the metadata through transformation.\n self._layer_metadata_map = copy.deepcopy(self.layer_metadata)\n\n # We run an infinite loop and keep applying transformations as long as\n # patterns are found. This allows recursive pattern matching where a\n # modification by one transform may lead to another match.\n #\n # TODO(pulkitb): This leads to infinite loops with poor patterns which may\n # match their replacement. Add counters with limits to fix it.\n while True:\n match_found = False\n for transform in self.transforms:\n # A transform may find multiple instances of a pattern in the model.\n # Keep finding and replacing till done.\n while True:\n match_layer_node = self._find_pattern(\n transform.pattern(), self._get_matched_layers(transform))\n\n # Pattern did not match any layer. Move to next transform.\n if not match_layer_node:\n break\n\n self._store_successful_match(transform, match_layer_node)\n\n # Copying the match_layer_node ensures the replacement code can\n # freely modify the match.\n replacement_layer_node = transform.replacement(\n copy.deepcopy(match_layer_node))\n\n # If equal, the matched layers are being replaced with exactly the\n # same set of layers that were matched with the same config.\n # For Transforms, that may inadvertently do this we can end up in\n # an infinite loop. Move on if no meaningful change has been made.\n if match_layer_node == replacement_layer_node:\n continue\n\n match_found = True\n self._replace(match_layer_node, replacement_layer_node)\n\n # None of the transforms found a pattern. 
We can stop now.\n if not match_found:\n break\n\n custom_objects = {}\n for transform in self.transforms:\n custom_objects.update(transform.custom_objects())\n\n # Reconstruct model from the config, using the cloned layers.\n if self._is_functional_model(self.model):\n transformed_model = keras.Model.from_config(self._config, custom_objects)\n else:\n transformed_model = keras.Sequential.from_config(self._config,\n custom_objects)\n\n for layer in transformed_model.layers:\n weights = self._layer_weights_map.get(layer.name)\n if weights:\n self._set_layer_weights(layer, weights)\n else:\n names_and_weights = self._layer_names_and_weights_map.get(layer.name)\n if names_and_weights:\n self._set_layer_names_and_weights(layer, names_and_weights)\n\n # Ensures old Keras serialization format\n transformed_model.use_legacy_config = True\n return transformed_model, copy.deepcopy(self._layer_metadata_map)", "def transform(self, X):\n return self.transformer.transform(X)", "def _transform(self, X, y=None):\n return clone(self.transformer).fit_transform(X=X, y=y)", "def _fit(self, X, _transform=False):\n\n n_cols = X.shape[1]\n\n data = DistributedDataHandler.create(data=X, client=self.client)\n self.datatype = data.datatype\n\n if \"svd_solver\" in self.kwargs and self.kwargs[\"svd_solver\"] == \"tsqr\":\n comms = Comms(comms_p2p=True)\n else:\n comms = Comms(comms_p2p=False)\n\n comms.init(workers=data.workers)\n\n data.calculate_parts_to_sizes(comms)\n\n worker_info = comms.worker_info(comms.worker_addresses)\n parts_to_sizes, _ = parts_to_ranks(\n self.client, worker_info, data.gpu_futures\n )\n\n total_rows = data.total_rows\n\n models = dict(\n [\n (\n data.worker_info[wf[0]][\"rank\"],\n self.client.submit(\n self._create_model,\n comms.sessionId,\n self._model_func,\n self.datatype,\n **self.kwargs,\n pure=False,\n workers=[wf[0]],\n ),\n )\n for idx, wf in enumerate(data.worker_to_parts.items())\n ]\n )\n\n pca_fit = dict(\n [\n (\n wf[0],\n self.client.submit(\n DecompositionSyncFitMixin._func_fit,\n models[data.worker_info[wf[0]][\"rank\"]],\n wf[1],\n total_rows,\n n_cols,\n parts_to_sizes,\n data.worker_info[wf[0]][\"rank\"],\n _transform,\n pure=False,\n workers=[wf[0]],\n ),\n )\n for idx, wf in enumerate(data.worker_to_parts.items())\n ]\n )\n\n wait(list(pca_fit.values()))\n raise_exception_from_futures(list(pca_fit.values()))\n\n comms.destroy()\n\n self._set_internal_model(list(models.values())[0])\n\n if _transform:\n out_futures = flatten_grouped_results(\n self.client, data.gpu_futures, pca_fit\n )\n return to_output(out_futures, self.datatype)\n\n return self", "def test_transform(self):\n X = self.generate_X()\n task = mmRDTR()\n task.fit(X)\n res = task.transform(X)\n # check if Instance\n self.assertIsInstance(res,Container)\n # check if names\n self.assertEqual(np.all(res.colnames()==[str(i) for i in xrange(len(res.colnames()))]),True)\n # check if values as within the range expected\n self.assertEqual(np.all(res().min()>=-1),True)\n self.assertEqual(np.all(res().max()<=1),True)\n for i in range(len(res.colnames())):\n self.assertEqual(round(res()[:,i].mean(),8),0)\n # check with new data\n Y = self.generate_X()\n res = task.transform(Y)\n self.assertEqual(np.all(res.colnames()==[str(i) for i in xrange(len(res.colnames()))]),True)\n self.assertEqual(np.all(res().min()>=-1),True)\n self.assertEqual(np.all(res().max()<=1),True)", "def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n return pd.DataFrame(self.pipeline.transform(data))", "def post_process(cls, data):\n 
return data", "def fit_transform(self, X, y=...):\n ...", "def preprocess(data, to_drop=[]):\n \n columns = data.columns.to_list()\n \n # split data to numeric vs categorical\n numeric_features = data.select_dtypes(include=[\n 'int64', 'float64']).columns\n \n if len(to_drop) > 0:\n categorical_features = data.select_dtypes(include=[\n 'object']).drop(to_drop, axis=1).columns\n print(categorical_features)\n else: \n categorical_features = data.select_dtypes(include=[\n 'object']).columns\n \n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent', fill_value='missing'))])\n \n numerical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', RobustScaler())\n ])\n # missing_values = np.nan\n \n# Bundle preprocessing for numerical and categorical data\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', numerical_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features)\n ])\n\n my_pipeline = Pipeline(steps=[('preprocessor', preprocessor) ])\n \n for col in to_drop:\n columns.remove(col)\n print('Hello')\n \n trans_data = my_pipeline.fit_transform(data)\n return trans_data#pd.DataFrame(#, columns=columns) ", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def transform(self, X, y=None):\n if self.config_filepath is None:\n raise ValueError(\"Config filepath does not exist.\"\n \" Please define a config file first.\")\n\n if self.load_woe_pickle_filepath is None:\n raise ValueError(\"Load WOE pickle filepath does not exist.\"\n \" Either fit model or load model first.\")\n\n data = X.copy()\n data['target'] = -1\n\n # with open(os.devnull, 'w') as devnull:\n # with contextlib.redirect_stdout(devnull):\n df_transformed = fp.process_woe_trans(\n data,\n self.load_woe_pickle_filepath,\n self.config_filepath)\n\n X_processed = df_transformed.drop('target', axis=1)\n\n return X_processed", "def transform_data():\n with open(f'{PATH_TO_STATE}/current_state.json', 'r') as f:\n cur_state = json.load(f)\n if cur_state['current_state'] != 0:\n print(\"You have to extract before transforming the data!\")\n return\n books = cur_state['books']\n BookDetailsWebScrapper().transform_book_details(books)\n _save_transform_state(books)\n print(\"Data successfully transformed, it is ready to be loaded to data base!\")", "def prepare_data(self):", "def transform(config, data, transfo, *args, **kwargs):\n \n# stderr.write(str((config, data, transfo) + args) + \"\\n\")\n pipe = ktpipes.KtPipe.from_json(config[transfo])\n\n return pipe.fit_transform(get_raw(data))", "def transform(self, X):\n return super().transform(X)", "def fit(self, data):\n if not self._transformers:\n return\n\n transformed_data = self._preprocess(data)\n final_step = self._transformers[-1]\n final_step[1].fit(transformed_data)", "def __prepare(self, data):\n #print(\"Running Prepare data\")\n #print(data)\n #print(type(data))\n if len(data) > 1:\n if type(data[0]) == np.ndarray:\n return np.concatenate(data)\n else:\n return torch.cat(data).cpu().numpy()\n else:\n return data[0].cpu().numpy()", "def _transform(self, X, y=None):\n import filterpy.kalman.kalman_filter as filterpy_kf\n\n time_steps = X.shape[0]\n measurement_dim = X.shape[1]\n\n if y is None:\n if self.control_transition is None:\n y_dim = 1\n else:\n y_dim = np.atleast_2d(self.control_transition).shape[-1]\n warn(\n \"Class parameter `control_transition` was initiated with user data \"\n \"but received no data 
through `transform` argument, `y`. \"\n \"Therefore, omitting `control_transition` \"\n \"when calculating the result. \"\n )\n y = np.zeros(y_dim)\n\n shapes = self._get_shapes(\n state_dim=self.state_dim, measurement_dim=measurement_dim, u_dim=y.shape[-1]\n )\n G = _init_matrix(\n matrices=self.control_transition,\n transform_func=np.atleast_2d,\n default_val=np.eye(*shapes[\"G\"]),\n )\n _validate_param_shape(\n param_name=\"control_transition\",\n matrix_shape=shapes[\"G\"],\n actual_shape=G.shape,\n time_steps=time_steps,\n )\n\n x_priori = np.zeros((time_steps, *shapes[\"X0\"]))\n p_priori = np.zeros((time_steps, *shapes[\"P0\"]))\n\n x_posteriori = np.zeros((time_steps, *shapes[\"X0\"]))\n p_posteriori = np.zeros((time_steps, *shapes[\"P0\"]))\n\n x = self.X0_\n p = self.P0_\n\n # kalman filter iterations\n for t in range(time_steps):\n (zt, Ft, Gt, Qt, Rt, Ht, ut) = self._get_iter_t_matrices(\n X=X, G=G, u=y, t=t, time_steps=time_steps, shapes=shapes\n )\n\n x, p = filterpy_kf.predict(x, p, u=ut, B=Gt, F=Ft, Q=Qt)\n x_priori[t, :] = x\n p_priori[t, :, :] = p\n\n x, p = filterpy_kf.update(x, p, zt, R=Rt, H=Ht)\n x_posteriori[t, :] = x\n p_posteriori[t, :, :] = p\n\n if self.denoising:\n Fs = [self.F_] * time_steps if self.F_.ndim == 2 else self.F_\n Qs = [self.Q_] * time_steps if self.Q_.ndim == 2 else self.Q_\n return filterpy_kf.rts_smoother(\n Xs=x_posteriori, Ps=p_posteriori, Fs=Fs, Qs=Qs\n )[0]\n return x_posteriori", "def forward_transform(self):\n\n if self._pipeline:\n #return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])\n return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])\n else:\n return None", "def transform(self, dataset: NumpyOrPandas) -> NumpyDataset:\n # checks here\n super().transform(dataset)\n # convert to accepted dtype and get attributes\n dataset = dataset.to_pandas()\n df = dataset.data\n\n # transform\n roles = NumericRole()\n outputs = []\n\n for n, conlumn_name in enumerate(df.columns):\n if self.cache_dir is not None:\n full_hash = get_textarr_hash(df[conlumn_name]) + get_textarr_hash(self.dicts[conlumn_name][\"feats\"])\n fname = os.path.join(self.cache_dir, full_hash + \".pkl\")\n\n if os.path.exists(fname):\n logger.info3(f\"Load saved dataset for {conlumn_name}\")\n\n with open(fname, \"rb\") as f:\n new_arr = pickle.load(f)\n\n else:\n new_arr = self.dicts[conlumn_name][\"transformer\"].transform(df[conlumn_name])\n with open(fname, \"wb\") as f:\n pickle.dump(new_arr, f)\n else:\n new_arr = self.dicts[conlumn_name][\"transformer\"].transform(df[conlumn_name])\n\n output = dataset.empty().to_numpy()\n output.set_data(new_arr, self.dicts[conlumn_name][\"feats\"], roles)\n outputs.append(output)\n logger.info3(f\"Feature {conlumn_name} transformed\")\n # create resulted\n return dataset.empty().to_numpy().concat(outputs)", "def transform(self, X):\n return self._apply_method(X, \"transform\")", "def compose(self, data):\n return super().compose(data=data)", "def transform(self, x):", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def fit_transform(self, data_inputs, expected_outputs=None) -> ('Pipeline', Any):\n new_self, data_inputs = self.fit_transform_steps(data_inputs, expected_outputs)\n processed_outputs = data_inputs\n\n return new_self, processed_outputs" ]
[ "0.71496886", "0.7044139", "0.7000777", "0.6858309", "0.6785153", "0.6729821", "0.67230743", "0.65904725", "0.65476114", "0.6538246", "0.6538246", "0.6501432", "0.6452522", "0.6446105", "0.6395718", "0.639384", "0.639384", "0.639384", "0.639384", "0.639384", "0.639384", "0.639384", "0.62190247", "0.61821073", "0.61821073", "0.61821073", "0.61689806", "0.61610943", "0.61296654", "0.6113278", "0.60751164", "0.60456955", "0.603364", "0.6024538", "0.6013651", "0.59841776", "0.5967799", "0.5961076", "0.5928745", "0.5924551", "0.59167176", "0.590565", "0.59039927", "0.590364", "0.590364", "0.58768946", "0.5851583", "0.5838822", "0.58379555", "0.58375746", "0.5834594", "0.5828886", "0.58161783", "0.5755956", "0.5747554", "0.5746789", "0.57467544", "0.57460123", "0.5737973", "0.5726968", "0.57212293", "0.57135886", "0.5713347", "0.56961834", "0.5695781", "0.5693771", "0.5693402", "0.56888765", "0.56661415", "0.56655407", "0.5662554", "0.5643448", "0.56306124", "0.56276566", "0.5613127", "0.56044436", "0.55953693", "0.5593746", "0.5592029", "0.5589596", "0.5586641", "0.5581872", "0.5580392", "0.5580392", "0.5580392", "0.5572735", "0.5570237", "0.5564867", "0.5549795", "0.5542886", "0.5526111", "0.55230343", "0.5522017", "0.55191714", "0.5518174", "0.55142504", "0.55036426", "0.5488005", "0.5480377", "0.5480134" ]
0.56914866
67
Build dataframe df_customers from transformed data. Transformed data are issued from the NLP, Time and RFM features. See data_transform(). These data are stored as dataframe attributes.
def df_customers_features_build(self): df_customers_rfm = self._df_customers_rfm.copy() df_customers_timeFeature = self._df_customers_timeFeature.copy() df_customers_nlp = self._df_customers_pca_nlp.copy() #------------------------------------------------------------------------- # Dataframe are aggregated; note that indexes are customerID. #------------------------------------------------------------------------- df_customers = pd.DataFrame() df_customers = pd.concat([df_customers,df_customers_rfm], axis=1) df_customers = pd.concat([df_customers,df_customers_timeFeature]\ , join='inner', axis=1) df_customers = pd.concat([df_customers,df_customers_nlp]\ , join='inner', axis=1) self.strprint("All features : "+str(df_customers.shape)) self._df_customers = df_customers.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n #----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. 
Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n 
self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n #----------------------------------------------------------------------\n self._df_customers_timeFeature = df_customers_timeFeature.copy()\n return", "def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += [(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = 
trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df", "def _get_cus_info(self):\n label_enc = LabelEncoder()\n customer_info = self._inv.drop_duplicates(['customer_code'], keep='last')\n customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',\n 'sales_cen_name', 'sales_region_name', 'province',\n 'city', 'district', 'customer_type', 'is_usable', 'channel_level']]\n customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])\n customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])\n customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])\n customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])\n customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])\n customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])\n customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])\n customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])\n customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])\n customer_info_encoded = customer_info.drop(\n columns=['customer_name', 'sales_cen_code', 'sales_cen_name',\n 'sales_region_name', 'province', 'city', 'district']\n ).set_index('customer_code')\n customer_info.set_index('customer_code', inplace=True)\n customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))\n return customer_info, customer_info_encoded", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , 
columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def get_customer_stats(self):\n try:\n names, waitings, totals, statuses, destinations, passwords, types, positions = zip(*[(p.name, p.get_waiting_time(),\n p.total_time(), status_to_str(p.status), p.get_position(), p.password, p.fleet_type, p.init_position)\n for p in self.customer_agents.values()])\n except ValueError:\n names, waitings, totals, statuses, destinations, passwords, types, positions = [], [], [], [], [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"waiting_time\": waitings, \"total_time\": totals, \"status\": statuses, \"destination\": destinations, \"password\": passwords, \"fleet_type\": types, \"position\": positions})\n return df", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def _transform_df(self, data):\n # specify if has FIPS or not\n if self.has_location:\n loc_col_type = \"location\"\n elif not self.has_location:\n loc_col_type = \"location_name\"\n\n out = data.melt(\n id_vars=[\"dt\", loc_col_type], value_vars=self.crename.keys()\n ).dropna()\n out.loc[:, \"value\"] = pd.to_numeric(out[\"value\"])\n out = self.extract_CMU(out, self.crename)\n out[\"vintage\"] = self._retrieve_vintage()\n\n cols_to_keep = [\n \"vintage\",\n \"dt\",\n loc_col_type,\n \"category\",\n \"measurement\",\n \"unit\",\n \"age\",\n \"race\",\n \"ethnicity\",\n \"sex\",\n \"value\",\n ]\n 
return out.loc[:, cols_to_keep]", "def load_customers(dir):\n customSchema = StructType([ \\\n StructField(\"customerId2\", IntegerType(), True), \\\n StructField(\"churnlabel\", IntegerType(), True), \\\n StructField(\"gender\", StringType(), True), \\\n StructField(\"shippingCountry\", StringType(), True), \\\n StructField(\"dateCreated\", StringType(), True), \\\n StructField(\"yearOfBirth\", IntegerType(), True), \\\n StructField(\"premier\", IntegerType(), True)])\n\n df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(header='false', delimiter='\\t', nullValue='\\\\N') \\\n .load(get_dir_customers(dir) + '/*', schema=customSchema)\n\n return df", "def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def get_customers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Company' THEN cus.customer_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tcus.website as 'Internet',\n\t\t\tcus.tax_id as 'Steuernummer'\n\n\t\tFROM `tabCustomer` cus\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = cus.name\n\t\t\tand par.parenttype = 'Customer'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = cus.name\n\t\t\tand dyn_adr.link_doctype = 'Customer'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)", "def create_master_table(df_cust: pd.DataFrame,\n df_trans: pd.DataFrame,\n parameters: Dict) -> pd.DataFrame:\n\n df_cust = _process_customers(df_cust, parameters)\n df_trans = _process_transactions(df_trans, parameters)\n\n # join data\n master_table = df_cust.merge(df_trans, on=['customerID'],\n how='left')\n\n # create geo risk ranking\n # temporary solution, if used in final solution, need to prepare in fit/transform maner\n bins = [-np.inf, 0.049, 0.071, 0.088, 0.107, 0.137, np.inf]\n geo_risk_rank = master_table.groupby('residentialAddress_clean')[['hist_default_sum', 'hist_trans_count']]. \\\n sum().reset_index(). 
\\\n assign(geo_risk_rank=lambda x: pd.cut(x['hist_default_sum']/x['hist_trans_count'], bins).cat.codes)\n\n master_table = master_table.merge(geo_risk_rank[['residentialAddress_clean', 'geo_risk_rank']], on='residentialAddress_clean', how='left')\n\n # drop clients without transactions\n master_table = master_table.dropna(subset=['default'])\n\n return master_table", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return 
customer_df", "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def createDataFrames(self):\n self._atmDF = pd.DataFrame.from_dict(self._atmDict, orient='index')\n \n self._clientDF = pd.DataFrame.from_dict(self._clientDict, orient='index')\n self._clientDF['longAccount'] = self._clientDF.client.map(str) +\\\n '_' + self._clientDF.account.map(str)\n \n self._transactionDF = pd.DataFrame.from_dict(self._transactionDict, orient='index')", "def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])", "def new_df(companies_filtered):\n name = []\n city = []\n latitude = []\n longitude = []\n zip_code = []\n for i in companies_filtered:\n name.append(i['name'])\n try: \n if i['offices'][0]['city'] == '':\n city.append(np.nan)\n else:\n city.append(i['offices'][0]['city'])\n latitude.append(i['offices'][0]['latitude'])\n longitude.append(i['offices'][0]['longitude'])\n except:\n city.append(np.nan)\n latitude.append(np.nan)\n longitude.append(np.nan)\n zip_code.append(np.nan)\n dict_ = {'company' : name, 'city' : city, 'latitude' : latitude, 'longitude': longitude}\n companies_df = pd.DataFrame.from_dict(dict_, orient='columns')\n \n return companies_df", "def make_data(dataFname, enc, features=None):\n\n origData = pandas.read_csv(dataFname)\n ids = origData['id']\n\n # remove unused columns\n if 'Unnamed: 0' in origData.columns: del origData['Unnamed: 0']\n del origData['id']\n\n # remove \"data leakage\" columns\n for f in prohobitedFeatures:\n del origData[f]\n\n # separate into X & y values\n xData = origData[[col for col in origData.columns if not col=='loss']]\n set_vars_as_type(xData, discreteVars, object)\n yVec = origData.loss if 'loss' in origData.columns else None\n\n # try f528 - f274\n xData['f528f274'] = xData['f528'] - xData['f274']\n\n # encode the categorical features f776 and f777\n if enc is None:\n enc = OneHotEncoder(n_values=[2, 2])\n enc.fit(xData[['f776', 'f777']])\n\n xData[['f776_isZero', 'f776_isOne', 'f777_isZero', 'f777_isOne']] = pandas.DataFrame(enc.transform(xData[['f776', 'f777']]).toarray())\n del xData['f776']\n del xData['f777']\n\n print_missing_values_info(origData)\n\n # feature selection\n if features:\n filteredXData = xData[features]\n else: # use ALL features\n filteredXData = xData\n\n return filteredXData, yVec, ids, enc", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = 
processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def _prep_data(self, data: bytes) -> pd.DataFrame:\n # Convert the bytes into a file-like object\n buffer = io.BytesIO(data)\n\n # Unzip the file and pull out the csv file\n with zipfile.ZipFile(buffer, \"r\") as zip_file:\n csv = zip_file.read(\"QSAR_BCF_Kow.csv\")\n\n # Convert the string into a file-like object\n csv_file = io.BytesIO(csv)\n\n # Read the file-like object into a dataframe\n cols = [\"cas\", \"name\", \"smiles\", \"logkow\", \"kow_exp\", \"logbcf\"]\n df = pd.read_csv(\n csv_file,\n names=cols,\n header=0,\n usecols=[col for col in cols if col not in [\"cas\", \"name\"]],\n )\n\n # Drop NaNs\n df = df.dropna().reset_index(drop=True)\n\n # Encode KOW types\n kow_types = [\"pred\", \"exp\"]\n df[\"kow_exp\"] = df.kow_exp.map(lambda txt: kow_types.index(txt))\n\n # Get maximum SMILE string length\n max_smile = max(len(smile_string) for smile_string in df.smiles)\n\n # Pad SMILE strings\n df[\"smiles\"] = [\n smile_string + \"x\" * (max_smile - len(smile_string))\n for smile_string in df.smiles\n ]\n\n # Split up the SMILE strings into a matrix\n smile_df = pd.DataFrame(df.smiles.map(list).values.tolist())\n\n # Set the column values of the SMILE dataframe\n smile_df.columns = pd.Index(\n [f\"smiles_{idx}\" for idx in range(smile_df.shape[1])]\n )\n\n # Add the smile dataframe to the original dataframe\n df = pd.concat([df, smile_df], axis=1)\n\n # Drop original SMILE feature\n df = df.drop(columns=\"smiles\")\n\n # Put the target variable at the end\n cols = [\"logkow\", \"kow_exp\"]\n cols += [f\"smiles_{idx}\" for idx in range(max_smile)]\n cols += [\"logbcf\"]\n df = df[cols]\n\n # Ensure that the `logkow` column is numeric\n df[\"logkow\"] = pd.to_numeric(df.logkow)\n\n return df", "def log_transform_features_customer(profile):\n\n view_amount_features = ['max_duration_view_profile', 'view_rate_profile', 'max_amount', \\\n 'min_duration_view_profile', 'min_amount',\\\n 'avg_amount', 'avg_trx_cnt', 'avg_duration_view_profile']\n\n profile_transformed = np.log(profile[view_amount_features]+1)\n\n profile = pd.concat([profile[['gender', 'age', 'became_member_on', 'income']]\\\n \t,profile_transformed], axis=1)\n\n profile.drop(columns=['income', 'min_amount', 'avg_amount', 'avg_duration_view_profile']\\\n \t, inplace=True)\n\n u.save_dataframe_to_sql(profile, 'profile')\n\n return profile", "def join_customer_features(traj_result, username, season, country):\n user_features=get_k_means_data(username,season, country).set_index(\"customer_nr\")\n features_with_trajectory=user_features.join(traj_result.set_index('customer_nr')[[\"cluster\"]])\n return features_with_trajectory", "def process_customers(self, customers_file):\n\t\tmin = max = None\n\t\tcustomers = {}\n\t\ttry:\n\t\t\tfor user_id, date_str in self.read_csv_file(customers_file):\n\t\t\t\tdate = self.convert_date(date_str)\n\t\t\t\tmin, max = self.min_max_date(min, max, date)\n\t\t\t\tcustomers[user_id] = date\n\t\texcept ValueError:\n\t\t\traise Exception('Customers file has unexpected format.')\n\n\t\tself.customers = customers\n\t\tself.min = min\n\t\tself.max = max", "def fit_transform(self, df):\n\t\tdf = 
self.__parse_json(df)\n\t\tdf = self.__fillnan(df)\n\t\tdf = self.__parse_dates(df)\n\t\tdf['budget'] = df['budget'].apply(lambda x: self.missing_budget_imputing if int(x) == 0 else x)\n\t\tdf['has_collections'] = df['belongs_to_collection'].isna().astype(int)\n\t\tdf['homepage'] = df['homepage'].isna().astype(int)\n\t\tdf['is_en'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n\t\tdf = self.__encode_genres(df)\n\t\tdf = self.__top_countries_and_companies(df)\n\t\tdf = self.__bin_columns(df)\n\t\tdf.drop(\n\t\t\t['release_date', 'original_language', 'production_countries', 'production_companies', 'id', 'backdrop_path',\n\t\t\t 'imdb_id', 'poster_path', 'video', 'belongs_to_collection', 'status', 'runtime',\n\t\t\t 'original_title', 'overview', 'tagline', 'title'], axis=1, inplace=True)\n\t\treturn df", "def generate_customer(self, start_of_month):\n customer_rates = np.random.multivariate_normal(\n mean=self.log_means, cov=self.behave_cov\n )\n customer_rates = self.exp_fun(customer_rates)\n customer_rates = np.maximum(customer_rates - 0.667, 0.333)\n new_customer = Customer(\n customer_rates, channel_name=self.version, start_of_month=start_of_month\n )\n # print(customer_rates)\n return new_customer", "def make_df(self):\n # read in file\n df = pd.read_csv(self.data_file)\n cols_to_drop = [f'view{x}' for x in range(1,4)]+['response']\n # subtract loc3 viewing from location of interest\n df[self.label_key] = df[self.predictor] - df['view3']\n df.drop(cols_to_drop, axis=1, inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n return df", "def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def transform(self):\n select_columns = ['Province/State','Lat','Long']\n # df = global_cases.copy()\n global_cases = self.collect_case()\n df = global_cases.copy()\n df.drop(select_columns,axis=1, inplace=True)\n df = df[df['Country/Region'].apply(lambda x: x in Africa)].T.reset_index()\n df.columns = df.iloc[0]\n df.rename(columns={'Country/Region':'Date'},inplace=True)\n df.drop([0],axis=0,inplace=True)\n \n df['Date'] = pd.to_datetime(df['Date']).dt.strftime('%m-%d-%Y')\n # sort to have the latest update on top row\n df.sort_values('Date',ascending=False, inplace=True)\n african_cases = df.copy()\n\n return african_cases", "def features_websessions(df_customers, df_websessions):\n df_websessions = df_customers.join(df_websessions, \"customerId2\", 'inner')\n res_counts = df_websessions.groupBy('customerId2').count().alias('nb_sessions')\n\n res_agg = df_websessions.groupBy('customerId2').agg(\n min('pageViewCount').alias('min_pageViewCount'),\n mean('pageViewCount').alias('mean_pageViewCount'),\n max('pageViewCount').alias('max_pageViewCount'),\n (count(when(df_websessions.pageViewCount != 0, True)) / count('customerId2')).alias('p_not0_pageViewCount'),\n\n min('nonPageViewEventsCount').alias('min_nonPageViewEventsCount'),\n mean('nonPageViewEventsCount').alias('mean_nonPageViewEventsCount'),\n max('nonPageViewEventsCount').alias('max_nonPageViewEventsCount'),\n (count(when(df_websessions.nonPageViewEventsCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_nonPageViewEventsCount'),\n\n min('productViewCount').alias('min_productViewCount'),\n mean('productViewCount').alias('mean_productViewCount'),\n max('productViewCount').alias('max_productViewCount'),\n (count(when(df_websessions.productViewCount != 0, True)) / 
count('customerId2')).alias('p_not0_productViewCount'),\n\n min('productViewsDistinctCount').alias('min_productViewsDistinctCount'),\n mean('productViewsDistinctCount').alias('mean_productViewsDistinctCount'),\n max('productViewsDistinctCount').alias('max_productViewsDistinctCount'),\n (count(when(df_websessions.productViewsDistinctCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productViewsDistinctCount'),\n\n min('productsAddedToBagCount').alias('min_productsAddedToBagCount'),\n mean('productsAddedToBagCount').alias('mean_productsAddedToBagCount'),\n max('productsAddedToBagCount').alias('max_productsAddedToBagCount'),\n (count(when(df_websessions.productsAddedToBagCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsAddedToBagCount'),\n\n min('productsSavedForLaterFromProductPageCount').alias('min_productsSavedForLaterFromProductPageCount'),\n mean('productsSavedForLaterFromProductPageCount').alias('mean_productsSavedForLaterFromProductPageCount'),\n max('productsSavedForLaterFromProductPageCount').alias('max_productsSavedForLaterFromProductPageCount'),\n (count(when(df_websessions.productsSavedForLaterFromProductPageCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsSavedForLaterFromProductPageCount'),\n\n min('productsSavedForLaterFromCategoryPageCount').alias('min_productsSavedForLaterFromCategoryPageCount'),\n mean('productsSavedForLaterFromCategoryPageCount').alias('mean_productsSavedForLaterFromCategoryPageCount'),\n max('productsSavedForLaterFromCategoryPageCount').alias('max_productsSavedForLaterFromCategoryPageCount'),\n (count(when(df_websessions.productsSavedForLaterFromCategoryPageCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsSavedForLaterFromCategoryPageCount'),\n\n min('productsPurchasedDistinctCount').alias('min_productsPurchasedDistinctCount'),\n mean('productsPurchasedDistinctCount').alias('mean_productsPurchasedDistinctCount'),\n max('productsPurchasedDistinctCount').alias('max_productsPurchasedDistinctCount'),\n (count(when(df_websessions.productsPurchasedDistinctCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsPurchasedDistinctCount'),\n\n min('productsPurchasedTotalCount').alias('min_productsPurchasedTotalCount'),\n mean('productsPurchasedTotalCount').alias('mean_productsPurchasedTotalCount'),\n max('productsPurchasedTotalCount').alias('max_productsPurchasedTotalCount'),\n (count(when(df_websessions.productsPurchasedTotalCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsPurchasedTotalCount'),\n )\n\n res = res_counts.join(res_agg, 'customerId2')\n return res", "def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train", "def transform_data(data_df, target_df = None):\n rec_idx, rec_col, rec_data = create_recency_feature(data_df)\n freq_idx, freq_col, freq_data = create_frequency_feature(data_df)\n norm_idx, norm_col, norm_data = create_norm_feature(data_df)\n\n # with hstack function we are concatinating a sparse matrix and a dense matirx :)\n feat_df = hstack((rec_data, freq_data, norm_data))\n print('Final feature matrix shape:', feat_df.shape)\n \n # merge all the feature names\n feat_names = list(rec_col) + list(freq_col) + list(norm_col)\n \n if isinstance(target_df, pd.core.frame.DataFrame):\n # get +ve & -ve indices\n one_idx = target_df[target_df['outcome_flag'] == 1]['id'].index.tolist()\n zero_idx = target_df[target_df['outcome_flag'] == 0]['id'].index.tolist()\n 
\n # calculate fitness values of features\n rcdf = create_fitness_stats(rec_data, rec_col, one_idx, zero_idx, nans = True)\n fqdf = create_fitness_stats(freq_data, freq_col, one_idx, zero_idx, nans = False)\n nrdf = create_fitness_stats(norm_data, norm_col, one_idx, zero_idx, nans=False)\n fit_df = rcdf.append(fqdf).append(nrdf)\n fit_df.reset_index(drop=1)\n return feat_df, feat_names, fit_df\n \n return feat_df, feat_names", "def to_learn(trxfile, cardfile, custfile, trainfile, testfile):\n feature_df = to_feature(trxfile, cardfile, custfile)\n feature_df.loc[:] = preprocessing.scale(feature_df)\n #feature_df.loc[:] = preprocessing.normalize(feature_df, norm='l2')\n \n # card_no, label\n train_df = pandas.read_csv(trainfile, header=None)\n # card_no\n test_df = pandas.read_csv(testfile, header=None)\n\n train_data = feature_df.loc[train_df.loc[:, 0]]\n train_label = train_df.loc[:, 1]\n test_data = feature_df.loc[test_df.loc[:, 0]]\n\n return (train_data.values, train_label.values, test_data.values)", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n numeric = HEADER.as_feature_indices(\n [\"review_count\", \"lat\", \"lng\", \"lat2\", \"lng2\"]\n )\n\n # These features contain a relatively small number of unique items.\n categorical = HEADER.as_feature_indices(\n [\"distance\", \"price_level\", \"review_count\", \"Sp1\", \"type\"]\n )\n\n # These features can be parsed as natural language.\n text = HEADER.as_feature_indices(\n [\n \"slug\", \"menu\", \"slug.1\", \"categories\", \"name\", \"url\", \"homeurl\",\n \"resource_id1\", \"resource_id2\"\n ]\n )\n\n numeric_processors = Pipeline(steps=[(\"robustimputer\", RobustImputer())])\n\n categorical_processors = Pipeline(\n steps=[\n (\"thresholdonehotencoder\", ThresholdOneHotEncoder(threshold=162))\n ]\n )\n\n text_processors = Pipeline(\n steps=[\n (\n \"multicolumntfidfvectorizer\",\n MultiColumnTfidfVectorizer(\n max_df=0.9977,\n min_df=0.0003137465824032988,\n analyzer=\"word\",\n max_features=10000\n )\n )\n ]\n )\n\n column_transformer = ColumnTransformer(\n transformers=[\n (\"numeric_processing\", numeric_processors, numeric\n ), (\"categorical_processing\", categorical_processors,\n categorical), (\"text_processing\", text_processors, text)\n ]\n )\n\n return Pipeline(\n steps=[\n (\"column_transformer\",\n column_transformer), (\"robustpca\", RobustPCA(n_components=88)),\n (\"robuststandardscaler\", RobustStandardScaler())\n ]\n )", "def filter_and_transform_to_df(self, min_price, max_price, size_limit):\n from filtering_functions import transform_dataset, filter_dataset\n # transform the dataset to dataframe (and change format of variables)\n # and filter unwanted data\n self.filtered_data = filter_dataset(transform_dataset(self.ads_data),\n min_price,max_price,size_limit)\n # create an easily accesible description of the resulting dataset\n n, k = self.filtered_data.shape\n self.description = \"Ads dataset class. 
Number of observations: {}, Number of variables: {}\".format(n,k)", "def preprocessing(df, product_number):\n useless_columns = ['Customers', 'Category', 'Segment', 'Regione', 'Provincia', 'Channel']\n df = df.drop(df[df.Provincia == '**'].index) # Removing 'Estero'\n for column in useless_columns:\n df = df.drop(column, axis=1)\n df = df_filtered_product(df, product_number) # Choose the number of the product\n df = df.groupby(['Data Rif']).sum().reset_index()\n date_range = pd.date_range('2017-01-02', '2019-03-31', freq='D').to_series()\n week_num = len(date_range) // 7\n index = 0\n\n sales = []\n for week in range(0, week_num):\n STU = 0\n for day in range(0, 7):\n if index == len(df):\n break\n elif date_range[week*7 + day] == df['Data Rif'][index]:\n STU += df['Standard Units'][index]\n index += 1\n sales.append([date_range[week*7], STU])\n df_fin = pd.DataFrame(sales, columns=['Week', 'STU'])\n df_fin.Week = pd.to_datetime(df_fin.Week)\n df_fin.set_index('Week', inplace=True)\n return df_fin", "def _transform(self, X, y=None):\n # lazy imports to avoid hard dependency\n from tsfresh import extract_features\n\n Xt = extract_features(\n X,\n column_id=X.columns[0],\n column_value=X.columns[3],\n column_kind=X.columns[2],\n column_sort=X.columns[1],\n **self.default_fc_parameters_,\n )\n\n # When using the long input format, tsfresh seems to sort the index,\n # here we make sure we return the dataframe in the sort order as the\n # input data\n instances = X.iloc[:, 0].unique()\n Xt = Xt.reindex(instances)\n return Xt", "def _normalize_dataset(self):\n\n new_data = []\n columns = ['user_id', 'movie_id', 'rating']\n for line in self.data_file['users']:\n movies_by_user = [\n {'user_id': line['user_id'], 'movie_id': movie_id, 'rating': 5}\n for movie_id in line['movies']\n ]\n new_data.extend(movies_by_user)\n return pd.DataFrame(new_data, columns=columns)", "def data_transform_nlp(self):\n df_invoice_line = None\n \n is_build_step = False\n\n if self._vectorizer_nlp is None:\n is_build_step = True\n \n list_no_words=['SET','PACK']\n\n df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \\\n = p5_util.nlp_process(self.df_invoice_line\\\n , 'Description' , vectorizer= self._vectorizer_nlp\\\n , list_no_words=list_no_words, is_verbose= self.is_verbose)\n \n if df_invoice_line is None:\n self.strprint(\"***ERROR : NLP process interrupted!\")\n return\n \n \n #-------------------------------------------------------------------------\n # NLP weights are cumulated (sumerized) per customer\n #-------------------------------------------------------------------------\n if csr_matrix_weights is None:\n csr_matrix_weights \\\n = p5_util.object_load('./data/matrix_weights_NLP.dump')\n else:\n pass\n \n self.strprint(\"df_invoice_line : \"+str(df_invoice_line.shape))\n \n self.dbg_df = df_invoice_line.copy()\n \n root_name = 'w_nlp_'\n self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\\\n , csr_matrix_weights, root_name)\n\n del(csr_matrix_weights)\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #------------------------------------------------------------------------- \n self.strprint(\"self._df_w_nlp : \"+str(self._df_w_nlp.shape))\n\n root_name_pca = 'nlp_pca_'\n n_dim = self._nlp_pca_ndim\n \n df_customers_pca_nlp, self._pca_nlp \\\n = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\\\n , p_is_scale=False, pca=self._pca_nlp)\n \n self.strprint(\"df_customers_pca_nlp : \" 
+str(df_customers_pca_nlp.shape))\n\n #-------------------------------------------------------------------------\n # Backup of NLP features per customer\n #-------------------------------------------------------------------------\n if is_build_step is True:\n p5_util.object_dump(df_customers_pca_nlp\\\n , self._df_customers_nlp_fileName)\n else:\n self._df_customers_pca_nlp = df_customers_pca_nlp.copy()\n \n return", "def pre_processing_(data_df , serialized_objects):\n max_recency_acc_dig = serialized_objects['max_recency_acc_dig'] # These values are taken from trained model values\n max_recency_dig_2yr = serialized_objects['max_recency_dig_2yr'] # These values are taken from trained model values\n max_acc_recency_mf = serialized_objects['max_acc_recency_mf'] #These are values imported in training dataset. Same values needs to be used to impute missing values in unseen data\n\n data_df = data_df.na.fill({\n 'recency_acc_dig' : max_recency_acc_dig, # Filling missing values\n 'recency_dig_2yr' : max_recency_dig_2yr,\n 'acc_recency_mf' : max_acc_recency_mf\n })\n\n freq_acc_upg_2yrs_split = [-float('inf'), 0, 1, 2, float('inf')]\n bucketizer_freq_acc_upg_2yrs = Bucketizer(splits=freq_acc_upg_2yrs_split, inputCol='freq_acc_upg_acc_2yrs', outputCol='freq_acc_upg_acc_2yrs_bkt')\n data_df = bucketizer_freq_acc_upg_2yrs.setHandleInvalid('keep').transform(data_df) # Binning the freq_acc_upg_acc_2yrs column\n\n tot_purchase_split = [-float('inf'), 0, 1, 2, 3, float('inf')]\n bucketizer_tot_purchase = Bucketizer(splits=tot_purchase_split, inputCol='tot_accsry_purchse', outputCol='tot_accsry_purchse_bkt')\n data_df = bucketizer_tot_purchase.setHandleInvalid('keep').transform(data_df) # Binning the tot_accsry_purchse column\n\n del_cols_new = ['freq_acc_upg_acc_2yrs', 'tot_accsry_purchse']\n data_df = data_df.drop(*del_cols_new) # Dropping the older continuous columns\n return data_df", "def preprocess_data(df, cat_columns, cf_columns):\n \n #define cat and cf columns, convert cat to dummies\n df_cat = pd.get_dummies(df[cat_columns], columns = cat_columns, drop_first=True, dtype=float)\n df_cat.rename(mapper= {\"sex_2\": \"sex_female\", \"education_2\":\"education_university\", \"education_3\": \"education_high_school\", \"education_4\":\"education_others\",\"marital_status_2\":\"marital_status_single\", \"marital_status_3\": \"marital_status_others\"}, axis = 1, inplace = True)\n df_cf = df[cf_columns]\n X = pd.concat([df_cf, df_cat], axis = 1)\n y = df[['default_payment_next_month']]\n print(\"dummy variables created\")\n \n #train-test split \n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n print(\"split done\")\n \n #resample the train sets\n smote = SMOTE(sampling_strategy = \"not majority\", random_state = 42)\n X_train_rs, y_train_rs = smote.fit_sample(X_train, y_train)\n print('original class distribution:')\n print(y[\"default_payment_next_month\"].value_counts())\n print('synthetic sample class distribution:')\n print(pd.Series(y_train_rs).value_counts()) \n return X, X_train_rs, X_test, y_train_rs, y_test", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def create_dataframe():\n df = pd.read_csv(\"data/311-calls.csv\", parse_dates=[\"created\"])\n df[\"created\"] = df[\"created\"].dt.date\n df.drop(columns=[\"incident_zip\"], inplace=True)\n num_complaints = 
df[\"complaint_type\"].value_counts()\n to_remove = num_complaints[num_complaints <= 30].index\n df.replace(to_remove, np.nan, inplace=True)\n return df", "def filterAndTransform(self, df):\n\n # removing as is stated in the task along with the 'Year' and 'DepTime'\n col_to_drop = ['ArrTime',\n 'ActualElapsedTime',\n 'AirTime',\n 'TaxiIn',\n 'Diverted',\n 'CarrierDelay',\n 'WeatherDelay',\n 'NASDelay',\n 'SecurityDelay',\n 'LateAircraftDelay',\n 'Year',\n 'TailNum',\n 'CancellationCode'] # Only those 3 I added up to delay, others\n # are delayed as is stated in the task\n df = df.drop(*col_to_drop)\n\n df = df.filter(\"Cancelled == 0\") # select only those flights that happened\n df = df.drop(\"Cancelled\")\n\n df = df.drop(*[\"UniqueCarrier\",\n \"DayofMonth\",\n \"FlightNum\"]) # Droping unimportant categorical variables\n\n df = df.na.drop(\"any\")\n\n df = df.withColumn('OrigDest',\n sf.concat(sf.col('Origin'), sf.lit('_'), sf.col('Dest')))\n df = df.drop(*[\"Origin\", \"Dest\"])\n df = df.withColumn(\"Speed\", sf.round(col(\"Distance\") / col(\"CRSElapsedTime\"), 2).cast(DoubleType()))\n\n return df", "def transform(self, X, **transformparamn):\n \n concatted = pd.concat([transformer.transform(X)\n for transformer in\n self.fitted_transformers_], axis=1).copy()\n return concatted", "def pre_process_data():\n data_list, header_list = Parser.__parse_csv_data(Parser.training_data_file)\n table = pandas.DataFrame(data_list, columns=header_list)\n table.drop(['date', 'employee id'], axis=1, inplace=True)\n unique_categories = table['category'].unique()\n unique_expense_desc = table['expense description'].unique()\n unique_tax_name = table['tax name'].unique()\n\n column_index = {\n 'input': {},\n 'output': {}\n }\n\n column_index['input']['pre-tax amount'] = {\n 'column_index': 0,\n 'type': 'int'\n }\n\n column_index['input']['tax amount'] = {\n 'column_index': 1,\n 'type': 'int'\n }\n\n index = 2\n\n for i in range(len(unique_expense_desc)):\n column_index['input'][unique_expense_desc[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n index += len(unique_expense_desc)\n\n for i in range(len(unique_tax_name)):\n column_index['input'][unique_tax_name[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n for i in range(len(unique_categories)):\n column_index['output'][unique_categories[i]] = {'value': i}\n\n Parser.__save_column_index(column_index)", "def transform(self, X):\n res = X.copy()\n res = add_date_features(res)\n\n # Fill the age values\n age_missing = res[res.age.isnull()]\n age_missing_index = res.index[res.age.isnull()]\n age_values = self.age_estimator.predict(age_missing[self.features])\n res.update(pd.DataFrame(age_values, index=age_missing_index,\n columns=['age']))\n\n # Fill the income values\n income_missing = res[res.income.isnull()]\n income_missing_index = res.index[res.income.isnull()]\n income_values = self.income_estimator.predict(\n income_missing[self.features])\n res.update(pd.DataFrame(income_values, index=income_missing_index,\n columns=['income']))\n\n # Fill the gender values\n gender_missing = res[res.gender.isnull()]\n gender_missing_index = res.index[res.gender.isnull()]\n gender_values = self.gender_estimator.predict(\n gender_missing[self.features])\n res.update(pd.DataFrame(gender_values, index=gender_missing_index,\n columns=['gender']))\n\n if not self.keep_date_feats:\n res.drop(MEMBER_DATE_FEATS, axis=1)\n\n return res", "def prepare_data(train, test):\n # change the name of the target column\n train.rename(columns={\"revenue\": 
\"target\"}, inplace=True)\n # map bool values to yes and no\n train[\"Weekend\"] = train[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n test[\"Weekend\"] = test[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n # set the id col as index\n train.set_index(\"id\", inplace=True)\n test.set_index(\"id\", inplace=True)\n\n # seperate the fetures and the target\n X_train = train.drop(\"target\", axis=1).copy()\n y_train = train[\"target\"].copy()\n X_test = test.copy()\n\n # select numerical and categorical columns\n num_cols = X_train.select_dtypes(exclude=\"object\").columns.tolist()\n cat_cols = X_train.select_dtypes(include=\"object\").columns.tolist()\n\n # numerical pipeline\n num_pipe = make_pipeline(SimpleImputer(strategy=\"mean\"))\n\n # categorical pipeline\n cat_pipe = make_pipeline(\n SimpleImputer(strategy=\"constant\", fill_value=\"NA\"),\n OneHotEncoder(handle_unknown=\"ignore\", sparse=False),\n )\n\n # full pipeline for data preprocessing\n full_pipe = ColumnTransformer(\n [(\"num\", num_pipe, num_cols), (\"cat\", cat_pipe, cat_cols)]\n )\n return X_train, y_train, X_test, full_pipe", "def transform_data(data_orig):\n\tif isinstance(data_orig, str):\n\t\tdata_orig = pd.read_csv(data_orig)\n\n\tdata = data_orig\n\n\tnum_rows,num_variables = data.shape\n\tall_columns = data.columns.tolist()\n\tclean_data(data,all_columns,ignore_na=False,fill_mode=\"prob\")\n\texpand_features(data)\n\tvariables = ['Pclass','Sex',\"Fare\",\"Age\",\"SibSp\",\"Parch\",\"Embarked\",\"Fam_size\",\\\n\t\t\t\t \"cabin_no\",\"ticket_no\",\"friend\",\"Fare_person\",\"Child\"]\n\tX = pd.get_dummies(data[variables])\n\n\t## normalise features to zero man and unit variance\n\tscaler = preprocessing.StandardScaler().fit(X)\n\tX_scaled = scaler.transform(X)\n\tX = pd.DataFrame(X_scaled, columns=X.columns)\n\n\tif \"Survived\" in data.columns:\n\t\ty = data['Survived']\n\telse:\n\t\ty = None\n\n\treturn X, y", "def transform_features(context, params):\n\n input_features_ds = \"train/sales/features\"\n input_target_ds = \"train/sales/target\"\n\n artifacts_folder = DEFAULT_ARTIFACTS_PATH\n\n # load datasets\n train_X = load_dataset(context, input_features_ds)\n train_y = load_dataset(context, input_target_ds)\n\n cat_columns = train_X.select_dtypes(\"object\").columns\n num_columns = train_X.select_dtypes(\"number\").columns\n\n # Treating Outliers\n outlier_transformer = Outlier(method=params[\"outliers\"][\"method\"])\n train_X = outlier_transformer.fit_transform(\n train_X, drop=params[\"outliers\"][\"drop\"]\n )\n\n # NOTE: You can use ``Pipeline`` to compose a collection of transformers\n # into a single transformer. 
In this case, we are composing a\n # ``TargetEncoder`` and a ``SimpleImputer`` to first encode the\n # categorical variable into a numerical values and then impute any missing\n # values using ``most_frequent`` strategy.\n tgt_enc_simple_impt = Pipeline(\n [\n (\"target_encoding\", TargetEncoder(return_df=False)),\n (\"simple_impute\", SimpleImputer(strategy=\"most_frequent\")),\n ]\n )\n\n # NOTE: the list of transformations here are not sequential but weighted\n # (if multiple transforms are specified for a particular column)\n # for sequential transforms use a pipeline as shown above.\n features_transformer = ColumnTransformer(\n [\n # categorical columns\n (\n \"tgt_enc\",\n TargetEncoder(return_df=False),\n list(\n set(cat_columns)\n - set([\"technology\", \"functional_status\", \"platforms\"])\n ),\n ),\n (\n \"tgt_enc_sim_impt\",\n tgt_enc_simple_impt,\n [\"technology\", \"functional_status\", \"platforms\"],\n ),\n # numeric columns\n (\"med_enc\", SimpleImputer(strategy=\"median\"), num_columns),\n ]\n )\n\n # Check if the data should be sampled. This could be useful to quickly run\n # the pipeline for testing/debugging purposes (undersample)\n # or profiling purposes (oversample).\n # The below is an example how the sampling can be done on the train data if required.\n # Model Training in this reference code has been done on complete train data itself.\n sample_frac = params.get(\"sampling_fraction\", None)\n if sample_frac is not None:\n logger.warn(f\"The data has been sample by fraction: {sample_frac}\")\n sample_X = train_X.sample(frac=sample_frac, random_state=context.random_seed)\n else:\n sample_X = train_X\n sample_y = train_y.loc[sample_X.index]\n\n\n # Train the feature engg. pipeline prepared earlier. Note that the pipeline is\n # fitted on only the **training data** and not the full dataset.\n # This avoids leaking information about the test dataset when training the model.\n # In the below code train_X, train_y in the fit_transform can be replaced with\n # sample_X and sample_y if required. \n train_X = get_dataframe(\n features_transformer.fit_transform(train_X, train_y),\n get_feature_names_from_column_transformer(features_transformer),\n )\n\n # Note: we can create a transformer/feature selector that simply drops\n # a specified set of columns. 
But, we don't do that here to illustrate\n # what to do when transformations don't cleanly fall into the sklearn\n # pattern.\n curated_columns = list(\n set(train_X.columns.to_list())\n - set(\n [\n \"manufacturer\",\n \"inventory_id\",\n \"ext_grade\",\n \"source_channel\",\n \"tgt_enc_iter_impt_platforms\",\n \"ext_model_family\",\n \"order_no\",\n \"line\",\n \"inventory_id\",\n \"gp\",\n \"selling_price\",\n \"selling_cost\",\n \"invoice_no\",\n \"customername\",\n ]\n )\n )\n\n # saving the list of relevant columns and the pipeline.\n save_pipeline(\n curated_columns, op.abspath(op.join(artifacts_folder, \"curated_columns.joblib\"))\n )\n save_pipeline(\n features_transformer, op.abspath(op.join(artifacts_folder, \"features.joblib\"))\n )", "def features_returns(df_customers, df_receipts, df_returns):\n df_customers_returns = pd.merge(df_customers, df_returns, on='customerId2', how='left')\n\n df_returns['sourceId_10'] = np.where(df_returns['sourceId'] == 10, 1, 0)\n df_customers = _add_column(df_customers, df_returns.groupby('customerId2')['sourceId_10'].sum(), 'sourceId_10')\n df_customers.sourceId_10.fillna(0, inplace=True)\n\n # the returns count for each customer\n has_returns = df_customers_returns[['customerId2', 'returnId']].groupby('customerId2')['returnId'].sum()\n df_customers = _add_column(df_customers, has_returns, 'sum_returns')\n\n # the ratio of items_bought/items_returned\n df_returns_items = df_customers[['customerId2', 'sum_returns', 'sum_itemQty']]\n df_returns_items.set_index('customerId2', inplace=True)\n ratio_items_returns = df_returns_items['sum_itemQty'] / df_returns_items['sum_returns']\n ratio_items_returns = np.where(np.isinf(ratio_items_returns), 0, ratio_items_returns)\n df_customers['ratio_items_returns'] = ratio_items_returns\n\n df_customers.sum_returns.fillna(0, inplace=True)\n df_customers.ratio_items_returns.fillna(0, inplace=True)\n\n df_customers_receipts = pd.merge(df_customers, df_receipts, on='customerId2')\n df_customers_receipts_returns = pd.merge(df_customers_receipts, df_returns, how='left',\n on=['customerId2', 'receiptId', 'productId'])\n\n # the returns count for each receipt\n df_ids = df_customers_receipts_returns[['customerId2', 'receiptId', 'returnId']]\n receipts_with_returns = df_ids[df_ids.returnId.notnull()] \\\n .groupby(['customerId2', 'receiptId']).count().reset_index() \\\n .groupby('customerId2').count()['receiptId']\n df_customers = _add_column(df_customers, receipts_with_returns, 'receipts_with_returns')\n df_customers.receipts_with_returns.fillna(0, inplace=True)\n\n # the ratio of count_orders/count_returned\n df_returns_orders = df_customers[['customerId2', 'sum_orders', 'receipts_with_returns']]\n df_returns_orders.set_index('customerId2', inplace=True)\n ratio_orders_with_returns = df_returns_orders['sum_orders'] / df_returns_orders['receipts_with_returns']\n ratio_orders_with_returns = np.where(np.isinf(ratio_orders_with_returns), 0, ratio_orders_with_returns)\n df_customers['ratio_orders_returns'] = ratio_orders_with_returns\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def __process_data(self, dataF):\n\n dataF['created_time'] = pd.to_datetime(dataF['created_time'])\n dataF['char_cnt'] = dataF['message'].str.len()\n dataF['month'] = dataF['created_time'].dt.month\n dataF['week'] = dataF['created_time'].dt.week\n\n return dataF", "def __init__(self, customer_vendor_full, valid_rating_mean):\r\n 
super(User_CF, self).__init__()\r\n self.customer_vendor_full = customer_vendor_full\r\n self.customer_vendor_ratings = self.select_features()\r\n self.customer_vendor_matrix = self.customer_vendor_ratings.pivot(\r\n index='customer_id', columns='vendor_id', values='mean_rating') # (26779, 100)\r\n self.rating_matrix = self.customer_vendor_matrix.fillna(0).values.astype(np.float32)\r\n self.valid_rating_mean = valid_rating_mean\r\n self.vendor2rating = self.get_vendors_mean()\r\n self.customer_similarity, = self.get_similarity()", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def transform(X, transformer, y=None):\n return pd.DataFrame([random_str_generator() for x in range(len(X))])", "def make_dataset(self):\n # Read raw data\n data = self.read_raw_data()\n self.default_header = list(data.columns.values)\n # Fit the variables on the raw dataset\n self.fit(data.copy())\n return make_df(data, self.features), make_df(data, self.targets)", "def transform(self, data: pd.DataFrame):\n raise NotImplementedError", "def transform(self, X, **transform_params):\n outgoing_df = X[self.features].copy()\n return outgoing_df", "def fit_transform(self) -> DataFrame:\n\n self.fit()\n return self.transform()", "def transform(self, df):\n\t\tdf = self.__parse_json(df)\n\t\tdf = self.__fillnan(df)\n\t\tdf = self.__parse_dates(df)\n\t\tdf['budget'] = df['budget'].apply(lambda x: self.missing_budget_imputing if int(x) == 0 else x)\n\t\tdf['has_collections'] = df['belongs_to_collection'].isna().astype(int)\n\t\tdf['homepage'] = df['homepage'].isna().astype(int)\n\t\tdf['is_en'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n\t\tdf = self.__encode_genre_transform(df)\n\t\tdf = self.__top_countries_and_companies_transform(df)\n\t\tdf = self.__bin_columns_transform(df)\n\t\tdf.drop(\n\t\t\t['release_date', 'original_language', 'production_countries', 'production_companies', 'id', 'backdrop_path',\n\t\t\t 'imdb_id', 'poster_path', 'video', 'belongs_to_collection', 'status', 'runtime',\n\t\t\t 'original_title', 'overview', 'tagline', 'title'], axis=1, inplace=True)\n\t\treturn df", "def prep_corals(df):\n \n # dropping duplicate rows since all rows should have unique sample ids at a minimum\n df.drop_duplicates(inplace = True)\n \n # dropping specified columns\n df = df.drop(columns = ['CatalogNumber', 'SampleID', 'SurveyID', 'EventID', 'LocationAccuracy', \n 'Station', 'Locality', 'DepthMethod', 'ScientificName', 'TaxonRank'])\n\n # dropping all null values\n df = df.dropna()\n\n # converting ObservationDate to datetime format\n df['ObservationDate']= pd.to_datetime(df['ObservationDate'])\n\n # adding underscores to various column names\n df.columns = ['Data_Provider', 'Vernacular_Name_Category', 'Observation_Date', 'latitude', \n 'longitude', 'Depth_Meters', 'Repository', 'Identification_Qualifier', 'Sampling_Equipment', 'Record_Type']\n\n # lower casing all column names\n df.columns = df.columns.str.lower()\n\n # lower casing all string values\n df = df.applymap(lambda string:string.lower() if type(string) == str else string)\n\n # filtering out all rows with negative meters\n df = df[df.depth_meters >= 0]\n\n # filtering out all creatures that are not corals\n df = df[df.vernacular_name_category.str.contains('coral') & (df.vernacular_name_category.str.contains('hydrozoan') == False)]\n\n # returning df\n return df", "def data(self):\n dfdata = pd.concat([self.weights, self.returns, self.category], axis=1)\n dfdata.columns = ['weights', 'returns', self.category_name]\n if 
self.period is not None:\n dfdata['date'] = self.period\n return dfdata", "def transform(self, X):\n check_array(X)\n X = _check_X(X)\n if not (0 <= len(self._k_features) <= X.shape[1]):\n raise ValueError(\"Cannot transform Data\")\n df_out = self._feat.transform(X)\n # x_train = pd.DataFrame(df_out, columns=self._k_features)\n return df_out", "def init_model_df(self):\n\n self.model_df = pd.DataFrame(columns=self.query_df[self.column_name].unique())\n\n # add _TIMESTAMP column to dataframe\n self.model_df[self.column_index] = self.min_increments\n\n # set row index to _TIMESTAMP\n self.model_df.set_index(self.column_index, inplace=True)", "def forward(self, raw_X):\n return make_df(raw_X, self.features)", "def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n data = data[['premise', 'hypothesis', 'label']] # type: ignore\n return data", "def build_main_dataset(\n self, ratings: pd.DataFrame, users: pd.DataFrame, prefetch_to_gpu: bool\n ) -> torch.utils.data.Dataset:\n main_dataset_builder = self.get_main_dataset()\n return main_dataset_builder(ratings, users, prefetch_to_gpu)", "def features_past_generation(features_creation_function,\n days,\n feature_names_prefix,\n data,\n indices):\n matches_outcomes=[]\n for i,match_indice in enumerate(indices):\n match=data.iloc[match_indice,:]\n past_matches=data[(data.Date<match.Date)&(data.Date>=match.Date-datetime.timedelta(days=days))]\n match_features_outcome_1=features_creation_function(1,match,past_matches)\n match_features_outcome_2=features_creation_function(2,match,past_matches)\n matches_outcomes.append(match_features_outcome_1)\n matches_outcomes.append(match_features_outcome_2)\n if i%100==0:\n print(str(i)+\"/\"+str(len(indices))+\" matches treated. \"+ features_creation_function.__name__ + str(days))\n train=pd.DataFrame(matches_outcomes)\n train.columns=[feature_names_prefix + \"_\" + str(days) +\"_\" +str(i) for i in range(len(train.columns))]\n \n \n \n return train", "def _create_customers(self, customer_name=\"Alex\"):\n test_customer = Customer(\n name=customer_name,\n address=\"Washington Square Park\",\n phone_number=\"555-555-1234\",\n email=\"[email protected]\",\n credit_card=\"VISA\",\n active = True\n )\n return test_customer", "def get_mall_data(): \n filename = 'mall_customers.csv'\n \n if os.path.isfile(filename):\n return pd.read_csv(filename, index_col=0)\n else: \n df = pd.read_sql(\"\"\"select * from customers\"\"\", get_connection('mall_customers'))\n df.to_csv(filename)\n return df", "def fit_transform(self, X_seq):\n print('\\n====== Transformed data summary ======')\n\n # Instantiate the vectorizer object\n vectorizer = TfidfVectorizer(analyzer= self.analyzer)\n\n # Create tokens from all dataset matrix\n count_wm = vectorizer.fit_transform(X_seq)\n count_tokens = vectorizer.get_feature_names()\n\n # DF: [100000 rows x 801 columns] (for dataset1 = 100K)\n # [each row = 1 log message] , [each column = 1 word]\n df_countvect = pd.DataFrame(data=count_wm.toarray(), columns=count_tokens)\n\n print(\".Count Vectorizer results.\\n\")\n print(df_countvect)\n\n # Print the vector representation for a log message (print 1 row from df)\n print(\"DEBUG_0 : \" ,df_countvect.loc[[20500]])\n\n # Get the first position of the maximum value for each word\n m = df_countvect.ne(0).idxmax()\n df = pd.DataFrame(dict(pos=m, val=df_countvect.lookup(m, m.index)))\n print(df)\n\n print('All data shape: {}-by-{}\\n'.format(df_countvect.shape[0], df_countvect.shape[1]))\n\n X_new = df_countvect\n return X_new", "def 
preprocessing_data(df_entity_raw, df_names_raw):\n # Cleaning unused rows\n df_entity_filter = clean.df_cleaning(df_entity_raw)\n # Filter entity contracts: only contracts issued to a mun/dept.\n # Also gets list of names of mun/dept. with contracts with the entity\n df_entity, names_mun_list = clean.df_filter_entity(df_entity_filter)\n\n # Get list of departments and municipalities of Colombia\n df_names = clean.df_cleaning_names(df_names_raw)\n\n # First standardization: names of mun/dept. with contracts with the entity\n names_mun_list = [clean.strip_accents(item) for item in names_mun_list]\n names_mun_standard = []\n for item in names_mun_list:\n if 'MUNICIPIO' in item:\n names_mun_standard.append(clean.standarize_mun(item))\n else:\n names_mun_standard.append(clean.standarize_depto(item))\n\n # Second standardization: accent standardization without accents with official names\n names_mun_standard = clean.standardize_accents_mun(df_names, names_mun_standard)\n\n # Third standardization: format standardization to ensure a right joining\n names_mun_standard = clean.standardize_format_mun(df_names, names_mun_standard)\n\n # Assign new column to entity dataframe\n df_entity = df_entity.assign(nom_raz_soc_stand=names_mun_standard)\n names_mun_standard_list = list(set(names_mun_standard))\n\n return df_entity, names_mun_standard_list", "def transform(self, incoming_df, **transform_params):\n outgoing_df = incoming_df.copy()\n outgoing_df[self.new_feature_name] = outgoing_df[self.feature].apply(self.get_time_of_day)\n return outgoing_df", "def preprocessing(self):\n print(\"This may take a while, please grab a coffee. Average wait time: 2 - 6 mins.\")\n print(\"Loading data... \")\n df = ExternalDataRetrieval().get_data()\n\n print(\"Preprocessing data... 
\")\n\n amino_df = pd.DataFrame()\n # Set column names for zinc content dataframe\n zcolumns = ['value', 'group']\n # Set column names for food groups dataframe\n fcolumns = ['ID', 'food', 'group', 'manufacturer']\n # Declare zinc content dataframe\n zinc_df = pd.DataFrame(columns=zcolumns)\n # Declare food group dataframe\n food_df = pd.DataFrame(columns=fcolumns)\n # Doing this one amino acids type at a time.\n for n in AMINO_LIST:\n food = []\n # nutrients components of the food type is further nested in 'nutrients', which its components are further\n # nested\n for i, items in enumerate(df['nutrients']):\n # Iterate through the nutrient type to obtain necessary info.\n # For this project, there are many redundant data in there.\n f_flag = False\n # Only need to set the flag to activate the zinc check for one amino acid loop\n if n == AMINO_LIST[0]:\n z_flag = False\n for item in items:\n # Check to see if this nutrient type is one of the amino acids\n if item.get(\"name\") == n and item.get(\"value\") > 0:\n # If so, add the food type to the amino acid type array\n food.append(df['name'][i]['long'])\n f_flag = True\n # Check to see if this nutrient type is Zinc, only need to do this for one amino acid loop.\n if item.get(\"name\") == Z and n == AMINO_LIST[0]:\n # If so, gets its zinc content value and the food group it is in.\n zinc_df.loc[i] = [item.get(\"value\"), df['group'][i]]\n z_flag = True\n if f_flag and z_flag:\n break\n\n # Build the food group data dataframe one food at a time, only need to do this for one amino acid loop.\n if n == AMINO_LIST[0]:\n food_df.loc[i] = [df['meta'][i]['ndb_no'], df['name']\n [i]['long'], df['group'][i], df['manufacturer'][i]]\n\n # Assemble the amino acid type array in to nutrient dataframe\n fd = pd.DataFrame({n: food})\n # Since the length of each columns varies (amino acid food types appearance in food types varies),\n # there are many NaN in the dataframe as a result. We need to drop the NaN\n fd = fd.dropna()\n amino_df = pd.concat([amino_df, fd], axis=1, ignore_index=True)\n # Add column names to the nutrient dataframe\n amino_df.columns = AMINO_LIST\n print(\"Good news, preprocessing completed successfully! \")\n return amino_df, zinc_df, food_df", "def client_addons_df(generate_data, locale_limits):\n snippets = []\n for locale, maxn in locale_limits.items():\n # Copy all the clients for each locale, tagging the client ID with\n # the locale.\n for cid, cdata in SAMPLE_CLIENT_DATA.items():\n tagged_cid = \"{}_{}\".format(locale, cid)\n client_snippets = generate_rows_for_client(tagged_cid, cdata)\n for s in client_snippets:\n s[\"locale\"] = locale\n snippets.extend(client_snippets)\n\n # Add a dummy locale that should get dropped in processing.\n client_snippets = generate_rows_for_client(\n \"client_fr\", SAMPLE_CLIENT_DATA[\"client-4\"]\n )\n for s in client_snippets:\n s[\"locale\"] = \"fr\"\n snippets.extend(client_snippets)\n\n df = generate_data(snippets)\n df.createOrReplaceTempView(\"clients_daily\")\n return df", "def preprocess_data(self):\n # Fault and cavity models use same data and features. Get that now.\n signals = get_signal_names(cavities=['1', '2', '3', '4', '5', '6', '7', '8'],\n waveforms=['GMES', 'GASK', 'CRFP', 'DETA2'])\n\n # We need to crop, downsample, then do z-score. 
Any constant values are set to 0.001 manually.\n num_resample = 4096\n num_meta_columns = 8\n self.common_features_df = window_extractor(self.example, signals=signals, windows={'pre-fault': -1533.4},\n n_samples=7680, standardize=False, downsample=True,\n ds_kwargs={'num': num_resample})\n\n # The extractor makes a row per requested window plus some metadata. Columns are named\n # Sample_<sample_num>_<cav_num>_<signal>, and go Sample_1_1_GMES, Sample_2_1_GMES, ..., Sample_1_1_GASK, ....\n # We want to change this so that each column is all of the samples for 1_GMES, 1_GASK, ... as in the signal\n # order above.\n self.common_features_df = pd.DataFrame(\n self.common_features_df.iloc[0, num_meta_columns:].values.reshape(len(signals), -1).T, columns=signals)\n\n self.common_features_df = standard_scaling(self.common_features_df, fill=0.001)", "def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:\n\n X_encoded = X.copy(deep=True)\n orig_features = X_encoded.columns\n self.tabular_model.model.eval()\n inference_dataloader = self.tabular_model.datamodule.prepare_inference_dataloader(X_encoded)\n logits_predictions = defaultdict(list)\n for batch in track(inference_dataloader, description=\"Generating Features...\"):\n for k, v in batch.items():\n if isinstance(v, list) and (len(v) == 0):\n # Skipping empty list\n continue\n batch[k] = v.to(self.tabular_model.model.device)\n if self.tabular_model.config.task == \"ssl\":\n ret_value = {\"backbone_features\": self.tabular_model.model.predict(batch, ret_model_output=True)}\n else:\n _, ret_value = self.tabular_model.model.predict(batch, ret_model_output=True)\n for k in self.extract_keys:\n if k in ret_value.keys():\n logits_predictions[k].append(ret_value[k].detach().cpu())\n\n for k, v in logits_predictions.items():\n v = torch.cat(v, dim=0).numpy()\n if v.ndim == 1:\n v = v.reshape(-1, 1)\n for i in range(v.shape[-1]):\n if v.shape[-1] > 1:\n X_encoded[f\"{k}_{i}\"] = v[:, i]\n else:\n X_encoded[f\"{k}\"] = v[:, i]\n\n if self.drop_original:\n X_encoded.drop(columns=orig_features, inplace=True)\n return X_encoded", "def generate_data():\n player_df = get_players_df(2018)\n stats_df = construct(2018, player_df[\"PlayerID\"])\n stats_df['NAME'] = player_df['FirstName'] + \" \" + player_df['LastName']\n stats_df[\"MPG\"] = pd.to_numeric(stats_df[\"MPG\"])\n stats_df.drop(stats_df[stats_df[\"MPG\"] < 15].index, inplace=True)\n stats_df.to_csv(\"data.csv\", index=False)", "def reshape_data(self):\n # Initialise empty dataframe\n od_data = pd.DataFrame(columns=['Origin','Destination','Cost','Flow','OriginSupply','DestinationDemand'])\n # Loop over origins and destinations to populate dataframe\n for i,orig in tqdm(enumerate(self.origins),total=len(self.origins)):\n for j,dest in enumerate(self.destinations):\n # Add row properties\n new_row = pd.Series({\"Origin\": orig,\n \"Destination\": dest,\n \"Cost\": self.cost_matrix[i,j],\n \"OriginSupply\": self.origin_supply[i],\n \"DestinationDemand\":self.destination_demand[j]})\n # Append row to dataframe\n od_data = od_data.append(new_row, ignore_index=True)\n\n # Get flatten data and et column types appropriately\n orig_supply_flat = od_data.OriginSupply.values.astype('float64')\n dest_demand_flat = od_data.DestinationDemand.values.astype('float64')\n cost_flat = od_data.Cost.values.astype('float64')\n\n return orig_supply_flat,dest_demand_flat,cost_flat", "def create_dummy_feature(movie_df):\n movie_df.director.value_counts()[:5]\n N = 4\n top_directors = 
movie_df.director.value_counts().index[:N]\n top_dir_movies = movie_df[movie_df['director'].isin(top_directors)]\n dummies = pd.get_dummies(top_dir_movies['director'])\n movie_df_dir = pd.merge(movie_df, dummies, left_index=True, right_index=True, how='left')\n #fill in missing value for the directors to 0\n movie_df_dir['Joel Schumacher'].fillna(0,inplace=True)\n movie_df_dir['Ridley Scott'].fillna(0,inplace=True)\n movie_df_dir['Steven Spielberg'].fillna(0,inplace=True)\n movie_df_dir['Woody Allen'].fillna(0,inplace=True)\n\n features = ['production_budget','widest_release', 'number_of_theaters_open',\n 'Steven Spielberg',\n 'Woody Allen',\n 'Ridley Scott',\n 'Joel Schumacher']\n related_columns = features + ['domestic_gross']\n print related_columns\n\n\n clean_movie_df_dir = movie_df_dir[related_columns].dropna()\n print '%i movies with all necessary info.' % len(clean_movie_df_dir)\n\n import statsmodels.api as sm\n Y = clean_movie_df_dir['domestic_gross']\n X = sm.add_constant(clean_movie_df_dir[features])\n\n\n #split train and test dataset\n from sklearn.cross_validation import train_test_split\n # splits x -> x_train, x_test\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)\n\n #fit model on train set\n model = sm.OLS(Y_train, X_train).fit()\n model.summary()\n\n #fit model on test set\n predicted_gross = model.predict(X_test)\n plt.scatter(X_test.production_budget, Y_test, color='gray')\n plt.plot(X_test.production_budget, predicted_gross)\n plt.title(\"multivariate for Domestic Gross\")\n plt.xlabel(\"Budget ($100M)\")\n plt.ylabel(\"Domestic Gross ($100M)\")\n plt.show()", "def pre_process_df(train_data, test_data):\n train_data[\"text\"] = train_data[\"sentence1\"] + \", \" + train_data[\"sentence2\"] # noqa\n test_data[\"text\"] = test_data[\"sentence1\"] + \", \" + test_data[\"sentence2\"]\n train_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n test_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n train_data = train_data[[\"text\", \"label\"]]\n test_data = test_data[[\"text\", \"label\"]]\n simple_pre_process_text_df(train_data)\n simple_pre_process_text_df(test_data)\n return train_data, test_data", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. 
group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def get_train_and_test_sets(self,\n all_trades_df: pd.DataFrame,\n train_start: pd.Timestamp,\n train_end: pd.Timestamp,\n test_end: pd.Timestamp) -> pd.DataFrame:\n \n # training set\n feature_and_target_cutoff = train_end - pd.Timedelta('120D') # last 4 months of training set\n trades_features = all_trades_df[(all_trades_df['trade_date_dt'] >= train_start) & (all_trades_df['trade_date_dt'] < feature_and_target_cutoff)]\n trades_target = all_trades_df[(all_trades_df['trade_date_dt'] >= feature_and_target_cutoff) & (all_trades_df['trade_date_dt'] < train_end)]\n training_set = self.get_features_and_target(trades_features, trades_target)\n training_set['train_or_test'] = 'train'\n\n # test set\n trades_features = all_trades_df[(all_trades_df['trade_date_dt'] >= train_start) & (all_trades_df['trade_date_dt'] < train_end)]\n trades_target = all_trades_df[(all_trades_df['trade_date_dt'] >= train_end) & (all_trades_df['trade_date_dt'] < test_end)]\n test_set = self.get_features_and_target(trades_features, trades_target)\n test_set['train_or_test'] = 'test'\n \n customer_df = pd.concat([training_set, test_set], sort=False)\n customer_df.fillna(False, inplace=True)\n customer_df.index.name = 'sf_account_id'\n \n return customer_df", "def prepare_dataset_encoder(self):\n calendar, sales_train, prices_df = self.calendar, self.sales_train, self.prices_df\n agg_endog, agg_idx, agg_sales_train = self.agg_endog, self.agg_idx, self.agg_sales_train\n \n #Prepare exog dataset ---------------------------------------------------------------\n #Prepare calendar exog: event_type & wday on a date\n calendar_exog = pd.DataFrame(index=calendar.index)\n for event_type in ['Sporting', 'Cultural', 'National', 'Religious']:\n calendar_exog['is_{}'.format(event_type)] = np.where((calendar.loc[calendar_exog.index, ['event_type_1', 'event_type_2']] == event_type).any(axis=1), 1, 0)\n wday_encoder = OneHotEncoder(drop='first', sparse=False) #drop Sat.\n wday_df = pd.DataFrame(wday_encoder.fit_transform(calendar.loc[calendar_exog.index, ['wday']]), columns=['w7'] + ['w{}'.format(i) for i in range(1,6)])\n calendar_exog = pd.concat([calendar_exog, wday_df], axis=1)\n \n #Prepare snap_exog: if there is snap event on that date & dept_store ts\n snap_exog = pd.DataFrame(0., index=calendar.index, columns=agg_endog.columns)\n for idx in snap_exog.columns:\n state = sales_train[agg_idx == idx].state_id.unique()[0]\n snap_exog[idx] = calendar.loc[snap_exog.index, 'snap_{}'.format(state)]\n \n #Prepare price discount on that date & dept_store ts\n price_exog = pd.DataFrame(index=calendar.index, columns=agg_endog.columns) #mean price across item_store for a dept_store ts\n for idx in price_exog.columns:\n price_exog[idx] = prices_df.T.loc[agg_idx == idx].mean()\n price_discount = price_exog / price_exog.max() #normalized\n \n self.calendar_exog = calendar_exog\n self.snap_exog = snap_exog\n self.price_discount = price_discount\n \n #Prepare encoder ----------------------------------------------------------------------\n #Create encoder for dept_store_id\n dept_store_encoder = OneHotEncoder(drop='first', sparse=False).fit(agg_sales_train[['dept_id', 'store_id']])\n \n #Create encoder for event name\n calendar['event_name_1'].fillna('missing', inplace=True)\n event_encoder = LabelEncoder().fit(calendar['event_name_1'])\n \n 
self.dept_store_encoder = dept_store_encoder\n self.event_encoder = event_encoder", "def main(args):\r\n print(\"Anonymizing ...\")\r\n print(args)\r\n\r\n file_loc = args.file\r\n origdata = pd.read_csv(file_loc, sep=',', low_memory=False)\r\n\r\n print(origdata)\r\n\r\n print(args.col_customer)\r\n\r\n ID_field = args.col_customer #TBU CustomerID field name\r\n TS_field = args.col_time #TBU TS field name \r\n\r\n sub = origdata.loc[:,:] #Uncomment for applying on complete dataset\r\n\r\n def anonymizeID(data,ID_field):\r\n df=pd.DataFrame(data.loc[:,ID_field])\r\n df2=df.applymap(lambda x: ((HMAC.new(b\"key\", bytes(x), MD5)).hexdigest()))\r\n return df2.loc[:,ID_field]\r\n\r\n def shiftTimeForTrace(data,TS_field):\r\n # print(df.head())\r\n df=pd.DataFrame(data.loc[:,TS_field])\r\n df2 = df.loc[:,TS_field].apply(lambda x: pd.to_datetime(x)) \r\n # print(df2.head())\r\n rand_days = random.randint(-5,5) #range can be updated\r\n df2 = df2 + pd.DateOffset(days=rand_days)\r\n # print(df2.head())\r\n return df2\r\n\r\n #OG subset for reference\r\n print(sub.head())\r\n\r\n # sub1=sub.sort_values(by=ID_field)\r\n sub1 = sub.loc[:,:]\r\n uniqueIDs = list(sub[ID_field].unique())\r\n # sub2 = sub.loc[:,TS_field].apply(lambda x: pd.to_datetime(x))\r\n for ID in uniqueIDs:\r\n sub3=sub1.loc[sub1[ID_field] == ID]\r\n sub3.loc[:,TS_field]=shiftTimeForTrace(sub3,TS_field)\r\n # print(sub1.loc[sub1[ID_field] == ID][TS_field])\r\n # print(pd.DataFrame(sub3[TS_field]))\r\n sub1.loc[sub1[ID_field] == ID,TS_field] = pd.DataFrame(sub3[TS_field])\r\n\r\n # Results post TS shift\r\n print(sub1.head())\r\n\r\n sub4 = sub1.loc[:,:]\r\n sub4.loc[:,ID_field] = anonymizeID(sub4,ID_field)\r\n\r\n #Results post ID anonymization\r\n print(sub4.head())\r\n sub4.to_csv('out.csv',index=False)", "def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test", "def _transform(\n df_orders: pd.DataFrame,\n df_barcodes: pd.DataFrame,\n allow_useless_vouchers: bool = True,\n) -> pd.DataFrame:\n df_vouchers: pd.DataFrame = df_orders.merge(\n df_barcodes, on=[\"order_id\"], how=\"left\"\n ).astype({\"barcode\": pd.Int64Dtype()})\n\n if not allow_useless_vouchers:\n df_vouchers = df_vouchers.dropna(subset=[\"barcode\"])\n\n return (\n df_vouchers.sort_values(by=[\"customer_id\", \"order_id\"])\n .groupby([\"customer_id\", \"order_id\"])\n .apply(lambda df_group: df_group[\"barcode\"].dropna().tolist())\n .reset_index()\n .rename(columns={0: \"barcodes\"})\n )", "def preprocess_data(self, data: pd.DataFrame, stage: str = \"inference\") -> Tuple[pd.DataFrame, list]:\n added_features = None\n if self.config.encode_date_columns:\n data, added_features = self._encode_date_columns(data)\n # The only features that are added are the date features extracted\n # from the date which are categorical in nature\n if (added_features is not None) and (stage == \"fit\"):\n logger.debug(f\"Added {added_features} features after encoding the date_columns\")\n self.config.categorical_cols += added_features\n # Update the categorical dimension in config\n 
self.config.categorical_dim = (\n len(self.config.categorical_cols) if self.config.categorical_cols is not None else 0\n )\n # Encoding Categorical Columns\n if len(self.config.categorical_cols) > 0:\n data = self._encode_categorical_columns(data, stage)\n\n # Transforming Continuous Columns\n if (self.config.continuous_feature_transform is not None) and (len(self.config.continuous_cols) > 0):\n data = self._transform_continuous_columns(data, stage)\n # Normalizing Continuous Columns\n if (self.config.normalize_continuous_features) and (len(self.config.continuous_cols) > 0):\n data = self._normalize_continuous_columns(data, stage)\n # Converting target labels to a 0 indexed label\n data = self._label_encode_target(data, stage)\n # Target Transforms\n data = self._target_transform(data, stage)\n return data, added_features", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n\n numeric = HEADER.as_feature_indices(\n [\n 'Unnamed: 0', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9',\n 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18',\n 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27',\n 'V28', 'amt'\n ]\n )\n\n # These features contain a relatively small number of unique items.\n\n categorical = HEADER.as_feature_indices(['amt'])\n\n numeric_processors = Pipeline(\n steps=[\n (\n 'robustimputer',\n RobustImputer(strategy='constant', fill_values=nan)\n )\n ]\n )\n\n categorical_processors = Pipeline(\n steps=[\n ('thresholdonehotencoder', ThresholdOneHotEncoder(threshold=635))\n ]\n )\n\n column_transformer = ColumnTransformer(\n transformers=[\n ('numeric_processing', numeric_processors, numeric\n ), ('categorical_processing', categorical_processors, categorical)\n ]\n )\n\n return Pipeline(\n steps=[\n ('column_transformer', column_transformer\n ), ('robuststandardscaler', RobustStandardScaler())\n ]\n )", "def create_features_from_transaction_timestamp(data):\n utils.save_log('{0} :: {1}'.format(\n create_features_from_transaction_timestamp.__module__,\n create_features_from_transaction_timestamp.__name__))\n\n data = data.withColumn('TransactionHour',\n hour(data[config.feature_column_timestamp]))\n data = data.withColumn('TransactionDayOfWeek',\n dayofweek(data[config.feature_column_timestamp]))\n data = data.withColumn('TransactionDayOfYear',\n dayofyear(data[config.feature_column_timestamp]))\n data = data.withColumn('TransactionWeekOfYear',\n weekofyear(data[config.feature_column_timestamp]))\n\n data = data.withColumn('WeekAction',\n when(col('TransactionWeekOfYear').\n between(50, 52), 1).\n otherwise(0))\n\n update_list_features(\"numerical\", ['TransactionHour',\n 'TransactionDayOfWeek',\n 'TransactionDayOfYear',\n 'TransactionWeekOfYear',\n 'WeekAction'])\n\n return data", "def transform(self, requests):\n self.logger.debug(\"starting predict\")\n self.logger.debug(\"request count: {}\".format(requests.shape[0]))\n\n preds = [self.predict_one(row['user'], row['movie']) for i, row in requests.iterrows()]\n\n requests['rating'] = preds\n return requests", "def reframe_df(previous_df, processed_data):\n idx = previous_df.index\n col = previous_df.columns\n df = pd.DataFrame(data=processed_data, index=idx, columns=col)\n return df", "def make_features(self, x_hits, y_hits, dow, lagged_hits, pf_age, pf_si, pf_network, pf_gender, page_ix, pf_price_cat,\n page_popularity, quarter_autocorr):\n # Split day of week to train and test\n x_dow, y_dow = tf.split(dow, [self.train_window, self.predict_window], axis=0)\n\n # Normalize 
hits\n mean = tf.reduce_mean(x_hits)\n std = tf.sqrt(tf.reduce_mean(tf.squared_difference(x_hits, mean)))\n norm_x_hits = (x_hits - mean) / std\n norm_y_hits = (y_hits - mean) / std\n norm_lagged_hits = (lagged_hits - mean) / std\n\n # Split lagged hits to train and test\n x_lagged, y_lagged = tf.split(norm_lagged_hits, [self.train_window, self.predict_window], axis=0)\n\n # Combine all page features into single tensor\n stacked_features = tf.stack([page_popularity, quarter_autocorr])\n flat_ucdoc_features = tf.concat([pf_age, pf_si, pf_network, pf_gender, pf_price_cat, stacked_features], axis=0) #pf_region\n ucdoc_features = tf.expand_dims(flat_ucdoc_features, 0)\n\n # Train features\n x_features = tf.concat([\n # [n_days] -> [n_days, 1]\n tf.expand_dims(norm_x_hits, -1),\n x_dow,\n x_lagged,\n # Stretch ucdoc_features to all training days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, [self.train_window, 1])\n ], axis=1)\n\n # Test features\n y_features = tf.concat([\n # [n_days] -> [n_days, 1]\n y_dow,\n y_lagged,\n # Stretch ucdoc_features to all testing days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, [self.predict_window, 1])\n ], axis=1)\n\n return x_hits, x_features, norm_x_hits, x_lagged, y_hits, y_features, norm_y_hits, mean, std, flat_ucdoc_features, page_ix" ]
[ "0.70902693", "0.6727287", "0.6064118", "0.60265195", "0.6024846", "0.5994544", "0.596741", "0.58626324", "0.584423", "0.5838772", "0.5787832", "0.5785175", "0.5773647", "0.5766124", "0.5743439", "0.57312465", "0.57225657", "0.5721255", "0.5717279", "0.5693215", "0.5649832", "0.56496465", "0.55862856", "0.5574678", "0.5563245", "0.54700917", "0.54481316", "0.5442918", "0.5426506", "0.5424589", "0.541944", "0.5346637", "0.5334154", "0.5301547", "0.5285439", "0.52787125", "0.5278005", "0.52740824", "0.5273717", "0.52553666", "0.52334124", "0.52294046", "0.52286303", "0.5223994", "0.521872", "0.52185464", "0.5207184", "0.5205348", "0.52041525", "0.5200075", "0.51853555", "0.5182008", "0.5171737", "0.51654357", "0.51578355", "0.51453114", "0.5140167", "0.5138616", "0.51335937", "0.5128898", "0.51261836", "0.51165336", "0.51106006", "0.5103016", "0.5102217", "0.50977945", "0.50922567", "0.5086296", "0.5086292", "0.50805604", "0.5071959", "0.5068456", "0.50624746", "0.5061499", "0.5060318", "0.5054235", "0.5050062", "0.50499403", "0.50494397", "0.50368375", "0.50351995", "0.50291324", "0.50233334", "0.50232726", "0.50202435", "0.5013346", "0.5012598", "0.5003714", "0.50030667", "0.5000891", "0.49990094", "0.49961096", "0.49901384", "0.49891075", "0.49874768", "0.49865815", "0.498588", "0.49855185", "0.49731523", "0.49728274" ]
0.80397373
0
Build dataframe df_customers from transformed data. The transformed data are loaded from files dumped by the NLP, Time and RFM feature steps. See data_transform()
def df_customers_fileRead(self): #------------------------------------------------------------------------- # RFM features are restored #------------------------------------------------------------------------- df_customers_rfm \ = p5_util.object_load(self.df_customers_rfm_fileName) self.strprint("RFM features : "+str(df_customers_rfm.shape)) #------------------------------------------------------------------------- # Time features are restored #------------------------------------------------------------------------- df_customers_timeFeature \ = p5_util.object_load(self._df_customers_timeFeature_fileName) self.strprint("Time features : "+str(df_customers_timeFeature.shape)) #------------------------------------------------------------------------- # NLP features are restored #------------------------------------------------------------------------- df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName) self.strprint("NLP features : "+str(df_customers_nlp.shape)) if False: df_customers_rfm = self._df_customers_rfm.copy() df_customers_timeFeature = self._df_customers_timeFeature.copy() df_customers_nlp = self._df_customers_pca_nlp.copy() #------------------------------------------------------------------------- # Dataframe are aggregated; note that indexes are customerID. #------------------------------------------------------------------------- df_customers = pd.DataFrame() df_customers = pd.concat([df_customers,df_customers_rfm], axis=1) df_customers = pd.concat([df_customers,df_customers_timeFeature]\ , join='inner', axis=1) df_customers = pd.concat([df_customers,df_customers_nlp]\ , join='inner', axis=1) self.strprint("All features : "+str(df_customers.shape)) #---------------------------------------------------------------------- # Dataframe is dumped into a file #---------------------------------------------------------------------- p5_util.object_dump(df_customers, self._df_customers_fileName) if False: #---------------------------------------------------------------------- # Dataframe is copied as an attribute #---------------------------------------------------------------------- self._df_customers = df_customers.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return", "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def load_customers(dir):\n customSchema = StructType([ \\\n StructField(\"customerId2\", IntegerType(), True), \\\n StructField(\"churnlabel\", IntegerType(), True), \\\n StructField(\"gender\", StringType(), True), \\\n StructField(\"shippingCountry\", StringType(), True), \\\n StructField(\"dateCreated\", StringType(), True), \\\n StructField(\"yearOfBirth\", IntegerType(), True), \\\n StructField(\"premier\", IntegerType(), True)])\n\n df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(header='false', delimiter='\\t', nullValue='\\\\N') \\\n .load(get_dir_customers(dir) + '/*', schema=customSchema)\n\n return df", "def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # 
Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n #----------------------------------------------------------------------\n self._df_customers_timeFeature = df_customers_timeFeature.copy()\n return", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n 
self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = 
pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def make_data(dataFname, enc, features=None):\n\n origData = pandas.read_csv(dataFname)\n ids = origData['id']\n\n # remove unused columns\n if 'Unnamed: 0' in origData.columns: del origData['Unnamed: 0']\n del origData['id']\n\n # remove \"data leakage\" columns\n for f in prohobitedFeatures:\n del origData[f]\n\n # separate into X & y values\n xData = origData[[col for col in origData.columns if not col=='loss']]\n set_vars_as_type(xData, discreteVars, object)\n yVec = origData.loss if 'loss' in origData.columns else None\n\n # try f528 - f274\n xData['f528f274'] = xData['f528'] - xData['f274']\n\n # encode the categorical features f776 and f777\n if enc is None:\n enc = OneHotEncoder(n_values=[2, 2])\n enc.fit(xData[['f776', 'f777']])\n\n xData[['f776_isZero', 'f776_isOne', 'f777_isZero', 'f777_isOne']] = pandas.DataFrame(enc.transform(xData[['f776', 'f777']]).toarray())\n del xData['f776']\n del xData['f777']\n\n print_missing_values_info(origData)\n\n # feature selection\n if features:\n filteredXData = xData[features]\n else: # use ALL features\n filteredXData = xData\n\n return filteredXData, yVec, ids, enc", "def process_customers(self, customers_file):\n\t\tmin = max = None\n\t\tcustomers = {}\n\t\ttry:\n\t\t\tfor user_id, date_str in self.read_csv_file(customers_file):\n\t\t\t\tdate = self.convert_date(date_str)\n\t\t\t\tmin, max = self.min_max_date(min, max, date)\n\t\t\t\tcustomers[user_id] = date\n\t\texcept ValueError:\n\t\t\traise Exception('Customers file has unexpected format.')\n\n\t\tself.customers = customers\n\t\tself.min = min\n\t\tself.max = max", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def _transform_df(self, data):\n # specify if has FIPS or not\n if self.has_location:\n loc_col_type = \"location\"\n elif not self.has_location:\n loc_col_type = \"location_name\"\n\n out = data.melt(\n id_vars=[\"dt\", loc_col_type], value_vars=self.crename.keys()\n ).dropna()\n out.loc[:, \"value\"] = pd.to_numeric(out[\"value\"])\n out = self.extract_CMU(out, self.crename)\n out[\"vintage\"] = self._retrieve_vintage()\n\n cols_to_keep = [\n \"vintage\",\n \"dt\",\n loc_col_type,\n \"category\",\n \"measurement\",\n \"unit\",\n \"age\",\n \"race\",\n \"ethnicity\",\n \"sex\",\n \"value\",\n ]\n return out.loc[:, cols_to_keep]", "def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += 
[(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df", "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def get_mall_data(): \n filename = 'mall_customers.csv'\n \n if os.path.isfile(filename):\n return pd.read_csv(filename, index_col=0)\n else: \n df = pd.read_sql(\"\"\"select * from customers\"\"\", get_connection('mall_customers'))\n df.to_csv(filename)\n return df", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])", "def to_learn(trxfile, cardfile, custfile, trainfile, testfile):\n feature_df = to_feature(trxfile, cardfile, custfile)\n feature_df.loc[:] = preprocessing.scale(feature_df)\n #feature_df.loc[:] = preprocessing.normalize(feature_df, norm='l2')\n \n # card_no, label\n train_df = pandas.read_csv(trainfile, header=None)\n # card_no\n test_df = pandas.read_csv(testfile, header=None)\n\n train_data = feature_df.loc[train_df.loc[:, 0]]\n train_label = train_df.loc[:, 1]\n test_data = feature_df.loc[test_df.loc[:, 0]]\n\n return (train_data.values, train_label.values, test_data.values)", "def 
_get_cus_info(self):\n label_enc = LabelEncoder()\n customer_info = self._inv.drop_duplicates(['customer_code'], keep='last')\n customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',\n 'sales_cen_name', 'sales_region_name', 'province',\n 'city', 'district', 'customer_type', 'is_usable', 'channel_level']]\n customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])\n customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])\n customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])\n customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])\n customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])\n customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])\n customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])\n customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])\n customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])\n customer_info_encoded = customer_info.drop(\n columns=['customer_name', 'sales_cen_code', 'sales_cen_name',\n 'sales_region_name', 'province', 'city', 'district']\n ).set_index('customer_code')\n customer_info.set_index('customer_code', inplace=True)\n customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))\n return customer_info, customer_info_encoded", "def _prep_data(self, data: bytes) -> pd.DataFrame:\n # Convert the bytes into a file-like object\n buffer = io.BytesIO(data)\n\n # Unzip the file and pull out the csv file\n with zipfile.ZipFile(buffer, \"r\") as zip_file:\n csv = zip_file.read(\"QSAR_BCF_Kow.csv\")\n\n # Convert the string into a file-like object\n csv_file = io.BytesIO(csv)\n\n # Read the file-like object into a dataframe\n cols = [\"cas\", \"name\", \"smiles\", \"logkow\", \"kow_exp\", \"logbcf\"]\n df = pd.read_csv(\n csv_file,\n names=cols,\n header=0,\n usecols=[col for col in cols if col not in [\"cas\", \"name\"]],\n )\n\n # Drop NaNs\n df = df.dropna().reset_index(drop=True)\n\n # Encode KOW types\n kow_types = [\"pred\", \"exp\"]\n df[\"kow_exp\"] = df.kow_exp.map(lambda txt: kow_types.index(txt))\n\n # Get maximum SMILE string length\n max_smile = max(len(smile_string) for smile_string in df.smiles)\n\n # Pad SMILE strings\n df[\"smiles\"] = [\n smile_string + \"x\" * (max_smile - len(smile_string))\n for smile_string in df.smiles\n ]\n\n # Split up the SMILE strings into a matrix\n smile_df = pd.DataFrame(df.smiles.map(list).values.tolist())\n\n # Set the column values of the SMILE dataframe\n smile_df.columns = pd.Index(\n [f\"smiles_{idx}\" for idx in range(smile_df.shape[1])]\n )\n\n # Add the smile dataframe to the original dataframe\n df = pd.concat([df, smile_df], axis=1)\n\n # Drop original SMILE feature\n df = df.drop(columns=\"smiles\")\n\n # Put the target variable at the end\n cols = [\"logkow\", \"kow_exp\"]\n cols += [f\"smiles_{idx}\" for idx in range(max_smile)]\n cols += [\"logbcf\"]\n df = df[cols]\n\n # Ensure that the `logkow` column is numeric\n df[\"logkow\"] = pd.to_numeric(df.logkow)\n\n return df", "def make_df(self):\n # read in file\n df = pd.read_csv(self.data_file)\n cols_to_drop = [f'view{x}' for x in range(1,4)]+['response']\n # subtract loc3 viewing from location of interest\n df[self.label_key] = df[self.predictor] - df['view3']\n df.drop(cols_to_drop, axis=1, inplace=True)\n 
df.reset_index(drop=True, inplace=True)\n\n return df", "def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df", "def create_master_table(df_cust: pd.DataFrame,\n df_trans: pd.DataFrame,\n parameters: Dict) -> pd.DataFrame:\n\n df_cust = _process_customers(df_cust, parameters)\n df_trans = _process_transactions(df_trans, parameters)\n\n # join data\n master_table = df_cust.merge(df_trans, on=['customerID'],\n how='left')\n\n # create geo risk ranking\n # temporary solution, if used in final solution, need to prepare in fit/transform maner\n bins = [-np.inf, 0.049, 0.071, 0.088, 0.107, 0.137, np.inf]\n geo_risk_rank = master_table.groupby('residentialAddress_clean')[['hist_default_sum', 'hist_trans_count']]. \\\n sum().reset_index(). \\\n assign(geo_risk_rank=lambda x: pd.cut(x['hist_default_sum']/x['hist_trans_count'], bins).cat.codes)\n\n master_table = master_table.merge(geo_risk_rank[['residentialAddress_clean', 'geo_risk_rank']], on='residentialAddress_clean', how='left')\n\n # drop clients without transactions\n master_table = master_table.dropna(subset=['default'])\n\n return master_table", "def dataset(self, file, latent_dim = 4, pivot = 0.2):\n data_df = pd.read_csv(file, sep=\"::\", engine='python',\n names=['UserId', 'MovieId', 'Rating', 'Timestamp'])\n print(len(data_df))\n data_df['avg_score'] = data_df.groupby(by='UserId')['Rating'].transform('mean')\n # feature columns\n user_num, item_num = data_df['UserId'].max() + 1, data_df['MovieId'].max() + 1\n feature_columns = [[self.denseFeature('avg_score')],\n [self.sparseFeature('user_id', user_num, latent_dim),\n self.sparseFeature('item_id', item_num, latent_dim)]]\n # split train dataset and test dataset\n watch_count = data_df.groupby(by='UserId')['MovieId'].agg('count')\n print(\"分割后\"+str(pivot*100)+\"%作为数据集\\n\")\n test_df = pd.concat([data_df[data_df.UserId == i].iloc[int((1 - pivot) * watch_count[i]):] for i in (watch_count.index)], axis=0)\n print(test_df.head())\n test_df = test_df.reset_index()\n train_df = data_df.drop(labels=test_df['index'])\n # 删除非需求列\n train_df = train_df.drop(['Timestamp'], axis=1).sample(frac=1.).reset_index(drop=True)\n test_df = test_df.drop(['index', 'Timestamp'], axis=1).sample(frac=1.).reset_index(drop=True)\n train_X = [train_df['avg_score'].values, train_df[['UserId', 'MovieId']].values]\n train_y = train_df['Rating'].values.astype('int32')\n test_X = [test_df['avg_score'].values, test_df[['UserId', 'MovieId']].values]\n test_y = test_df['Rating'].values.astype('int32')\n return feature_columns, (train_X, train_y), (test_X, test_y)", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def log_transform_features_customer(profile):\n\n view_amount_features = ['max_duration_view_profile', 'view_rate_profile', 'max_amount', \\\n 'min_duration_view_profile', 'min_amount',\\\n 'avg_amount', 'avg_trx_cnt', 'avg_duration_view_profile']\n\n 
profile_transformed = np.log(profile[view_amount_features]+1)\n\n profile = pd.concat([profile[['gender', 'age', 'became_member_on', 'income']]\\\n \t,profile_transformed], axis=1)\n\n profile.drop(columns=['income', 'min_amount', 'avg_amount', 'avg_duration_view_profile']\\\n \t, inplace=True)\n\n u.save_dataframe_to_sql(profile, 'profile')\n\n return profile", "def pre_process_data():\n data_list, header_list = Parser.__parse_csv_data(Parser.training_data_file)\n table = pandas.DataFrame(data_list, columns=header_list)\n table.drop(['date', 'employee id'], axis=1, inplace=True)\n unique_categories = table['category'].unique()\n unique_expense_desc = table['expense description'].unique()\n unique_tax_name = table['tax name'].unique()\n\n column_index = {\n 'input': {},\n 'output': {}\n }\n\n column_index['input']['pre-tax amount'] = {\n 'column_index': 0,\n 'type': 'int'\n }\n\n column_index['input']['tax amount'] = {\n 'column_index': 1,\n 'type': 'int'\n }\n\n index = 2\n\n for i in range(len(unique_expense_desc)):\n column_index['input'][unique_expense_desc[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n index += len(unique_expense_desc)\n\n for i in range(len(unique_tax_name)):\n column_index['input'][unique_tax_name[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n for i in range(len(unique_categories)):\n column_index['output'][unique_categories[i]] = {'value': i}\n\n Parser.__save_column_index(column_index)", "def get_customers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Company' THEN cus.customer_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. 
Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tcus.website as 'Internet',\n\t\t\tcus.tax_id as 'Steuernummer'\n\n\t\tFROM `tabCustomer` cus\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = cus.name\n\t\t\tand par.parenttype = 'Customer'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = cus.name\n\t\t\tand dyn_adr.link_doctype = 'Customer'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)", "def preprocess_data_pandas(raw_data_file: str, features_file: str, cols_to_save: List[str]) -> None:\n\n df = pd.read_csv(raw_data_file)\n\n df.sort_values(by=[\"id\", \"loan_date\"], inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n df[\"loan_date\"] = pd.to_datetime(df['loan_date'], errors='coerce')\n df[\"birthday\"] = pd.to_datetime(df['birthday'], errors='coerce')\n df[\"job_start_date\"] = pd.to_datetime(df['job_start_date'], errors='coerce')\n\n df_grouped_by_id = df.groupby('id')\n\n # Feature nb_previous_loans\n df[\"nb_previous_loans\"] = df_grouped_by_id[\"loan_date\"].rank(method=\"first\") - 1\n\n # Feature avg_amount_loans_previous\n df[\"avg_amount_loans_previous\"] = df_grouped_by_id[\"loan_amount\"].transform(lambda x: x.expanding().mean())\n\n # Feature age\n df['age'] = (pd.to_datetime('today').normalize() - df['birthday']).dt.days // 365\n\n # Feature years_on_the_job\n df['years_on_the_job'] = (pd.to_datetime('today').normalize() - df['job_start_date']).dt.days // 365\n\n # Feature flag_own_car\n df['flag_own_car'] = df.flag_own_car.apply(lambda x: 0 if x == 'N' else 1)\n\n df = df[cols_to_save]\n df.to_csv(features_file, index=False)", "def data_transform_nlp(self):\n df_invoice_line = None\n \n is_build_step = False\n\n if self._vectorizer_nlp is None:\n is_build_step = True\n \n list_no_words=['SET','PACK']\n\n df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \\\n = p5_util.nlp_process(self.df_invoice_line\\\n , 'Description' , vectorizer= self._vectorizer_nlp\\\n , list_no_words=list_no_words, is_verbose= self.is_verbose)\n \n if df_invoice_line is None:\n self.strprint(\"***ERROR : NLP process interrupted!\")\n return\n \n \n #-------------------------------------------------------------------------\n # NLP weights are cumulated (sumerized) per customer\n #-------------------------------------------------------------------------\n if csr_matrix_weights is None:\n csr_matrix_weights \\\n = p5_util.object_load('./data/matrix_weights_NLP.dump')\n else:\n pass\n \n self.strprint(\"df_invoice_line : \"+str(df_invoice_line.shape))\n \n self.dbg_df = df_invoice_line.copy()\n \n root_name = 'w_nlp_'\n self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\\\n , csr_matrix_weights, root_name)\n\n del(csr_matrix_weights)\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n 
#------------------------------------------------------------------------- \n self.strprint(\"self._df_w_nlp : \"+str(self._df_w_nlp.shape))\n\n root_name_pca = 'nlp_pca_'\n n_dim = self._nlp_pca_ndim\n \n df_customers_pca_nlp, self._pca_nlp \\\n = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\\\n , p_is_scale=False, pca=self._pca_nlp)\n \n self.strprint(\"df_customers_pca_nlp : \" +str(df_customers_pca_nlp.shape))\n\n #-------------------------------------------------------------------------\n # Backup of NLP features per customer\n #-------------------------------------------------------------------------\n if is_build_step is True:\n p5_util.object_dump(df_customers_pca_nlp\\\n , self._df_customers_nlp_fileName)\n else:\n self._df_customers_pca_nlp = df_customers_pca_nlp.copy()\n \n return", "def preprocessing(name_file):\n\n db_data = pd.read_csv(name_file).dropna()\n db_data['Timestamp'] = pd.to_datetime(db_data['Timestamp'], unit='s')\n db_data = db_data[db_data['Timestamp'].dt.year >= 2017]\n db_data.reset_index(inplace=True, drop=True)\n db_data = db_data.drop(['Timestamp'], axis=1)\n db_data = db_data[0::60]\n\n n = len(db_data)\n\n # Split data\n train = db_data[0:int(n * 0.7)]\n validation = db_data[int(n * 0.7):int(n * 0.9)]\n test = db_data[int(n * 0.9):]\n\n # Normalize data\n train_mean = train.mean()\n train_std = train.std()\n train = (train - train_mean) / train_std\n validation = (validation - train_mean) / train_std\n test = (test - train_mean) / train_std\n\n return train, validation, test", "def fit_transform(self, df):\n\t\tdf = self.__parse_json(df)\n\t\tdf = self.__fillnan(df)\n\t\tdf = self.__parse_dates(df)\n\t\tdf['budget'] = df['budget'].apply(lambda x: self.missing_budget_imputing if int(x) == 0 else x)\n\t\tdf['has_collections'] = df['belongs_to_collection'].isna().astype(int)\n\t\tdf['homepage'] = df['homepage'].isna().astype(int)\n\t\tdf['is_en'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n\t\tdf = self.__encode_genres(df)\n\t\tdf = self.__top_countries_and_companies(df)\n\t\tdf = self.__bin_columns(df)\n\t\tdf.drop(\n\t\t\t['release_date', 'original_language', 'production_countries', 'production_companies', 'id', 'backdrop_path',\n\t\t\t 'imdb_id', 'poster_path', 'video', 'belongs_to_collection', 'status', 'runtime',\n\t\t\t 'original_title', 'overview', 'tagline', 'title'], axis=1, inplace=True)\n\t\treturn df", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], 
as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def preprocess_data(df, cat_columns, cf_columns):\n \n #define cat and cf columns, convert cat to dummies\n df_cat = pd.get_dummies(df[cat_columns], columns = cat_columns, drop_first=True, dtype=float)\n df_cat.rename(mapper= {\"sex_2\": \"sex_female\", \"education_2\":\"education_university\", \"education_3\": \"education_high_school\", \"education_4\":\"education_others\",\"marital_status_2\":\"marital_status_single\", \"marital_status_3\": \"marital_status_others\"}, axis = 1, inplace = True)\n df_cf = df[cf_columns]\n X = pd.concat([df_cf, df_cat], axis = 1)\n y = df[['default_payment_next_month']]\n print(\"dummy variables created\")\n \n #train-test split \n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n print(\"split done\")\n \n #resample the train sets\n smote = SMOTE(sampling_strategy = \"not majority\", random_state = 42)\n X_train_rs, y_train_rs = smote.fit_sample(X_train, y_train)\n print('original class distribution:')\n print(y[\"default_payment_next_month\"].value_counts())\n print('synthetic sample class distribution:')\n print(pd.Series(y_train_rs).value_counts()) \n return X, X_train_rs, X_test, y_train_rs, y_test", "def preprocessing(df, product_number):\n useless_columns = ['Customers', 'Category', 'Segment', 'Regione', 'Provincia', 'Channel']\n df = df.drop(df[df.Provincia == '**'].index) # Removing 'Estero'\n for column in useless_columns:\n df = df.drop(column, axis=1)\n df = df_filtered_product(df, product_number) # Choose the number of the product\n df = df.groupby(['Data Rif']).sum().reset_index()\n date_range = pd.date_range('2017-01-02', '2019-03-31', freq='D').to_series()\n week_num = len(date_range) // 7\n index = 0\n\n sales = []\n for week in range(0, week_num):\n STU = 0\n for day in range(0, 7):\n if index == len(df):\n break\n elif date_range[week*7 + day] == df['Data Rif'][index]:\n STU += 
df['Standard Units'][index]\n index += 1\n sales.append([date_range[week*7], STU])\n df_fin = pd.DataFrame(sales, columns=['Week', 'STU'])\n df_fin.Week = pd.to_datetime(df_fin.Week)\n df_fin.set_index('Week', inplace=True)\n return df_fin", "def new_df(companies_filtered):\n name = []\n city = []\n latitude = []\n longitude = []\n zip_code = []\n for i in companies_filtered:\n name.append(i['name'])\n try: \n if i['offices'][0]['city'] == '':\n city.append(np.nan)\n else:\n city.append(i['offices'][0]['city'])\n latitude.append(i['offices'][0]['latitude'])\n longitude.append(i['offices'][0]['longitude'])\n except:\n city.append(np.nan)\n latitude.append(np.nan)\n longitude.append(np.nan)\n zip_code.append(np.nan)\n dict_ = {'company' : name, 'city' : city, 'latitude' : latitude, 'longitude': longitude}\n companies_df = pd.DataFrame.from_dict(dict_, orient='columns')\n \n return companies_df", "def pre_processing_(data_df , serialized_objects):\n max_recency_acc_dig = serialized_objects['max_recency_acc_dig'] # These values are taken from trained model values\n max_recency_dig_2yr = serialized_objects['max_recency_dig_2yr'] # These values are taken from trained model values\n max_acc_recency_mf = serialized_objects['max_acc_recency_mf'] #These are values imported in training dataset. Same values needs to be used to impute missing values in unseen data\n\n data_df = data_df.na.fill({\n 'recency_acc_dig' : max_recency_acc_dig, # Filling missing values\n 'recency_dig_2yr' : max_recency_dig_2yr,\n 'acc_recency_mf' : max_acc_recency_mf\n })\n\n freq_acc_upg_2yrs_split = [-float('inf'), 0, 1, 2, float('inf')]\n bucketizer_freq_acc_upg_2yrs = Bucketizer(splits=freq_acc_upg_2yrs_split, inputCol='freq_acc_upg_acc_2yrs', outputCol='freq_acc_upg_acc_2yrs_bkt')\n data_df = bucketizer_freq_acc_upg_2yrs.setHandleInvalid('keep').transform(data_df) # Binning the freq_acc_upg_acc_2yrs column\n\n tot_purchase_split = [-float('inf'), 0, 1, 2, 3, float('inf')]\n bucketizer_tot_purchase = Bucketizer(splits=tot_purchase_split, inputCol='tot_accsry_purchse', outputCol='tot_accsry_purchse_bkt')\n data_df = bucketizer_tot_purchase.setHandleInvalid('keep').transform(data_df) # Binning the tot_accsry_purchse column\n\n del_cols_new = ['freq_acc_upg_acc_2yrs', 'tot_accsry_purchse']\n data_df = data_df.drop(*del_cols_new) # Dropping the older continuous columns\n return data_df", "def prepare_data(args):\n logger.info('Loading dataframe from %s' % args.newspath)\n df = pd.read_csv(args.newspath, encoding='gb18030')\n logger.info('Dataframe size: %d observations %d features after loaded' % (df.shape[0], df.shape[1]))\n\n # exclude rows with column source == NaN\n logger.info('Data cleansing...')\n df = df[~pd.isna(df['source'])]\n logger.info('Dataframe size: %d observations %d features after data cleansing' % (df.shape[0], df.shape[1]))\n\n # split the dataframe into training set and test set\n logger.info('Making training set & test set...')\n train_set, test_set = split_data(df)\n logger.info('Traning set size: %d' % train_set.shape[0])\n logger.info('Test set size: %d' % test_set.shape[0])\n\n # save the train set and test set to picke files\n logger.info('Save dataframes to files...')\n train_set.to_pickle(args.trainpath)\n test_set.to_pickle(args.testpath)", "def prepare_data(train, test):\n # change the name of the target column\n train.rename(columns={\"revenue\": \"target\"}, inplace=True)\n # map bool values to yes and no\n train[\"Weekend\"] = train[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n 
test[\"Weekend\"] = test[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n # set the id col as index\n train.set_index(\"id\", inplace=True)\n test.set_index(\"id\", inplace=True)\n\n # seperate the fetures and the target\n X_train = train.drop(\"target\", axis=1).copy()\n y_train = train[\"target\"].copy()\n X_test = test.copy()\n\n # select numerical and categorical columns\n num_cols = X_train.select_dtypes(exclude=\"object\").columns.tolist()\n cat_cols = X_train.select_dtypes(include=\"object\").columns.tolist()\n\n # numerical pipeline\n num_pipe = make_pipeline(SimpleImputer(strategy=\"mean\"))\n\n # categorical pipeline\n cat_pipe = make_pipeline(\n SimpleImputer(strategy=\"constant\", fill_value=\"NA\"),\n OneHotEncoder(handle_unknown=\"ignore\", sparse=False),\n )\n\n # full pipeline for data preprocessing\n full_pipe = ColumnTransformer(\n [(\"num\", num_pipe, num_cols), (\"cat\", cat_pipe, cat_cols)]\n )\n return X_train, y_train, X_test, full_pipe", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def _normalize_dataset(self):\n\n new_data = []\n columns = ['user_id', 'movie_id', 'rating']\n for line in self.data_file['users']:\n movies_by_user = [\n {'user_id': line['user_id'], 'movie_id': movie_id, 'rating': 5}\n for movie_id in line['movies']\n ]\n new_data.extend(movies_by_user)\n return pd.DataFrame(new_data, columns=columns)", "def create_dataframe():\n df = pd.read_csv(\"data/311-calls.csv\", parse_dates=[\"created\"])\n df[\"created\"] = df[\"created\"].dt.date\n df.drop(columns=[\"incident_zip\"], inplace=True)\n num_complaints = df[\"complaint_type\"].value_counts()\n to_remove = num_complaints[num_complaints <= 30].index\n df.replace(to_remove, np.nan, inplace=True)\n return df", "def preprocessing(self):\n print(\"This may take a while, please grab a coffee. Average wait time: 2 - 6 mins.\")\n print(\"Loading data... \")\n df = ExternalDataRetrieval().get_data()\n\n print(\"Preprocessing data... 
\")\n\n amino_df = pd.DataFrame()\n # Set column names for zinc content dataframe\n zcolumns = ['value', 'group']\n # Set column names for food groups dataframe\n fcolumns = ['ID', 'food', 'group', 'manufacturer']\n # Declare zinc content dataframe\n zinc_df = pd.DataFrame(columns=zcolumns)\n # Declare food group dataframe\n food_df = pd.DataFrame(columns=fcolumns)\n # Doing this one amino acids type at a time.\n for n in AMINO_LIST:\n food = []\n # nutrients components of the food type is further nested in 'nutrients', which its components are further\n # nested\n for i, items in enumerate(df['nutrients']):\n # Iterate through the nutrient type to obtain necessary info.\n # For this project, there are many redundant data in there.\n f_flag = False\n # Only need to set the flag to activate the zinc check for one amino acid loop\n if n == AMINO_LIST[0]:\n z_flag = False\n for item in items:\n # Check to see if this nutrient type is one of the amino acids\n if item.get(\"name\") == n and item.get(\"value\") > 0:\n # If so, add the food type to the amino acid type array\n food.append(df['name'][i]['long'])\n f_flag = True\n # Check to see if this nutrient type is Zinc, only need to do this for one amino acid loop.\n if item.get(\"name\") == Z and n == AMINO_LIST[0]:\n # If so, gets its zinc content value and the food group it is in.\n zinc_df.loc[i] = [item.get(\"value\"), df['group'][i]]\n z_flag = True\n if f_flag and z_flag:\n break\n\n # Build the food group data dataframe one food at a time, only need to do this for one amino acid loop.\n if n == AMINO_LIST[0]:\n food_df.loc[i] = [df['meta'][i]['ndb_no'], df['name']\n [i]['long'], df['group'][i], df['manufacturer'][i]]\n\n # Assemble the amino acid type array in to nutrient dataframe\n fd = pd.DataFrame({n: food})\n # Since the length of each columns varies (amino acid food types appearance in food types varies),\n # there are many NaN in the dataframe as a result. We need to drop the NaN\n fd = fd.dropna()\n amino_df = pd.concat([amino_df, fd], axis=1, ignore_index=True)\n # Add column names to the nutrient dataframe\n amino_df.columns = AMINO_LIST\n print(\"Good news, preprocessing completed successfully! 
\")\n return amino_df, zinc_df, food_df", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def get_customer_stats(self):\n try:\n names, waitings, totals, statuses, destinations, passwords, types, positions = zip(*[(p.name, p.get_waiting_time(),\n p.total_time(), status_to_str(p.status), p.get_position(), p.password, p.fleet_type, p.init_position)\n for p in self.customer_agents.values()])\n except ValueError:\n names, waitings, totals, statuses, destinations, passwords, types, positions = [], [], [], [], [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"waiting_time\": waitings, \"total_time\": totals, \"status\": statuses, \"destination\": destinations, \"password\": passwords, \"fleet_type\": types, \"position\": positions})\n return df", "def read_dataset():\n\n\tonehotencoder = OneHotEncoder(categories=\"auto\", sparse=False)\n\tscaler = StandardScaler(with_mean=False)\n\tcwd = os.getcwd()\n\tfilename = cwd + '/default of credit card clients.xls'\n\tnanDict = {}\n\t#read file and create the dataframe\n\tdf = pd.read_excel(filename, header=1, skiprows=0,\n\t\t\t\t\t\tindex_col=0, na_values=nanDict)\n\n\tdf.rename(index=str,\n\t\t\t\tcolumns={\"default payment next month\": \"defaultPaymentNextMonth\"},\n\t\t\t\tinplace=True)\n\n\tdf = df.drop(df[(df.BILL_AMT1 == 0)&\n\t\t\t\t\t\t(df.BILL_AMT2 == 0)&\n\t\t\t\t\t\t(df.BILL_AMT3 == 0)&\n\t\t\t\t\t\t(df.BILL_AMT4 == 0)&\n\t\t\t\t\t\t(df.BILL_AMT5 == 0)&\n\t\t\t\t\t\t(df.BILL_AMT6 == 0)].index)\n\tdf = df.drop(df[(df.PAY_AMT1 == 0)&\n\t\t\t\t\t\t(df.PAY_AMT2 == 0)&\n\t\t\t\t\t\t(df.PAY_AMT3 == 0)&\n\t\t\t\t\t\t(df.PAY_AMT4 == 0)&\n\t\t\t\t\t\t(df.PAY_AMT5 == 0)&\n\t\t\t\t\t\t(df.PAY_AMT6 == 0)].index)\n\n\t# Creating matrix X and target variable y\n\tX = df.loc[:, df.columns != 'defaultPaymentNextMonth'].values\n\ty = df.loc[:, df.columns == 'defaultPaymentNextMonth'].values\n\n\tonehotencoder = OneHotEncoder(categories ='auto', sparse =False)\n\tscalar = StandardScaler(with_mean = False)\n\n\t#make one hots\n\tX = ColumnTransformer(\n\t\t\t\t[(\"\", onehotencoder, [1,2,3,5,6,7,8,9]),],\n\t\t\t\tremainder=\"passthrough\"\n\t\t\t).fit_transform(X)\n\n\tX = scalar.fit_transform(X)\n\n\treturn X, y", "def data_transform(filename):\n gap = 1\n dirpath = tempfile.mkdtemp()\n pd_list = []\n file_df = pd.read_csv(filename, header = 0)\n for line in range(len(file_df)):\n if line % gap == 0:\n print(line,len(file_df))\n rna_uuid = file_df.iloc[line][\"rna_seq_uuid\"]\n case_uuid = file_df.iloc[line][\"case_uuid\"]\n try:\n df = pd.read_csv(download_rna_seq([rna_uuid], dirpath),sep=\"\\t\",names = ['rna_id','value'])\n df = df.transpose()\n df.columns = df.iloc[0]\n df = df.drop(df.index[0])\n df[\"case_uuid\"] = str(case_uuid)\n pd_list.append(df.transpose())\n except:\n continue\n\n final_df = pd.concat(pd_list, axis=1, sort=False)\n final_df = final_df.transpose()\n\n return final_df", "def produce_init(filename):\n training_dataset = pd.read_csv(f'../Modified Data/{filename}')\n test_dataset = pd.read_csv(f'../Raw Data/test.csv')\n features = list(training_dataset.columns)\n features.remove('SalePrice')\n predict_feature = ['SalePrice']\n\n # Produce Test Data\n test_X = test_dataset.loc[:, features]\n ids_test = test_dataset.loc[:, 'Id']\n\n for column in features:\n if str(training_dataset.loc[:, column].dtype) == 
'object':\n # Initialize encoder\n labelencoder = LabelEncoder()\n # Encode Train Data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna('Missing')\n training_dataset.loc[:, column] = pd.Series(labelencoder.fit_transform(training_dataset.loc[:, column]))\n # Encode Test Data\n test_X.loc[:, column] = test_X.loc[:, column].fillna('Missing')\n test_X.loc[:, column] = pd.Series(labelencoder.fit_transform(test_X.loc[:, column]))\n else:\n # Fix missing values for train data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna(int(training_dataset.loc[:, column].mean()))\n # Fix missing values for test data\n test_X.loc[:, column] = test_X.loc[:, column].fillna(int(test_X.loc[:, column].mean()))\n\n return training_dataset, test_X, ids_test", "def transform_features(context, params):\n\n input_features_ds = \"train/sales/features\"\n input_target_ds = \"train/sales/target\"\n\n artifacts_folder = DEFAULT_ARTIFACTS_PATH\n\n # load datasets\n train_X = load_dataset(context, input_features_ds)\n train_y = load_dataset(context, input_target_ds)\n\n cat_columns = train_X.select_dtypes(\"object\").columns\n num_columns = train_X.select_dtypes(\"number\").columns\n\n # Treating Outliers\n outlier_transformer = Outlier(method=params[\"outliers\"][\"method\"])\n train_X = outlier_transformer.fit_transform(\n train_X, drop=params[\"outliers\"][\"drop\"]\n )\n\n # NOTE: You can use ``Pipeline`` to compose a collection of transformers\n # into a single transformer. In this case, we are composing a\n # ``TargetEncoder`` and a ``SimpleImputer`` to first encode the\n # categorical variable into a numerical values and then impute any missing\n # values using ``most_frequent`` strategy.\n tgt_enc_simple_impt = Pipeline(\n [\n (\"target_encoding\", TargetEncoder(return_df=False)),\n (\"simple_impute\", SimpleImputer(strategy=\"most_frequent\")),\n ]\n )\n\n # NOTE: the list of transformations here are not sequential but weighted\n # (if multiple transforms are specified for a particular column)\n # for sequential transforms use a pipeline as shown above.\n features_transformer = ColumnTransformer(\n [\n # categorical columns\n (\n \"tgt_enc\",\n TargetEncoder(return_df=False),\n list(\n set(cat_columns)\n - set([\"technology\", \"functional_status\", \"platforms\"])\n ),\n ),\n (\n \"tgt_enc_sim_impt\",\n tgt_enc_simple_impt,\n [\"technology\", \"functional_status\", \"platforms\"],\n ),\n # numeric columns\n (\"med_enc\", SimpleImputer(strategy=\"median\"), num_columns),\n ]\n )\n\n # Check if the data should be sampled. This could be useful to quickly run\n # the pipeline for testing/debugging purposes (undersample)\n # or profiling purposes (oversample).\n # The below is an example how the sampling can be done on the train data if required.\n # Model Training in this reference code has been done on complete train data itself.\n sample_frac = params.get(\"sampling_fraction\", None)\n if sample_frac is not None:\n logger.warn(f\"The data has been sample by fraction: {sample_frac}\")\n sample_X = train_X.sample(frac=sample_frac, random_state=context.random_seed)\n else:\n sample_X = train_X\n sample_y = train_y.loc[sample_X.index]\n\n\n # Train the feature engg. pipeline prepared earlier. 
Note that the pipeline is\n # fitted on only the **training data** and not the full dataset.\n # This avoids leaking information about the test dataset when training the model.\n # In the below code train_X, train_y in the fit_transform can be replaced with\n # sample_X and sample_y if required. \n train_X = get_dataframe(\n features_transformer.fit_transform(train_X, train_y),\n get_feature_names_from_column_transformer(features_transformer),\n )\n\n # Note: we can create a transformer/feature selector that simply drops\n # a specified set of columns. But, we don't do that here to illustrate\n # what to do when transformations don't cleanly fall into the sklearn\n # pattern.\n curated_columns = list(\n set(train_X.columns.to_list())\n - set(\n [\n \"manufacturer\",\n \"inventory_id\",\n \"ext_grade\",\n \"source_channel\",\n \"tgt_enc_iter_impt_platforms\",\n \"ext_model_family\",\n \"order_no\",\n \"line\",\n \"inventory_id\",\n \"gp\",\n \"selling_price\",\n \"selling_cost\",\n \"invoice_no\",\n \"customername\",\n ]\n )\n )\n\n # saving the list of relevant columns and the pipeline.\n save_pipeline(\n curated_columns, op.abspath(op.join(artifacts_folder, \"curated_columns.joblib\"))\n )\n save_pipeline(\n features_transformer, op.abspath(op.join(artifacts_folder, \"features.joblib\"))\n )", "def createDataFrames(self):\n self._atmDF = pd.DataFrame.from_dict(self._atmDict, orient='index')\n \n self._clientDF = pd.DataFrame.from_dict(self._clientDict, orient='index')\n self._clientDF['longAccount'] = self._clientDF.client.map(str) +\\\n '_' + self._clientDF.account.map(str)\n \n self._transactionDF = pd.DataFrame.from_dict(self._transactionDict, orient='index')", "def transform_data(data_orig):\n\tif isinstance(data_orig, str):\n\t\tdata_orig = pd.read_csv(data_orig)\n\n\tdata = data_orig\n\n\tnum_rows,num_variables = data.shape\n\tall_columns = data.columns.tolist()\n\tclean_data(data,all_columns,ignore_na=False,fill_mode=\"prob\")\n\texpand_features(data)\n\tvariables = ['Pclass','Sex',\"Fare\",\"Age\",\"SibSp\",\"Parch\",\"Embarked\",\"Fam_size\",\\\n\t\t\t\t \"cabin_no\",\"ticket_no\",\"friend\",\"Fare_person\",\"Child\"]\n\tX = pd.get_dummies(data[variables])\n\n\t## normalise features to zero man and unit variance\n\tscaler = preprocessing.StandardScaler().fit(X)\n\tX_scaled = scaler.transform(X)\n\tX = pd.DataFrame(X_scaled, columns=X.columns)\n\n\tif \"Survived\" in data.columns:\n\t\ty = data['Survived']\n\telse:\n\t\ty = None\n\n\treturn X, y", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n numeric = HEADER.as_feature_indices(\n [\"review_count\", \"lat\", \"lng\", \"lat2\", \"lng2\"]\n )\n\n # These features contain a relatively small number of unique items.\n categorical = HEADER.as_feature_indices(\n [\"distance\", \"price_level\", \"review_count\", \"Sp1\", \"type\"]\n )\n\n # These features can be parsed as natural language.\n text = HEADER.as_feature_indices(\n [\n \"slug\", \"menu\", \"slug.1\", \"categories\", \"name\", \"url\", \"homeurl\",\n \"resource_id1\", \"resource_id2\"\n ]\n )\n\n numeric_processors = Pipeline(steps=[(\"robustimputer\", RobustImputer())])\n\n categorical_processors = Pipeline(\n steps=[\n (\"thresholdonehotencoder\", ThresholdOneHotEncoder(threshold=162))\n ]\n )\n\n text_processors = Pipeline(\n steps=[\n (\n \"multicolumntfidfvectorizer\",\n MultiColumnTfidfVectorizer(\n max_df=0.9977,\n min_df=0.0003137465824032988,\n analyzer=\"word\",\n max_features=10000\n )\n )\n ]\n )\n\n column_transformer = 
ColumnTransformer(\n transformers=[\n (\"numeric_processing\", numeric_processors, numeric\n ), (\"categorical_processing\", categorical_processors,\n categorical), (\"text_processing\", text_processors, text)\n ]\n )\n\n return Pipeline(\n steps=[\n (\"column_transformer\",\n column_transformer), (\"robustpca\", RobustPCA(n_components=88)),\n (\"robuststandardscaler\", RobustStandardScaler())\n ]\n )", "def run_pipeline() -> pd.DataFrame:\n\n print('Loading data...')\n data = load_data()\n print('Stage one processing...')\n text = data.text\n text_ = stage_one_preprocessing(text)\n data_ = data.copy()\n data_.text = text_\n #print('Splitting by sentences...')\n #data_ = split_by_sentences(data_)\n print('Stage two processing...')\n text_ = stage_two_preprocessing(data_.text)\n print('Stage three processing...')\n text_ = stage_three_preprocessing(text_)\n data_.text = text_\n print('Saving file...')\n data_.to_csv(r'./data/stage_three_text.csv')\n return data_", "def filter_and_transform_to_df(self, min_price, max_price, size_limit):\n from filtering_functions import transform_dataset, filter_dataset\n # transform the dataset to dataframe (and change format of variables)\n # and filter unwanted data\n self.filtered_data = filter_dataset(transform_dataset(self.ads_data),\n min_price,max_price,size_limit)\n # create an easily accesible description of the resulting dataset\n n, k = self.filtered_data.shape\n self.description = \"Ads dataset class. Number of observations: {}, Number of variables: {}\".format(n,k)", "def transformer_function(path_to_df, list_of_vars_selection, mappings_dictionary, num_code_vars, path_to_output):\n # shoot off status message\n print(\"[TRANSFORMER FUNCTION]: Starting transformation!\")\n # load up the codes csv, subset to the variable of interest\n loader = pd.read_csv(path_to_df, sep=\";\")\n print(\"TRANSFORMER: orig row count \", loader.shape[0])\n loader = loader.loc[:, list_of_vars_selection]\n print(\"TRANSFORMER: subset row count \", loader.shape[0])\n # define a new dataframe which we are going to fill with the caseID, and a number of num_code_vars 0/1-columns\n # which represent markers for the unique codes used per verbatim\n list_of_keys = list(mappings_dictionary.keys()) # easier\n colnames_final_df = [\"caseID\"] + list_of_keys\n # arrange new dataframe\n new_df = pd.DataFrame(columns=colnames_final_df, index=range(0, loader.shape[0]))\n # overwrite the labels of the columns with the mapping dict keys\n new_df.columns = colnames_final_df\n # transform rows into a row with unique values, do this for all rows then build the new df with the same number\n # of rows as the original and num_code_vars-columns, fill with the binary 0 or 1s at the marker position (first,\n # second, etc.) 
of the original codes\n for index, row in loader.iterrows(): # old: range(0, loader.shape[0])\n # pull a row from the original dataframe and subset to the variables of interest\n old_row = row[list_of_vars_selection[1:]]\n # pull current row (case) ID from loader-dataframe\n old_row_caseID = row['ID'] # this variable is uniquely named \"ID\" in all codes files\n # old: loader.loc[index, \"ID\"]\n # print(\"Old row values: \", old_row.values, \" / type: \", type(old_row.values))\n # print(\"Row raw: \", old_row)\n # old: old_row = loader.loc[index, list_of_vars_selection[1:]]\n # get unique values from that row\n unique_values = list(set(old_row.values))\n # old: if isinstance(old_row, (int, np.integer)):\n # unique_values = old_row\n # else:\n # unique_values = list(set(old_row.values))\n # convert the list of unique vals into a binarized vector\n binarized_row = converter_func(unique_values, mappings_dictionary, bin_vector_length=num_code_vars)\n # concat caseID and binarized row vector to list for row\n new_row = [old_row_caseID] + binarized_row.tolist()\n # and finally push this into the new dataframe\n new_df.at[index, :] = new_row # roll over all columns!\n # shoot off status message\n print(\"[TRANSFORMER FUNCTION]: Transformation finished!\")\n # return and save the final new dataframe\n try:\n new_df.to_csv(path_to_output, index=False, header=True, encoding='utf-8')\n except:\n print(\"Error: could not save new dataframe to specified file-path (maybe already opened by another program?)\")\n return new_df", "def load_data(filename, yearmonth_start, yearmonth_end, nb_clients=-1):\n load_dtypes = {\"sexo\": str,\n \"ind_nuevo\": str,\n \"ult_fec_cli_1t\": str,\n \"indext\": str,\n \"indrel_1mes\": str,\n \"conyuemp\": str}\n\n skiprows = MONTH_START_END_ROW_INDICES[yearmonth_start][0]\n nrows = MONTH_START_END_ROW_INDICES[yearmonth_end][1] - skiprows + 1\n df = pd.read_csv(filename, dtype=load_dtypes, skiprows=range(1, skiprows + 1), nrows=nrows)\n df[\"age\"] = pd.to_numeric(df[\"age\"], errors=\"coerce\")\n if nb_clients > 0:\n nb_months = yearmonth_end - yearmonth_start + 1\n clients = df['ncodpers'].value_counts()[df['ncodpers'].value_counts() == nb_months].index.values\n clients = np.random.choice(clients, nb_clients)\n df = df[df['ncodpers'].isin(clients)]\n return df", "def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train", "def join_customer_features(traj_result, username, season, country):\n user_features=get_k_means_data(username,season, country).set_index(\"customer_nr\")\n features_with_trajectory=user_features.join(traj_result.set_index('customer_nr')[[\"cluster\"]])\n return features_with_trajectory", "def transform(self):\n select_columns = ['Province/State','Lat','Long']\n # df = global_cases.copy()\n global_cases = self.collect_case()\n df = global_cases.copy()\n df.drop(select_columns,axis=1, inplace=True)\n df = df[df['Country/Region'].apply(lambda x: x in Africa)].T.reset_index()\n df.columns = df.iloc[0]\n df.rename(columns={'Country/Region':'Date'},inplace=True)\n df.drop([0],axis=0,inplace=True)\n \n df['Date'] = pd.to_datetime(df['Date']).dt.strftime('%m-%d-%Y')\n # sort to have the latest update on top row\n df.sort_values('Date',ascending=False, inplace=True)\n african_cases = df.copy()\n\n return african_cases", "def create_train_feats():\n features = read_process_labelled(AUDIO_DIR, debug=True)\n df = pd.DataFrame(features)\n p = 
'./Features/dataset_features/data_features.csv'\n df.to_csv(p, index=False)\n return p", "def load_data(data_path):\n \n # Load the dataset\n df = pd.read_csv(data_path)\n \n # Perform feature transformations\n df[\"description_processed\"] = nlp_preprocessing(df[\"transaction_description\"])\n df['transaction_type'] = convert_transaction_amount(df['transaction_amount'])\n \n df.loc[~df['transaction_account_type'].isin(('transaction', 'savings', 'credit-card')), 'transaction_account_type'] = \"other\"\n \n return df", "def preprocess_data(df, min_vote_count=1000):\n # note that order matters!\n df = remove_rows_without_feature(df, 'budget')\n df = remove_rows_without_feature(df, 'runtime')\n df = remove_rows_with_non_english_movies(df)\n df = binarize_homepage(df)\n df = add_producers_feature(df)\n df = add_executive_producers_feature(df)\n df = get_movie_scores(df, min_vote_count)\n df = binarize_english(df)\n df = bin_ratings(df)\n df = binarize_genres(df)\n df = binarize_belongs_to_collection(df)\n df = binarize_production_countries(df)\n df = drop_unnecessary_columns(df)\n\n # Export to CSV\n y = df[['rating']]\n x = df.drop(['rating'], 1)\n\n y.to_csv(r'../dataset/Y.csv', index=False)\n x.to_csv(r'../dataset/X.csv', index=False)", "def generate_data():\n player_df = get_players_df(2018)\n stats_df = construct(2018, player_df[\"PlayerID\"])\n stats_df['NAME'] = player_df['FirstName'] + \" \" + player_df['LastName']\n stats_df[\"MPG\"] = pd.to_numeric(stats_df[\"MPG\"])\n stats_df.drop(stats_df[stats_df[\"MPG\"] < 15].index, inplace=True)\n stats_df.to_csv(\"data.csv\", index=False)", "def preprocess_data(self):\n # Fault and cavity models use same data and features. Get that now.\n signals = get_signal_names(cavities=['1', '2', '3', '4', '5', '6', '7', '8'],\n waveforms=['GMES', 'GASK', 'CRFP', 'DETA2'])\n\n # We need to crop, downsample, then do z-score. Any constant values are set to 0.001 manually.\n num_resample = 4096\n num_meta_columns = 8\n self.common_features_df = window_extractor(self.example, signals=signals, windows={'pre-fault': -1533.4},\n n_samples=7680, standardize=False, downsample=True,\n ds_kwargs={'num': num_resample})\n\n # The extractor makes a row per requested window plus some metadata. Columns are named\n # Sample_<sample_num>_<cav_num>_<signal>, and go Sample_1_1_GMES, Sample_2_1_GMES, ..., Sample_1_1_GASK, ....\n # We want to change this so that each column is all of the samples for 1_GMES, 1_GASK, ... 
as in the signal\n # order above.\n self.common_features_df = pd.DataFrame(\n self.common_features_df.iloc[0, num_meta_columns:].values.reshape(len(signals), -1).T, columns=signals)\n\n self.common_features_df = standard_scaling(self.common_features_df, fill=0.001)", "def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df", "def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, 
report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')", "def main(args):\r\n print(\"Anonymizing ...\")\r\n print(args)\r\n\r\n file_loc = args.file\r\n origdata = pd.read_csv(file_loc, sep=',', low_memory=False)\r\n\r\n print(origdata)\r\n\r\n print(args.col_customer)\r\n\r\n ID_field = args.col_customer #TBU CustomerID field name\r\n TS_field = args.col_time #TBU TS field name \r\n\r\n sub = origdata.loc[:,:] #Uncomment for applying on complete dataset\r\n\r\n def anonymizeID(data,ID_field):\r\n df=pd.DataFrame(data.loc[:,ID_field])\r\n df2=df.applymap(lambda x: ((HMAC.new(b\"key\", bytes(x), MD5)).hexdigest()))\r\n return df2.loc[:,ID_field]\r\n\r\n def shiftTimeForTrace(data,TS_field):\r\n # print(df.head())\r\n df=pd.DataFrame(data.loc[:,TS_field])\r\n df2 = df.loc[:,TS_field].apply(lambda x: pd.to_datetime(x)) \r\n # print(df2.head())\r\n rand_days = random.randint(-5,5) #range can be updated\r\n df2 = df2 + pd.DateOffset(days=rand_days)\r\n # print(df2.head())\r\n return df2\r\n\r\n #OG subset for reference\r\n print(sub.head())\r\n\r\n # sub1=sub.sort_values(by=ID_field)\r\n sub1 = sub.loc[:,:]\r\n uniqueIDs = list(sub[ID_field].unique())\r\n # sub2 = sub.loc[:,TS_field].apply(lambda x: pd.to_datetime(x))\r\n for ID in uniqueIDs:\r\n sub3=sub1.loc[sub1[ID_field] == ID]\r\n sub3.loc[:,TS_field]=shiftTimeForTrace(sub3,TS_field)\r\n # print(sub1.loc[sub1[ID_field] == ID][TS_field])\r\n # print(pd.DataFrame(sub3[TS_field]))\r\n sub1.loc[sub1[ID_field] == ID,TS_field] = pd.DataFrame(sub3[TS_field])\r\n\r\n # Results post TS shift\r\n print(sub1.head())\r\n\r\n sub4 = sub1.loc[:,:]\r\n sub4.loc[:,ID_field] = anonymizeID(sub4,ID_field)\r\n\r\n #Results post ID anonymization\r\n print(sub4.head())\r\n sub4.to_csv('out.csv',index=False)", "def tranform_data(args):\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))", "def make_dataset(self):\n # Read raw data\n data = self.read_raw_data()\n self.default_header = list(data.columns.values)\n # Fit the variables on the raw dataset\n self.fit(data.copy())\n return make_df(data, self.features), make_df(data, self.targets)", "def preprocess(data, to_drop=[]):\n \n columns = data.columns.to_list()\n \n # split data to numeric vs categorical\n numeric_features = data.select_dtypes(include=[\n 'int64', 'float64']).columns\n \n if len(to_drop) > 0:\n categorical_features = data.select_dtypes(include=[\n 'object']).drop(to_drop, axis=1).columns\n print(categorical_features)\n else: \n categorical_features = data.select_dtypes(include=[\n 'object']).columns\n \n categorical_transformer = 
Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent', fill_value='missing'))])\n \n numerical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', RobustScaler())\n ])\n # missing_values = np.nan\n \n# Bundle preprocessing for numerical and categorical data\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', numerical_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features)\n ])\n\n my_pipeline = Pipeline(steps=[('preprocessor', preprocessor) ])\n \n for col in to_drop:\n columns.remove(col)\n print('Hello')\n \n trans_data = my_pipeline.fit_transform(data)\n return trans_data#pd.DataFrame(#, columns=columns) ", "def ingest_single_fec(contrib_file : str,\n contrib_header_file : str,\n min_date = \"2020-04-01\") -> pd.DataFrame:\n print(contrib_file)\n fec_df = pd.read_csv(contrib_file,\n low_memory = False,\n delimiter= '|',\n header= None\n # error_bad_lines= False\n )\n col_names = pd.read_csv(contrib_header_file)\n col_names = list(col_names.columns)\n \n fec_df.columns = [x.lower() for x in col_names]\n fec_df['transaction_dt'] = pd.to_datetime(fec_df['transaction_dt'], format=\"%m%d%Y\")\n fec_df.drop(columns = [\"image_num\", \"sub_id\", \"memo_cd\", \"file_num\", \"tran_id\"], inplace = True)\n\n fec_df = fec_df[fec_df['transaction_dt'] > pd.to_datetime(min_date)]\n return fec_df", "def transform_data(data_df, target_df = None):\n rec_idx, rec_col, rec_data = create_recency_feature(data_df)\n freq_idx, freq_col, freq_data = create_frequency_feature(data_df)\n norm_idx, norm_col, norm_data = create_norm_feature(data_df)\n\n # with hstack function we are concatinating a sparse matrix and a dense matirx :)\n feat_df = hstack((rec_data, freq_data, norm_data))\n print('Final feature matrix shape:', feat_df.shape)\n \n # merge all the feature names\n feat_names = list(rec_col) + list(freq_col) + list(norm_col)\n \n if isinstance(target_df, pd.core.frame.DataFrame):\n # get +ve & -ve indices\n one_idx = target_df[target_df['outcome_flag'] == 1]['id'].index.tolist()\n zero_idx = target_df[target_df['outcome_flag'] == 0]['id'].index.tolist()\n \n # calculate fitness values of features\n rcdf = create_fitness_stats(rec_data, rec_col, one_idx, zero_idx, nans = True)\n fqdf = create_fitness_stats(freq_data, freq_col, one_idx, zero_idx, nans = False)\n nrdf = create_fitness_stats(norm_data, norm_col, one_idx, zero_idx, nans=False)\n fit_df = rcdf.append(fqdf).append(nrdf)\n fit_df.reset_index(drop=1)\n return feat_df, feat_names, fit_df\n \n return feat_df, feat_names", "def build_real_news_dataframe(include_celeb: bool = False):\n # real news dataset\n #column_names = ['Title', 'Content']\n realDf = pd.DataFrame()\n\n # build the dataf rame with the non-celeb news\n realDf = realDf.append(get_dataset_from_file(\"fakeNewsDataset\", False, True), ignore_index=True)\n\n # include data from the celeb news\n if include_celeb:\n realDf = realDf.append(get_dataset_from_file(\"celebrityDataset\", False, False), ignore_index=True)\n\n return realDf", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def create_dummy_feature(movie_df):\n movie_df.director.value_counts()[:5]\n N = 4\n top_directors = movie_df.director.value_counts().index[:N]\n top_dir_movies = movie_df[movie_df['director'].isin(top_directors)]\n dummies = pd.get_dummies(top_dir_movies['director'])\n movie_df_dir = pd.merge(movie_df, dummies, left_index=True, right_index=True, how='left')\n #fill in missing value for the directors 
to 0\n movie_df_dir['Joel Schumacher'].fillna(0,inplace=True)\n movie_df_dir['Ridley Scott'].fillna(0,inplace=True)\n movie_df_dir['Steven Spielberg'].fillna(0,inplace=True)\n movie_df_dir['Woody Allen'].fillna(0,inplace=True)\n\n features = ['production_budget','widest_release', 'number_of_theaters_open',\n 'Steven Spielberg',\n 'Woody Allen',\n 'Ridley Scott',\n 'Joel Schumacher']\n related_columns = features + ['domestic_gross']\n print related_columns\n\n\n clean_movie_df_dir = movie_df_dir[related_columns].dropna()\n print '%i movies with all necessary info.' % len(clean_movie_df_dir)\n\n import statsmodels.api as sm\n Y = clean_movie_df_dir['domestic_gross']\n X = sm.add_constant(clean_movie_df_dir[features])\n\n\n #split train and test dataset\n from sklearn.cross_validation import train_test_split\n # splits x -> x_train, x_test\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)\n\n #fit model on train set\n model = sm.OLS(Y_train, X_train).fit()\n model.summary()\n\n #fit model on test set\n predicted_gross = model.predict(X_test)\n plt.scatter(X_test.production_budget, Y_test, color='gray')\n plt.plot(X_test.production_budget, predicted_gross)\n plt.title(\"multivariate for Domestic Gross\")\n plt.xlabel(\"Budget ($100M)\")\n plt.ylabel(\"Domestic Gross ($100M)\")\n plt.show()", "def transform(self, dataset: NumpyOrPandas) -> NumpyDataset:\n # checks here\n super().transform(dataset)\n # convert to accepted dtype and get attributes\n dataset = dataset.to_pandas()\n df = dataset.data\n\n # transform\n roles = NumericRole()\n outputs = []\n\n for n, conlumn_name in enumerate(df.columns):\n if self.cache_dir is not None:\n full_hash = get_textarr_hash(df[conlumn_name]) + get_textarr_hash(self.dicts[conlumn_name][\"feats\"])\n fname = os.path.join(self.cache_dir, full_hash + \".pkl\")\n\n if os.path.exists(fname):\n logger.info3(f\"Load saved dataset for {conlumn_name}\")\n\n with open(fname, \"rb\") as f:\n new_arr = pickle.load(f)\n\n else:\n new_arr = self.dicts[conlumn_name][\"transformer\"].transform(df[conlumn_name])\n with open(fname, \"wb\") as f:\n pickle.dump(new_arr, f)\n else:\n new_arr = self.dicts[conlumn_name][\"transformer\"].transform(df[conlumn_name])\n\n output = dataset.empty().to_numpy()\n output.set_data(new_arr, self.dicts[conlumn_name][\"feats\"], roles)\n outputs.append(output)\n logger.info3(f\"Feature {conlumn_name} transformed\")\n # create resulted\n return dataset.empty().to_numpy().concat(outputs)", "def build_main_dataset(\n self, ratings: pd.DataFrame, users: pd.DataFrame, prefetch_to_gpu: bool\n ) -> torch.utils.data.Dataset:\n main_dataset_builder = self.get_main_dataset()\n return main_dataset_builder(ratings, users, prefetch_to_gpu)", "def preprocess(self):\n\n print('[ INFO ]: Preprocessing forest fires data...')\n\n # Rename headers of data frame\n forestfires_data = pd.read_csv(self.forestfires_path, header=0)\n forestfires_data.columns = [\n 'x_axis','y_axis','month','day','ffmc','dmc','dc','isi','temp','rh',\n 'wind','rain','area'\n ]\n categorical_features = [\n 'month','day'\n ]\n predictor = 'area'\n\n df = alg.one_hot_encode(self, forestfires_data, categorical_features)\n\n features = [df.columns[j] for j in range(len(df.columns)) if df.columns[j] != predictor]\n\n return df, features, predictor", "def pre_process_df(train_data, test_data):\n train_data[\"text\"] = train_data[\"sentence1\"] + \", \" + train_data[\"sentence2\"] # noqa\n test_data[\"text\"] = test_data[\"sentence1\"] + \", \" + 
test_data[\"sentence2\"]\n train_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n test_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n train_data = train_data[[\"text\", \"label\"]]\n test_data = test_data[[\"text\", \"label\"]]\n simple_pre_process_text_df(train_data)\n simple_pre_process_text_df(test_data)\n return train_data, test_data", "def test_data_when_import_customer_with_data(self):\n\n customer = self.customers[0]\n self.assertEqual(\"Jimena\", customer.get_first_name())\n self.assertEqual(\"Sanabria\", customer.get_last_name())\n self.assertEqual(\"21-08-1980\", customer.get_date_of_birth())\n self.assertEqual([\"Nueva Granada #1837\"], customer.get_addresses())\n self.assertEqual([4244270,70759942], customer.get_phones())\n self.assertEqual(\"[email protected]\", customer.get_email())\n self.assertEqual(\"Gold\", customer.get_membership())\n self.assertEqual(\"Active\", customer.get_status())", "def transform(self, df):\n\t\tdf = self.__parse_json(df)\n\t\tdf = self.__fillnan(df)\n\t\tdf = self.__parse_dates(df)\n\t\tdf['budget'] = df['budget'].apply(lambda x: self.missing_budget_imputing if int(x) == 0 else x)\n\t\tdf['has_collections'] = df['belongs_to_collection'].isna().astype(int)\n\t\tdf['homepage'] = df['homepage'].isna().astype(int)\n\t\tdf['is_en'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n\t\tdf = self.__encode_genre_transform(df)\n\t\tdf = self.__top_countries_and_companies_transform(df)\n\t\tdf = self.__bin_columns_transform(df)\n\t\tdf.drop(\n\t\t\t['release_date', 'original_language', 'production_countries', 'production_companies', 'id', 'backdrop_path',\n\t\t\t 'imdb_id', 'poster_path', 'video', 'belongs_to_collection', 'status', 'runtime',\n\t\t\t 'original_title', 'overview', 'tagline', 'title'], axis=1, inplace=True)\n\t\treturn df", "def prepare_dataset_encoder(self):\n calendar, sales_train, prices_df = self.calendar, self.sales_train, self.prices_df\n agg_endog, agg_idx, agg_sales_train = self.agg_endog, self.agg_idx, self.agg_sales_train\n \n #Prepare exog dataset ---------------------------------------------------------------\n #Prepare calendar exog: event_type & wday on a date\n calendar_exog = pd.DataFrame(index=calendar.index)\n for event_type in ['Sporting', 'Cultural', 'National', 'Religious']:\n calendar_exog['is_{}'.format(event_type)] = np.where((calendar.loc[calendar_exog.index, ['event_type_1', 'event_type_2']] == event_type).any(axis=1), 1, 0)\n wday_encoder = OneHotEncoder(drop='first', sparse=False) #drop Sat.\n wday_df = pd.DataFrame(wday_encoder.fit_transform(calendar.loc[calendar_exog.index, ['wday']]), columns=['w7'] + ['w{}'.format(i) for i in range(1,6)])\n calendar_exog = pd.concat([calendar_exog, wday_df], axis=1)\n \n #Prepare snap_exog: if there is snap event on that date & dept_store ts\n snap_exog = pd.DataFrame(0., index=calendar.index, columns=agg_endog.columns)\n for idx in snap_exog.columns:\n state = sales_train[agg_idx == idx].state_id.unique()[0]\n snap_exog[idx] = calendar.loc[snap_exog.index, 'snap_{}'.format(state)]\n \n #Prepare price discount on that date & dept_store ts\n price_exog = pd.DataFrame(index=calendar.index, columns=agg_endog.columns) #mean price across item_store for a dept_store ts\n for idx in price_exog.columns:\n price_exog[idx] = prices_df.T.loc[agg_idx == idx].mean()\n price_discount = price_exog / price_exog.max() #normalized\n \n self.calendar_exog = calendar_exog\n self.snap_exog = snap_exog\n self.price_discount = price_discount\n \n 
#Prepare encoder ----------------------------------------------------------------------\n #Create encoder for dept_store_id\n dept_store_encoder = OneHotEncoder(drop='first', sparse=False).fit(agg_sales_train[['dept_id', 'store_id']])\n \n #Create encoder for event name\n calendar['event_name_1'].fillna('missing', inplace=True)\n event_encoder = LabelEncoder().fit(calendar['event_name_1'])\n \n self.dept_store_encoder = dept_store_encoder\n self.event_encoder = event_encoder", "def _transform(self, X, y=None):\n # lazy imports to avoid hard dependency\n from tsfresh import extract_features\n\n Xt = extract_features(\n X,\n column_id=X.columns[0],\n column_value=X.columns[3],\n column_kind=X.columns[2],\n column_sort=X.columns[1],\n **self.default_fc_parameters_,\n )\n\n # When using the long input format, tsfresh seems to sort the index,\n # here we make sure we return the dataframe in the sort order as the\n # input data\n instances = X.iloc[:, 0].unique()\n Xt = Xt.reindex(instances)\n return Xt", "def features_past_generation(features_creation_function,\n days,\n feature_names_prefix,\n data,\n indices):\n matches_outcomes=[]\n for i,match_indice in enumerate(indices):\n match=data.iloc[match_indice,:]\n past_matches=data[(data.Date<match.Date)&(data.Date>=match.Date-datetime.timedelta(days=days))]\n match_features_outcome_1=features_creation_function(1,match,past_matches)\n match_features_outcome_2=features_creation_function(2,match,past_matches)\n matches_outcomes.append(match_features_outcome_1)\n matches_outcomes.append(match_features_outcome_2)\n if i%100==0:\n print(str(i)+\"/\"+str(len(indices))+\" matches treated. \"+ features_creation_function.__name__ + str(days))\n train=pd.DataFrame(matches_outcomes)\n train.columns=[feature_names_prefix + \"_\" + str(days) +\"_\" +str(i) for i in range(len(train.columns))]\n \n \n \n return train", "def features_websessions(df_customers, df_websessions):\n df_websessions = df_customers.join(df_websessions, \"customerId2\", 'inner')\n res_counts = df_websessions.groupBy('customerId2').count().alias('nb_sessions')\n\n res_agg = df_websessions.groupBy('customerId2').agg(\n min('pageViewCount').alias('min_pageViewCount'),\n mean('pageViewCount').alias('mean_pageViewCount'),\n max('pageViewCount').alias('max_pageViewCount'),\n (count(when(df_websessions.pageViewCount != 0, True)) / count('customerId2')).alias('p_not0_pageViewCount'),\n\n min('nonPageViewEventsCount').alias('min_nonPageViewEventsCount'),\n mean('nonPageViewEventsCount').alias('mean_nonPageViewEventsCount'),\n max('nonPageViewEventsCount').alias('max_nonPageViewEventsCount'),\n (count(when(df_websessions.nonPageViewEventsCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_nonPageViewEventsCount'),\n\n min('productViewCount').alias('min_productViewCount'),\n mean('productViewCount').alias('mean_productViewCount'),\n max('productViewCount').alias('max_productViewCount'),\n (count(when(df_websessions.productViewCount != 0, True)) / count('customerId2')).alias('p_not0_productViewCount'),\n\n min('productViewsDistinctCount').alias('min_productViewsDistinctCount'),\n mean('productViewsDistinctCount').alias('mean_productViewsDistinctCount'),\n max('productViewsDistinctCount').alias('max_productViewsDistinctCount'),\n (count(when(df_websessions.productViewsDistinctCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productViewsDistinctCount'),\n\n min('productsAddedToBagCount').alias('min_productsAddedToBagCount'),\n 
mean('productsAddedToBagCount').alias('mean_productsAddedToBagCount'),\n max('productsAddedToBagCount').alias('max_productsAddedToBagCount'),\n (count(when(df_websessions.productsAddedToBagCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsAddedToBagCount'),\n\n min('productsSavedForLaterFromProductPageCount').alias('min_productsSavedForLaterFromProductPageCount'),\n mean('productsSavedForLaterFromProductPageCount').alias('mean_productsSavedForLaterFromProductPageCount'),\n max('productsSavedForLaterFromProductPageCount').alias('max_productsSavedForLaterFromProductPageCount'),\n (count(when(df_websessions.productsSavedForLaterFromProductPageCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsSavedForLaterFromProductPageCount'),\n\n min('productsSavedForLaterFromCategoryPageCount').alias('min_productsSavedForLaterFromCategoryPageCount'),\n mean('productsSavedForLaterFromCategoryPageCount').alias('mean_productsSavedForLaterFromCategoryPageCount'),\n max('productsSavedForLaterFromCategoryPageCount').alias('max_productsSavedForLaterFromCategoryPageCount'),\n (count(when(df_websessions.productsSavedForLaterFromCategoryPageCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsSavedForLaterFromCategoryPageCount'),\n\n min('productsPurchasedDistinctCount').alias('min_productsPurchasedDistinctCount'),\n mean('productsPurchasedDistinctCount').alias('mean_productsPurchasedDistinctCount'),\n max('productsPurchasedDistinctCount').alias('max_productsPurchasedDistinctCount'),\n (count(when(df_websessions.productsPurchasedDistinctCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsPurchasedDistinctCount'),\n\n min('productsPurchasedTotalCount').alias('min_productsPurchasedTotalCount'),\n mean('productsPurchasedTotalCount').alias('mean_productsPurchasedTotalCount'),\n max('productsPurchasedTotalCount').alias('max_productsPurchasedTotalCount'),\n (count(when(df_websessions.productsPurchasedTotalCount != 0, True)) / count('customerId2')).alias(\n 'p_not0_productsPurchasedTotalCount'),\n )\n\n res = res_counts.join(res_agg, 'customerId2')\n return res", "def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. 
group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def read_ct_data(train_start, train_count, eval_start, eval_count):\n data = pd.read_csv('/opt/train.csv')\n\n # Dropping the id column\n data.drop(['ID_code'], axis=1, inplace=True)\n\n data = data.values\n return (data[train_start:train_start + train_count],\n data[eval_start:eval_start + eval_count])", "def make_data(input_filepath, output_filepath):\n\n df_train = pd.read_csv(input_filepath+'train_u6lujuX_CVtuZ9i.csv', index_col=0)\n df_test = pd.read_csv(input_filepath+'test_Y3wMUE5_7gLdaTN.csv', index_col=0)\n print('Sizes', df_train.shape, df_test.shape)\n print(\"Outcome dispersion:\\n\", df_train['Loan_Status'].value_counts())\n\n\n # recode and save outcome vector\n y = df_train['Loan_Status'].map({'N': 0, 'Y': 1})\n\n del df_train['Loan_Status']\n\n # all in one dataframe\n df = pd.concat([df_train, df_test])\n print(df.shape)\n\n from src.features.build_features import make_features\n df = make_features(df)\n\n # Divide data on train and test again and save\n data_train = df[df.index.isin(df_train.index)]\n data_test = df[df.index.isin(df_test.index)]\n print(data_train.shape, data_test.shape)\n\n data_tmp = data_train.copy()\n data_tmp['y'] = y\n\n\n data_tmp.to_csv(output_filepath + 'train_ready.csv', index=False)\n data_test.to_csv(output_filepath + 'test_ready.csv', index=False)\n id_test = pd.DataFrame(data=df_test.index, columns=['Loan_ID'])\n id_test.to_csv(output_filepath + 'id_test.csv', index=False)", "def get_311_data():\n # reading in data and saving to separate DFs\n source = spark.read.csv(\"source.csv\", sep=\",\", header=True, inferSchema=True)\n case = spark.read.csv(\"case.csv\", sep=\",\", header=True, inferSchema=True)\n dept = spark.read.csv(\"dept.csv\", sep=\",\", header=True, inferSchema=True)\n\n # returning DFs\n return source, case, dept", "def load_data2(filename, yearmonths_list, nb_clients=-1):\n load_dtypes = {\"sexo\": str,\n \"ind_nuevo\": str,\n \"ult_fec_cli_1t\": str,\n \"indext\": str,\n \"indrel_1mes\": str,\n \"conyuemp\": str}\n\n df = pd.DataFrame()\n if len(yearmonths_list) > 0:\n for yearmonth in yearmonths_list:\n if yearmonth not in MONTH_START_END_ROW_INDICES:\n continue\n skiprows = MONTH_START_END_ROW_INDICES[yearmonth][0]\n nrows = MONTH_START_END_ROW_INDICES[yearmonth][1] - skiprows + 1\n _df = pd.read_csv(filename, dtype=load_dtypes, skiprows=range(1, skiprows + 1), nrows=nrows)\n df = pd.concat([df, _df], axis=0, ignore_index=True)\n else:\n logging.info(\"-- Read all data from the file : %s\" % filename)\n df = pd.read_csv(filename, dtype=load_dtypes)\n\n df[\"age\"] = pd.to_numeric(df[\"age\"], errors=\"coerce\")\n df[\"renta\"] = pd.to_numeric(df[\"renta\"], errors=\"coerce\")\n if nb_clients > 0 or nb_clients == 'max':\n logging.info(\"-- Select %s clients\" % nb_clients)\n nb_months = len(yearmonths_list)\n clients = df['ncodpers'].value_counts()[df['ncodpers'].value_counts() == nb_months].index.values\n np.random.shuffle(clients)\n if isinstance(nb_clients, int) and nb_clients < len(clients):\n clients = clients[:nb_clients]\n df = df[df['ncodpers'].isin(clients)]\n return df", "def import_customers(ctx):\n load_csv(ctx, 'data/sample/customers.csv', 'res.partner')", "def build_fake_news_dataframe(include_celeb: bool = False):\n # fake new dataset\n fakeDf = pd.DataFrame()\n\n # build the data frame with the 
non-celeb news\n fakeDf = fakeDf.append(get_dataset_from_file(\"fakeNewsDataset\", True, True), ignore_index=True)\n\n # include data from the celeb news\n if include_celeb:\n fakeDf = fakeDf.append(get_dataset_from_file(\"celebrityDataset\", True, False), ignore_index=True)\n\n return fakeDf", "def prep_corals(df):\n \n # dropping duplicate rows since all rows should have unique sample ids at a minimum\n df.drop_duplicates(inplace = True)\n \n # dropping specified columns\n df = df.drop(columns = ['CatalogNumber', 'SampleID', 'SurveyID', 'EventID', 'LocationAccuracy', \n 'Station', 'Locality', 'DepthMethod', 'ScientificName', 'TaxonRank'])\n\n # dropping all null values\n df = df.dropna()\n\n # converting ObservationDate to datetime format\n df['ObservationDate']= pd.to_datetime(df['ObservationDate'])\n\n # adding underscores to various column names\n df.columns = ['Data_Provider', 'Vernacular_Name_Category', 'Observation_Date', 'latitude', \n 'longitude', 'Depth_Meters', 'Repository', 'Identification_Qualifier', 'Sampling_Equipment', 'Record_Type']\n\n # lower casing all column names\n df.columns = df.columns.str.lower()\n\n # lower casing all string values\n df = df.applymap(lambda string:string.lower() if type(string) == str else string)\n\n # filtering out all rows with negative meters\n df = df[df.depth_meters >= 0]\n\n # filtering out all creatures that are not corals\n df = df[df.vernacular_name_category.str.contains('coral') & (df.vernacular_name_category.str.contains('hydrozoan') == False)]\n\n # returning df\n return df", "def transform(X, transformer, y=None):\n return pd.DataFrame([random_str_generator() for x in range(len(X))])", "def run(self):\n # Import datasets\n filepaths = [self.dir + f for f in listdir(self.dir) if f.endswith('.csv')]\n\n # Concatenate into single DataFrame\n df = pd.concat(map(pd.read_csv, filepaths))\n\n # Derive cvegeo column\n df['CVEGEO'] = df[\"ENTIDAD\"].astype(str).str.zfill(2) + df[\"MUN\"].astype(str).str.zfill(3) + df[\"LOC\"].astype(str).str.zfill(4) + df[\"AGEB\"].astype(str).str.zfill(4)\n\n # Write DataFrame to CSV\n df.to_csv(\".output/inegi_2020_master.csv\")", "def preprocess_data(self, data: pd.DataFrame, stage: str = \"inference\") -> Tuple[pd.DataFrame, list]:\n added_features = None\n if self.config.encode_date_columns:\n data, added_features = self._encode_date_columns(data)\n # The only features that are added are the date features extracted\n # from the date which are categorical in nature\n if (added_features is not None) and (stage == \"fit\"):\n logger.debug(f\"Added {added_features} features after encoding the date_columns\")\n self.config.categorical_cols += added_features\n # Update the categorical dimension in config\n self.config.categorical_dim = (\n len(self.config.categorical_cols) if self.config.categorical_cols is not None else 0\n )\n # Encoding Categorical Columns\n if len(self.config.categorical_cols) > 0:\n data = self._encode_categorical_columns(data, stage)\n\n # Transforming Continuous Columns\n if (self.config.continuous_feature_transform is not None) and (len(self.config.continuous_cols) > 0):\n data = self._transform_continuous_columns(data, stage)\n # Normalizing Continuous Columns\n if (self.config.normalize_continuous_features) and (len(self.config.continuous_cols) > 0):\n data = self._normalize_continuous_columns(data, stage)\n # Converting target labels to a 0 indexed label\n data = self._label_encode_target(data, stage)\n # Target Transforms\n data = self._target_transform(data, stage)\n return 
data, added_features" ]
[ "0.78012073", "0.70509386", "0.6482284", "0.6199711", "0.5975251", "0.5896229", "0.58942175", "0.5884299", "0.5793099", "0.57307404", "0.5701118", "0.568415", "0.56560767", "0.5655341", "0.56509733", "0.5625573", "0.5603968", "0.55976146", "0.55959636", "0.5586056", "0.5570692", "0.5557544", "0.5510036", "0.549403", "0.54827154", "0.54787415", "0.54602635", "0.545783", "0.54553646", "0.5432904", "0.54206353", "0.541709", "0.54161257", "0.54103416", "0.54062456", "0.53927493", "0.5381681", "0.5380491", "0.53747815", "0.5360946", "0.5355021", "0.5350418", "0.53425586", "0.5334296", "0.5326649", "0.532609", "0.53213984", "0.5309581", "0.5294275", "0.5282518", "0.52765375", "0.5273612", "0.52728736", "0.5258933", "0.52538", "0.52506363", "0.524287", "0.5233945", "0.52289486", "0.5224719", "0.5224379", "0.5220045", "0.5215832", "0.5199044", "0.51957333", "0.5187549", "0.51804805", "0.51796126", "0.5173712", "0.5162742", "0.51531696", "0.51462054", "0.51398087", "0.51343185", "0.51215076", "0.51129067", "0.5112739", "0.5112543", "0.5104349", "0.50938636", "0.5092858", "0.50890684", "0.5085682", "0.508432", "0.5080347", "0.50773454", "0.5074879", "0.5074565", "0.5074189", "0.507201", "0.50715125", "0.506858", "0.5067155", "0.50628996", "0.50609565", "0.5057699", "0.50469446", "0.5038218", "0.5037625", "0.5031652" ]
0.7298348
1
Creates a new customer identifier from an existing dataset.
def createCustomerID(self): customerID = self._df_invoice_original.CustomerID.max() customerID += 1 return int(customerID)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:\n customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4\n return customer_id", "def create_customer(cls, api, **data):\n return api.create_customer(**data)", "def create_customer(data):\n mandatory_params = ['customer_name', 'mobile_number']\n result = api_utils.check_required_params(mandatory_params, data)\n if result:\n return result\n mobile_number = db_helper.mobile_number_unique(data['mobile_number'])\n if not mobile_number:\n return api_utils.error(\"There already is a customer with \\\n mobile number {} found\".format(data['mobile_number']), 404)\n\n new_customer = db_helper.add_new_customer(data['customer_name'],\n mobile_number)\n return jsonify({'new_customer': new_customer})", "def add_customer(insert_dict):\n return cr.add_customer(insert_dict)", "def createCustomer(self, **params):\n return self.__req('create_customer', params)", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def post(self):\n data = request.json\n return save_new_customer(data=data)", "def create_or_update_customer(entity):\n\ttry:\n\t\torganisation = entity.get('organisation').replace(\"'\",\"\")\n\t\torganisation = \"%s(C)\"%organisation if is_supplier_or_customer_group(organisation) else organisation\n\t\tname = frappe.db.get_value('Customer', organisation)\n\t\tif not name:\n\t\t\tcustomer = frappe.new_doc(\"Customer\")\n\t\t\tcustomer.customer_name = organisation\n\t\telse:\n\t\t\tcustomer = frappe.get_doc(\"Customer\", name)\n\n\t\tcustomer.entity_id = entity.get('entity_id')\n\t\tcustomer.customer_type = 'Company'\n\t\tif entity.get('group'):\n\t\t\tif entity.get('group').strip() == 'General':\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer Group', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = entity.get('group').strip() or 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telse:\n\t\t\t\tcustomer.customer_group = create_customer_group(entity.get('group').strip())\n\t\tcustomer.territory = 'Australia'\n\t\tcustomer.customer_status = 'Existing'\n\t\tcustomer.modified_date = entity.get('updated_at')\n\t\tcustomer.save(ignore_permissions=True)\n\t\tif \"(C)\" in customer.customer_name:\n\t\t\tfrappe.db.set_value(\"Cusomer\", customer.name, \"customer_name\", organisation.replace(\"(C)\", \"\"))\n\n\t\tcreate_or_update_contact(customer, entity)\n\t\tget_addresses(entity.get('entity_id'))\n\n\t\t# return status\n\t\treturn {\n\t\t\tentity.get(\"entity_id\"): {\n\t\t\t\t\"operation\": \"Customer Created\" if not name else \"Customer Updated\",\n\t\t\t\t\"name\": 
customer.name,\n\t\t\t\t\"modified_date\": entity.get(\"updated_at\")\n\t\t\t}\n\t\t}\n\texcept Exception, e:\n\t\tdocname = entity.get('entity_id')\n\t\tresponse = entity\n\t\tlog_sync_error(\"Customer\", docname, response, e, \"create_new_customer\")", "def add_customers(current_customers, new_customer_list):\n for new in new_customer_list:\n new_id = _get_next_cust_id()\n current_customers[new_id] = new\n customer_cases[new_id] = {}", "def add_customer(customer_id, name, lastname, homeaddress, phone_number, email, status, credit_limit):\n try:\n with customer_db.transaction():\n new_customer_mi = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n homeaddress=homeaddress,\n phone_number=phone_number,\n email=email,\n status=status,\n credit_limit=credit_limit\n )\n logger.debug(\"Added customer %s to %s\", new_customer_mi, customer_db.database)\n return new_customer_mi\n except Exception as e:\n logger.error(\"Error creating customer_id %s: %s\", customer_id, e)", "def customer_id(self, customer_id: str):\n self._customer_id = customer_id", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n print('Adding new customer, Customer ID {}...'.format(customer_id))\n try:\n Customer.get_by_id(customer_id)\n print('Customer ID {} is already in use'.format(customer_id))\n except Exception as ex:\n if \"instance matching query does not exist\" in str(ex):\n try:\n new_customer = Customer.create(customer_ID=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info('Added new customer, Customer ID %s', customer_id)\n except IntegrityError:\n print('Incorrect format, customer {} not saved'\n .format(customer_id))", "def store_customer(self, name):\n pass", "def _create_customers(self, customer_name=\"Alex\"):\n test_customer = Customer(\n name=customer_name,\n address=\"Washington Square Park\",\n phone_number=\"555-555-1234\",\n email=\"[email protected]\",\n credit_card=\"VISA\",\n active = True\n )\n return test_customer", "def customer(self, id):\r\n return Customer(self, id)", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n customer.save()\n except Exception as unknown_error:\n print(unknown_error)", "def add_customer(customer_id, first, last, addr, phone, email, status, limit):\n try:\n LOGGER.info('Creating customer record')\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=first,\n last_name=last,\n home_address=addr,\n phone_number=phone,\n email_address=email,\n status=status,\n credit_limit=limit\n )\n new_customer.save()\n LOGGER.info('Added customer: %s', new_customer.customer_id)\n except IntegrityError as err:\n LOGGER.warning('Error creating = ID: %s', customer_id)\n LOGGER.warning(err)\n\n return Customer", "def test_customer_create(self):\n self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])", "def create_customer_token(self, _id):\n return self._get(\"/customer/{}/token\".format(_id))", "def 
createCustomer(sender, instance, **kwargs):\n Customer.objects.get_or_create(user=instance)", "def save_object(self, data):\n return Customer(**data)", "def set_customer_id(self, case_obj: Family) -> None:\n self._set_customer_id(case_obj.customer.internal_id)", "def create_customer(email=None, name=None, user_type='customer'):\n if user_type == 'charity':\n stripe.api_key = Config.STRIPE_SECRET_KEY_FOR_PLAN\n else:\n stripe.api_key = Config.STRIPE_SECRET_KEY\n if email and name:\n customer = stripe.Customer.create(email=email, name=name)\n else:\n customer = stripe.Customer.create()\n return customer.id", "def add_customer(customer_id, first_name, last_name, home_address, phone_number,\n email_address, is_active, credit_limit):\n try:\n LOGGER.info('Successfully connected to the database')\n\n with DATABASE.transaction():\n new_customer = Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n is_active=is_active,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info(\"Customer added successfully\")\n\n except IntegrityError as error:\n LOGGER.info(error)\n LOGGER.info('Error occurred')", "def get_customer_id_by_sale_id(sale_id):\n\n # your code", "def CreateCustomer(Person):\n\t\t\tif Person.AddrCitytownNrID:\n\t\t\t\tcitytown = model.AddressCityTown.get(Person.AddrCitytownNrID)\n\t\t\t\tAddressLabel = '%s\\n%s\\n%s, %s\\n%s\\n%s' % (Person.AddrStr, citytown.Name, citytown.Block, citytown.District, citytown.State, citytown.ZipCode) \n\t\t\telse:\n\t\t\t\tAddressLabel = Person.AddrStr\n\t\t\tPersonName = ('%s %s,%s,%s' % (Person.Title, Person.NameFirst, Person.NameMiddle, Person.NameLast)).replace(',,',',').replace(',', ' ').strip()\n\t\t\tcustomer = model.InvCustomer(Name=PersonName ,CityID=Person.AddrCitytownNrID , AddressLabel=AddressLabel, CreditAmount=0.0, \\\n\t\t\t\tInventoryLocation=self.GetDefaultCustomerLocationID(), ExternalID=Person.id)\n\t\t\treturn customer", "def customer_id(self) -> str:\n return self._customer_id", "def add_customer(db_url: str):\n db_url = \"{}/{}\".format(db_url, \"user_api\")\n engine = create_engine(db_url, echo=True)\n session = sessionmaker(engine)()\n customer = Customer()\n session.add(customer)\n session.commit()\n return customer.id", "def add_customer(self, info, dup):\n if not dup:\n self.cursor.execute(\"INSERT INTO customerpersonal VALUES (%s,%s)\", (int(info['phone']), info['address']))\n\n self.cursor.execute(\"INSERT INTO customercredentials VALUES (%s,%s,%s,%s,%s,%s)\",\n (info['loginID'], info['firstName'], info['lastName'], info['salt'],\n info['key'], int(info['phone'])))\n self.db.commit()", "def import_customers(input_data):\n error_count = 0\n insert_count = 0\n LOGGER.info('Starting Customer import')\n for onecust in input_data:\n try:\n Customer(onecust['user_id'], onecust['first_name'], onecust['last_name'],\n onecust['address'], onecust['phone_number'], onecust['email'])\\\n .save(full_clean=True, force_insert=True)\n insert_count += 1\n except ValidationError as valerror:\n LOGGER.exception(\"Error importing data from csv: %s \", valerror.message)\n error_count += 1\n except (OperationError, DuplicateKeyError) as operror:\n LOGGER.exception(\"Error importing data from csv: %s \", operror)\n error_count += 1\n\n return insert_count, error_count", "def customer_id(self, customer_id):\n\n self._customer_id = customer_id", "def customer_id(self, customer_id):\n\n self._customer_id = customer_id", 
"def customer_id(self, customer_id):\n\n self._customer_id = customer_id", "def customer_id(self, customer_id):\n\n self._customer_id = customer_id", "def mint_a_new_cid(self):\n self.update({\"cid\": self.table.c.cid +1}, condition=None)", "def test_import_customer_without_data(self):\n\n customer = self.import_customer.create_customer_object(\"cust002010\", {})\n self.assertIsInstance(customer, Customer)", "def create_user(data):\n return woo_request_helper().post_details(wc_endpoint='customers', params=data)", "def update_customer(cls, customer_data):\n customer_instance = cls.get_customer(customer_data['email'])\n for field_name, values in customer_data:\n setattr(customer_instance, field_name, values)\n customer_instance.save()\n return customer_instance", "def create_dataset(client: DatasetClient, name: str, props: dict,\n dataset_type: str, override: bool = True):\n if override:\n response = client.datasets.list()\n datasets = {r.name: r.dataset_id for r in response}\n if name in datasets:\n client.datasets.delete(datasets[name])\n response = client.datasets.create(name, dataset_type, props=props)\n dataset_id = response.dataset_id\n return dataset_id", "def new_case_id():\n return uuid.uuid4().hex", "def new_case_id():\n return uuid.uuid4().hex", "def customer(self, customer_id=None):\r\n return customers.Customer(self, customer_id)", "def create(customer, **data):\n if isinstance(customer, resources.Customer):\n customer = customer.id\n\n http_client = HttpClient()\n response, _ = http_client.post(routes.url(routes.CARD_RESOURCE, customer_id=customer), data)\n return resources.Card(**response)", "def get_customer_id_by_sale_id_from_table(table, sale_id):\n\n # your code", "def test_data_when_import_customer_with_data(self):\n\n customer = self.customers[0]\n self.assertEqual(\"Jimena\", customer.get_first_name())\n self.assertEqual(\"Sanabria\", customer.get_last_name())\n self.assertEqual(\"21-08-1980\", customer.get_date_of_birth())\n self.assertEqual([\"Nueva Granada #1837\"], customer.get_addresses())\n self.assertEqual([4244270,70759942], customer.get_phones())\n self.assertEqual(\"[email protected]\", customer.get_email())\n self.assertEqual(\"Gold\", customer.get_membership())\n self.assertEqual(\"Active\", customer.get_status())", "def customer_id(self):\n return self._customer_id", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n logger.info(\n f\"Successfully added customer {customer_id} with {credit_limit}\"\n )\n customer.save()\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to added customer {customer_id}. 
{unknown_error}\"\n )\n print(unknown_error)", "def new_dataset(self, name, cluster_name, compressed):\n payload = {\"name\": name, \"cluster_name\": cluster_name, \"compressed\": compressed}\n r = self.request(\n \"post\", url_path_join(USER_DATASET_RESOURCE_URL, self.owner), payload=payload\n )\n self.check_and_raise(r)\n return self.get_json(r)[\"user_dataset\"]", "def add_customer(*, customer_id, name=None, lastname=None, home_address=None,\n phone_number=None, email_address=None, status=None,\n credit_limit=None):\n with DATABASE.transaction():\n try:\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=name,\n last_name=lastname,\n address=home_address,\n phone=phone_number,\n email=email_address,\n is_active=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n LOGGER.info('Database add successful: (%s, %s)', lastname, name)\n return new_customer\n except pw.IntegrityError:\n LOGGER.warning('Database add error: (%s, %s)', lastname, name)", "def token_for_customer(self, token, user):\n kwargs = dict(card=token,\n description='Poold user: %s' % user.id,\n email=user.email)\n try:\n stripe_user = _Customer.create(api_key=self.api_key, **kwargs)\n msg = 'New Stripe Customer Created'\n logger.transaction(msg, **kwargs)\n except stripe.StripeError, e:\n self._handle_error(e, user, kwargs)\n except Exception, e: # Catch any other error and log, then re-raise\n msg = 'An unknown error occurred while creating a new Stripe Customer.'\n data = dict(error_type=type(e).__class__,\n error_message=e.message)\n logger.error(msg, data=data, **kwargs)\n raise\n\n return stripe_user.id", "def save(self, customer):\n\t\t\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\tcursor.execute(\"SELECT * FROM Customers\")\n\t\t\t\tcustomers = cursor.fetchall()\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\tCREATE TABLE IF NOT EXISTS `Customers`\n\t\t\t\t\t(\n\t\t\t\t\t\tcustomer_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\t\t\t\tfirst_name TEXT NOT NULL,\n\t\t\t\t\t\tlast_name TEXT NOT NULL,\n\t\t\t\t\t\temail TEXT NOT NULL,\n\t\t\t\t\t\tphone_number TEXT NOT NULL,\n\t\t\t\t\t\tcity TEXT NOT NULL,\n\t\t\t\t\t\tstate TEXT NOT NULL,\n\t\t\t\t\t\tpostal_zip INTEGER NOT NULL,\n\t\t\t\t\t\taddress TEXT NOT NULL,\n\t\t\t\t\t\tis_active BOOLEAN NOT NULL,\n\t\t\t\t\t\tCONSTRAINT name_unique UNIQUE (first_name, last_name, email, phone_number, city, state, postal_zip, address)\n\t\t\t\t\t)\n\t\t\t\t\"\"\")\n\n\t\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO Customers VALUES (null, \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\")\n\t\t\t\"\"\".format(\n\t\t\t\t\t\tcustomer.get_first_name(), \n\t\t\t\t\t\tcustomer.get_last_name(), \n\t\t\t\t\t\tcustomer.get_email(), \n\t\t\t\t\t\tcustomer.get_phone_number(),\n\t\t\t\t\t\tcustomer.get_city(),\n\t\t\t\t\t\tcustomer.get_state(),\n\t\t\t\t\t\tcustomer.get_postal_zip(),\n\t\t\t\t\t\tcustomer.get_address(),\n\t\t\t\t\t\tcustomer.get_active_status()\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def save(self, customer):\n\t\t\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\tcursor.execute(\"SELECT * FROM Customers\")\n\t\t\t\tcustomers = cursor.fetchall()\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\tCREATE TABLE IF NOT EXISTS `Customers`\n\t\t\t\t\t(\n\t\t\t\t\t\tcustomer_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\t\t\t\tfirst_name TEXT 
NOT NULL,\n\t\t\t\t\t\tlast_name TEXT NOT NULL,\n\t\t\t\t\t\temail TEXT NOT NULL,\n\t\t\t\t\t\tphone_number TEXT NOT NULL,\n\t\t\t\t\t\tcity TEXT NOT NULL,\n\t\t\t\t\t\tstate TEXT NOT NULL,\n\t\t\t\t\t\tpostal_zip INTEGER NOT NULL,\n\t\t\t\t\t\taddress TEXT NOT NULL,\n\t\t\t\t\t\tis_active BOOLEAN NOT NULL,\n\t\t\t\t\t\tCONSTRAINT name_unique UNIQUE (first_name, last_name, email, phone_number, city, state, postal_zip, address)\n\t\t\t\t\t)\n\t\t\t\t\"\"\")\n\n\t\t\tcursor.execute(\"\"\"\n\t\t\tINSERT INTO Customers VALUES (null, \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\")\n\t\t\t\"\"\".format(\n\t\t\t\t\t\tcustomer.get_first_name(), \n\t\t\t\t\t\tcustomer.get_last_name(), \n\t\t\t\t\t\tcustomer.get_email(), \n\t\t\t\t\t\tcustomer.get_phone_number(),\n\t\t\t\t\t\tcustomer.get_city(),\n\t\t\t\t\t\tcustomer.get_state(),\n\t\t\t\t\t\tcustomer.get_postal_zip(),\n\t\t\t\t\t\tcustomer.get_address(),\n\t\t\t\t\t\tcustomer.get_active_status()\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def test_newCustomer(self):\n\t\tdashboardPage = DashboardPage(self.driver)\n\t\tdashboardPage.goToOnboard()\n\n\n\t\tdashboardPage.createCustomer(USER_NAME, S3FOLDER)\n\t\tdashboardPage.goToCustomerList()\n\t\tdashboardPage.sortRecentCustomer()\n\n\t\tinitialId = dashboardPage.getId()\n\t\teditPage = dashboardPage.goToEditPage() \n\t\tcheckId, checkName, checkS3Folder, maxSize, panoMaxSize, checkBox = editPage.getParameters()\n\n\n\t\tself.assertEqual(initialId, checkId)\n\t\tself.assertEqual(checkName, USER_NAME)\n\t\tself.assertEqual(checkS3Folder, S3FOLDER)\n\t\tself.assertEqual(maxSize, MAX_SIZE)\n\t\tself.assertEqual(panoMaxSize, PANO_MAX_SIZE)\n\t\tself.assertEqual(CHECK_BOX, checkBox)", "def test_update_stripe_customer_id(self):\n pass", "def _post(self, data):\n cust_id = data['customer_id']\n user_id = data['user_id']\n\n # Now check if we have this association already\n assoc_q = Query()\n assoc_data = DB_USER_CUSTOMER_RELS_TABLE.search((assoc_q.customer_id == cust_id) &\n (assoc_q.user_id == user_id))\n if assoc_data:\n flask_restful.abort(400, message=\"Bad Request - association between customer \"\n \"and user exists already\")\n\n new_association_id = DB_USER_CUSTOMER_RELS_TABLE.insert(data)\n return new_association_id", "def set_cid(dataset, char_vocab):\n n_records = len(dataset)\n cids = []\n for i in range(n_records):\n words = dataset[i]['words']\n cids = []\n for w in words:\n cids.append([char_vocab[ch] for ch in list(w)])\n dataset[i]['cids'] = list(cids)\n return dataset", "def ht_get_stripe_customer(account, cc_token=None, cc_card=None, cust=None):\n\n\tif (account.stripe_cust is not None):\n\t\tprint 'ht_get_stripe_customer_id(): found customer', account.stripe_cust\n\t\tstripe.api_key = sc_server.config['STRIPE_SECRET']\n\t\tstripe_cust = stripe.Customer.retrieve(account.stripe_cust)\n\t\tprint 'ht_get_stripe_customer_id(): update customer,' + str(stripe_cust.get('email')) + ', w/ info(' + str(cc_token) + ', ' + str(cc_card) + ')'\n\t\tstripe_cust.cards.create(card=cc_token)\n\t\treturn account.stripe_cust\n\n\tprint 'ht_get_stripe_customer_id: customer does not exist, create'\n\ttry:\n\t\tstripe.api_key = sc_server.config['STRIPE_SECRET']\n\n\t\tht_metadata = {}\n\t\tht_metadata['ht_account'] = account.userid\n\n\t\tprint 'ht_get_stripe_customer_id: customer info cc_token: ' + str(cc_token) + ' cc_card: ' + str(cc_card)\n\t\tstripe_customer = stripe.Customer.create(card=cc_token, description=str(account.userid), metadata=ht_metadata, email=account.email)\n\t\tstripe_cust\t= 
stripe_customer['id']\n\t\tstripe_card\t= stripe_customer['default_card']\n\t\tprint 'ht_get_stripe_customer_id: New Customer (%s, %s)' % (stripe_cust, stripe_card)\n\t\tpp(stripe_cust)\n\n\t\tprint 'ht_get_stripe_customer_id: Update Account'\n\t\taccount.stripe_cust = stripe_cust\n\t\tdb_session.add(account)\n\t\tdb_session.commit()\n\texcept Exception as e:\n\t\t# problems with customer create\n\t\tprint type(e), e\n\t\tdb_session.rollback()\n\n\tprint 'ht_get_stripe_customer_id:', stripe_cust\n\treturn stripe_cust", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n # database.transaction; all work given to database gets done or none of it\n with cm.DATABASE.transaction():\n try:\n # .create inserts the data into the database\n new_customer = cm.Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n # .save() will write the data to the database\n new_customer.save()\n LOGGER.info(\"Added customer [%s]\", customer_id)\n except pw.IntegrityError:\n LOGGER.error(\"Customer [%s] not added to database!\", customer_id)\n raise pw.IntegrityError", "def create(self):\n schema = load_customer_schema(self.request)\n customer = models.Klant(**schema)\n self.request.db.add(customer)\n self.request.db.flush()\n return {'klant': customer}", "def test_create_new_customer(client, db_session):\n # Arrange\n customer_data = {\"name\": \"Customer 1\", \"isActive\": True}\n\n # Act\n response = client.post(\"api/customers/\", json=customer_data)\n response_data = response.get_json()\n\n # Assert\n assert response.status_code == status.HTTP_201_CREATED\n assert response_data[\"name\"] == customer_data[\"name\"]", "def insert_customer(self):\n if self.check_user():\n return False\n else:\n cursor = self.db.cursor()\n cursor.execute(\n \"INSERT INTO costumers (dni, costumername, costumerlastname, costumeraddress, costumerpostcode, costumertlfnumber, costumerbirth) VALUES (?, ?, ?, ?, ?, ?, ?)\",\n (self.dni, self.name, self.last_name, self.address, self.postal_code, self.t_number, self.date_birth))\n self.db.commit()\n return True", "def create(self, identity, record=None, data=None, **kwargs):\n data['id'] = data['id'].lower()\n self._validate(data['id'])\n record['id'] = data['id']\n try:\n provider = record.__class__.pid.field._provider.create(record=record)\n except PIDAlreadyExists:\n raise ValidationError(\n 'A community with this identifier already exists.',\n field_name='id',\n )\n setattr(record, 'pid', provider.pid)", "def create(self, validated_data):\n user = validated_data.pop('user')\n customer = Customer.objects.get(user=user)\n validated_data['customer'] = customer\n return SmallFix.objects.create(**validated_data)", "def get_customer_id(self, customer):\n\n\t\t# connect to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def get_customer_id(self, customer):\n\n\t\t# connect 
to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def add_new_customers(self): #stop, id_suffix\n while(len(self.customers)<10) &(self.minutes < 60*(self.closing_hour-self.opening_hour)-5):\n self.customers.append(Customer(self.id_suffix, 'entrance'))\n self.id_suffix += 1\n # for i in range(stop):\n # cust = Customer(str(i) + \"_\" + str(id_suffix), \"entrance\", transition_matrix,\n # )\n # self.customers.append(cust)\n\n # self.id_suffix += 1", "def create_customer(self, user, card_token, plan):\n customer = stripe.Customer.create(\n card=card_token,\n plan=plan,\n email=user.email,\n )\n user.stripe_customer_id = customer.id\n user.save()\n return customer", "def get_customer_id(self):\n return self.machine_config_file_value(\"DEFAULT.CID\").strip('\"')", "def set_or_create_dataset(conn: BlitzGateway, project_id: Union[int, None],\n dataset: Union[str, int],\n across_groups: Optional[bool] = True\n ) -> Union[int, None]:\n if isinstance(dataset, str):\n if project_id:\n dataset_id = post_dataset(conn, dataset, project_id=project_id)\n else:\n dataset_id = post_dataset(conn, dataset)\n print(f'Created new Dataset:{dataset_id}')\n elif (isinstance(dataset, int)):\n dataset_id = dataset\n else:\n raise TypeError(\"'dataset' must be str or int\")\n return dataset_id", "def add_customer(customer_id,\n name,\n lastname,\n home_address,\n phone_number,\n email,\n status,\n credit_limit):\n # pylint: disable = W0703\n try:\n with DB.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email=email,\n status=status.lower(),\n credit_limit=credit_limit)\n new_customer.save()\n logging.info('Customer(s) successfully added')\n\n except Exception as error:\n LOGGER.info(f'Error creating = {name}')\n LOGGER.info(error)", "def _newClusterId(self):\n return self.guidGenerator.new_id()", "def customer_uuid(self, customer_uuid):\n\n self._customer_uuid = customer_uuid", "def add_customer(\n customer_id,\n name,\n last_name,\n home_address,\n phone_number,\n email_address,\n status,\n credit_limit,\n):\n LOGGER.info(\"Adding new customer, %s %s to database\", name, last_name)\n try:\n Customers.create(\n customer_id=customer_id,\n name=name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n LOGGER.info(\"Added new customer %s %s to database\", name, last_name)\n except IntegrityError as e_val:\n LOGGER.warning(\"Customer %s already exists\", customer_id)\n LOGGER.warning(e_val)", "def test_create_customer(self):\n create_customer_url = reverse(\"customer_list\")\n\n customer_info = {\"first_name\": \"Denny\", \"last_name\": \"Wayne\"}\n\n response = self.client.post(\n create_customer_url, data=customer_info, format=\"json\"\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Customer.objects.count(), 5)\n 
self.assertEqual(Customer.objects.get(pk=5).first_name, \"Denny\")\n self.assertEqual(Customer.objects.get(pk=5).last_name, \"Wayne\")", "def set_identifiers(self, data):\n identity = {}\n if self.unique_identifier:\n self.unique_identifiers.append(self.unique_identifier)\n\n # Remove duplicates\n self.unique_identifiers = list(dict.fromkeys(self.unique_identifiers))\n\n try:\n for unique_identifier in self.unique_identifiers:\n identity[unique_identifier] = data[unique_identifier]\n data.pop(unique_identifier, None)\n\n return identity\n except Exception as e:\n return identity", "def create_dataset(self, *args, **kwargs):\n dataset = super().create_dataset(*args, **kwargs)\n length = len(dataset._data)\n dataset.append_col([self.request.user.id] * length,\n header=\"source_user\")\n return dataset", "def create_dataset(self, *args, **kwargs):\n dataset = super().create_dataset(*args, **kwargs)\n length = len(dataset._data)\n dataset.append_col([self.request.user.id] * length,\n header=\"source_user\")\n return dataset", "def customer():\n customer = stripe.Customer.create(\n description=\"User created by pytest test_payments.py\",\n email=generate_random_email(),\n address={\"country\": \"DK\"},\n )\n yield customer\n customer.delete()", "def test_get_customer(self):\n # get the id of a customer\n test_customer = self._create_customers(\"Alex\")\n logging.debug(test_customer)\n test_customer.create() \n resp = self.app.get(\n \"/customers/{}\".format(test_customer.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_customer.name)", "def get_customer_id_by_sale_id(sale_id):\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_customer_id_by_sale_id_from_table(sales_data, sale_id)", "def test_customer_creation():\n agent = AgentFactory()\n customer = CustomerFactory(agent=agent)\n assert agent == customer.agent\n\n customer.name = 'customer test name 1'\n customer.customer_type = 'hom'\n customer.save()\n assert customer.name == 'customer test name 1'\n\n customer.name = 'customer test name 2'\n customer.customer_type = 'oth'\n customer.save()\n assert customer.name == 'customer test name 2'", "def get_customer(self) -> djstripe.models.Customer:\n if self.customer_id:\n return self.customer\n\n name = self.display_name or self.name or \"\"\n email = self.billing_email or self.email or \"\"\n\n if stripe.api_key != \"sk_test_xxxx\":\n try:\n customer = stripe.Customer.create(name=name, email=email)\n self.customer = djstripe.models.Customer.sync_from_stripe_data(customer)\n except Exception:\n logger.exception(\"Error creating customer on Stripe\")\n else:\n self.customer = djstripe.models.Customer.objects.create(\n id=shortuuid.uuid(), name=name, email=email\n )\n\n self.save()\n return self.customer", "def test_create_customer_success(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n self.assertEqual(customer.__str__(), customer_data['email'])", "async def create_active_dataset(\n self, dataset: python_otbr_api.OperationalDataSet\n ) -> None:\n return await self.api.create_active_dataset(dataset)", "def get_all_customer_ids():\n\n # your code", "def m_create_identity(DID, domain_name, website, commercial_name, parent_node_account, password, overwrite):\n\n error, didDoc = create_identity(\n DID, domain_name, website, commercial_name, parent_node_account, password, overwrite)\n if error 
is not None:\n print(error)\n\n print(f\"Created\")", "def create_dataset(request):\n body = json.loads(request.body)\n try:\n org = Organization.objects.get(pk=body['organization_id'])\n except Organization.DoesNotExist:\n return {\"status\": 'error',\n 'message': 'organization_id not provided'}\n record = ImportRecord.objects.create(\n name=body['name'],\n app=\"seed\",\n start_time=datetime.datetime.now(),\n created_at=datetime.datetime.now(),\n last_modified_by=request.user,\n super_organization=org,\n owner=request.user,\n )\n\n return {\n 'status': 'success',\n 'id': record.pk,\n 'name': record.name,\n }", "def _CreateRecordId(self):\n self._record_count += 1\n return '%s_%s' % (self._unique_id, self._record_count)", "def makeIdFactory(self, dataRef):\n # With the default configuration, this IdFactory doesn't do anything, because\n # the IDs it generates are immediately overwritten by the ID from the reference\n # catalog (since that's in config.measurement.copyColumns). But we create one here anyway, to\n # allow us to revert back to the old behavior of generating new forced source IDs,\n # just by renaming the ID in config.copyColumns to \"object_id\".\n expBits = dataRef.get(self.config.coaddName + \"CoaddId_bits\")\n expId = int(dataRef.get(self.config.coaddName + \"CoaddId\"))\n return lsst.afw.table.IdFactory.makeSource(expId, 64 - expBits)", "def _random_customer(cust_dtls) -> tuple:\n return choices(cust_dtls)[0]", "def test_activate_customer(self):\n # create a customer to activate\n body = {\n \"name\": \"Kendall\",\n \"address\": \"333 Bedford Street\",\n \"phone_number\": \"555-555-3333\",\n \"email\": \"[email protected]\",\n \"credit_card\": \"VISA\"\n }\n resp_create = self.app.post('/customers',\n json=body,\n content_type='application/json')\n self.assertEqual(resp_create.status_code, status.HTTP_201_CREATED)\n self.assertEqual(resp_create.get_json()['active'], True)\n customer_id = resp_create.get_json()[\"id\"]\n\n # deactivate the customer\n logging.debug(customer_id)\n resp_deactivate = self.app.put(\"/customers/{}/deactivate\".format(customer_id),\n json=body,\n content_type=\"application/json\")\n self.assertEqual(resp_deactivate.status_code, status.HTTP_200_OK)\n self.assertEqual(resp_deactivate.get_json()[\"active\"], False)\n\n # activate the customer\n logging.debug(customer_id)\n resp_activate = self.app.put(\"/customers/{}/activate\".format(customer_id),\n json=body,\n content_type=\"application/json\")\n self.assertEqual(resp_activate.status_code, status.HTTP_200_OK)\n self.assertEqual(resp_activate.get_json()[\"active\"], True)", "def _get_next_cust_id():\n # print('Customer roster: ' + str(customers))\n key_list = []\n for customer_key in customers:\n stripped_prefix = customer_key[1:]\n # print('Adding key: ' + str(stripped_prefix))\n key_list.append(stripped_prefix)\n key_list.sort()\n last_id = int(key_list[-1])\n return 'C' + str(last_id + 1)", "def add_customer(customer_id, name, lastname, home_address, phone_number, email_address, status,\n credit_limit):\n init_database()\n try:\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n active_status=status,\n credit_limit=credit_limit\n )\n new_customer.save()\n logging.info('New customer, ID %s, added successfully.', customer_id)\n return True\n except peewee.IntegrityError as exc:\n logging.error('Error creating new customer with ID %s: 
%s.', customer_id, exc)\n return False\n finally:\n database.close()", "def generate_customer(self, start_of_month):\n customer_rates = np.random.multivariate_normal(\n mean=self.log_means, cov=self.behave_cov\n )\n customer_rates = self.exp_fun(customer_rates)\n customer_rates = np.maximum(customer_rates - 0.667, 0.333)\n new_customer = Customer(\n customer_rates, channel_name=self.version, start_of_month=start_of_month\n )\n # print(customer_rates)\n return new_customer", "def add_customer(login, password, name, phone, email):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n INSERT INTO Customers\n (login,password,customer_name,phone,email)\n VALUES(?,?,?,?,?)\n \"\"\",\n (login, password, name, phone, email))", "def create(self, identity, data=None, record=None, **kwargs):\n record.metadata = data.get('metadata', {})", "def generate_customer(self):\n customer_rates = np.random.multivariate_normal(\n mean=self.behave_means, cov=self.behave_cov\n )\n customer_rates = customer_rates.clip(\n min=self.min_rate\n ) # clip : no negative rates!\n new_customer = Customer(customer_rates)\n # print(customer_rates)\n return new_customer", "def add(doc, **metadata):\n metadata = _clean(metadata, defdict={\"schema\": \"ctsa::bts:CTSADataset\"})\n doc = validate(doc, metadata[\"schema\"])\n dataset = _index(doc, metadata, \"create\")\n return dataset.meta.id", "def customer(self, customer):\n\n self._customer = customer", "def customer(self, customer):\n\n self._customer = customer" ]
[ "0.6462454", "0.63013685", "0.6199043", "0.6019253", "0.60129994", "0.60011494", "0.59338015", "0.59171224", "0.5879934", "0.58377534", "0.5820006", "0.5756114", "0.5745354", "0.57360977", "0.57085544", "0.5678208", "0.5672292", "0.56669915", "0.56526893", "0.5649953", "0.5642908", "0.56062853", "0.5590031", "0.55736756", "0.5573076", "0.5529865", "0.5502028", "0.54781216", "0.5468273", "0.5462993", "0.5462369", "0.5462369", "0.5462369", "0.5462369", "0.5451434", "0.54438776", "0.54409885", "0.54386693", "0.5429512", "0.54094243", "0.54094243", "0.5402408", "0.54020023", "0.53965425", "0.5360139", "0.53580284", "0.5345826", "0.53368104", "0.53277564", "0.5326647", "0.5321505", "0.5321505", "0.5311982", "0.5307847", "0.53074175", "0.5299486", "0.52954704", "0.5291244", "0.5291077", "0.52722216", "0.52439916", "0.5240103", "0.5229134", "0.52152586", "0.52152586", "0.52067715", "0.5206086", "0.52052903", "0.5203727", "0.5203572", "0.5202955", "0.52012324", "0.5190114", "0.5183559", "0.51750517", "0.51745015", "0.51745015", "0.5166438", "0.5161553", "0.5159525", "0.5150038", "0.51380205", "0.5095395", "0.5094032", "0.50914747", "0.5091243", "0.5084555", "0.5076266", "0.5074712", "0.50726384", "0.50617814", "0.5060021", "0.5056408", "0.5050371", "0.5047486", "0.50382817", "0.50298625", "0.5028603", "0.5007559", "0.5007559" ]
0.6483069
0
Drop from the df_invoice_line dataframe the features in the list given as parameter. All elements of the list are checked to be present in the dataframe columns.
def list_feature_drop(self):
    list_to_drop = list()
    list_not_in_df = list()

    #-------------------------------------------------------------------------
    # Columns are checked to be into df_invoice_line dataframe
    #-------------------------------------------------------------------------
    for col in self._list_feature_to_drop:
        if col in self.df_invoice_line.columns:
            list_to_drop.append(col)
        else:
            list_not_in_df.append(col)

    if 0 == len(list_to_drop):
        self.strprint("\n*** ERROR : no element in list belonging to dataframe!")
    else:
        if len(self._list_feature_to_drop) != len(list_to_drop):
            self.strprint("\n*** WARNING : followings features do not belong to \
dataframe : {}".format(list_not_in_df))
        else:
            pass
        list_col_keep \
        = [col for col in self.df_invoice_line.columns \
        if col not in list_to_drop]

        self.df_invoice_line = self.df_invoice_line[list_col_keep]
    return
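For illustration only (not part of the dataset row above): a minimal standalone sketch of the same drop-with-validation pattern on a plain pandas DataFrame. The helper name drop_features and the sample invoice-line columns are assumptions for this sketch, not taken from the source.

import pandas as pd

def drop_features(df, features_to_drop):
    # Hypothetical helper: split the requested features into those present and those missing.
    present = [col for col in features_to_drop if col in df.columns]
    missing = [col for col in features_to_drop if col not in df.columns]

    if not present:
        print("*** ERROR : no element in list belongs to the dataframe!")
        return df
    if missing:
        print("*** WARNING : these features do not belong to the dataframe : {}".format(missing))

    # Keep every column that was not requested for dropping.
    keep = [col for col in df.columns if col not in present]
    return df[keep]

# Usage with made-up columns: 'Country' is reported as missing, 'UnitPrice' is dropped.
df = pd.DataFrame({"InvoiceNo": [1, 2], "Quantity": [3, 4], "UnitPrice": [5.0, 6.0]})
df = drop_features(df, ["UnitPrice", "Country"])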
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_dfcol(self, drop_list):\n self.data = self.df\n for lbl in drop_list:\n self.data = self.data.drop(lbl, axis=1)\n self.n_features = np.shape(self.data)[1]", "def drop(self,df, column_list):\n df.drop(columns = column_list, inplace = True)\n return df", "def select_feats(df):\n cols = list(df)\n for col in cols:\n if col not in config[\"feats\"] and col != \"label\":\n df = df.drop(columns=col)\n return df", "def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X", "def test_drop_1_variables_str_input(df_vartypes):\n\n transformer = DropFeatures(features_to_drop=\"Marks\")\n X = transformer.fit_transform(df_vartypes)\n\n # expected result\n df = pd.DataFrame(\n {\n \"Name\": [\"tom\", \"nick\", \"krish\", \"jack\"],\n \"City\": [\"London\", \"Manchester\", \"Liverpool\", \"Bristol\"],\n \"Age\": [20, 21, 19, 18],\n \"dob\": pd.date_range(\"2020-02-24\", periods=4, freq=\"T\"),\n }\n )\n\n # init params\n assert transformer.features_to_drop == \"Marks\"\n\n # transform params\n assert X.shape == (4, 4)\n assert type(X) == pd.DataFrame\n pd.testing.assert_frame_equal(X, df)", "def drop(self, columns: List[str]):\n self._check_columns(columns)\n return self._fromdata(\n {\n self.dtype.fields[i].name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n for i in range(self._data.children_size())\n if self.dtype.fields[i].name not in columns\n },\n self._mask,\n )", "def delete_columns(houses:pd.DataFrame)-> pd.DataFrame:\n drop_columns= ['NEXT OPEN HOUSE START TIME', 'NEXT OPEN HOUSE END TIME', \n 'URL (SEE http://www.redfin.com/buy-a-home/comparative-market-analysis FOR INFO ON PRICING)',\n 'MLS#', 'FAVORITE', 'INTERESTED', 'LATITUDE', 'LONGITUDE',\n SOURCE, SALE_TYPE, CITY, STATE]\n houses= houses[houses[STATUS].isin(['Sold'])]\n houses= houses[houses[CITY].isin(['Irvine'])]\n return houses.drop(drop_columns, axis= 1)", "def columns_to_drop(filepath, skiprows):\n candidates = ['unit', 'units', 'total', 'totals', 'id']\n df = pd.read_csv(filepath, skiprows=skiprows)\n drop = set()\n \n # find columns according to a list of names we should drop\n for item in df.columns:\n if item.upper() in [x.upper() for x in candidates]:\n drop.add(item)\n \n # find columns with only one unique value\n unique = df.nunique().to_dict()\n for column, n in unique.items():\n if n == 1:\n drop.add(column)\n \n # find columns with int values that are not a time period\n for column in df.columns:\n if df[column].dtype.name == 'int64':\n if not df[column].nunique() in [12, 24, 48, 96, 24*60/5, 24*60]:\n drop.add(column)\n \n return list(drop)", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n 
df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return", "def remove_features(data, target, fn):\n selected_data = []\n if fn == 'variance':\n sel = VarianceThreshold(threshold=(.1 * (1 - .8)))\n selected_data = sel.fit_transform(data)\n elif fn == 'L1':\n lsvc = LinearSVC(C=0.01, penalty=\"l1\", dual=False).fit(data, target)\n model = SelectFromModel(lsvc, prefit=True)\n selected_data = model.transform(data)\n\n selected_t = np.transpose(selected_data)\n data_t = np.transpose(data)\n\n i = 0\n kept_cols = []\n removed_cols = []\n for i, col in enumerate(data_t):\n if col not in selected_t:\n removed_cols.append(i)\n else:\n kept_cols.append(i)\n return kept_cols, removed_cols", "def clean(df):", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols", "def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)", "def clean_data(data):\n data.dropna(inplace=True)\n for feature in data:\n if ((feature != 'lat') and (feature != 'long') and (feature != 'date')):\n data.drop(data[(data[feature] < 0)].index, inplace=True)\n data.drop(data[(data['price'] == 0)].index, inplace=True)\n data.drop(data[(data['bedrooms'] == 0) & (data['bathrooms'] == 0.0)].index, inplace=True)\n return data", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def select_columns(data):\n\n #Channels to be excluded\n features_delete = np.arange(46, 50)\n features_delete = np.concatenate([features_delete, np.arange(59, 63)])\n features_delete = np.concatenate([features_delete, np.arange(72, 76)])\n features_delete = np.concatenate([features_delete, np.arange(85, 89)])\n features_delete = np.concatenate([features_delete, np.arange(98, 102)])\n features_delete = np.concatenate([features_delete, np.arange(134, 243)])\n 
features_delete = np.concatenate([features_delete, np.arange(244, 249)])\n return np.delete(data, features_delete, 1)", "def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df", "def clean_data(input_file, output_file):\n # Create data frame\n data = pd.read_csv(input_file, sep = \";\")\n \n # Remove unnecessary features from data frame\n data = data.drop([\"Name\",\"Ticket\",\"Cabin\"], axis=1)\n \n # Remove NaN values from remaining features\n data = data.dropna()\n \n # Save ready-to-use file\n data.to_csv(output_file, index=False)", "def _drop_inferior_features_transaction(\n df: pd.DataFrame,\n nan_threshold: float,\n target: str = \"isFraud\"\n) -> pd.DataFrame:\n print(\"Executing inferior feature removal...\")\n df = df.copy()\n num_columns = df.shape[1]\n if nan_threshold > 1.0 or nan_threshold < 0.0:\n raise ValueError(\"nan_threshold should be in range [0, 1].\")\n\n for col in df.columns:\n if col == target: # Preserve the target column.\n continue\n nan_percentage = np.mean(df[col].isna())\n if nan_percentage >= nan_threshold:\n df.drop(columns=[col], inplace=True)\n print(\"{}/{} features left with nan threshold {}\".format(\n df.shape[1], num_columns, nan_threshold\n ))\n return df", "def clean_line_generator_v2(df_pkl=None, fn='untitled'):", "def remove_features(x_train, x_val, x_test, features, ordered_feature_names):\n indices = np.where(np.isin(ordered_feature_names,unwanted_features))\n #print(indices)\n if len(indices) is not 0:\n x_train = np.delete(x_train, indices, axis=1)\n x_test = np.delete(x_test, indices, axis=1)\n x_val = np.delete(x_val,indices,axis=1)\n ordered_feature_names = np.delete(ordered_feature_names, indices, axis=None)\n return x_train,x_val, x_test, ordered_feature_names", "def remove_columns(lst):\n cols_rem = ['yearID','Team','lgID','Name','X','playerID','pops']\n\n for item in cols_rem:\n if item in lst:\n lst.remove(item)\n\n return(lst)", "def deselect (a_data,a_column) :\n loc_data = a_data.drop(a_column,axis = 1) \n return loc_data", "def _filter_features(\n record_batch: pa.RecordBatch,\n feature_allowlist: List[types.FeatureName]) -> pa.RecordBatch:\n columns_to_select = []\n column_names_to_select = []\n for feature_name in feature_allowlist:\n col = arrow_util.get_column(record_batch, feature_name, missing_ok=True)\n if col is None:\n continue\n columns_to_select.append(col)\n column_names_to_select.append(feature_name)\n return pa.RecordBatch.from_arrays(columns_to_select, column_names_to_select)", "def create_df(filename=r'.\\data\\default of credit card clients.xls', remove_pay0=True, resample=False):\n\n filename = filename\n nanDict = {}\n\n df = pd.read_excel(filename, header=1, skiprows=0, index_col=0, na_values=nanDict)\n df.rename(index=str, columns={\"default payment next month\":\"defaultPaymentNextMonth\"}, inplace=True)\n\n # Remove instances with zeros only for past bill statements or paid amounts\n # and not or, remove only when true in all columns\n print('before removing instances where all bill statements or paid amount is zero:', df.shape)\n \n df = df.drop(df[(df.BILL_AMT1 == 0) &\n (df.BILL_AMT2 == 0) &\n (df.BILL_AMT3 == 0) &\n (df.BILL_AMT4 == 0) &\n (df.BILL_AMT5 == 0) &\n (df.BILL_AMT6 == 0)].index, axis=0)\n \n df = df.drop(df[(df.PAY_AMT1 == 0) &\n (df.PAY_AMT2 == 0) &\n (df.PAY_AMT3 == 0) &\n (df.PAY_AMT4 == 0) 
&\n (df.PAY_AMT5 == 0) &\n (df.PAY_AMT6 == 0)].index, axis=0)\n \n print('after removing instances where all bill statements or paid amount is zero:', df.shape)\n\n \n \n print('df shape before illegal values removed:',df.shape)\n print('df after removing illegals:')\n\n df = pay_remove_value(df,-2)\n print(' remove pay=-2', df.shape)\n\n df = bill_amt_remove_negative(df, 0)\n print(' remove Pay_amt, bill_amt <0:', df.shape)\n\n\n df = edu_marr_remove_value(df)\n print(' remove edy=0,5,6, marriage=0:', df.shape)\n\n if remove_pay0:# over 80 % of data lost\n\n df = pay_remove_value(df,0)\n print(' remove pay=0:',df.shape)\n\n\n\n # features and targets\n X = df.loc[:, df.columns !='defaultPaymentNextMonth'].values\n y = df.loc[:, df.columns =='defaultPaymentNextMonth'].values\n\n # categorical variables to one-hot's\n onehotencoder = OneHotEncoder(categories='auto')\n #print(df.iloc[0:, 3])\n \n # transform cat. var. columns into cat. variables.\n # new columns are added at the start, columns before col 1 put behind new columns\n \n X = ColumnTransformer(\n [(\"\",onehotencoder, [1,2,3, 5,6,7,8,9,10]),],\n remainder='passthrough'\n ).fit_transform(X)\n print(' shape of dataset without resampling', X.shape,y.shape)\n\n if resample:\n sm = SMOTE(random_state=seed)\n X, y = sm.fit_resample(X, y.ravel())\n y = y.reshape(-1,1)\n print(' shape of dataset after resampling', X.shape,y.shape)\n #sys.exit()\n return X, y", "def purgeHighSparsedFeatures(df,threshold,barplot=False,title=''):\n \n thr = math.floor(df.shape[1] * threshold)\n rowsToDrop = np.array([])\n logger.debug(Sc+'Patient Threshold is %d' % thr) \n logger.debug(Sc+'Matrix dimensions : Rows %d , Columns %d'% (df.shape[0],df.shape[1]))\n #axis_x = np.arange(0,df.shape[0]) \n axis_y = np.array([]) \n numRows = df.shape[0] \n for i in range(1,numRows):\n arr = pd.isnull(df.iloc[i])\n nnan = np.sum(arr) \n axis_y = np.append(axis_y,nnan)\n if (nnan > thr):\n rowsToDrop = np.append(rowsToDrop,i)\n logger.debug ('%d features to drop ' % len(rowsToDrop))\n np.savetxt('debug/sparseFeaturesaxis_y.txt',axis_y)\n #if(barplot):\n # ax.title.set_text(title)\n # ax.bar(axis_x,axis_y) \n #logger.debug('After purge there are %d columns '% df.shape[1])\n return rowsToDrop", "def drop_columns(self, columns):\n dframe = self.dframe(keep_parent_ids=True)\n self.replace_observations(dframe.drop(columns, axis=1))", "def drop_columns(*, df, columns_to_drop, verbose=True):\n \n assert type(df)==pd.DataFrame, \"please provide df in pandas dataframe format\"\n df = df.copy()\n \n # find unique values in a list, just in case I made the mistake, \n columns_to_drop = list(pd.Series(columns_to_drop).unique())\n\n # .. info, header, \n if verbose==True:\n print(f\"\"\"Removing {len(columns_to_drop)} columns from df\"\"\") \n else:\n pass\n\n \n # remove columns one by one, \n for i,j in enumerate(columns_to_drop):\n try:\n df.drop(columns=[j], axis=1, inplace=True)\n if verbose==True:\n print(f\"{i} removing: {j}, ==> new df.shape: {df.shape}\")\n else:\n pass\n \n except:\n if verbose==True:\n print(f\"{i} .... 
column: {j}, was not found in df, check if name is correct....\")\n else:\n pass\n \n return df", "def remove(dataframe, limit=250):\n logfile = open('logfile_removecolumns.txt', 'w') # Create a logfile\n logfile.write('=====> Time: %s <=====\\n' % time.asctime(time.localtime()))\n logfile.write('=====> Log from file %s.py <===== \\n\\n' % __name__)\n\n columns_overview = dataframe.columns.summary() # Create an overview of the dataframe\n cols_list = dataframe.columns.tolist()\n cols_to_be_deleted = list()\n logfile.write('Overview of the dataframe: \\n%s' % columns_overview)\n\n for stock in range(len(cols_list)): # Walk through all stocks\n if dataframe[cols_list[stock]].isnull().sum() > limit: # Check No. of null values in a column\n cols_to_be_deleted.append(cols_list[stock])\n \n logfile.write('\\nNo. of Columns with more that %d missing values: %s\\n'\n % (limit, len(cols_to_be_deleted)))\n logfile.write('Deleted columns:\\n')\n for col in cols_to_be_deleted:\n logfile.write('%s \\n' % str(col))\n logfile.close()\n \n # Return updated dataframe or list of columns. See test code below\n dataframe_updated = dataframe[dataframe.columns.drop(cols_to_be_deleted)]\n return dataframe_updated", "def clean_train_test(train):\n\n train[\"Month\"] = train.Date.apply(lambda x: x.month)\n train[\"Year\"] = train.Date.apply(lambda x: x.year)\n train[\"Day\"] = train.Date.apply(lambda x: x.day)\n\n # Doesn't actually seem to help\n #train[\"Latitude_int\"] = train.Latitude.apply(int)\n #train[\"Longitude_int\"] = train.Longitude.apply(int)\n\n c2d = [\"Id\", \"Address\", \"AddressNumberAndStreet\", \"WnvPresent\",\n \"NumMosquitos\"]\n\n for column in c2d:\n if column in train.columns:\n train.drop(column, axis=1, inplace=True)\n\n return train", "def drop_bp_cords(self, df: pd.DataFrame) -> pd.DataFrame:\n missing_body_part_fields = list(set(self.bp_col_names) - set(list(df.columns)))\n if len(missing_body_part_fields) > 0:\n BodypartColumnNotFoundWarning(\n msg=f\"SimBA could not drop body-part coordinates, some body-part names are missing in dataframe. 
SimBA expected the following body-parts, that could not be found inside the file: {missing_body_part_fields}\"\n )\n else:\n return df.drop(self.bp_col_names, axis=1)", "def delete_variable(self, columns):\n if not isinstance(columns, (list, tuple)):\n columns = [columns]\n for col in columns:\n if isinstance(col, str):\n col = [i for i, v in enumerate(self.list) if v.name == col][0]\n self.list.pop(col)", "def filterfeatures(df):\n\tfilter_arr = []\n\tfor f in df.columns:\n\t\tif not '.l' in f and not '.r' in f and not '.std' in f and f != 'weight' and f != 'class':\n\t\t\t# filter_arr.append(f.rstrip('.mean'))\n\t\t\tfilter_arr.append(f)\n\treturn filter_arr", "def _drop_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n res = res.select(op.columns_produced())\n return res", "def drop_attributes(df, cutoff=25, extra_add=[]):\n\n df_copy = df.copy()\n\n attributs_drop = []\n for var in sorted(df.columns):\n series = df[var]\n perc_missing = 100 - series.count() / len(series) * 100\n\n if perc_missing > cutoff:\n attributs_drop.append(var)\n else:\n continue\n\n if len(extra_add) == 0:\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n else:\n attributs_drop = attributs_drop + extra_add\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n return df_copy", "def remove_closed_store(data: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame:\n closed_store = pd.read_csv('../data/closedStore/Closed stores list.csv')\n closed_store_list = closed_store['Store'].unique()\n \n for store in closed_store_list:\n data = data[data.Store !=store] # remove the colsed store\n return data", "def drop_columns(cols,actdata,inplace=False):\n \n for ii in cols:\n if ii in actdata.columns:\n actdata.drop(ii,axis=1,inplace=inplace)\n \n return actdata", "def executeFeatures(dfIn, train = True):\n\n if train == True:\n dfOut = dfIn['TARGET'] #update this with numerical columns that don't need cleaning\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = cleanNames(dfOut)\n dfOut = createPolyFeatures(dfOut)\n else:\n dfOut = dfIn['SK_ID_CURR'] ## tags from test set\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = dfOut.drop('CODE_GENDER', axis = 1) ## Need to fix this\n #print(dfOut.columns)\n dfOut = cleanNamesTest(dfOut)\n dfOut = createPolyFeatures(dfOut)\n\n return dfOut", "def data_deletion(data, columnX=\"time\", columnY=\"forceX\"):\n\n subset = data_selection(data, columnX, columnY)\n\n data = data.drop(subset.index)\n return data", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def drop_cols(df, cols=['EMP_S', 'FIRMPDEMP_S', 'GEO_ID', 'GEO_TTL', 
'MSA',\n 'PAYANN_S', 'RCPPDEMP_S', 'ST', 'YEAR', 'YIBSZFI',\n 'YIBSZFI_TTL', 'us', 'Unnamed: 0']):\n df = df.drop(columns=cols)\n return df", "def funcPreprocessing(fncptrain=clsTrain(), fncptest=clsTest(clsTrain), fncpTopFeatures=10):\r\n\r\n UtilityFunc = clsDataFrameUtilityFunctions()\r\n\r\n # fncptrain.COLUMNSTODROP = COLUMNSTODROP\r\n print('Columns to Drop')\r\n print(fncptrain.COLUMNSTODROP)\r\n\r\n # Feature Selection\r\n # addColumnsToDrop\r\n\r\n # addColumnsToDrop = ['f_QuantityBins', 'f_InvoiceDatePartOfDay', 'f_InvoiceDateMonthStart']\r\n # fncptrain.COLUMNSTODROP.extend(addColumnsToDrop)\r\n\r\n dictFuncTrainPreprocessing = {'BasicCheck': BasicCheck.funcTrainBasicChecksAndFilters,\r\n 'FeatureQuantity': FeatureQuantity.funcTrainQuantityFeatureProcessing,\r\n 'FeatureInvoiceDate': FeatureInvoiceDate.funcInvoiceDateDateFeatureExtraction,\r\n 'FeatureStockCode': FeatureStockCode.funcStockCodeFeatureProcessing,\r\n 'FeatureStockCodeLength': FeatureStockCode.funcStockCodeLength,\r\n 'FeatureStockCodeProductReturned': FeatureStockCode.funcTrainStockCodeProductReturned,\r\n 'FeatureCustomerID': FeatureCustomerID.funcCustomerIDFeatureProcessing,\r\n 'FeatureInvoiceNo': FeatureInvoiceNo.funcInvoiceNoFeatureProcessing,\r\n 'FeatureMultiColumnAggregation': CategoryPreprocessing.funcMultiColumnAggregation,\r\n # 'CountryIDAggregation': CountryID.funcCountryIDFeatureProcessing,\r\n 'CountryID': CountryID.CountryIDBucket,\r\n 'DroppingColumns': fncptrain.COLUMNSTODROP\r\n }\r\n # Train Set\r\n fncptrain = UtilityFunc.funcCustomPipeLine(fncptrain, dictFuncTrainPreprocessing)\r\n\r\n # Test Set\r\n dictFuncTestPreprocessing = dictFuncTrainPreprocessing.copy()\r\n if 'BasicCheck' in dictFuncTestPreprocessing.keys():\r\n dictFuncTestPreprocessing.pop('BasicCheck')\r\n if 'FeatureStockCodeProductReturned' in dictFuncTestPreprocessing.keys():\r\n dictFuncTestPreprocessing['FeatureStockCodeProductReturned'] = FeatureStockCode.funcTestStockCodeProductReturned\r\n if 'FeatureQuantity' in dictFuncTestPreprocessing.keys():\r\n dictFuncTestPreprocessing['FeatureQuantity'] = FeatureQuantity.funcTestQuantityFeatureProcessing\r\n if 'DroppingColumns' in dictFuncTrainPreprocessing.keys():\r\n dictFuncTestPreprocessing['DroppingColumns'] = fncptest.clsTrainData.COLUMNSTODROP\r\n fncptest = UtilityFunc.funcCustomPipeLine(fncptest, dictFuncTestPreprocessing)\r\n\r\n print('\\nPerforming Top X category OHE on Description column')\r\n fncptrain = CategoryPreprocessing.funcTopFeatures(fncptrain, 'Description', fncpTopXFeatures=5)\r\n fncptest = CategoryPreprocessing.funcTopFeatures(fncptest, 'Description', fncpTopXFeatures=5)\r\n\r\n print('\\nPerforming One Hot Encoding on Train DataFrame and Test DataFrame')\r\n fncptrain, fncptest = CategoryPreprocessing.funcOHE(fncptrain, fncptest)\r\n\r\n print('\\nPerforming Min Max Normalization on Train DataFrame and Test DataFrame')\r\n fncptrain, fncptest = NumericalPreprocessing.MinMaxNormalizing(fncptrain, fncptest)\r\n\r\n # print('\\nPerforming Polynomial Transformation on Train DataFrame and Test DataFrame')\r\n # fncptrain, fncptest = NumericalPreprocessing.PolynomialTransformation(fncptrain, fncptest, fncpPolynomialInteger=2)\r\n\r\n print('\\nPerforming Feature Selection on Train DataFrame and Test DataFrame')\r\n featureSelection = clsRegressionFeatureSelection(fncptrain, fncptest, fncpTopFeatures)\r\n featureSelection.funcFeatureSelectionUsingFRegression()\r\n\r\n # print('\\nPerforming Power Transformation on the Target Variable')\r\n # fncptrain = 
TargetPreprocessing.TargetTransformation(fncptrain)\r\n\r\n return fncptrain, fncptest, dictFuncTrainPreprocessing, dictFuncTestPreprocessing", "def clean_pokemon_database(\n df: pd.DataFrame,\n selected_generations: list,\n remove_legendary: bool = True\n):\n\n # Filter only selected generations\n df = df[df.generation.isin(selected_generations)]\n\n # Remove legendary\n if remove_legendary:\n df = df[df.is_legendary == 0]\n\n # Rename type columns\n df = df.rename(columns={\"type1\": \"primary_type\", \"type2\": \"secondary_type\"})\n\n # Drop NA values\n df = df.dropna()\n\n # When returning many variables, it is a good practice to give them names:\n return df", "def df_drop_cols(df, col_keep_ls, inplace=True):\n import pandas\n \n vdf_mem_map = isinstance(df, vaex.hdf5.dataset.Hdf5MemoryMapped)\n vdf_df_arr = isinstance(df, vaex.dataframe.DataFrameArrays)\n\n if (vdf_mem_map) or (vdf_df_arr):\n all_col_names = set(df.column_names)\n elif isinstance(df, pandas.core.frame.DataFrame):\n all_col_names = set(df.columns)\n \n col_keep_set = set(col_keep_ls)\n col_drop_set = all_col_names - col_keep_set\n \n for col in col_drop_set:\n if (vdf_mem_map) or (vdf_df_arr):\n df.drop(col, inplace=inplace)\n elif isinstance(df, pandas.core.frame.DataFrame):\n df.drop(col, axis=1, inplace=inplace)", "def clean_data(df_name):\n\n wines = df_name\n wines = wines.rename(columns={'Vintage': 'Year'})\n wines['Location'] = wines['Appellation'].apply(lambda x: x['Region']['Name'])\n wines['Region'] = wines['Appellation'].apply(lambda x: x['Name'])\n wines['Type'] = wines['Varietal'].apply(lambda x: x['WineType']['Name'])\n wines['Grape'] = wines['Varietal'].apply(lambda x: x['Name'])\n wines['Reviews'] = wines['Community'].apply(lambda x: x['Reviews']['Url'])\n drop_columns = ['Appellation', 'Community', 'Description', 'GeoLocation', 'Labels', 'ProductAttributes','Ratings','Retail', 'Url', 'Varietal', 'Vineyard', 'Vintages']\n wines.drop(drop_columns, axis=1, inplace=True)\n wines = wines[['Id', 'Name', 'Year', 'Type', 'Grape', 'Location', 'Region', 'PriceRetail', 'PriceMin', 'PriceMax', 'Reviews']]\n wines['CurrentReviews'] = '' #wines['CurrentReviews'].apply(lambda x: [\"\"])\n wines['PriorReviews'] = '' #wines['PriorReviews'].apply(lambda x: [''])\n\n return wines", "def split_dataframe(df, split_elements_list):\n y = df.filter(split_elements_list)\n x = df.drop(split_elements_list, axis=1)\n\n return x, y", "def clean_data(df_turnstiles):\n\n # sort values in a such a way that the duplicate values sit directly below the originals, so they will be removed.\n df_turnstiles.sort_values(\n [\"C/A\", \"UNIT\", \"SCP\", \"STATION\", \"DATE_TIME\"],\n inplace=True,\n ascending=False,\n )\n # keeps top row, deletes others\n df_turnstiles.drop_duplicates(\n subset=[\"C/A\", \"UNIT\", \"SCP\", \"STATION\", \"DATE_TIME\"], inplace=True\n )\n\n # remove DESC column\n df_turnstiles = df_turnstiles.drop([\"DESC\"], axis=1, errors=\"ignore\")\n\n # remove the many spaces in the EXITS column name\n df_turnstiles.rename(\n columns={\n \"EXITS \": \"EXITS\"\n },\n inplace=True,\n )\n\n return df_turnstiles", "def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... 
finished.\")\n return tx, header", "def featureselection(self, df):\n try:\n # converting blank value to NaN value.\n df = df.replace(' ', np.nan)\n df[\"Long_emp_length\"] = \"\" # adding additional feature col.\n\n # loading list of features\n features = pd.read_csv(self.feature_selected)\n self.features = [x for x in features[\"0\"]]\n df = df[self.features]\n return df\n except Exception as e:\n self._Logger.error(\"Error in Feature Selection: {}\".format(e))", "def keep(self, columns: List[str]):\n self._check_columns(columns)\n return self._fromdata(\n {\n self.dtype.fields[i].name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n for i in range(self._data.children_size())\n if self.dtype.fields[i].name in columns\n },\n self._mask,\n )", "def get_cols_drop():", "def fetchAndCleanDataframe(self):\n\n df = pd.read_csv('/Users/apple4u/Desktop/goksel tez/results_with_scenarios.csv')\n df.insider_label.fillna(0, inplace=True) # replaces null fields with 0\n df = df.drop(columns=['employee_name', 'scenario', 'role'])\n df = df.rename(columns={'insider_label':'label'})\n #df['label'] = df['insider_label'].astype('int64')\n #df.drop(columns='insider_label', inplace=True)\n df.set_index('user_id', inplace=True)\n X = df.iloc[:, :5].values #fetch all records first 5 columns\n y = df.label.values\n print(df.head())\n return X, y", "def drop_columns(df: DataFrame, *columns_to_drop: str) -> DataFrame:\n return df.drop(*columns_to_drop)", "def filter_data(df, needed_columns, not_null_columns=[]):\n\n if all(x in needed_columns for x in not_null_columns):\n\n df = get_needed_columns(df, needed_columns)\n #Use the pandas method bc the other method was erroring on boolean index.\n #IM - 9/23/22\n df = df.dropna(subset=not_null_columns)#remove_rows_with_null_values(df, not_null_columns)\n\n return df\n else:\n print(\"Developer error, not null columns should be a subset of needed columns\")\n return df", "def remove_bad_columns(df):\n columns = []\n if 'job_runner_name' in df.columns:\n columns.append('job_runner_name')\n\n if 'handler' in df.columns:\n columns.append('handler')\n\n if 'destination_id' in df.columns:\n columns.append('destination_id')\n\n if 'input_file' in df.columns:\n columns.append('input_file')\n\n for column in columns:\n del df[column]\n\n return df", "def remove_features(sets_x, unused_features):\n\n # initiate empty list for return variable\n significant_x = [] \n\n # iterate through subsets and their corresponding insignificant features\n for x, features in zip(sets_x, unused_features):\n # remove features from subset and store the result into list\n significant_x.append(np.delete(x,features,1))\n \n return significant_x", "def drop_columns(cols,actdata,inplace=False):\n\n for ii in cols:\n if ii in actdata.columns:\n actdata.drop(ii,axis=1,inplace=inplace)\n\n return actdata", "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def 
_drop_cols(self, duplicate_cols):\n self._hybrid_meta.drop(\n duplicate_cols + DROPPED_COLUMNS,\n axis=1, inplace=True, errors='ignore'\n )", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def postprocess_features(self, featurelist):\n \n ##: To overwrite the time of features that are in a clause\n for feature in featurelist:\n if feature.inClause() or self.is_in_clause(feature.getStartPos(), feature.getSentNum()):\n feature = self.assign_feature_time_with_references(feature, self.timeReferences, feature.getStartPos(), True)\n \n ##: To set time of features after death to none. Currently disabled.\n# deathDates = []\n# for feature in featurelist:\n# if 'Death' in [tg[1] for tg in feature.getTags()]:\n# dt = feature.getDateTime()\n# if dt and feature.getTlink().getTimexes()[0].getType()!='VIRTUAL': ##: only original date counts\n# deathDates.append(dt)\n# \n# if feature.getType()=='CAUSE_OF_DEATH':\n# feature.setTlink(None)\n# \n# if deathDates:\n# deathDate = min(deathDates)\n# for feature in featurelist: \n# dt = feature.getDateTime()\n# if dt and dt>deathDate:\n# feature.setTlink(None)\n \n ##: Remove time from features in the blockout range, \n ##: e.g., A 34 years old male with{ history of leg pain }who on ....\n for feature in featurelist:\n posStart = feature.getStartPos()\n posEnd = feature.getEndPos()\n for r in self.blockout_range:\n if (posStart>r[0] and posStart<r[1]) or (posEnd>r[0] and posEnd<r[1]):\n timex = feature.getTimex()\n if timex:\n tpos = timex.getStartPos()\n if tpos>=r[0] and tpos<=r[1]:\n continue\n \n feature.setTlink(None)\n \n return featurelist", "def fit(self, X, y=None):\n # check input dataframe\n X = _is_dataframe(X)\n\n # check for non existent columns\n non_existent = [x for x in self.features_to_drop if x not in X.columns]\n if non_existent:\n raise KeyError(\n f\"Columns '{', '.join(non_existent)}' not present in input dataframe, \"\n f\"please check the columns and enter a new list of features to drop\"\n )\n\n # check that user does not drop all columns returning empty dataframe\n if len(self.features_to_drop) == len(X.columns):\n raise ValueError(\n \"The resulting dataframe will have no columns after dropping all \"\n \"existing variables\"\n )\n\n # add input shape\n self.input_shape_ = X.shape\n\n return self", "def drop_indices(self, df) -> None:\n assert self.is_appropriate_data_instance(df)\n # no operation needed", "def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_", "def clean_data(df: pyspark.sql.dataframe.DataFrame, \n spark: pyspark.sql.session.SparkSession):\n \n ### 1) Rename column\n df = df.withColumnRenamed(\"POS Margin on Net Sales\", \"Margin\")\n df = df.withColumnRenamed(\"POS Net Sales\", \"NetSales\")\n df = df.withColumnRenamed(\"Stock Balance Qty\", \"StockQty\")\n df = df.withColumnRenamed(\"POS Qty Sold\", \"QtySold\")\n \n # 2) Conver the `df` columns to `FloatType()`\n columns = ['NetSales', 'QtySold', 'Margin', 'StockQty']\n df = convertColumn(df, columns, FloatType())\n # 
Convert Date column to timestamp \n df = df.withColumn(\"Date\", to_timestamp(df.Date, \"yyyyMMdd\"))\n\n # 3) Remove the closed store\n df = remove_closed_store(df)\n \n # 4) Short SKU name by removing itemID\n \"\"\"\n short_column_udf = udf(lambda name: short_column(name), StringType())\n count = df.count()\n df = df.withColumn(\"SKU\", short_column_udf(col(\"SKU\")))\n assert df.count() == count, \"Some error here\" # test on overall dataset\n print(df.count())\n \"\"\"\n \n # 5) Remove items if no sales in the whole month, since they are not OOS\n df = remove_no_sale_item(df)\n \n # 6) Remove items if no stock in the whole month, since they are not OOS\n df = remove_no_stock_item(df)\n \n # 7) Add more rows to ensure each item in each store has the full-month records\n date_generated = create_list_dates(df)\n df = clean_and_add_date(df, date_generated, spark)\n \n # 8) Replace none to 0\n df = df.fillna(0)\n \n \n # 9) convert float number between -1 and 1 to 0\n #clean_numeric_column_udf = udf(lambda name: clean_numeric_column(name), FloatType())\n #df = df.withColumn(\"StockQty\", clean_numeric_column(col(\"StockQty\")))\n \n # 10) save the cleaned dataset, overwrite the old one.\n #df.coalesce(1).write.option(\"header\", \"true\").mode('overwrite').csv(\"../data/cleanedData\") # only specify folder name\n print(\"Data processing finished.\") \n \n return df, date_generated", "def remove_rows_without_feature(df, feature):\n return df[np.isfinite(df[feature])]", "def preprocess_feature(df):", "def clean_data(df):\n # expand categories as new data frame\n new_columns = [re.sub('[^a-zA-Z]', ' ', i).strip() for i in df['categories'][0].split(';')]\n cat_df = df['categories'].str.split(';', expand=True)\n cat_df.columns = new_columns\n \n # remove anything except numerical value\n # change new feature's type\n for column in cat_df:\n cat_df[column] = cat_df[column].apply(lambda x: re.sub('[^0-9]', '', x)).astype('str')\n \n # concatenate old dataframe and new features dataframe\n # remove olf categories column\n new_df = pd.concat([df, cat_df], axis=1)\n new_df = new_df.drop('categories', axis=1).drop_duplicates()\n binary_df = new_df[new_df['related']!='2']\n \n return binary_df", "def _drop_fields(usecols, dtype, dropcols):\n for col in dropcols:\n try:\n usecols.remove(col)\n except ValueError:\n pass\n try:\n del dtype[col]\n except KeyError:\n pass\n return usecols, dtype", "def clean_features(dataframe, features, target=None, fill=None):\n\n # Copy and split data frames\n X_df = dataframe[features].copy()\n if target:\n y_df = dataframe[target].copy()\n\n # Create dummy features\n dummies = DUMMY_FEATS.intersection(set(features))\n if dummies:\n X_df = one_hot(X_df, dummies)\n \n # Fill missing dummy features\n if fill:\n X_df = fill_features(X_df, fill)\n\n # Replace YearMade == 1000 with NaN\n if 'YearMade' in features:\n X_df.loc[X_df['YearMade'] == 1000, 'YearMade'] = X_df.loc[X_df['YearMade'] > 1000, 'YearMade'].median()\n\n # Parse year from datetime sold\n if 'saledate' in features:\n X_df['SaleYear'] = pd.to_datetime(X_df['saledate']).dt.year\n X_df['SaleMonth'] = pd.to_datetime(X_df['saledate']).dt.month\n X_df.drop('saledate', axis=1, inplace=True)\n\n ## All features\n # Impute NaN values with median\n X_df.fillna(X_df.median(axis=0), axis=0, inplace=True)\n\n if target:\n return X_df, y_df\n else:\n return X_df", "def drop_columns(df, exclusion):\n for c in df.columns.values:\n if c not in exclusion:\n df.drop(c, axis=1, inplace=True)\n return df", "def 
remove_itineraries(itinerary_list, trip_link_df, itinerary_df, legs_df, segments_df):\n\n # gets the indexes (itinerary uuid) associated with points to be removed\n to_remove_itins = itinerary_list.index[itinerary_list.values]\n # Builds a boolean list for the trip link table\n to_remove_trip_link_bool = np.in1d(trip_link_df['itinerary_id'], to_remove_itins)\n # Builds a list of legs uuids to be removed\n to_remove_legs = trip_link_df.loc[to_remove_trip_link_bool, 'leg_id'].dropna().unique()\n # Builds a list of segments uuids to be removed\n to_remove_segs = trip_link_df.loc[to_remove_trip_link_bool, 'segment_id'].dropna().unique()\n\n # remove duplicates\n trip_link_df.drop(trip_link_df.index[to_remove_trip_link_bool], inplace=True)\n itinerary_df.drop(to_remove_itins, inplace=True)\n legs_df.drop(to_remove_legs, inplace=True)\n segments_df.drop(to_remove_segs, inplace=True)\n\n return", "def make_claim_df(claim_list, columns = ['Sl','Name of Bank','Name of Branch','A/C Number (15 digit)','A/C Title','Amount of Remittance in BDT','Date of A/C Credit','Remittance Received through BEFTN/RTGS','Name of Remittance Collecting/BEFTN Processing Bank','Date of Claim']):\n sl=[]\n nrbc_bank = []\n branch = []\n ac_no = []\n ac_title = []\n amount=[]\n date_account_credit=[]\n channel = []\n other_bank=[]\n claim_date=[]\n i=1\n for claim in claim_list:\n sl.append(i)\n i=i+1\n nrbc_bank.append(\"NRBC Bank Ltd.\")\n branch.append(claim.branch.name.upper())\n ac_no.append(claim.account_no)\n ac_title.append(claim.account_title)\n amount.append(claim.remittance_amount)\n date_account_credit.append(claim.date_account_credit)\n channel.append(claim.get_channel_display())\n other_bank.append(claim.collecting_bank.name)\n claim_date.append(claim.date_claim.date())\n dc = {\n 'SL':sl,\n 'Name of Bank':nrbc_bank,\n 'Name of Branch': branch,\n 'A/C Number': ac_no,\n 'A/C Title': ac_title,\n 'Amount of Remittance in BDT': amount,\n 'Date of A/C Credit': date_account_credit,\n 'Remittance Received Through BEFTN/RTGS': channel,\n 'Name of Remittance Processing Bank': other_bank,\n 'Date of Claim': claim_date\n }\n df = pd.DataFrame(dc)\n return df.sort_values(by=['Name of Remittance Processing Bank',])", "def clean_data(df):\n \n any_location_id_missing = (df.PULocationID > 263) | (df.DOLocationID > 263)\n df = df.drop(df.index[any_location_id_missing])\n \n df = df[df.tpep_dropoff_datetime > df.tpep_pickup_datetime]\n\n df.PULocationID.replace([104, 105], 103)\n \n return df", "def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return", "def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()", "def drop_illogical(df,var1,var2):\r\n #Mask the illogical entries\r\n mask = df[var1]>df[var2]\r\n #Record the number of entries\r\n NumRecords = df.shape[0]\r\n #drop the illogical entries\r\n df = df[df.keys()][~mask]\r\n #Notify the user how many records were dropped\r\n print('{} records dropped because {} is greater than {}'.format(NumRecords-df.shape[0],var1,var2))\r\n \r\n return df", "def exclude_some_features(data,features,given=None):\n if given is None:\n return data,features\n common,ind1,ind2=take_common_features(features,given)\n data=np.delete(data,ind1,axis=1)\n features=np.delete(features,ind1)\n return data,features", "def remove_reserved_keys(df, exclude=[]):\n reserved_keys = __column_intersect(\n df, 
BAMBOO_RESERVED_KEYS).difference(set(exclude))\n\n return df.drop(reserved_keys, axis=1)", "def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]", "def _remove_unconfirmed_transactions(frame):\n\n frame.drop(frame.loc[frame['posted'] == False].index, inplace=True)\n return frame", "def select_features(dfp):\n df = dfp[['5', '7', '12']].copy() \n df.columns=['type', 'duration','location']\n for col in df.columns:\n strings = df[col].unique()\n if col != \"location\":\n mapper = dict( zip(strings, range(len(strings))) )\n df[col].replace(mapper, inplace=True)\n else:\n df[col] = df[col].str.contains(\"LONDON\").astype(int)\n return df", "def prune(df, regex_list):\n for regex_pattern in regex_list:\n df = df[~df.case_action.str.contains(regex_pattern)]\n return df", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def drop_id_columns(df):\n id_cols = get_id_columns(df)\n if len(id_cols) > 0:\n df = df.drop(id_cols, axis = 1)\n\n return df", "def remove_not_available(df):\n drop_indices = df.index[df['genre'] == 'Not Available'].tolist()\n df = df.drop(drop_indices)\n return df", "def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df", "def execute_drop_strategy(dataframe, arg_drop_count):\n\n logging.debug('>>>>>>>>> Using drop rows strategy <<<<<<<<<<<<')\n\n selected_drop_rows = MAX_ROW_TO_SHOW if arg_drop_count is None else arg_drop_count\n\n if selected_drop_rows == 1:\n dataframe = dataframe.drop(dataframe.index[0], inplace=True)\n else:\n dataframe.drop(dataframe.index[0:selected_drop_rows], inplace=True)", "def test_drop_zero_variance_on_subset_columns_with_zv_removals(data):\n step = DropZVColumnsStep(['released', 'episodes'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns", "def preprocess_data_pandas(raw_data_file: str, features_file: str, cols_to_save: List[str]) -> None:\n\n df = pd.read_csv(raw_data_file)\n\n 
df.sort_values(by=[\"id\", \"loan_date\"], inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n df[\"loan_date\"] = pd.to_datetime(df['loan_date'], errors='coerce')\n df[\"birthday\"] = pd.to_datetime(df['birthday'], errors='coerce')\n df[\"job_start_date\"] = pd.to_datetime(df['job_start_date'], errors='coerce')\n\n df_grouped_by_id = df.groupby('id')\n\n # Feature nb_previous_loans\n df[\"nb_previous_loans\"] = df_grouped_by_id[\"loan_date\"].rank(method=\"first\") - 1\n\n # Feature avg_amount_loans_previous\n df[\"avg_amount_loans_previous\"] = df_grouped_by_id[\"loan_amount\"].transform(lambda x: x.expanding().mean())\n\n # Feature age\n df['age'] = (pd.to_datetime('today').normalize() - df['birthday']).dt.days // 365\n\n # Feature years_on_the_job\n df['years_on_the_job'] = (pd.to_datetime('today').normalize() - df['job_start_date']).dt.days // 365\n\n # Feature flag_own_car\n df['flag_own_car'] = df.flag_own_car.apply(lambda x: 0 if x == 'N' else 1)\n\n df = df[cols_to_save]\n df.to_csv(features_file, index=False)", "def delete_sample(self, rows):\n if not isinstance(rows, (list, tuple)):\n rows = [rows]\n for v in self.list:\n v.value = np.delete(v.value, rows, axis=0)", "def remove_ei(remove_fields: np.ndarray, remove_values: np.ndarray):\n remove_fields = remove_fields[2:10]\n remove_values = remove_values[:, 2:10]\n return remove_fields, remove_values", "def cleaning(df, file=\"proteinGroups\"):\r\n columns = df.columns\r\n if file == \"proteinGroups\":\r\n if (\"Potential contaminant\" not in columns) or\\\r\n (\"Reverse\" not in columns) or\\\r\n (\"Only identified by site\" not in columns):\r\n print(\"Is this data already cleaned?\\nMandatory columns for cleaning not present in data!\")\r\n print(\"Returning provided dataframe!\")\r\n return df\r\n df = df[(df['Potential contaminant'].isnull()) &\r\n (df['Reverse'].isnull()) &\r\n (df['Only identified by site'].isnull())]\r\n df.drop(['Potential contaminant',\"Reverse\", 'Only identified by site'], axis=1, inplace=True)\r\n elif (file == \"Phospho (STY)\") or (file == \"evidence\") or (file == \"modificationSpecificPeptides\"):\r\n if (\"Potential contaminant\" not in columns) or\\\r\n (\"Reverse\" not in columns):\r\n print(\"Is this data already cleaned?\\nMandatory columns for cleaning not present in data!\")\r\n print(\"Returning provided dataframe!\")\r\n return df\r\n df = df[(df['Potential contaminant'].isnull()) &\r\n (df['Reverse'].isnull())]\r\n df.drop(['Potential contaminant',\"Reverse\"], axis=1, inplace=True)\r\n return df", "def extract_description_features(df, desc_col_name, remove_var=False):\n df['isAcctNo'] = df[desc_col_name].str.contains('$ACCT_NO', regex=False).astype('int')\n df['isForeignCurr'] = df[desc_col_name].str.contains('$CURRENCY', regex=False).astype('int')\n # df['isForeignCountry'] = df[desc_col_name].str.contains('$FOREIGN_COUNTRY', regex=False).astype('int')\n\n if remove_var:\n regex = '\\$ACCT_NO|\\$CURRENCY|\\$FOREIGN_COUNTRY'\n df[desc_col_name] = df[desc_col_name].str.replace(regex, '', regex=True)\n return df", "def clean_dataframe(df, column_list, length):\n list_of_actual_columns = list(dataframe.columns.values)\n for each_column in column_list:\n if each_column not in list_of_actual_columns:\n return False\n if number_of_rows < 1000:\n return False\n return True\n\n category_value_counts = df['Type'].value_counts()\n category_value_counts.to_csv('category_value_counts')\n cdf = pd.read_csv('category_value_counts')\n df = df.merge(cdf, left_on='Type', 
right_on='OldCategory')\n df['Datetime'] = pd.to_datetime(df['Datetime'])\n df[(df['Datetime'] > '2019-12-31')]\n df.sort_values(by='Datetime', ascending=False)", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df", "def drop_corr_columns(df, drop_columns=True, print_columns=True, threshold=0.98):\n\n # 1. calculation\n CorrCoeff = df.corr()\n\n # 2. report\n CorrFieldsList = []\n print('Columns with correlations more than %s :' % str(threshold))\n for i in CorrCoeff:\n for j in CorrCoeff.index[CorrCoeff[i] >= threshold]:\n if i != j and j not in CorrFieldsList:\n CorrFieldsList.append(j)\n if print_columns:\n print(\"%s-->%s: r^2=%f\" % (i, j, CorrCoeff[i][CorrCoeff.index == j].values[0]))\n #print()\n #print('Correlated columns count: %', len(CorrFieldsList))\n\n # 3. dropping\n if drop_columns:\n print('%s columns total' % df.shape[1])\n df = df.drop(CorrFieldsList, 1)\n print('%s columns left' % df.shape[1])\n\n return df" ]
[ "0.6483683", "0.61852366", "0.61701894", "0.61167115", "0.5998328", "0.5879889", "0.5828969", "0.57773757", "0.57595223", "0.56772375", "0.55653924", "0.55105036", "0.5499361", "0.5488699", "0.5475562", "0.54356617", "0.5433665", "0.5431893", "0.54176056", "0.5401929", "0.53749037", "0.5363736", "0.53616303", "0.53577864", "0.53537077", "0.53521913", "0.5339517", "0.5323909", "0.53152", "0.52870375", "0.5284767", "0.5274541", "0.52718765", "0.52609867", "0.52483827", "0.52398044", "0.52378714", "0.52302945", "0.5221416", "0.5220149", "0.51995605", "0.5199277", "0.5191736", "0.5190914", "0.51885104", "0.51720655", "0.5167367", "0.51417947", "0.5137446", "0.51353604", "0.5131985", "0.5109337", "0.51070374", "0.51052034", "0.5099721", "0.509873", "0.50942564", "0.50840557", "0.5078604", "0.5078604", "0.5078282", "0.5045384", "0.5043412", "0.504209", "0.5038617", "0.50375414", "0.5036936", "0.50207955", "0.5014739", "0.5008082", "0.5007716", "0.49895945", "0.4985515", "0.4966026", "0.4956431", "0.49496943", "0.49445844", "0.49334073", "0.49185988", "0.49120873", "0.4908382", "0.49075893", "0.49065176", "0.49051175", "0.49048913", "0.49042255", "0.49034318", "0.49029437", "0.4886102", "0.4878605", "0.48649585", "0.48629633", "0.48590872", "0.48546994", "0.48530212", "0.48491755", "0.48462293", "0.48448962", "0.48434973", "0.48426166" ]
0.7785573
0
Process df_invoice_line.Description with NLTK package.
def feature_description_nlp(self):
        #-------------------------------------------------------------------------
        # Returned dataframe is aggregated with weights from self.vectorizer
        #-------------------------------------------------------------------------
        list_no_words=['SET','PACK']

        self.df_invoice_line, vectorizer, matrix_weights \
        = p5_util.nlp_process(self.df_invoice_line,'Description'\
        , vectorizer=self.vectorizer, list_no_words=list_no_words)

        #-------------------------------------------------------------------------
        # Each vectorized column 'x' is renamed w_nlp_i
        #-------------------------------------------------------------------------
        dict_matching_name = dict()
        for col in self.df_invoice_line.columns:
            if str(col).isdigit() is True:
                new_col_name = "w_nlp_"+str(col)
                dict_matching_name[col] = new_col_name

        self.df_invoice_line.rename(columns=dict_matching_name,inplace=True)

        #-------------------------------------------------------------------------
        # Description is droped from columns
        #-------------------------------------------------------------------------
        del(self.df_invoice_line['Description'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def treat_new_line(self,text):\n text=text.replace('.\\n','. ')\n text=re.sub(r'(\\n\\s*)+\\n+', '\\n\\n',text )\n \n lw=text.split('\\n\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n \n for i in range(1,len(lw)):\n try:\n\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','') !='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n\n\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1][-1].replace(' ','')!='':\n\n if lw[i-1][-1].replace(' ','')[-1]!='-':\n lw[i-1]+=\"\"\n else:\n\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n else:\n lw[i-1]+=\"\\n\\n\"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n \n text=\"\".join(lw)\n \n lw=text.split('\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n for i in range(1,len(lw)):\n try:\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1]==\"-\":\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n\n\n\n else:\n lw[i-1]+=\" \"\n else:\n lw[i-1]+=\" \"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf 
files')\n \n text=\"\".join(lw)\n return text", "def process_text(self, text, language):", "def data_transform_nlp(self):\n df_invoice_line = None\n \n is_build_step = False\n\n if self._vectorizer_nlp is None:\n is_build_step = True\n \n list_no_words=['SET','PACK']\n\n df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \\\n = p5_util.nlp_process(self.df_invoice_line\\\n , 'Description' , vectorizer= self._vectorizer_nlp\\\n , list_no_words=list_no_words, is_verbose= self.is_verbose)\n \n if df_invoice_line is None:\n self.strprint(\"***ERROR : NLP process interrupted!\")\n return\n \n \n #-------------------------------------------------------------------------\n # NLP weights are cumulated (sumerized) per customer\n #-------------------------------------------------------------------------\n if csr_matrix_weights is None:\n csr_matrix_weights \\\n = p5_util.object_load('./data/matrix_weights_NLP.dump')\n else:\n pass\n \n self.strprint(\"df_invoice_line : \"+str(df_invoice_line.shape))\n \n self.dbg_df = df_invoice_line.copy()\n \n root_name = 'w_nlp_'\n self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\\\n , csr_matrix_weights, root_name)\n\n del(csr_matrix_weights)\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #------------------------------------------------------------------------- \n self.strprint(\"self._df_w_nlp : \"+str(self._df_w_nlp.shape))\n\n root_name_pca = 'nlp_pca_'\n n_dim = self._nlp_pca_ndim\n \n df_customers_pca_nlp, self._pca_nlp \\\n = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\\\n , p_is_scale=False, pca=self._pca_nlp)\n \n self.strprint(\"df_customers_pca_nlp : \" +str(df_customers_pca_nlp.shape))\n\n #-------------------------------------------------------------------------\n # Backup of NLP features per customer\n #-------------------------------------------------------------------------\n if is_build_step is True:\n p5_util.object_dump(df_customers_pca_nlp\\\n , self._df_customers_nlp_fileName)\n else:\n self._df_customers_pca_nlp = df_customers_pca_nlp.copy()\n \n return", "def create_NER(self, dataframe):\n\n dataframe['entities'] = dataframe['line']\n entity_dict = {}\n entity_type = {}\n\n for i, val in enumerate(dataframe['entities']):\n e1 = re.findall('<e1>(.*?)</e1>', val)\n e2 = re.findall('<e2>(.*?)</e2>', val)\n entity_dict[i+1] = (str(e1[0]), str(e2[0]))\n doc = nlp(e1[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = ent.label_\n else:\n entity_type[i] = ('NOT RECOGNIZED')\n \n doc = nlp(e2[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = entity_type[i] + ent.label_\n else:\n entity_type[i] = entity_type[i] + ('NOT RECOGNIZED')\n\n entity_dataframe = self.create_dataframe(entity_dict, ['e1', 'e2'])\n entity_type_df = self.create_dataframe(entity_type, ['e1', 'e2'])\n\n dataframe = dataframe.drop(columns=['entities'])\n dataframe['e1'] = entity_dataframe['e1']\n dataframe['e2'] = entity_dataframe['e2']\n dataframe['e1_type'] = entity_type_df['e1']\n dataframe['e2_type'] = entity_type_df['e2']\n\n return dataframe", "def remove_info(text, journal_id, label, doc_type='inkomst'):\r\n sections = text.split('NEWPAR')\r\n cleaned_text = ''\r\n diagnose_detected = False\r\n for section in sections:\r\n if section:\r\n section_header =list(filter(None, section.split(' ')))[0]\r\n #print(section_header)\r\n if 'diagnose' in section_header.lower() or 'DIAGNOSE' in section or 'Diagnose :' in section or 'Problemstilling :' 
in section:\r\n diagnose_detected = True\r\n else:\r\n cleaned_text += section + ' '\r\n if not diagnose_detected :\r\n print('No DIAGNOSE in: ', journal_id)\r\n return cleaned_text", "def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True", "def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': 
order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def summary_line_and_description():", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()", "def label_notes(all_notes_lines):\n# nf = nemo.core.NeuralModuleFactory(backend=nemo.core.Backend.PyTorch, log_dir=None)\n #note_line_queries = notes.split('\\n')\n #note_line_queries = ['pt arrived obtunded not answering questions responding to voice and sternal rub speaking in garbled voice pupils unequal left 3mm and right 2mm brisk bilaterally trauma sicu MD aware currently recieving keppra IV finished dilantin gtt due for level at 08a EEG today LSCTA on 3LNC sats 100 % SBP 90 s to 100 s HR NSR no ectopy 60 s NS @ 75cc continuous +BS no stools rec d lactulose at OSH to recieve PR q4h abd soft non-tender non-distended foley in place draining adequate amt clear yellow urine skin intact left 20G x2 WNL wife Name NI']\n\n# labels_dict = get_vocab(LABELS_DICT)\n# pretrained_bert_model = nemo_nlp.nm.trainables.get_huggingface_model(\n# bert_config=BERT_CONFIG, pretrained_model_name=PRETRAINED_MODEL_NAME\n# )\n\n# tokenizer = nemo.collections.nlp.data.tokenizers.get_tokenizer(\n# tokenizer_name=TOKENIZER,\n# pretrained_model_name=PRETRAINED_MODEL_NAME,\n# tokenizer_model=TOKENIZER_MODEL,\n# )\n# hidden_size = 
pretrained_bert_model.hidden_size\n\n load_datalayer_begin_time = time.time()\n data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationInferDataLayer(\n queries=all_notes_lines, tokenizer=tokenizer, max_seq_length=MAX_SEQ_LENGTH, batch_size=2000\n )\n load_datalayer_end_time = time.time()\n\n classifier = TokenClassifier(hidden_size=hidden_size, num_classes=len(labels_dict))\n\n input_ids, input_type_ids, input_mask, _, subtokens_mask = data_layer()\n \n load_hidden_states_begin_time = time.time()\n hidden_states = pretrained_bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)\n load_hidden_states_end_time = time.time()\n load_logits_begin_time = time.time()\n logits = classifier(hidden_states=hidden_states)\n load_logits_end_time = time.time()\n\n ###########################################################################\n\n # Instantiate an optimizer to perform `infer` action\n infer_begin_time = time.time()\n evaluated_tensors = nf.infer(tensors=[logits, subtokens_mask], checkpoint_dir=CHECKPOINT_DIR)\n infer_end_time = time.time()\n\n logits, subtokens_mask = [concatenate(tensors) for tensors in evaluated_tensors]\n\n preds = np.argmax(logits, axis=2) \n all_notes_labeled_lines = []\n\n for i, query in enumerate(all_notes_lines):\n logging.info(f'Query: {query}')\n\n pred = preds[i][subtokens_mask[i] > 0.5]\n words = query.strip().split()\n\n #replaced with logic below instead of raising an error:\n '''\n if len(pred) != len(words):\n logging.info('Preds length: ' + str(len(preds[i])))\n logging.info('subtokens_mask length: ' + str(len(subtokens_mask[i])))\n logging.info('Pred length: ' + str(len(pred)))\n logging.info('words length: ' + str(len(words)))\n logging.info('Preds: ' + str(preds.tolist()))\n logging.info('subtokens_mask: ' + str(subtokens_mask[i]))\n logging.info('Pred:' + str(pred.tolist()))\n logging.info('words:' + str(words))\n\n labeled_note = '__Prediction/Word Mismatch__ pred length: ' + str(len(pred)) + ', words length: ' + str(len(words))\n break\n #raise ValueError('Pred and words must be of the same length')\n \n output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n labeled_note += '\\n' + output.strip()\n logging.info(f'Combined: {output.strip()}')\n\n '''\n\n if len(pred) == len(words):\n output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n all_notes_labeled_lines.append(output.strip())\n logging.info(f'Combined: {output.strip()}')\n else:\n all_notes_labeled_lines.append(query)\n pred_length = str(len(pred))\n word_length = str(len(words))\n logging.info(f'__Prediction/Word Length Mismatch__ pred length: {pred_length}, words length: {word_length}')\n logging.info(f'{query}')\n \n\n print(str(load_datalayer_end_time-load_datalayer_begin_time)+' seconds to load the datalayer')\n print(str(load_hidden_states_end_time-load_hidden_states_begin_time)+' seconds to load hidden states')\n print(str(load_logits_end_time-load_logits_begin_time)+' seconds to load logits')\n print(str(infer_end_time-infer_begin_time)+' seconds to run inference')\n\n return all_notes_labeled_lines", "def nlp(self, text):\n # Runs the NLP model on the input.\n doc = self.nlp_model(text)\n\n to = []\n when = []\n body = []\n\n # Group the labels into variables.\n for token in doc:\n if token.dep_ == 
\"TO\":\n to.append(token.text)\n elif token.dep_ == \"WHEN\":\n when.append(token.text)\n elif token.dep_ == \"BODY\":\n body.append(token.text)\n log.debug(\"%s %s\", token.text, token.dep_)\n\n # Get the time entity from the NLP model.\n time = datetime.now()\n if len(when) == 0:\n time = time + timedelta(seconds=5)\n else:\n time = tc.parse_time(when)\n\n _body = \" \".join(body)\n\n return (to, time, _body)", "def ie_preprocess(document):\n sentences = nltk.sent_tokenize(document) #NLTK default sentence segmenter\n #print sentences # sentences are segmented\n sentences = [nltk.word_tokenize(sent) for sent in sentences] # NLTK word tokenizer \n #print sentences # sentences are tokenized\n sentences = [nltk.pos_tag(sent) for sent in sentences] # NLTK POS tagger \n #print sentences # sentences are POS tagged\n return sentences", "def convert_chn_text(detail=True):\n p = {\n \"data_path\": \"../data/data_literature\",\n \"output_dir\": \"../data/converted_data\"\n }\n if detail:\n gen_params_info(p)\n\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n files = os.listdir(p[\"data_path\"])\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n file_path = \"%s/%s\" % (p[\"data_path\"], file_name)\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], file_name)\n fh_in = codecs.open(filename=file_path, mode=\"r\", encoding='utf8')\n fh_out = codecs.open(filename=out_file_path, mode=\"w\", encoding='utf8')\n line_idx = 1\n verb = \"\"\n for line in fh_in:\n line = line.lstrip()\n if line.find(\"\\t\") < 0:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n items = line.split(\"\\t\")\n if len(items) != 4:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO 4 TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n frame_id = items[0]\n if frame_id.find(\".\") >= 0:\n frame_id = frame_id.split(\".\")[0]\n verb = items[2].strip()\n left_sent = items[1].strip()\n right_sent = items[3].strip()\n out_line = \"%s\\t%s\\t%s\\t%s\"\\\n % (frame_id, left_sent, verb, right_sent)\n print(out_line, file=fh_out)\n\n line_idx += 1\n\n fh_in.close()\n fh_out.close()", "def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse 
= new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature", "def preprocess_document(raw,sentence_level):\r\n\r\n\t# raw = raw.decode(\"utf-8\")\r\n\t# raw = raw.encode(\"ascii\",\"ignore\")\r\n\t\r\n\tfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\r\n\tparam = PunktParameters()\r\n\ttokenizer = PunktSentenceTokenizer(param)\r\n\tif sentence_level:\r\n\t\tsentences = tokenizer.tokenize(raw)\r\n\t\tsentences_words = list()\r\n\t\tfor s in sentences:\r\n\t\t\tsentences_words.append((s.strip(),preprocess_sentence(s)))\r\n\t\treturn sentences_words\r\n\telse:\r\n\t\treturn [(raw.strip(),preprocess_sentence(raw))]", "def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string", "def extract_data(file_ner,file_pos,separator=\" \"):\n\n # read NER and POS from the two files\n words_tags=read_conll_file(file_ner)\n words_pos=read_conll_file(file_pos)\n \n ## some 
checks, e.g., that both files have same length, same tokens\n assert(len(words_tags)==len(words_pos))\n \n for (words,tags),(_,pos) in zip(words_tags,words_pos):\n for word,pos,tag in zip(words,pos,tags):\n # first letter is capitalized\n cap=\"+\" if word[0].isupper() else \"-\"\n hyphen = '+' if '-' in word else '-'\n l = str(len(word))\n #vowels = \"\".join(sorted([w for w in word.lower() if w in ['a','e','i','o','u','y']]))\n #################################\n ###### YOUR FEATURES HERE ####### \n #################################\n # 0=separator\n \n ## todo: output the cap feature and more \n ## make sure the format you output here is what the nerfeats.py script expects as fields!\n print separator.join([word.lower(),pos,cap, l, hyphen, tag])\n # sentence separator\n print \"\"", "def post_process_text(self, text):\n\t\treturn text", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n 
PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' 
in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. 
Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. 
NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions", "def softm_to_invoice(rechnungsnr):\n from pprint import pprint\n\n if str(rechnungsnr).startswith('RG'):\n rechnungsnr = str(rechnungsnr)[2:]\n rg, orderlines = get_rechnung('RG833645')\n hint = {}\n for attr in 'skontobetrag'.split():\n hint[attr] = rg[attr]\n out = {'hint': hint}\n for attr in '''kundenauftragsnr auftragsnr versandkosten rechnung_steuranteil rechnungsnr\n zu_zahlen'''.split():\n out[attr] = rg[attr]\n\n out['leistungsdatum'] = rg['versand_date']\n out['kundennr'] = rg['kundennr_rechnungsempfaenger']\n out['erfasst_von'] = rg['sachbearbeiternr']\n out['abschlag_prozent'] = rg['auftragsrabatt1p'] + rg['auftragsrabatt2p']\n out['auftragsrabatt'] = rg['auftragsrabatt']\n out['rechungsdatum'] = rg['druck_date']\n rabatttext = ' und '.join([x for x in [rg['rabatttext1'].strip(), rg['rabatttext2'].strip()] if x])\n rabatttext = \"\"\n if rabatttext:\n rabatttext = \"%s: %f\" % (rabatttext, out['abschlag_prozent'])\n elif out['abschlag_prozent']:\n rabatttext = u\"Ab/Zuschläge: %f\" % (out['abschlag_prozent'])\n\n out['infotext_kunde'] = '\\n'.join([rabatttext])\n\n out['orderlines'] = []\n for ol in get_connection().query(['AFU00'], condition=\"FURGNR=%s\" % sql_escape(rechnungsnr)):\n pprint(ol)\n outol = {}\n for attr in '''menge artnr abschlag rechungsbetrag warenwert'''.split(): # zu_zahlen\n outol[attr] = ol[attr]\n out['orderlines'].append(outol)\n\n #line = dict(\n # guid=p.guid,\n # menge=int(p.menge),\n # artnr=p.artnr,\n # #kundenartnr=f3.artnr_kunde,\n # #name=f3.artikelbezeichnung.strip(),\n # infotext_kunde=p.text\n # #einzelpreis=int(abs(f3.verkaufspreis)*100),\n # #warenwert=int(p.wert_netto*100),\n # #zu_zahlen=int(abs(f3.wert_brutto)*100),\n # #abschlag=int(f4.positionsrabatt_gesamt*100)\n # )\n\n #if f3.ean and int(f3.ean):\n # line['ean'] = f3.ean", "def text_preprocessing_pdf(self,p):\n #remover_end_paragraphs=np.vectorize(self.remove_end_paragraphs,otypes=[str])\n cleaner=np.vectorize(self.remove_non_alpha,otypes=[str])\n cut_text=np.vectorize(self.cut_text,otypes=[str])\n cut_text_raw=np.vectorize(self.cut_text_raw,otypes=[str])\n assert len(self.parser)==len(self.parser_raw), \"Length of the treated sentence treated list does not match length of raw text list: {} / {}\".format(len(self.parser),len(self.parser_raw))\n cut_text_raw(p)\n p=cleaner(p)\n cut_text(p)\n return p", "def debian_multiline_description(description):\n return \"\\n \".join(line for line in description.split(\"\\n\") if line.strip() != \"\")", "def _DocSim(self,df,a):\r\n #Obtain the descriptions of the two input courses.\r\n textA = df['description'][a]\r\n #Obtain the document embedding vector for each description.\r\n vectorA = self.DocVecModel.infer_vector([textA], alpha=0.1, min_alpha=0.0001, steps=300)\r\n return vectorA", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n 
},\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))", "def construct_NLP_model(self, df=None):\n import review_processing as rp\n # get words\n if df is not None:\n nitems = df.shape[0]\n col_names = df.columns.values\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names:\n sys.exit('construct_NL_model: The name {0}/{1} cannot be found'.\n format(self.review_col_name, self.sentiment_col_name))\n review_list = df[self.review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n # Get training sentiment values\n self.sentiment = df[self.sentiment_col_name].values\n\n else:\n if self.training_file_name is None:\n sys.exit('construct_NLP_model: traning file name does not '\n 'exist')\n else:\n suffix = os.path.splitext(self.training_file_name)[1][1:]\n if suffix == 'csv':\n df = pd.read_csv(self.training_file_name)\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names::\n sys.exit('construct_NL_model: The name {0}/{1} cannot '\n ' be found'.format(self.review_col_name,\n self.sentiment_col_name))\n nitems = df.shape[0]\n review_list = df[review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n elif suffix == 'json':\n data_dict_list = rp.load_data(self.training_file_name)\n if self.review_col_name not in data_dict_list.keys():\n sys.exit('construct_NL_model: The name {0} cannot be '\n 'found'.format(review_col_name))\n review_list = map(lambda x: x[review_col_name],\n data_dict_list)\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n else:\n sys.exit('construct_NLP_model: file type not supported '\n 'yet!')\n\n # Training process of Bag of Worlds\n if self.NLP_model == 'BagofWords':\n print('construct_NLP_model: Creating bag of words...')\n self.vectorizer = CountVectorizer(analyzer='word',\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n max_features=self.maxfeature)\n self.train_data_features = vectorizer.fit_transform(\n meaningful_words)\n self.train_data_features = train_data_features.toarray()\n\n # vocab = vectorizer.get_feature_names()\n # dist = np.sum(train_data_features, axis=0)\n # for tag, count in zip(vocab, dist):\n # print(count, tag)\n\n else:\n sys.exit('construct_NLP_model: NLP_model type 
not supported yet!')", "def preprocess(doc_in, doc_out):\n def output(text, doc_id):\n doc_out.write(doc_id + \"\\n\")\n doc_out.write(text.replace(\"\\n\", \" \") + \"\\n\\n\")\n\n def filter_text(t):\n filtered_out = [\"<P>\", \"</P>\"]\n r = t\n for f in filtered_out:\n r = r.replace(f, \" \")\n return r\n\n\n doc_id = None\n reading_text = False\n text = \"\"\n for line in doc_in:\n if(str_text_start in line):\n if(reading_text):\n warning(\"Found \" + str_text_start + \" in text\")\n if(not doc_id):\n warning(\"Reading text without knowing id\")\n continue\n reading_text = True\n continue\n if((str_text_stop in line) and reading_text):\n output(text, doc_id)\n text = \"\"\n reading_text = False\n doc_id = None\n doc_id_match = pat_doc_no.match(line)\n if(doc_id_match):\n doc_id = doc_id_match.group(1)\n if(reading_text):\n warning(\"Found doc id in text\")\n continue\n if(reading_text):\n text = text + filter_text(line)", "def process_text(infile, lower,\n longs, ocr, # lemmatization,\n umlauts, punctuation,\n numbers, stopwords):\n # read file\n text = infile.read() # TODO: read via iterator? ('for line in file') (no nltk.sent_tokenize)\n\n processed_sentences = []\n\n # delete linebreaks\n text = text.replace('¬\\n', '').strip() # remove seperators and merge parts of word\n text = text.replace('-\\n', '')\n text = text.replace('\\n', ' ') # remove linebreak for sentence recognition\n\n # load stopwords\n if not umlauts:\n # take original stopwords\n stop_words = nltk.corpus.stopwords.words('german')\n else:\n # convert umlauts in stopwords to digraphs\n stop_words = [replace_umlauts(word) for word in nltk.corpus.stopwords.words('german')]\n\n \"\"\"\n if lemmatization:\n nlp = spacy.load('de_core_news_sm') # for lemmatization\n else:\n nlp = None\n \"\"\"\n\n # get sentences from text\n sentences = nltk.sent_tokenize(text, language='german')\n\n # process each sentence\n for sentence in sentences:\n if lower:\n sentence = sentence.lower()\n if ocr:\n sentence = replace_ocr_mistakes(sentence)\n if longs:\n sentence = replace_long_s(sentence)\n \"\"\"\n if lemmatization:\n sentence = nlp(sentence)\n sentence = ' '.join([word.lemma_ for word in sentence]) # rechenintensiv!\n \"\"\"\n if umlauts:\n sentence = replace_umlauts(sentence)\n if punctuation:\n sentence = sentence.translate(str.maketrans('', '', string.punctuation))\n sentence = sentence.replace('“', '') # not in string.punctuation\n sentence = sentence.replace('„', '') # not in string.punctuation\n sentence = sentence.replace('—', '') # not in string.punctuation\n if numbers:\n sentence = sentence.translate(str.maketrans('', '', string.digits))\n # TODO: How to handle ²,³, ⁴, ⁵,⁶,⁷,⁸?\n if stopwords:\n words = nltk.word_tokenize(sentence)\n words = [x for x in words if x not in stop_words]\n sentence = ' '.join(words)\n if len(sentence) > 1:\n processed_sentences.append(sentence)\n return processed_sentences", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = 
self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def create_NER(dataframe):\n\n dataframe['entities'] = dataframe['line']\n entity_dict = {}\n for i, val in enumerate(dataframe['entities']):\n e1 = re.findall('<e1>(.*?)</e1>', val)\n e2 = re.findall('<e2>(.*?)</e2>', val)\n entity_dict[i+1] = (str(e1[0]), str(e2[0]))\n\n entity_dataframe = create_dataframe(entity_dict, ['e1', 'e2'])\n dataframe = dataframe.drop(columns=['entities'])\n dataframe['e1'] = entity_dataframe['e1']\n dataframe['e2'] = entity_dataframe['e2']\n\n return dataframe", "def parse_and_handle_description_tags(rr, text):\n return parse_description_tags(text).handle_tags(\n TagHandler(rr).tag_handlers).replace('\\n', '<br>').replace('\\r', '')", "def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):\n result = super(purchase_order, self)._prepare_inv_line(cr, uid, account_id, order_line, context=None)\n result['discount2'] = order_line.discount2 or 0.0\n return result", "def extract_information(preprocessed_sentences):\n parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = 
extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def _process_cvterm(self):\n\n line_counter = 0\n raw = '/'.join((self.rawdir, 'cvterm'))\n logger.info(\"processing cvterms\")\n\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (cvterm_id, cv_id, definition, dbxref_id, is_obsolete,\n is_relationshiptype, name) = line\n\n # 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript\n # 28 5 1663309 0 0 synonym\n # 455 6 1665920 0 0 tmRNA\n\n # not sure the following is necessary\n # cv_prefixes = {\n # 6 : 'SO',\n # 20: 'FBcv',\n # 28: 'GO',\n # 29: 'GO',\n # 30: 'GO',\n # 31: 'FBcv', # not actually FBcv - I think FBbt.\n # 32: 'FBdv',\n # 37: 'GO', # these are relationships\n # 73: 'DOID'\n # }\n\n # if int(cv_id) not in cv_prefixes:\n # continue\n cvterm_key = cvterm_id\n cvterm_id = self._makeInternalIdentifier('cvterm', cvterm_key)\n self.label_hash[cvterm_id] = name\n self.idhash['cvterm'][cvterm_key] = cvterm_id\n # look up the dbxref_id for the cvterm\n # hopefully it's one-to-one\n dbxrefs = self.dbxrefs.get(dbxref_id)\n if dbxrefs is not None:\n if len(dbxrefs) > 1:\n logger.info(\n \">1 dbxref for this cvterm (%s: %s): %s\",\n str(cvterm_id), name, dbxrefs.values())\n elif len(dbxrefs) == 1:\n # replace the cvterm with\n # the dbxref (external) identifier\n did = dbxrefs.popitem()[1]\n # get the value\n self.idhash['cvterm'][cvterm_key] = did\n # also add the label to the dbxref\n self.label_hash[did] = name\n return", "def raw_text(self):\n\t\t\n\t\t #eliminating more headers\n\t\traw_text = re.sub(r\".*OPERATIONS O[PF].*\",r\"\",self.doc)\n\t\traw_text = re.sub(r\"Page \\d+\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*B[lL]OCK.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*WEST GULF.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*NAVAL FORCES ON.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\"\\s\",r\" \", raw_text) #eliminating tabs etc. 
\t \t \n\t\treturn raw_text", "def dissect(self, text):", "def _process_adc(texts, tab_separated_title, leading_labels, titles=[]):\n documents = []\n corpus_labels = set()\n for i, text in enumerate(texts):\n if text:\n title = \"Document\" + str(i + 1) if titles == [] else titles[i]\n features = {\"contentType\": \"Text\", \"sourceFileLine\": str(i)}\n \n if tab_separated_title:\n #example: title \\t start of text\n text = text.split(\"\\t\")\n title = str(text[0])\n text = \"\\t\".join(text[1:])\n \n if leading_labels:\n #example: !LB1 !Lb2 !LBL \\t start of text\n text = text.split(\"\\t\")\n doc_labels=[]\n for label in [f.strip() for f in text[0].split(\"!\") if f != \"\"]:\n features[label] = \"true\"\n corpus_labels.add(label)\n doc_labels.append(label)\n text = \"\".join(text[1:])\n features[\"Labels\"]=json.dumps(doc_labels)\n \n documents.append(Document(name=title,\n features=features,\n text=str(text),\n annotations=[Annotation(span_start=0,\n span_end=max(0, len(str(text)) - 1),\n type=\"TextBlock\",\n features={})]))\n return documents, list(corpus_labels)", "def get_feature_string_by_document(self, _set, document):\n label = self.sets[_set][document]['label']\n line = \"{} \".format(label)\n for word in self.sets[_set][document]['words']:\n line += \"{}:{} \".format(self.dictionary[word]['id'],tfidf.tfidf(word, document, self))\n line += \"\\n\"\n return line", "def _set_additional_po_order_fields(self, invoice):\n\t\tpass", "def tokenize_text(document, nlp):\n\n return [token.text for token in nlp(document)]", "def postprocess(self, text):\r\n return text", "def Process_line(line, summary):\n line = line.lower() # lower all words in the line\n line = line.translate(str.maketrans('','', string.punctuation)) # Remove all punctuation\n words = line.split() # Split word from the line\n add_word(words, summary) # Add word to dictionary", "def get_text_lines(instText):\n\n # Find out which part this is\n part = instText.part\n # Get the necessary parameters: lng, ext, dir\n sLng = part.corpus.get_lng_display()\n sDir = part.dir\n sName = instText.fileName\n sFormat = instText.get_format_display()\n # Now try to get the information\n oBack = get_crpp_text(sLng, sDir, sFormat, sName)\n # Prepare what we return\n if oBack == None or oBack['status'] == 'error':\n return None\n else:\n return oBack", "def fix_description(text):\n separate = text.split()\n joined = ' '.join(list([x.strip('\\\\n') for x in separate]))\n final_joined = ' '.join(joined.split('\\\\n')[::3])\n return final_joined", "def read_po(self, inputfile):\n is_index = False\n lines = inputfile.readlines()\n index = ''\n value = ''\n for line in lines:\n if line.startswith('#'):\n continue\n elif line.startswith('msgid'):\n is_index = True\n self.translations[index] = value\n index = ''\n value = ''\n elif line.startswith('msgstr'):\n is_index = False\n\n v = re.match('.*\"(.*)\".*', line)\n if v:\n if is_index:\n index += ''.join(v.groups())\n else:\n value += ''.join(v.groups())", "def initialization_text_data(self, text, sentences, taggedSentences, rptType='vaers'):\n \n self.text = text\n self.sentences = sentences\n self.reportType = rptType\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n self.taggedSentences = taggedSentences\n self.exposureDate = None\n self.blockout_range = []\n self.clauseZones = []\n \n 
sent_tags = []\n ##: 'IGNORE' tag breaks the timeline continuity, i.e., stops time impact zone; \n ##: 'SKIP' black out this sentence from time impact zone, and impact zone resumes after this sentence.\n for sentnumber, sentence in enumerate(sentences):\n tags = set([tg[1] for tg in taggedSentences[sentnumber]])\n \n tokens0 = nltk.word_tokenize(sentence.lower()) \n with_who_range = self.extract_standard_summary_pattern(tokens0, sentence)\n if with_who_range:\n r = (with_who_range[0]+self.sentence_startPos[sentnumber], with_who_range[1]+self.sentence_startPos[sentnumber])\n self.blockout_range.append(r)\n \n \n ##: Ignore dates in this sentence since it is about history or family\n if tags.intersection(['History', 'FamilyHistory', 'MedicalHistory']):\n #sent_tags.append('IGNORE')\n sent_tags.append('SKIP')\n continue\n \n ##: tags that breaks time continuity\n if tags.intersection(['Hospitalization']):\n sent_tags.append('IGNORE')\n continue\n \n tokens = set(tokens0)\n ##: Ignore dates in this sentence if it has a 'follow-up'\n if 'follow-up' in tokens or sentence.lower().find('follow up')>=0 or sentence.lower().find('f/u')>=0:\n sent_tags.append('IGNORE')\n continue\n \n ##: Unspecified/unknown date breaks time continuity, except this is a sentence for concomitant, which usually should not stop continuity.\n if tokens.intersection(['unknown', 'unspecified', 'unreported']) and tokens.intersection(['date', 'dates']):\n #if tokens.intersection(['unknown', 'unspecified']) and tokens.intersection(['date', 'dates']) and not tokens.intersection(['concomitant']):\n #unkSet=tokens.intersection(['unknown', 'unspecified', 'unreported'])\n sent_tags.append('IGNORE')\n continue\n \n ##: tokens that breaks time continuity\n if tokens.intersection(self.token_timeline_breakers):\n sent_tags.append('IGNORE')\n continue\n \n sent_tags.append('NORMAL')\n self.sentence_tags = sent_tags", "def real_process(raw):\n\n prod = product.TextProduct(raw)\n pil = prod.afos[:3]\n wfo = prod.source[1:]\n # sigh, can't use originating center for the route\n if (pil == \"OEP\"):\n wfo = prod.afos[3:]\n\n #raw = raw.replace(\"'\", \"\\\\'\")\n sqlraw = raw.replace(\"\\015\\015\\012\", \"\\n\").replace(\"\\000\", \"\").strip()\n\n # FTM sometimes have 'garbage' characters included, get em out\n #if (pil == \"FTM\"):\n # sqlraw = re.sub(\"[^\\n\\ra-zA-Z0-9:\\.,\\s\\$\\*]\", \"\", sqlraw)\n\n # Always insert the product into the text archive database\n product_id = prod.get_product_id()\n sql = \"\"\"INSERT into text_products(product, product_id) values (%s,%s)\"\"\"\n myargs = (sqlraw, product_id)\n if (len(prod.segments) > 0 and prod.segments[0].sbw):\n giswkt = 'SRID=4326;%s' % (MultiPolygon([prod.segments[0].sbw]).wkt,)\n sql = \"\"\"INSERT into text_products(product, product_id, geom) values (%s,%s,%s)\"\"\" \n myargs = (sqlraw, product_id, giswkt)\n deffer = POSTGIS.runOperation(sql, myargs)\n deffer.addErrback( common.email_error, sqlraw)\n myurl = \"%s?pid=%s\" % (config.get('urls', 'product'), product_id)\n\n xtra = {\n \"product_id\": product_id,\n }\n\n # Just send with optional headline to rooms...\n if SIMPLE_PRODUCTS.__contains__(pil):\n xtra['channels'] = wfo\n if pil in NEW_ROUTING:\n xtra['channels'] = prod.afos\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, prodtxt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (centertext.get(wfo,wfo), myurl, prodtxt)\n if (not 
[\"HWO\",\"NOW\",\"ZFP\"].__contains__(pil) and \n len(prod.segments) > 0 and \n len(prod.segments[0].headlines) > 0 and \n len(prod.segments[0].headlines[0]) < 200 ):\n htmlmess += \"... %s ...\" % (prod.segments[0].headlines[0],)\n\n jabber.sendMessage(mess, htmlmess, xtra)\n\n channels = [wfo,]\n if pil in NEW_ROUTING:\n channels = [prod.afos,]\n # TODO: remove manual hack\n if prod.afos == 'RFDBIS':\n channels = ['BIS',]\n # Also send message to any 'subscribing WFO chatrooms'\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n for wfo2 in routes[key]:\n mess = \"%s: %s issues %s %s\" % \\\n (wfo2, wfo, prodtxt, myurl)\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n\n twt = prodtxt\n url = myurl\n common.tweet(channels, twt, url)\n if prod.afos == \"PNSARX\":\n snowfall_pns(prod)\n # We are done for this product\n return\n\n\n # Now, lets look at segments ?\n if (pil == \"RVF\"):\n for seg in prod.segments:\n tokens = re.findall(\"\\.E ([A-Z0-9]{5}) \", seg.raw)\n if (len(tokens) == 0):\n print 'Whoa, did not find NWSLI?', seg\n return\n hsas = re.findall(\"HSA:([A-Z]{3}) \", seg.raw)\n prodtxt = reference.prodDefinitions[pil]\n mess = \"%s: %s issues %s\" % \\\n (wfo, wfo, prodtxt)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> for \" \\\n % (wfo, myurl, prodtxt)\n usednwsli = {}\n hsa_cnt = -1\n rivers = {}\n for nwsli in tokens:\n if usednwsli.has_key(nwsli):\n continue\n usednwsli[nwsli] = 1\n hsa_cnt += 1\n if (nwsli_dict.has_key(nwsli)):\n rname = nwsli_dict[nwsli]['rname']\n r = nwsli_dict[nwsli]['river']\n else:\n rname = \"((%s))\" % (nwsli,)\n r = \"Unknown River\"\n if not rivers.has_key(r):\n rivers[r] = \"<br/>%s \" % (r,)\n if len(hsas) > hsa_cnt and \\\n reference.wfo_dict.has_key( hsas[hsa_cnt] ):\n uri = AHPS_TEMPLATE[ reference.wfo_dict[hsas[hsa_cnt]]['region'] ] %\\\n (hsas[hsa_cnt].lower(), nwsli.lower() ) \n rivers[r] += \"<a href=\\\"%s\\\">%s</a> (%s), \" % (uri, rname, nwsli)\n else:\n rivers[r] += \"%s (%s), \" % (rname, nwsli)\n for r in rivers.keys():\n htmlmess += \" %s\" % (rivers[r][:-2],)\n jabber.sendMessage(mess[:-1] +\" \"+ myurl, htmlmess[:-1], xtra)\n continue\n\n# PUBLIC ADVISORY NUMBER 10 FOR REMNANTS OF BARRY\n# TROPICAL DEPRESSION BARRY ADVISORY NUMBER 5\n# TROPICAL STORM BARRY INTERMEDIATE ADVISORY NUMBER 2A\n\n if (pil == \"TCM\" or pil == \"TCP\" or pil == \"TCD\"):\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, pil, myurl)\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (wfo, myurl, prodtxt)\n jabber.sendMessage(mess, htmlmess, xtra)\n \n common.tweet([wfo], prodtxt, myurl)\n\n\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n channels = []\n for wfo2 in routes[key]:\n mess = \"%s: %s %s\" % \\\n (wfo2, prod.afos, myurl)\n htmlmess = \"<a href=\\\"%s\\\">%s</a>\" % (myurl, prodtxt)\n tokens = re.findall(\"(.*) (DISCUSSION|INTERMEDIATE ADVISORY|FORECAST/ADVISORY|ADVISORY|MEMEME) NUMBER\\s+([0-9]+)\", raw.replace(\"PUBLIC ADVISORY\", \"ZZZ MEMEME\") )\n if (len(tokens) > 0):\n tt = tokens[0][0]\n what = tokens[0][1]\n tnum = tokens[0][2]\n if (tokens[0][1] == \"MEMEME\"):\n tokens2 = re.findall(\"(PUBLIC ADVISORY) NUMBER\\s+([0-9]+) FOR (.*)\", raw)\n what = tokens2[0][0]\n tt = tokens2[0][2]\n mess = \"%s: %s issues %s #%s for %s %s\" % (wfo2, centertext.get(wfo, wfo), what, tnum, tt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s #%s</a> for %s\" % ( 
centertext.get(wfo, wfo), myurl, what, tnum, tt)\n #print htmlmess, mess\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n twt = \"%s issues %s %s for %s\" % (centertext.get(wfo, wfo), what, tnum, tt)\n common.tweet(channels, twt, myurl)\n\n\n for seg in prod.segments:\n # The segment needs to have ugc codes\n if (len(seg.ugcs) == 0):\n continue\n # If the product has VTEC, it is handled by the vtec ingestor\n if (len(seg.vtec) > 0 and ['MWS','HLS'].__contains__(pil)):\n log.msg(\"VTEC FOUND!, skipping\")\n continue\n\n # If the product has HVTEC, it is handled by other ingestor too\n if (len(seg.hvtec) > 0 and ['FLW','FFA','FLS'].__contains__(pil)):\n log.msg(\"HVTEC FOUND!, skipping\")\n continue\n\n counties = countyText(seg.ugcs)\n if (counties.strip() == \"\"):\n counties = \"entire area\"\n expire = \"\"\n if seg.ugcexpire is not None:\n if prod.z:\n expire = \"till \"+ (seg.ugcexpire - datetime.timedelta(hours= reference.offsets[prod.z] )).strftime(\"%-I:%M %p \")+ prod.z\n\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n mess = \"%s: %s issues %s for %s %s %s\" % \\\n (wfo, wfo, prodtxt, counties, expire, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> for %s %s\" % (wfo, myurl, prodtxt, counties, expire)\n jabber.sendMessage(mess, htmlmess, xtra)\n twt = \"%s for %s %s\" % (prodtxt, counties, expire)\n common.tweet([wfo,], twt, myurl)\n\n# PUBLIC ADVISORY NUMBER 10 FOR REMNANTS OF BARRY\n# TROPICAL DEPRESSION BARRY ADVISORY NUMBER 5\n# TROPICAL STORM BARRY INTERMEDIATE ADVISORY NUMBER 2A\n\n if (pil == \"TCM\" or pil == \"TCP\" or pil == \"TCD\"):\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, pil, myurl)\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (wfo, myurl, prodtxt)\n jabber.sendMessage(mess, htmlmess, xtra)\n common.tweet([wfo,], prodtxt, myurl)\n\n\n\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n channels = []\n for wfo2 in routes[key]:\n mess = \"%s: %s %s\" % \\\n (wfo2, prod.afos, myurl)\n htmlmess = \"<a href=\\\"%s\\\">%s</a>\" % (myurl, prodtxt)\n tokens = re.findall(\"(.*) (DISCUSSION|INTERMEDIATE ADVISORY|FORECAST/ADVISORY|ADVISORY|MEMEME) NUMBER\\s+([0-9]+)\", raw.replace(\"PUBLIC ADVISORY\", \"ZZZ MEMEME\") )\n if (len(tokens) > 0):\n tt = tokens[0][0]\n what = tokens[0][1]\n tnum = tokens[0][2]\n if (tokens[0][1] == \"MEMEME\"):\n tokens2 = re.findall(\"(PUBLIC ADVISORY) NUMBER\\s+([0-9]+) FOR (.*)\", raw)\n what = tokens2[0][0]\n tt = tokens2[0][2]\n mess = \"%s: %s issues %s #%s for %s %s\" % (wfo2, centertext.get(wfo, wfo), what, tnum, tt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s #%s</a> for %s\" % ( centertext.get(wfo, wfo), myurl, what, tnum, tt)\n #print htmlmess, mess\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n twt = \"%s issues %s %s for %s\" % (centertext.get(wfo, wfo), what, tnum, tt)\n common.tweet(channels, twt, myurl)", "def __init__(self, df):\n # Creating content\n df['content_str'] = df['content'].map(lambda x: self.__word_joiner(x))\n text = df['content_str'].str.cat(sep=' ')\n df.drop('content_str', axis=1, inplace=True)\n\n self._generateBigrams(text)\n self._generateUnigrams(text)\n self.corpussize=len(Utilities.CVTokeniser(text))\n print(\"Feature_PMI: Corpus size:\",self.corpussize)", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n 
for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def _extract_number_with_text_nl(tokens, short_scale=True,\n ordinals=False, fractional_numbers=True):\n number, tokens = \\\n _extract_number_with_text_nl_helper(tokens, short_scale,\n ordinals, fractional_numbers)\n while tokens and tokens[0].word in _ARTICLES_NL:\n tokens.pop(0)\n return ReplaceableNumber(number, tokens)", "def __init__(self,txt=u'',unicodeEncoding='utf-8',verbose=False,tagID=0):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup,self).__init__(__txt=None,__rawTxt=txt,\n __SCOPEUPDATED=False,__VERBOSE=verbose,\n __tagID=tagID,\n __unicodeEncoding=unicodeEncoding)\n self.__cleanText()", "def parse_line(obj):\n quadrilateral = []\n for point in obj['points']:\n quadrilateral += point\n xmin = min(quadrilateral[0::2])\n xmax = max(quadrilateral[0::2])\n\n ymin = min(quadrilateral[1::2])\n ymax = max(quadrilateral[1::2])\n if not (xmin < xmax and ymin < ymax):\n logging.warning(f\"skip: {obj}\")\n return None\n language = obj['language'].lower()\n legibility = 1 - int(obj['illegibility'])\n transcription = obj['transcription']\n if transcription == '###':\n transcription = ''\n legibility = 0\n language = ''\n\n word_annotation = {\n 'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],\n 'segmentation': [quadrilateral],\n 'attributes': {\n 'transcription': transcription,\n 'legible': legibility,\n 'language': language,\n }\n }\n return word_annotation", "def set_lic_text(self, doc, text):\n if self.has_extr_lic(doc):\n if not self.extr_text_set:\n self.extr_text_set = True\n if validations.validate_is_free_form_text(text):\n self.extr_lic(doc).text = str_from_text(text)\n return True\n else:\n raise SPDXValueError('ExtractedLicense::text')\n else:\n raise CardinalityError('ExtractedLicense::text')\n else:\n raise OrderError('ExtractedLicense::text')", "def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]", "def clean_line_generator_v2(df_pkl=None, fn='untitled'):", "def define_description(self):\n self._description = 'NODDI-based processing of DWI datasets.'", "def parse_post_describeprocess(doc):\n\n version = doc.attrib.get('version')\n wpsrequest.check_and_set_version(version)\n\n language = doc.attrib.get('language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.operation = 'describeprocess'\n wpsrequest.identifiers = [identifier_el.text for identifier_el in\n xpath_ns(doc, 
'./ows:Identifier')]", "def process(self, processors) -> MultiLineString:", "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def nltk_text(self, text):\n text = nltk.Text(word_tokenize(text))\n return text", "def transform(self, entry):\r\n res = ''\r\n doc = self.nlp(entry, disable=['tagger', 'parser'])\r\n ents = doc.ents\r\n start_indices = {}\r\n end_indices = {}\r\n\r\n for ent in ents:\r\n start, end, label = 
ent.start, ent.end, ent.label_\r\n if label in ['NORP', 'GPE', 'LOC', 'PERSON', 'PRODUCT']:\r\n start_indices[start] = 'PRODUCT'\r\n end_indices[end] = 'PRODUCT'\r\n if label in ['DATE', 'QUANTITY', 'TIME', 'PERCENT', 'MONEY']:\r\n start_indices[start] = 'NUM'\r\n end_indices[end] = 'NUM'\r\n\r\n for idx, token in enumerate(doc):\r\n if idx in start_indices:\r\n res += start_indices[idx] + ' '\r\n\r\n # normalizing the numbers\r\n if token.like_num:\r\n try:\r\n val = float(token.text)\r\n if val == round(val):\r\n res += '%d ' % (int(val))\r\n else:\r\n res += '%.2f ' % (val)\r\n except:\r\n res += token.text + ' '\r\n elif len(token.text) >= 7 and \\\r\n any([ch.isdigit() for ch in token.text]):\r\n res += 'ID ' + token.text + ' '\r\n else:\r\n res += token.text + ' '\r\n return res.strip()", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n 
self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def get_kit_descr_collapse(self):\n self.ensure_one()\n if not self.is_kit_invoice_line:\n return \"\"\n comp_obj = self.env['account.invoice.line.comp']\n components = comp_obj.search([('invoice_line_id','=',self.id),('is_kit_invoice_comp','=',False)]) # get all comps that are not kits\n ir_model_data = self.env['ir.model.data']\n units_id = ir_model_data.get_object_reference('product','product_uom_unit')[1]\n res = []\n for comp in components:\n qty_int_val = int(comp.qty_total)\n if comp.uom_id.id == units_id: # uom is units, no need to print it\n qty = str(qty_int_val) # qty is an int because it's in units\n comp_str = comp.name + \": \" + qty\n else:\n if qty_int_val == comp.qty_total:\n qty = str(qty_int_val)\n else:\n qty = str(comp.qty_total)\n comp_str = comp.name + \": \" + qty + \" \" + comp.uom_id.name\n res.append(comp_str)\n res = \" (\" + \", \".join(res) + \")\"\n return res", "def parsingconvtext(retrievedtext,customtextlist):\r\n if not retrievedtext: #in case empty text \r\n retrievedtext=changenonetostr(retrievedtext)\r\n newtext=BeautifulSoup(retrievedtext).get_text() \r\n #newtext=changenonetostr(retrievedtext)\r\n #newtext=BeautifulSoup(newtext).get_text() \r\n #remove http links\r\n newtext=re.sub(r'http\\S+', '', newtext)\r\n newtext=re.sub(r'\\r\\r\\r\\n', ' ', newtext)\r\n #remove LL specific text\r\n if customtextlist:\r\n for i in customtextlist:\r\n newtext=re.sub(i, '', newtext)\r\n return newtext", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def docLines(self):\n summary, description = self._getDocParts()\n if description:\n return summary + [\"\"] + description\n return summary", "def testLines(self):\n\n textractor = Textractor(lines=True)\n\n # Extract text as lines\n lines = textractor(Utils.PATH + \"/article.pdf\")\n\n # Check number of lines is as expected\n self.assertEqual(len(lines), 35)", "def _dig_line_count_changed(self, text):\n self._setup_table_digital()", "def process_text(text):\n return [token.text for token in nlp(text) if not token.is_stop]", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def nintl(self):", "def get_document_number(self, txt_line, inv_type):\n number = 0\n if txt_line.invoice_id.type in ['in_invoice', 'in_refund']:\n if not txt_line.invoice_id.supplier_invoice_number:\n raise exceptions.except_orm(\n _('Invalid action !'),\n _(\"Unable to make txt file, because the bill has no\"\n \" reference number free!\"))\n else:\n number = self.get_number(\n txt_line.invoice_id.supplier_invoice_number.strip(),\n inv_type, 20)\n 
elif txt_line.invoice_id.number:\n number = self.get_number(\n txt_line.invoice_id.number.strip(), inv_type, 20)\n return number", "def extract_ntext_feature(self, hash_list, issue_id_list, candidate_issue2hash_dict, execute_flag,\n log_message_without_issueid_path, dsc_issue_dict, comment_issue_dict,\n output_dir):\n\n if execute_flag==0:\n\n return_dict = {}\n for num_ite in range(1, self.max_iteration+1):\n if self.verbose > 0:\n print(\"ntext feature num ite: {0}/{1}\".format(num_ite, self.max_iteration))\n\n temp_dict = util.load_pickle(\"{0}/cosine_similarity_dict_ite{1}.pickle\".format(output_dir, num_ite))\n\n if self.verbose > 0:\n len_issue_id = len(temp_dict)\n\n for idx_issue_id, issue_id in enumerate(temp_dict.keys()):\n\n if self.verbose > 0:\n if (idx_issue_id%100)==0:\n print(\"ntext feature -- Done {0}/{1}\".format(idx_issue_id, len_issue_id))\n\n if not issue_id in candidate_issue2hash_dict:\n continue\n\n return_dict[issue_id] = {}\n for commit_hash in candidate_issue2hash_dict[issue_id]:\n return_dict[issue_id][commit_hash] = temp_dict[issue_id][commit_hash]\n else:\n ntext_similarity_obj = ntext_similarity.NtextSimilarity(verbose=self.verbose)\n\n # repo_dict [dict<commit hash, log message] -- log message for each commit\n log_msg_repo_dict = util.load_pickle(log_message_without_issueid_path) #\n\n corpus, processed_dsc_issue_dict, processed_comment_issue_dict, processed_log_msg_repo_dict = ntext_similarity_obj.make_corpus_and_input(dsc_issue_dict, comment_issue_dict, log_msg_repo_dict, hash_list, issue_id_list)\n vectorizer = TfidfVectorizer()\n vectorizer.fit(corpus)\n\n log_msg_vec_dict = {}\n for commit_hash in hash_list:\n log_msg_vec_dict[commit_hash] = vectorizer.transform([processed_log_msg_repo_dict[commit_hash]])\n\n if self.verbose > 0:\n len_issue_id = len(candidate_issue2hash_dict)\n\n return_dict = {}\n for idx_issue_id, issue_id in enumerate(candidate_issue2hash_dict.keys()):\n\n if self.verbose > 0:\n if (idx_issue_id%100)==0:\n print(\"ntext feature -- Done {0}/{1}\".format(idx_issue_id, len_issue_id))\n\n return_dict[issue_id] = {}\n issue_text_vec = vectorizer.transform([processed_dsc_issue_dict[issue_id] + \" \" + processed_comment_issue_dict[issue_id]])\n for commit_hash in candidate_issue2hash_dict[issue_id]:\n return_dict[issue_id][commit_hash] = cosine_similarity(issue_text_vec, log_msg_vec_dict[commit_hash])[0,0]\n\n return return_dict", "def process_body(nlp, body: str, replacements=None, require_labels=False):\n\n if replacements is None:\n replacements = []\n\n entry = {\"ents\": [],\n \"cats\": [],\n \"replacements\": [],\n \"text\": None,\n \"docstrings\": []}\n\n body_ = body.lstrip()\n initial_strip = body[:len(body) - len(body_)]\n\n replacements = correct_entities(replacements, [(0, len(initial_strip))])\n\n docsting_offsets = get_docstring(body_)\n\n body_, replacements, docstrings = remove_offsets(body_, replacements, docsting_offsets)\n entry['docstrings'].extend(docstrings)\n\n was_valid = body_valid(body_)\n initial_labels = get_initial_labels(body_)\n\n if require_labels and initial_labels is None:\n return None\n\n returns, return_cuts = unpack_returns(body_, initial_labels)\n annotations, annotation_cuts = unpack_annotations(body_, initial_labels)\n\n body_, replacements_annotations, _ = remove_offsets(body_, replacements + annotations,\n return_cuts + annotation_cuts)\n is_valid = body_valid(body_)\n if was_valid != is_valid:\n print(\"Failed processing\")\n return None\n # raise Exception()\n\n replacements_annotations = 
adjust_offsets2(replacements_annotations, len(initial_strip))\n body_ = initial_strip + body_\n\n entry['replacements'].extend(list(filter(lambda x: isint(x[2]), replacements_annotations)))\n entry['ents'].extend(list(filter(lambda x: not isint(x[2]), replacements_annotations)))\n entry['cats'].extend(returns)\n entry['text'] = body_\n\n entry['replacements'] = resolve_self_collisions2(entry['replacements'])\n\n # assert isvalid(nlp, body_, entry['replacements'])\n assert isvalid(nlp, body_, entry['ents'])\n\n return entry", "def get_sentence(self):", "def convert_line(self, line):\n if not (line):\n # end of file\n return self.cleanup_section()\n \n # replace bold/italic with \\textbf{\\textit{x}\n line = re.sub(r\"(\\*__)(.*)(__\\*)\", r\"\\\\textbf{\\\\textit{\\2}}\", line)\n\n match_results = heading_1_re.match(line)\n # \\hypertarget{introduction}{%\n # \\section{1.0. Introduction}\\label{introduction}}\n if match_results: \n # print('matched {0} with {1}'.format(match_results.groups(), heading_1_re))\n title = match_results.groups()[0]\n return '{cleanup}\\hypertarget{{{title}}}{{%\\n\\part{{{title}}}\\label{{{title}}}}}\\n'.format(\n cleanup=self.cleanup_section(),\n title=title)\n match_results = heading_2_re.match(line)\n if match_results:\n # print('matched {0} with {1}'.format(match_results.groups(), heading_2_re))\n title = match_results.groups()[0]\n return '{cleanup}\\hypertarget{{chapter_{title}}}{{%\\n\\chapter{{{title}}}\\label{{{title}}}}}\\n'.format(\n cleanup=self.cleanup_section(),\n title=title)\n match_results = heading_3_re.match(line)\n if match_results:\n # print('matched {0} with {1}'.format(match_results.groups(), heading_2_re))\n title = match_results.groups()[0]\n return '{cleanup}\\hypertarget{{section_{title}}}{{%\\n\\section{{{title}}}\\label{{{title}}}}}\\n'.format(\n cleanup=self.cleanup_section(),\n title=title)\n match_results = heading_4_re.match(line)\n if match_results:\n # print('matched {0}'.format(match_results.groups()))\n title = match_results.groups()[0]\n return '{cleanup}\\hypertarget{{subsection_{title}}}{{%\\n\\subsection{{{title}}}\\label{{{title}}}}}\\n'.format(\n cleanup=self.cleanup_section(),\n title=title)\n match_results = heading_6_re.match(line)\n if match_results:\n # print('matched {0}'.format(match_results.groups()))\n title = match_results.groups()[0]\n return '{cleanup}\\hypertarget{{subsubsection_{title}}}{{%\\n\\subsubsection{{{title}}}\\label{{{title}}}}}\\n'.format(\n cleanup=self.cleanup_section(),\n title=title)\n # return '{cleanup}\\hypertarget{{subsubsection_{title}}}{{%\\n\\subsubsection{{{title}}}\\label{{{title}}}}}\\n'.format(\n # cleanup=self.cleanup_section(),\n # title=title)\n match_results = unordered_list_re.match(line)\n if match_results:\n # print('matched unordered_list: {0}'.format(line))\n return '{cleanup}\\n\\item {line}'.format(cleanup=self.cleanup_section(section='itemize'),line=match_results.groups()[0])\n \n match_results = unordered_sublist_re.match(line)\n if match_results:\n # print('matched unordered_sublist: {0}'.format(line))\n return '{cleanup}\\n\\t\\item {line}'.format(cleanup=self.cleanup_section(section='itemize', subsection=True),line=match_results.groups()[0])\n\n return '{cleanup}{line}'.format(cleanup=self.cleanup_section(), line=line)", "def logic_program_form(self):\r\n return '% ASP{f} Translation of System Description ' + self.name + '\\n\\n'", "def make_po(self, cr, uid, ids, context=None):\n res = super(procurement_order, self).make_po(cr, uid, ids, context=None)\n for 
procurement in self.browse(cr, uid, ids, context=context):\n # da procurement prendo id ordine x ripassare le righe e vedere il listino for\n pricelist_item = self.pool.get('product.pricelist').price_get(cr, uid, [procurement.purchase_id.pricelist_id.id], procurement.purchase_id.product_id.id, procurement.product_qty or 1.0, procurement.purchase_id.partner_id.id)\n pricelist_item_id = pricelist_item['item_id'][procurement.purchase_id.pricelist_id.id]\n price_item = self.pool.get('product.pricelist.item').browse(cr, uid, pricelist_item_id, context=context)\n \n if price_item:\n for line in procurement.purchase_id.order_line:\n vals = {\n 'discount': price_item.discount_line,\n 'discount2': price_item.discount2_line\n }\n self.pool.get('purchase.order.line').write(cr, uid, [line.id], vals)\n \n return res", "def set_lic_text(self, doc, text):\n if self.has_extr_lic(doc):\n if not self.extr_text_set:\n self.extr_text_set = True\n self.extr_lic(doc).text = text\n return True\n else:\n raise CardinalityError('ExtractedLicense::text')\n else:\n raise OrderError('ExtractedLicense::text')", "def processDescrString(self):\n\t\tself.descrString = self._getVal(4, 1)", "def __init__(self, text):\n self._fwf = FwfSlicer(FIXED_FORMAT)\n self.text = text\n\n # Parsing definition\n self.number = int(self._get_col(1).strip())\n self.current_status = self._get_col(7).strip()\n self.last_revised = datetime.strptime(self._get_col(8).strip(), '%m/%d/%Y %H:%M')\n self.approval_risk = self._get_col(9).strip()\n self.rtep = self._get_col(11).strip()\n self.previous_status = self._get_col(12).strip()\n\n # Related entities\n self.outages = []\n self.causes = []\n self.date_log = []\n self.history_log = []", "def process_line(nlp, line, parsed_file):\n m = json.loads(line)\n if \"highlights\" in m:\n if m['sentences'] != '' and m['sentences'] != [] and m['sentences'] != [''] and m['highlights'] != '':\n m[\"highlights_ud\"] = dependency_parse(nlp, m['highlights'])\n m[\"sentences_ud\"] = dependency_parse(nlp, m['sentences'])\n else:\n if m['sentences'] != '' and m['sentences'] != [] and m['sentences'] != ['']:\n m[\"sentences_ud\"] = dependency_parse(nlp, m['sentences'])\n if \"sentences_ud\" in m:\n parsed_file.write(json.dumps(m))\n parsed_file.write('\\n')", "def extract_text(infile):\n # Get text from mudraw\n text = subprocess.check_output(['mudraw', '-F', 'txt', infile])\n\n # Cleanup raw text\n match = re.search(\n r'.*?Activity \\/ Remarks(?P<table1>.*?)Activities not shown on the ' +\n r'DABS Chart Side:.*?Activity \\/ Remarks(?P<table2>.*?)For detailed ' +\n r'information regarding the DABS',\n text,\n re.MULTILINE | re.DOTALL)\n if not match:\n raise ExtractionError('Could not extract text from PDF.')\n false_or_none_string = lambda x: bool(x) and x.lower() != 'none'\n data = '\\n\\n\\n'.join(match.groups())\n raw_parts = re.sub(r'\\n[ \\t]+\\n', '\\n\\n', data).split('\\n\\n\\n')\n parts = filter(false_or_none_string, map(lambda x: x.strip(), raw_parts))\n\n # Write CSV\n headers = (\n b'Firing-Nr\\nD-/R-Area\\nNOTAM-Nr',\n b'Validity UTC',\n b'Lower Limit\\nAMSL or FL',\n b'Upper Limit\\nAMSL or FL',\n b'Location',\n b'Center Point',\n b'Covering Radius',\n b'Activity / Remarks',\n )\n rows = []\n for i, part in enumerate(parts):\n # Regexes\n multiple_newlines_re = re.compile(r'\\n+')\n height_re = re.compile(r'(GND|[0-9]+m \\/ [0-9]+ft|FL[0-9]{2,3}|REF AIP)')\n center_radius_re = re.compile(r'([0-9]{6}N [0-9]{7}E)\\s+?(.*?NM)')\n\n # Separate columns (warning: hackish code ahead!)\n row = {}\n step1 
= re.split(r'([0-2][0-9][0-6][0-9] - [0-2][0-9][0-6][0-9])', part)\n row['nr'] = step1[0].strip()\n timestring = '\\n'.join(step1[1:-1])\n row['validity'] = multiple_newlines_re.sub('\\n', timestring)\n step2 = filter(None, height_re.split(step1[-1].strip()))\n row['lower'] = step2[0]\n row['upper'] = step2[2]\n step3 = filter(None, center_radius_re.split(step2[-1].strip()))\n row['location'] = step3[0].strip()\n row['center'] = step3[1].strip()\n row['radius'] = step3[2].strip()\n row['activity'] = multiple_newlines_re.sub('\\n', step3[3].strip())\n\n # Add to list of rows\n rows.append((\n row['nr'].encode('utf8'),\n row['validity'].encode('utf8'),\n row['lower'].encode('utf8'),\n row['upper'].encode('utf8'),\n row['location'].encode('utf8'),\n row['center'].encode('utf8'),\n row['radius'].encode('utf8'),\n row['activity'].encode('utf8'),\n ))\n\n return tablib.Dataset(*rows, headers=headers)", "def clean_line_generator_v1(df_pkl=None):\n\tdata_df = get_df(df_pkl)\n\t\n\twith open('/home/sus118/rdoc_w2v/data/one-abstract-per-line.txt', 'w') as f:\n\t\tcount = 0\n\t\tfor abst in iter(data_df.content):\n\t\t\tif not abst:\n\t\t\t\tcontinue\n\t\t\tabst = clean_sent(abst)\n\t\t\tf.write(abst+'\\n')\n\t\t\tcount+=1\n\t\t\tif count%10000==0:\n\t\t\t\tprint(f'{count} done')", "def annotate(self,corpus):\n\n\t\tassert corpus.parsed == True, \"Corpus must already be parsed before entity recognition\"\n\n\t\tfor doc in corpus.documents:\n\t\t\tentityCount = len(doc.entities)\n\t\t\tfor sentence in doc.sentences:\n\t\t\t\twords = [ t.word for t in sentence.tokens ]\n\t\t\t\t\n\t\t\t\tfor i,t in enumerate(sentence.tokens):\n\t\t\t\t\tif not isNumber(t.word):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tsourceEntityID = \"T%d\" % (entityCount+1)\n\t\t\t\t\ttext = doc.text[t.startPos:t.endPos]\n\t\t\t\t\tloc = [i]\n\n\t\t\t\t\te = kindred.Entity('quantity',text,[(t.startPos,t.endPos)],sourceEntityID=sourceEntityID)\n\t\t\t\t\tdoc.addEntity(e)\n\t\t\t\t\tsentence.addEntityAnnotation(e,loc)\n\t\t\t\t\tentityCount += 1", "def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or 
unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list", "def text_feature_extract(df):\n return df", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = 
toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def test_text_classifier_get_details(self):\n pass", "def __gettextinfo(edudict, eduspan):\n # text = lnode.text + \" \" + rnode.text\n text = []\n for idx in range(eduspan[0], eduspan[1]+1, 1):\n text += edudict[idx]\n # Return: A list of token indices\n return text", "def _split_line( self, data_list, line_num, text ):\n\t\t# if blank line or context separator, just add it to the output list\n\t\tif not line_num:\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# if line text doesn't need wrapping, just add it to the output list\n\t\tsize = len( text )\n\t\tmax_len = self._wrapcolumn\n\t\tif ( size <= max_len ) or ( ( size - ( text.count( '\\0' ) * 3 ) ) <= max_len ):\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# scan text looking for the wrap point, keeping track if the wrap\n\t\t# point is inside markers\n\t\ti = 0\n\t\tn = 0\n\t\tmark = ''\n\t\twhile n < max_len and i < size:\n\t\t\tif text[i] == '\\0':\n\t\t\t\ti += 1\n\t\t\t\tmark = text[i]\n\t\t\t\ti += 1\n\t\t\telif text[i] == '\\1':\n\t\t\t\ti += 1\n\t\t\t\tmark = ''\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tn += 1\n\n\t\t# wrap point is inside text, break it up into separate lines\n\t\tline1 = text[:i]\n\t\tline2 = text[i:]\n\n\t\t# if wrap point is inside markers, place end marker at end of first\n\t\t# line and start marker at beginning of second line because each\n\t\t# line will have its own table tag markup around it.\n\t\tif mark:\n\t\t\tline1 += '\\1'\n\t\t\tline2 = '\\0' + mark + line2\n\n\t\t# tack on first line onto the output list\n\t\tdata_list.append( ( line_num, line1 ) )\n\n\t\t# use this routine again to wrap the remaining text\n\t\tself._split_line( data_list, '>', line2 )", "def lda_description(review_text, min_topic_freq=0.05,topic_model_file='lda_model_10'):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n \n # parse the review text with spaCy\n parsed_review = nlp(review_text)\n \n # lemmatize the text and remove punctuation and whitespace\n unigram_review = [token.lemma_ for token in parsed_review\n if not punct_space(token)]\n \n # apply the first-order and secord-order phrase models\n bigram_review = bigram_model[unigram_review]\n trigram_review = trigram_model[bigram_review]\n \n # remove any remaining stopwords\n trigram_review = [term for term in trigram_review\n if not term in spacy.lang.en.STOP_WORDS]\n #print('bow:',trigram_review)\n \n # create a bag-of-words representation\n review_bow = sents_dict.doc2bow(trigram_review)\n \n \n # create an LDA representation\n lda = LdaMulticore.load(joinp(pilot_path, topic_model_file)) # my addition\n review_lda = lda[review_bow]\n \n \n # mine\n if topic_model_file=='lda_model_25':\n topic_names=topic_names_25\n elif topic_model_file=='lda_model_10':\n topic_names=topic_names_10\n #\n \n # sort with the most highly related topics first\n #review_lda = sorted(review_lda, key=lambda topic_number,freq: freq)\n listt=[]\n for topic_number, freq in review_lda:\n if freq < min_topic_freq:\n break\n \n # print the most 
highly related topic names and frequencies\n #print('{:10} {}'.format(topic_names[topic_number],round(freq, 3))) ## for now not putting yet topic names\n #print('{:25} {}'.format(topic_number,round(freq, 3))) \n x=[topic_number,topic_names[topic_number],np.round(freq, 3)]\n listt.append(x)\n return(listt)", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)" ]
[ "0.5984977", "0.5817351", "0.5476201", "0.5464979", "0.54381603", "0.5437341", "0.539142", "0.5348691", "0.53211063", "0.5294485", "0.5236899", "0.51855206", "0.51376784", "0.51221997", "0.50945246", "0.50891274", "0.5085302", "0.50443596", "0.5012782", "0.5011681", "0.49394357", "0.49224195", "0.49133125", "0.4907674", "0.4897296", "0.48888484", "0.48883978", "0.4881489", "0.48669836", "0.48628157", "0.48531726", "0.48528412", "0.48445594", "0.48409894", "0.4838343", "0.4822445", "0.48221585", "0.4821897", "0.48173103", "0.4814343", "0.48080188", "0.4799911", "0.47974104", "0.47970858", "0.47935995", "0.47797868", "0.4774159", "0.4771138", "0.47690263", "0.47478116", "0.47475", "0.47330993", "0.47228995", "0.4721317", "0.47162384", "0.4712286", "0.4705035", "0.47045442", "0.47034055", "0.4702988", "0.4702785", "0.4694796", "0.4690531", "0.46837232", "0.46833783", "0.4683027", "0.46827456", "0.46776152", "0.4675718", "0.46717528", "0.46650004", "0.46627817", "0.46610668", "0.46516377", "0.465057", "0.46476045", "0.46392775", "0.46248302", "0.46229813", "0.4619051", "0.4612879", "0.46048918", "0.46042636", "0.45974267", "0.45969552", "0.4595076", "0.45936123", "0.45922068", "0.45872733", "0.45806322", "0.45776308", "0.45714647", "0.456834", "0.45680645", "0.45648935", "0.45634243", "0.45629305", "0.45621517", "0.4561817", "0.45582947" ]
0.7215489
0
Standardize quantitative features. The standardizer is stored as an object attribute. It will be copied into the P5_SegmentClassifier object.
def feature_scale(self): #------------------------------------------------------------------------- # List of quantitative features to be standardized #------------------------------------------------------------------------- list_quant_feature = ['Quantity','UnitPrice'] self._list_quant_feature = list_quant_feature.copy() #------------------------------------------------------------------------- # Standardization is applied over quantitative features in list. #------------------------------------------------------------------------- X_std = self.std_scale.transform(self.df_invoice_line[self.list_quant_feature]) df_quant_std = pd.DataFrame(X_std, index=self.df_invoice_line.index) #------------------------------------------------------------------------- # Columns from standardized dataframe are renamed #------------------------------------------------------------------------- df_quant_std.rename(columns={0:'STD_Quantity',1:'STD_UnitPrice'}\ ,inplace=True) #------------------------------------------------------------------------- # Standardized values dataframe is aggregated to df_invoice_line #------------------------------------------------------------------------- list_col_drop = ['Quantity','UnitPrice'] list_col_keep = \ [col for col in self.df_invoice_line.columns if col not in list_col_drop ] self.df_invoice_line = self.df_invoice_line[list_col_keep] self.df_invoice_line \ = pd.concat([self.df_invoice_line,df_quant_std], axis=1) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def standardiser(self):\n # Select only numeric features first\n\n #self.X = self.data.loc[:, self.data.columns != self.target].values\n numeric_columns = []\n for col in self.X.columns:\n if self.X[col].dtype!='object':\n numeric_columns.append(col)\n scaler = preprocessing.StandardScaler().fit(self.X[numeric_columns]) \n # Now we can standardise\n self.X[numeric_columns] = scaler.transform(self.X[numeric_columns])", "def test_scale_features_standardize(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.60355, -0.568043], [-1.1543, 1.15465], [0.550748, -0.586608]])\n\n # perform standardization feature scaling and check answer\n cdata.scale_features('standardize')\n self.assertTrue(allclose(cdata.data, answer))", "def _standardize(self):\n deviation = np.std(self.series)\n self.series = (self.series - np.mean(self.series)) / (deviation if deviation != 0 else 1)", "def standardize(X):\n\n scaler = StandardScaler()\n X_scaled = scaler.fit_transform(X)\n return X_scaled", "def standardize(self, x):\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.rescale:\n x *= self.rescale\n if self.samplewise_center:\n x -= np.mean(x, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, keepdims=True) + K.epsilon())\n\n if self.featurewise_center:\n if self.mean is not None:\n x -= self.mean\n else:\n warnings.warn('This AudioDataGenerator specifies '\n '`featurewise_center`, but it hasn\\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.featurewise_std_normalization:\n if self.std is not None:\n x /= (self.std + K.epsilon())\n else:\n warnings.warn('This AudioDataGenerator specifies '\n '`featurewise_std_normalization`, but it hasn\\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.zca_whitening:\n if self.principal_components is not None:\n flatx = np.reshape(x, (-1, np.prod(x.shape[-2:])))\n whitex = np.dot(flatx, self.principal_components)\n x = np.reshape(whitex, x.shape)\n else:\n warnings.warn('This AudioDataGenerator specifies '\n '`zca_whitening`, but it hasn\\'t '\n 'been fit on any training data. 
Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x", "def pre_processing(self, whole_dataset, type=None):\n # for svm\n X = whole_dataset\n if self._scaler == None:\n self._scaler = preprocessing.StandardScaler().fit(X)\n else:\n basic.outputlogMessage('warning, StandardScaler object already exist, this operation will overwrite it')\n self._scaler = preprocessing.StandardScaler().fit(X)\n # save\n joblib.dump(self._scaler, scaler_saved_path)", "def standardize(sets_x):\n\n # initiate empty list for return variable\n standardized_x = []\n\n # iterate through subsets\n for x in sets_x:\n # call preprocess function, normalize and generate features for each subset\n # and store the result into list\n standardized_x.append(generate_features(x, 2, True, with_log=True, with_sqrt=True, cross_terms=True))\n\n return standardized_x", "def standardize(X_train_input, X_test_input):\r\n from sklearn.preprocessing import StandardScaler\r\n sc = StandardScaler()\r\n sc.fit(X_train_input)\r\n\r\n X_train_std = sc.transform(X_train_input)\r\n X_test_std = sc.transform(X_test_input)\r\n \r\n return X_train_std, X_test_std", "def standardize(self, x):\n if not self.image_resample:\n x = to_shape(x, self.image_shape, constant_values=-1024)\n elif self.image_resample:\n x = resample(x, self.image_shape)\n\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.voxelwise_normalization:\n if self.voxel_bounds is not None:\n x = voxelwise_normalize(x, self.voxel_bounds)\n if self.voxelwise_center:\n if self.voxel_mean is not None:\n x -= self.voxel_mean\n if self.voxelwise_std_normalization:\n x /= (self.voxelwise_std + 1e-7)\n if self.samplewise_center:\n x -= np.mean(x, axis=self.channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=self.channel_axis, keepdims=True) + 1e-7)\n return x", "def standardize_data(f, train_mask):\n # standardize data\n f = f.todense()\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = f[:, np.squeeze(np.array(sigma > 0))]\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = (f - mu) / sigma\n return f", "def calculateStandardisation(vector):\r\n global standardisation\r\n # from http://sebastianraschka.com/Articles/2014_about_feature_scaling.htm\r\n std_scale = preprocessing.StandardScaler().fit(vector)\r\n standardisation = std_scale.transform(vector)", "def standardize(tX):\n features = tX.T\n features_len = len(features)\n means = np.reshape(np.mean(features, axis=1), [features_len, 1])\n stds = np.reshape(np.std(features, axis=1), [features_len, 1])\n features_std = (features - means) / stds\n new_tX = features_std.T\n return new_tX", "def standardize(self, snpreader):\n\n for dtype in [np.float64, np.float32]:\n\n snps = snpreader.read(order=\"F\", force_python_only=True, dtype=dtype).val\n self.assertEqual(dtype, snps.dtype)\n\n snp_s1 = Unit().standardize(snps.copy(), force_python_only=True)\n snp_s2 = Unit().standardize(\n snps.copy(), block_size=100, force_python_only=True\n )\n snps_F = np.array(snps, dtype=dtype, order=\"F\")\n snp_s3 = Unit().standardize(snps_F)\n snps_C = np.array(snps, dtype=dtype, order=\"C\")\n snp_s4 = Unit().standardize(snps_C)\n\n self.assertEqual(snp_s1.shape[0], snp_s2.shape[0])\n self.assertEqual(snp_s1.shape[1], snp_s2.shape[1])\n\n self.assertEqual(snp_s1.shape[0], snp_s3.shape[0])\n self.assertEqual(snp_s1.shape[1], snp_s3.shape[1])\n\n self.assertEqual(snp_s1.shape[0], 
snp_s4.shape[0])\n self.assertEqual(snp_s1.shape[1], snp_s4.shape[1])\n\n self.assertTrue(np.allclose(snp_s1, snp_s2, rtol=1e-05, atol=1e-05))\n self.assertTrue(np.allclose(snp_s1, snp_s3, rtol=1e-05, atol=1e-05))\n self.assertTrue(np.allclose(snp_s1, snp_s4, rtol=1e-05, atol=1e-05))\n\n snp_beta1 = Beta(1, 25).standardize(snps.copy(), force_python_only=True)\n snps_F = np.array(snps, dtype=dtype, order=\"F\")\n snp_beta2 = Beta(1, 25).standardize(snps_F)\n snps_C = np.array(snps, dtype=dtype, order=\"C\")\n snp_beta3 = Beta(1, 25).standardize(snps_C)\n\n self.assertEqual(snp_beta1.shape[0], snp_beta2.shape[0])\n self.assertEqual(snp_beta1.shape[1], snp_beta2.shape[1])\n self.assertEqual(snp_beta1.shape[0], snp_beta3.shape[0])\n self.assertEqual(snp_beta1.shape[1], snp_beta3.shape[1])\n\n self.assertTrue(np.allclose(snp_beta1, snp_beta2, rtol=1e-05, atol=1e-05))\n self.assertTrue(np.allclose(snp_beta1, snp_beta3, rtol=1e-05, atol=1e-05))", "def do_preprocess_on_segment_raw(seg_raw_df):\n sigma = 2\n median_kernel_size = 5\n print \"=======================start preprocessing segment raw dataframe=================\"\n print \"parameters: \" + \"gaussian filter sigma: %.2f, median kernel size: %.2f\" % (sigma, median_kernel_size)\n pp_df = seg_raw_df.copy(deep=True)\n df_mean = pp_df[s_info.raw_value_names].mean()\n df_std = pp_df[s_info.raw_value_names].std()\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(sp_signal.medfilt, median_kernel_size)\n pp_df[s_info.raw_value_names] = (pp_df[s_info.raw_value_names] - df_mean)/df_std\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(gaussian_filter1d, sigma=sigma, axis=0, order=0, mode='reflect')\n return pp_df", "def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = 
QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s", "def quantize(self, df):\n if len(self.dict_scalers) == 0:\n raise Exception(\"[ERROR] quantize method called prior to\"\n \"normalization transform method \")\n\n quant_df = pd.DataFrame()\n if 'OneForAll' in self.dict_scalers:\n # quantization is applied on all features\n min_fp = float(np.amin(df))\n max_fp = float(np.amax(df))\n scale = (max_fp - min_fp) / (127 - (-127))\n zero_point = 127 - (max_fp / scale)\n quant_df = df / scale + zero_point\n else:\n # quantization is applied independently for each feature/column\n lbl_list = df.columns.values\n for lbl in lbl_list:\n min_fp = float(np.amin(df[lbl]))\n max_fp = float(np.amax(df[lbl]))\n scale = (max_fp - min_fp) / (127 - (-127))\n zero_point = 127 - (max_fp / scale)\n quant_df[lbl] = df[lbl] / scale + zero_point\n return quant_df.astype(np.int8)", "def standard_scale(X_train, X_test):\n preprossor = StandardScaler().fit(X_train)\n X_train = preprossor.transform(X_train)\n X_test = preprossor.transform(X_test)\n return X_train, X_test", "def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value", "def _normalize_feature(self, feature):\n\n for ic in range(self.data_shape[0]):\n feature[ic] = (feature[ic] - self.feature_mean[ic]\n ) / self.feature_std[ic]\n return feature", "def _standardize(self, x):\r\n\t\tkurts = kurtosis(x) # calculate Fisher kurtosis\r\n\t\tk_x = np.abs(kurts)**(1./4) # the quantity for standardization (k_x in [1])\r\n\t\tx_hat = x / k_x # the standardized data\r\n\t\treturn x_hat", "def standardize(self, inputData):\n\n return (inputData - self.mean) / self.std", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))", "def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n if scaler is not None:\n self.scaler = scaler\n\n elif self.scaler is None:\n features = np.vstack([d.features for d in self.data])\n self.scaler = StandardScaler(replace_nan_token=replace_nan_token)\n self.scaler.fit(features)\n\n for d in 
self.data:\n d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])\n\n return self.scaler", "def get_normalizer(data):\n scaler = StandardScaler().fit(data)\n return scaler", "def standardize_data(X_train, X_test):\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n # apply same transformation to test data\n X_test = scaler.transform(X_test)\n return X_train, X_test", "def persist_standardizer(self, std_object):\n object_path = 'model_objects/'\n file_name = f'market_maker_standardizer_{self.target_coin}.pkl'\n self.s3_client.put_object(Bucket=self.s3_bucket,\n Key=object_path + file_name,\n Body=pickle.dumps(std_object, pickle.HIGHEST_PROTOCOL)\n )\n return", "def standardize(train_data_jets, test_data_jets):\n \n nbr_jets = train_data_jets.shape[0]\n \n for jet in range(nbr_jets):\n # extract features for standardization\n train_data_features = train_data_jets[jet][:,2:] \n test_data_features = test_data_jets[jet][:,2:] \n # store train mean and std without considering nan values\n train_mean = np.nanmean(train_data_features, axis=0)\n train_std = np.nanstd(train_data_features, axis=0)\n # standardize train and test data with train mean and std\n train_data_features = (train_data_features - train_mean) / train_std\n test_data_features = (test_data_features - train_mean) / train_std\n # insert standardized features into original dataset with predictions\n train_data_jets[jet][:,2:] = train_data_features\n test_data_jets[jet][:,2:] = test_data_features\n \n return train_data_jets, test_data_jets", "def standardizeRatios( self, ratios ):\n\t\tratios_standardized = ratios.copy()\n\t\tzscore = lambda x: ( x - x.mean() ) / x.std()\n\t\tfor row in ratios.iterrows():\n\t\t\tratios_standardized.loc[ row[0] ] = zscore( row[1] )\n\t\treturn ratios_standardized", "def trim_and_standardize(train_df, train_labels, test_df, train_idx, test_idx):\n # Trim\n train_reduced = perform_reduce(train_df.as_matrix(), train_idx)\n labels_reduced = perform_reduce(np.asarray(train_labels), train_idx)\n test_reduced = perform_reduce(test_df.as_matrix(), test_idx)\n # Standardize\n scaler = StandardScaler()\n train = scaler.fit_transform(train_reduced)\n test = scaler.fit_transform(test_reduced)\n return(train, labels_reduced, test)", "def __init__(self):\n self.svclassifier = SVC(kernel='linear')", "def standardization (x_train,x_test):\n scaler = StandardScaler()\n ## reshape training data to 2D, fit and transform scaler\n scaler.fit(np.reshape(x_train, [x_train.shape[0], x_train.shape[1]*x_train.shape[2]*x_train.shape[3]]))\n x_train = scaler.transform(np.reshape(x_train, [x_train.shape[0], x_train.shape[1]*x_train.shape[2]*x_train.shape[3]]))\n ## reshape training data to 3D (n * frequencyrate * number of channels)\n x_train = np.reshape(x_train, [x_train.shape[0],x_test.shape[1],x_test.shape[2],x_test.shape[3]])\n x_test = scaler.transform(np.reshape(x_test, [x_test.shape[0], x_test.shape[1]*x_test.shape[2]*x_test.shape[3]]))\n x_test = np.reshape(x_test,[x_test.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3]])\n return x_train, x_test, scaler", "def get_sase(self, datain, points=None): \n try: #try to average over and array input\n\t\t\tdataout = np.mean(datain[-(points):])\n\t\t\tsigma = np.std( datain[-(points):])\n except: #if average fails use the scaler input\n print \"Detector is not a waveform PV, using scalar value\"\n dataout = datain\n sigma = -1\n return dataout, sigma", "def _scale_features(self, features):\n assert 
isinstance(features, np.ndarray), \"Input is not a numpy array!\"\n\n return self.scaler.transform(features.reshape(1, -1))", "def gain_standardization(self):\r\n \"\"\"\r\n load all gain factors from any hm stage (gains are identical for all SHM stages)\r\n \"\"\"\r\n gain_factors = []\r\n for i in range(self.number_of_paths):\r\n value = self.data_of_hm_cycle['coupon']['path_data'][0][0][0][i][4][0][0]\r\n gain_factors.append(value)\r\n gain_factors = np.array(gain_factors)\r\n gains_factor_new_dim = gain_factors[np.newaxis, ...]\r\n matrix_gains_2d = np.repeat(gains_factor_new_dim, self.signal_length, axis=0).T\r\n matrix_of_gains = matrix_gains_2d[:, :, np.newaxis]\r\n\r\n \"\"\"\r\n divide all signals by the gain factors such that all gains are standardized to one\r\n \"\"\"\r\n for i in range(self.num_of_hm_stages):\r\n entries = i*self.number_of_paths\r\n hm_cycle_set = self.sensor_data_flattened_[entries : entries + self.number_of_paths]\r\n divided_data = np.divide(hm_cycle_set, matrix_of_gains)\r\n self.sensor_data_flattened_[entries : entries + self.number_of_paths] = divided_data\r\n self.sensor_data_original_shape_[i, :, :, :] = divided_data\r\n\r\n return", "def normalize_feature(df):\n return df.apply(lambda column: (column - column.mean()) / column.std())", "def generateStandardizedColumns(self, data_set, columns):\n cstat = data_set.colStats()\n stdDevs = np.sqrt(cstat.variance())\n means = cstat.mean()\n normalizingFactors = map(lambda x: x if x != 0.0 else 1.0, stdDevs)\n result = []\n for col, mean, nf in zip(columns, means, normalizingFactors):\n standardized = self.generateStandardizationFunction(col, mean, nf)\n result.append(SparseVectorDimension(\n name=col.name,\n ind_vars=[col],\n function=standardized\n ))\n return result", "def normalize_X(X):\n scaler = preprocessing.StandardScaler()\n X = scaler.fit_transform(X)\n return X", "def standardize(\n x: torch.Tensor,\n stats: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n x_scaled = (x - stats['mean']) / stats['std']\n return x_scaled", "def unstandardize(\n x: torch.Tensor,\n stats: Dict[str, torch.Tensor]) -> torch.Tensor:\n x_scaled = x * stats['std'] + stats['mean']\n return x_scaled", "def __create_scaler_type(self):\n\n if self.scalertype == \"standard\":\n return StandardScaler()\n if self.scalertype == \"minmax\":\n return MinMaxScaler(feature_range=self.featureRange)\n assert True, \"An error occured when creating a scaler of type '{}'\".format(self.scalertype)", "def normalize (a_data,a_column,b_method='MinMax') :\n if b_method == 'MinMax' :\n loc_scaler = __minmax()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])\n elif b_method == 'Standard' :\n loc_scaler = __standard()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])", "def standardize_input(self, raw_input_par):\r\n\r\n stand_input_par = IM.standardize_input(raw_input_par, self.get_in_par_means(), self.get_in_par_variances())\r\n\r\n # # Standardize by substracting the mean and dividing by the standard deviations\r\n # mean_input_par = np.subtract(raw_input_par, self.meta_model.get_in_par_means())\r\n #\r\n # input_std = np.sqrt(self.meta_model.get_in_par_variances())\r\n #\r\n # stand_input_par = np.divide(mean_input_par, input_std)\r\n\r\n return stand_input_par", "def standardize(X, axis=0, ddof=0):\n\n # Modified from scikit-learn.preprocessing.scale()!\n\n #X = np.asarray(X)\n X = np.asarray(X, dtype=np.float) # XXX: what about dtype? convert to float64? for higher precision? 
let client decide?\n Xr = np.rollaxis(X, axis) # view on X to enable broadcasting on the axis we are interested in\n \n mean_ = Xr.mean(axis=0)\n std_ = Xr.std(axis=0, ddof=ddof)\n std_[std_ == 0.0] = 1.0 # avoid NaNs due to div/zero\n\n # center mean on zero\n Xr -= mean_\n\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n mean_1 = Xr.mean(axis=0)\n if not np.allclose(mean_1, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n mean_ += mean_1\n\n # scale to unit variance\n Xr /= std_\n\n # If mean_2 is not 'close to zero', it comes from the fact that\n # std_ is very small so that mean_2 = mean_1/std_ > 0, even if\n # mean_1 was close to zero. The problem is thus essentially due\n # to the lack of precision of mean_. A solution is then to\n # substract the mean again.\n mean_2 = Xr.mean(axis=0)\n if not np.allclose(mean_2, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0.\")\n Xr -= mean_2\n mean_ += mean_2\n\n # Additional check if variances are 'close to one'\n std_1 = Xr.std(axis=0, ddof=ddof)\n if not np.allclose(std_1, 1.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. Standard deviation \"\n \"not close to one after scaling.\")\n\n return X, mean_, std_", "def standardization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the mean values and the standard deviations of the input numpy array along the axis \n Mean = np.mean(input_data, axis = 0)\n Std = np.std(input_data, axis = 0)\n\n # Standardization \n standardized_input_data = (input_data - Mean) / (Std + sys.float_info.min)\n\n # Return standardized input data\n return standardized_input_data", "def __init__(self) -> None:\n self.name = \"minmaxScaler\"\n self.min = 0\n self.max = 0", "def specific_normalization(df):\n # Need to scale some vars. This is done using a StandardScaler from sklearn package\n scaler = StandardScaler()\n df['Pclass'] = df['Pclass'].astype('float64')\n df['Family'] = df['Family'].astype('float64')\n # .reshape(-1, 1) is mandatory otherwise an exception is thrown (as 'data has a single feature')\n df['Pclass'] = scaler.fit_transform(df['Pclass'].values.reshape(-1, 1))\n df['Family'] = scaler.fit_transform(df['Family'].values.reshape(-1, 1))\n\n return df", "def scale_set(train,test):\n sc = StandardScaler()\n fitted = sc.fit(train)\n return sc.transform(train), sc.transform(test)", "def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma", "def standardize(data):\n stddev = data.std()\n #if stddev == 0.:\n # sys.exit(\"data.std() == 0. 
!\")\n if stddev != 0.:\n data = (data - data.mean()) / (data.std())\n\n return data", "def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:\n raise NotImplemetedError(\"AtomisticDataset.normalize_features is not implemeneted.\")\n # if len(self.data) == 0 or self.data[0].features is None:\n # return None\n #\n # if scaler is not None:\n # self.scaler = scaler\n #\n # elif self.scaler is None:\n # features = np.vstack([d.features for d in self.data])\n # self.scaler = StandardScaler(replace_nan_token=replace_nan_token)\n # self.scaler.fit(features)\n #\n # for d in self.data:\n # d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])\n #\n # return self.scaler", "def normalize(self, normalizationLevel=\"minute\", fusionMethod=\"mean\", interpolationMethod=\"linear\"):\n raise NotImplementedError", "def SeriesStandard(series):\n mean = np.mean(series)\n variance = np.var(series)\n series = (series-mean)/variance\n return series", "def normalise_series(to_normalise: pd.Series) -> pd.Series:\n \n # return (to_normalise - to_normalise.mean()) / to_normalise.std() # 0 mean and unit standard deviation\n return to_normalise / to_normalise.std() # positive and unit standard deviation", "def standardize(data):\r\n mean = data.mean(axis=0)\r\n std = data.std(axis=0)\r\n return (data - mean)/std", "def transform(self, X, y='deprecated', copy=None): \n\n check_is_fitted(self, 'scale_')\n\n copy = copy if copy is not None else self.copy\n\n #X = check_array(X, copy=copy, warn_on_dtype=True,\n # estimator=self, dtype=FLOAT_DTYPES,\n # force_all_finite='allow-nan')\n\n if self.with_mean:\n X -= self.mean_\n if self.with_std:\n X /= self.scale_\n return X", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def normalize(self, factor):", "def scale(self, X_train, X_test):\n\n #X_train, X_test, y_train, y_test = self.split_X_y_sets()\n self.scaler.fit(X_train)\n X_train_sc = self.scaler.transform(X_train)\n X_test_sc = self.scaler.transform(X_test)\n\n return X_train_sc, X_test_sc #, y_train, y_test", "def _preprocess(self, data, normalize=False) -> np.ndarray:\n \n preprocessor = StandardScaler() if not normalize else Normalizer()\n\n data = preprocessor.fit_transform(data)\n \n return data", "def normalise_features(customer_df, features_to_normalise: List[str] = ['recency', 'frequency', 'total_spend']):\n \n for feature in features_to_normalise:\n if feature in customer_df.columns:\n customer_df[feature] = PropensityModel.normalise_series(customer_df[feature])\n return customer_df", "def standardize(x, axis=-1):\n stds_avg = np.std(x, axis=axis, keepdims=True)\n x -= np.mean(x, axis=axis, keepdims=True)\n x /= (stds_avg + 1e-8)\n return x", "def z_score_std(train, test):\n scalers = {}\n for i, sample in enumerate(train):\n scalers[i] = StandardScaler()\n train[i] = scalers[i].fit_transform(sample)\n\n for i, sample in enumerate(test):\n test[i] = scalers[i].transform(sample)\n\n return train, test", "def transform(self, data):\n data -= self.mean\n if 0.0 in self.std:\n self.std = np.where(self.std == 0.0, 1.0, self.std)\n data /= self.std\n return data", 
"def get_X_scaler(self, X_train, out_tag='lstm_scaler', save=True):\n\n X_scaler = StandardScaler()\n X_scaler.fit(X_train.values)\n self.X_scaler = X_scaler\n if save:\n print('saving X scaler: models/{}_X_scaler.pkl'.format(out_tag))\n dump(X_scaler, open('models/{}_X_scaler.pkl'.format(out_tag),'wb'))", "def normalize_dataset(self):", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def standardize_data(data):\n return (data - np.mean(data, axis=0)) / (np.std(data, axis=0) + 10 ** -16)", "def set_std(self, value, method = 'absolute'):\n for fragment in self.mdv:\n for number in self.mdv[fragment]:\n if method == 'absolute':\n self.mdv[fragment][number]['std'] = value * 1.0\n else:\n self.mdv[fragment][number]['std'] = self.mdv[fragment][number]['ratio'] * value", "def _rescale(self, result):\n if hasattr(self, \"std_statistics\"):\n if result.shape[-1] != self.std_statistics.shape[-1]:\n raise RuntimeError(\"The size of the statistics is not the same as the stored standard deviations for \"\n \"rescaling! Please check that you initialized the statistics with the correct set \"\n \"of reference samples.\")\n\n result = result / self.std_statistics\n\n return result", "def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize_feature(feature):\n # Compute mean and standard deviation, and return (x-mu)/std\n mean = np.mean(feature)\n std = np.std(feature)\n return np.divide(np.subtract(feature, mean), std)", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def __init__(self,training_data, outliers_proportion,base_nu=0.95,min_nu=0.05,default_kernel=\"rbf\"):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n\n #preprocessing data\n X = preprocessing.scale(my_data)\n\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(my_data)\n\n #define classifier\n self.classifier = svm.OneClassSVM(nu=((base_nu*outliers_proportion)+min_nu), kernel=default_kernel, gamma=0.1, cache_size=DEFAULT_CACHE_SIZE)\n self.classifier.fit(X)", "def normalize(cls, raw_score):\n assert cls.min_value == 0.0\n return super().normalize(raw_score)", "def normalize(self, 
attr_name): # DONE\n self.data[attr_name] = (self.data[attr_name] - self.data[attr_name].mean()) / self.data[attr_name].std()", "def _scale_psf(self, input_irf_file, config):\n\n # Find all \"sigma\" values - tells how many PSF components we have in the IRF file\n column_names = [col.name.lower() for col in input_irf_file['POINT SPREAD FUNCTION'].columns]\n sigma_columns = list(filter(lambda s: \"sigma\" in s.lower(), column_names))\n\n # --------------------------\n # Reading the PSF parameters\n self._psf = dict()\n self._psf['Elow'] = input_irf_file['POINT SPREAD FUNCTION'].data['Energ_lo'][0].copy()\n self._psf['Ehigh'] = input_irf_file['POINT SPREAD FUNCTION'].data['Energ_hi'][0].copy()\n self._psf['ThetaLow'] = input_irf_file['POINT SPREAD FUNCTION'].data['Theta_lo'][0].copy()\n self._psf['ThetaHi'] = input_irf_file['POINT SPREAD FUNCTION'].data['Theta_hi'][0].copy()\n\n for i in range(0, len(sigma_columns)):\n sigma_name = 'sigma_{:d}'.format(i + 1)\n self._psf[sigma_name] = input_irf_file['POINT SPREAD FUNCTION'].data[sigma_name][0].transpose().copy()\n\n self._psf['E'] = scipy.sqrt(self._psf['Elow'] * self._psf['Ehigh'])\n self._psf['Theta'] = (self._psf['ThetaLow'] + self._psf['ThetaHi']) / 2.0\n # --------------------------\n\n # Creating the energy-theta mesh grid\n energy, theta = scipy.meshgrid(self._psf['E'], self._psf['Theta'], indexing='ij')\n\n # ---------------------------------\n # Scaling the PSF energy dependence\n\n # Constant error function\n if config['energy_scaling']['err_func_type'] == \"constant\":\n scale_params = config['energy_scaling'][\"constant\"]\n # Constant scaling. Loop over all \"sigma\" values and scale them by the same factor.\n for sigma_column in sigma_columns:\n self._psf[sigma_column + '_new'] = scale_params['scale'] * self._psf[sigma_column]\n\n # Gradients error function\n elif config['energy_scaling']['err_func_type'] == \"gradient\":\n scale_params = config['energy_scaling'][\"gradient\"]\n for sigma_column in sigma_columns:\n self._psf[sigma_column + '_new'] = self._psf[sigma_column] * (\n 1 + scale_params['scale'] * gradient(scipy.log10(energy),\n scipy.log10(scale_params['range_min']),\n scipy.log10(scale_params['range_max']))\n )\n\n # Step error function\n elif config['energy_scaling']['err_func_type'] == \"step\":\n scale_params = config['energy_scaling'][\"step\"]\n break_points = list(zip(scipy.log10(scale_params['transition_pos']),\n scale_params['transition_widths']))\n\n for sigma_column in sigma_columns:\n self._psf[sigma_column + '_new'] = self._psf[sigma_column] * (\n 1 + scale_params['scale'] * step(scipy.log10(energy), break_points)\n )\n\n else:\n raise ValueError(\"Unknown PSF scaling function {:s}\"\n .format(config['energy_scaling']['err_func_type']))\n # ---------------------------------\n\n # ---------------------------------\n # Scaling the PSF angular dependence\n\n # Constant error function\n if config['angular_scaling']['err_func_type'] == \"constant\":\n scale_params = config['angular_scaling'][\"constant\"]\n # Constant scaling. 
Loop over all \"sigma\" values and scale them by the same factor.\n for sigma_column in sigma_columns:\n # input_irf_file['POINT SPREAD FUNCTION'].data[sigma_column] *= scale_params['scale']\n self._psf[sigma_column + '_new'] = scale_params['scale'] * self._psf[sigma_column + '_new']\n\n # Gradients error function\n elif config['angular_scaling']['err_func_type'] == \"gradient\":\n scale_params = config['angular_scaling'][\"gradient\"]\n for sigma_column in sigma_columns:\n self._psf[sigma_column + '_new'] = self._psf[sigma_column + '_new'] * (\n 1 + scale_params['scale'] * gradient(theta,\n scale_params['range_min'],\n scale_params['range_max'])\n )\n\n # Step error function\n elif config['angular_scaling']['err_func_type'] == \"step\":\n scale_params = config['angular_scaling'][\"step\"]\n break_points = list(zip(scale_params['transition_pos'],\n scale_params['transition_widths']))\n\n for sigma_column in sigma_columns:\n self._psf[sigma_column + '_new'] = self._psf[sigma_column + '_new'] * (\n 1 + scale_params['scale'] * step(theta, break_points)\n )\n\n else:\n raise ValueError(\"Unknown PSF scaling function {:s}\"\n .format(config['angular_scaling']['err_func_type']))\n # ---------------------------------\n\n # Recording the scaled PSF\n for i in range(0, len(sigma_columns)):\n sigma_name = 'sigma_{:d}'.format(i + 1)\n\n input_irf_file['POINT SPREAD FUNCTION'].data[sigma_name][0] = self._psf[sigma_name + '_new'].transpose()", "def load_and_standardize(self, snpreader2, snpreader3):\n\n S = snpreader2.sid_count\n N_original = snpreader2.iid_count\n\n iid_index_list = range(N_original - 1, 0, -2)\n snpreader3 = snpreader3[iid_index_list, :]\n\n for dtype in [np.float64, np.float32]:\n\n G2 = snpreader2.read(order=\"F\", force_python_only=True).val\n G2 = Unit().standardize(G2, block_size=10000, force_python_only=True)\n\n SNPs_floatF = snpreader2.read(\n order=\"F\", dtype=dtype, force_python_only=False\n ).val\n GF = Unit().standardize(SNPs_floatF)\n\n SNPs_floatC = snpreader2.read(\n order=\"C\", dtype=dtype, force_python_only=False\n ).val\n GC = Unit().standardize(SNPs_floatC)\n\n self.assertTrue(np.allclose(GF, G2, rtol=1e-05, atol=1e-05))\n self.assertTrue(np.allclose(GF, GC, rtol=1e-05, atol=1e-05))\n\n # testing selecting a subset of snps and iids\n snp_index_list = range(S - 1, 0, -2)\n\n G2x = snpreader2.read(order=\"F\", force_python_only=True).val\n G2x = G2x[iid_index_list, :][:, snp_index_list]\n G2x = Unit().standardize(G2x, block_size=10000, force_python_only=True)\n\n SNPs_floatFx = (\n snpreader3[:, snp_index_list]\n .read(order=\"F\", dtype=dtype, force_python_only=False)\n .val\n )\n GFx = Unit().standardize(SNPs_floatFx)\n self.assertTrue(np.allclose(GFx, G2x, rtol=1e-05, atol=1e-05))\n\n SNPs_floatCx = (\n snpreader3[:, snp_index_list]\n .read(order=\"C\", dtype=dtype, force_python_only=False)\n .val\n )\n GCx = Unit().standardize(SNPs_floatCx)\n self.assertTrue(np.allclose(GFx, G2x, rtol=1e-05, atol=1e-05))", "def standardize(image):\n \n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # initialize to array of zeros, with same shape as the image\n standardized_image = np.zeros(image.shape)\n\n # iterate over channels\n for c in range(image.shape[0]):\n # iterate over the `z` dimension\n for z in range(image.shape[3]):\n # get a slice of the image \n # at channel c and z-th dimension `z`\n image_slice = image[c,:,:,z]\n\n # subtract the mean from image_slice\n centered = image_slice - np.mean(image_slice)\n \n # divide by the standard 
deviation (only if it is different from zero)\n centered_scaled = centered / np.std(centered)\n\n # update the slice of standardized image\n # with the scaled centered and scaled image\n standardized_image[c, :, :, z] = centered_scaled\n\n ### END CODE HERE ###\n\n return standardized_image", "def __init__(self):\n super().__init__()\n self.metric = 'SEGVOL'", "def normalize_data(self):\r\n # quantify data for each column except classification column for noise reduction\r\n for column_header in self.classification_training_data.columns:\r\n if column_header == \"Class\":\r\n continue\r\n if column_header == \"Age\":\r\n bin_size = 2\r\n elif column_header == \"Ht\":\r\n bin_size = 5\r\n else:\r\n bin_size = 1\r\n for idx in self.classification_training_data.index:\r\n self.classification_training_data.at[idx, column_header] = math.floor(\r\n self.classification_training_data[column_header][idx] / bin_size) * bin_size", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def normalize_test_vector(self, data_vector, clf_type = \"generic\"):\n\n\t\tassert(clf_type in [\"generic\", \"specific\"])\n\n\t\tif clf_type == \"generic\":\n\t\t\tmean_per_dim = self.mean_per_dim_generic\n\t\t\tstd_per_dim = self.std_per_dim_generic\n\t\telse:\n\t\t\tmean_per_dim = self.mean_per_dim_specific\n\t\t\tstd_per_dim = self.std_per_dim_specific\n\n\n\t\tfor i in xrange(len(mean_per_dim)):\n\t\t\tdata_vector[i] -= mean_per_dim[i]\n\t\t\tdata_vector[i] /= std_per_dim[i]\n\t\t\n\t\n\t\treturn data_vector", "def __init__(self, threshold=0.5, num_points=11, *args, **kwargs):\n super(SSD_AP, self).__init__(threshold=threshold, num_points=num_points, *args, **kwargs)", "def data_preprocessing_TA(X):\n \n #Removing the mean and scaling the data\n X_prep=StandardScaler().fit_transform(X)\n #do here your preprocessing\n return X_prep", "def get_scaler(scaler):\n if scaler == 'standard':\n from sklearn.preprocessing import StandardScaler\n return StandardScaler()\n if scaler == 'minmax':\n from sklearn.preprocessing import MinMaxScaler\n return MinMaxScaler()", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def standard(self) -> RegionStandard:\n return RegionStandard.from_xml(\n as_xml(self.requester.parameter_request(region=self.name).text)\n )", "def transform(self, X, y=None, copy=None):\n check_is_fitted(self, 'scale_')\n\n copy = copy if copy is not None else self.copy\n\n X = check_array(X, accept_sparse='csr', copy=copy,\n estimator=self, dtype=FLOAT_DTYPES)\n\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. 
See docstring for motivation and alternatives.\")\n if self.scale_ is not None:\n inplace_column_scale(X, 1 / self.scale_)\n else:\n if self.with_mean:\n X -= self.mean_\n if self.with_std:\n X /= self.scale_\n return X", "def standardize_data(Xtrain,Xtest):\n \n ### Import modulates\n import numpy as np\n\n Xmean = np.nanmean(Xtrain,axis=0)\n Xstd = np.nanstd(Xtrain,axis=0)\n Xtest = (Xtest - Xmean)/Xstd\n Xtrain = (Xtrain - Xmean)/Xstd\n \n stdVals = (Xmean,Xstd)\n stdVals = stdVals[:]\n \n return Xtrain,Xtest,stdVals", "def __call__(self, q):\n # SASCalculator ignores the scale, so we add it in here\n yout = BasePDFGenerator.__call__(self, q)\n yout *= self.scale.value\n return yout", "def apply_svd(self, n):\n \n ## should really handle svd sensibly if we have multiple traces\n ## fitting multiple traces simultaneously requires they all have the\n ## same basis. Could pick the first trace to define the basis\n #svd_trace, s, self.rs_vectors = np.linalg.svd(self.traces[0], full_matrices=True)\n #transformed_traces = [svd_trace[:,:n]]\n #if len(self.traces > 1):\n # # haven't tested this at all it's probably a bug filled mess\n # # idea is to represent all the traces with the principle components\n # # defined by the first set of traces\n # transformed_traces += [self.rs_vectors.dot(t)[:,:n] for t in self.traces[1:]] \n\n # or look for svd like transformation to apply the the entire block of traces?\n\n # either way current approach is totally dodgey if fitting against \n # multiple svd transformed traces\n\n transformed_traces = []\n # wavelengths now correspond to principle components\n \n for trace in self.traces:\n U,s,V = np.linalg.svd(trace, full_matrices=True)\n transformed_traces.append(U[:,:n])\n \n self.traces = transformed_traces\n self.wavelengths = np.arange(n)", "def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n", "def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)", "def __preprocess(data, sample_size: int = 200000):\n mean = data[:sample_size].mean(axis=0)\n data -= mean\n stdev = data[:sample_size].std(axis=0)\n data /= stdev\n return data" ]
[ "0.66549325", "0.5813407", "0.57823", "0.57652164", "0.57482", "0.5628171", "0.56227326", "0.54852945", "0.54692763", "0.5413241", "0.5376666", "0.53717995", "0.5229775", "0.51795113", "0.5171377", "0.51348805", "0.51237977", "0.5088582", "0.5076588", "0.50762415", "0.50561696", "0.50408816", "0.5024084", "0.50040805", "0.4950862", "0.4945896", "0.49209926", "0.49128872", "0.49062297", "0.49034885", "0.49031383", "0.48682728", "0.48465073", "0.48451737", "0.4839483", "0.48262522", "0.48217642", "0.48196226", "0.48091918", "0.48041025", "0.48003605", "0.47975865", "0.47970217", "0.47915488", "0.47777992", "0.47597575", "0.47453487", "0.4739019", "0.47368878", "0.47348395", "0.47322842", "0.47122622", "0.4701505", "0.4691281", "0.4662757", "0.4645968", "0.46302176", "0.46295267", "0.46293905", "0.46191615", "0.46011996", "0.4596929", "0.4596364", "0.4592896", "0.4592829", "0.45892182", "0.4576282", "0.45726505", "0.45620152", "0.4561064", "0.45606625", "0.45583057", "0.4554595", "0.4554595", "0.4554595", "0.4554595", "0.4549819", "0.4545136", "0.45411348", "0.4528866", "0.4521281", "0.45181504", "0.45150596", "0.45099694", "0.45076546", "0.45063424", "0.45000833", "0.44953215", "0.44919205", "0.44835836", "0.44803786", "0.4478849", "0.4477418", "0.44679707", "0.44624454", "0.44570884", "0.44568777", "0.4456839", "0.44477367", "0.44462624" ]
0.64846426
1
Returns the market segment ID related to a customer, based on the customer invoice lines given as a parameter. Feature transformations are applied to the data included in the invoice lines. Once done, a machine learning algorithm is invoked to predict the customer's market segment.
def get_customer_marketSegment(self, df_invoice_line_customer): #------------------------------------------------------------------------- # Building data model #------------------------------------------------------------------------- self.data_transform(df_invoice_line_customer) #------------------------------------------------------------------------- # Customer features are built thanks to transformers. #------------------------------------------------------------------------- self.df_customers_features_build() #------------------------------------------------------------------------- # Customer market segment is predicted #------------------------------------------------------------------------- X_test = self._df_customers.values y_pred = self._classifier_model.predict(X_test) segmentID = y_pred[0] return segmentID
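
A minimal usage sketch for the prediction path above, assuming an object (called segment_classifier here, a hypothetical name) that exposes both get_customer_history_df_invoice_line() and get_customer_marketSegment() as in the surrounding snippets: the customer's invoice lines are retrieved first, then passed in to obtain the predicted market segment ID.

def predict_segment_for_customer(segment_classifier, customer_id):
    # Retrieve every invoice line recorded for this customer, then let the
    # classifier transform the features and predict the market segment.
    df_invoice_line_customer = \
        segment_classifier.get_customer_history_df_invoice_line(customer_id)
    segment_id = segment_classifier.get_customer_marketSegment(df_invoice_line_customer)
    return segment_id

# Example call (the customer ID value is illustrative):
# segment_id = predict_segment_for_customer(segment_classifier, 12350)
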
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_segment(self, df_invoice_line=None):\n if df_invoice_line is not None:\n self.data_transform(df_invoice_line) \n self.df_customers_features_build() \n else:\n pass\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n return y_pred[0]", "def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID", "def invoice(customer_id):\n encoder = request.url_rule.endpoint\n template = \"{{ encoder }}#{{ customer_id|%s }}\" % encoder\n return render_template_string(template, **locals())", "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n 
#------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') 
% (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def line_get_convert(self, line, part):\n ret = super(AccountInvoice, self).line_get_convert(line, part)\n\n if 'invl_id' in line:\n line_o = self.env['account.invoice.line'].browse(line['invl_id'])\n if line_o.segment_id and line_o.segment_id.id:\n ret.update({'segment_id': line_o.segment_id.id, 'segment_origin_id': line_o.segment_id.id})\n\n return ret", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 
'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def inv_line_new_characteristic_hashcode(self, invoice_line):\n return \"%s-%s-%s\"%(\n invoice_line['account_id'],\n invoice_line.get('analytic_account_id',\"False\"),\n invoice_line.get('date_maturity',\"False\"))", "def _get_account_analytic_invoice(self, cursor, user, picking, move_line):\n if move_line.purchase_line_id:\n return move_line.purchase_line_id.order_id.account_analytic_id.id\n return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)", "def get_customer_id_by_sale_id(sale_id):\n\n # your code", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"Closure to add single customer details\"\"\"\n with open(rental_items, 'r', newline='') as rentals:\n reader = csv.reader(rentals)\n add_invoice_items = partial(add_furniture, invoice_file, customer_name)\n for row in reader:\n add_invoice_items(item_code=row[0],\n item_description=row[1],\n item_monthly_price=row[2])\n return customer_rental", "def get_invoiced_lot_values(self):\n self.ensure_one()\n\n if self.state == 'draft':\n return []\n\n sale_orders = self.mapped('invoice_line_ids.sale_line_ids.order_id')\n stock_move_lines = sale_orders.mapped('picking_ids.move_lines.move_line_ids')\n\n # Get the other customer invoices and refunds.\n ordered_invoice_ids = sale_orders.mapped('invoice_ids') \\\n .filtered(lambda i: i.state not in ['draft', 'cancel']) \\\n .sorted(lambda i: (i.invoice_date, i.id))\n\n # Get the position of self in other customer invoices and refunds.\n self_index = None\n i = 0\n for invoice in ordered_invoice_ids:\n if invoice.id == self.id:\n self_index = i\n break\n i += 1\n\n # Get the previous invoice if any.\n previous_invoices = ordered_invoice_ids[:self_index]\n last_invoice = previous_invoices[-1] if len(previous_invoices) else None\n\n # Get the incoming and outgoing sml between self.invoice_date and the previous invoice (if any).\n write_dates = [wd for wd in self.invoice_line_ids.mapped('write_date') if wd]\n self_datetime = max(write_dates) if write_dates else None\n last_write_dates = last_invoice and [wd for wd in last_invoice.invoice_line_ids.mapped('write_date') if wd]\n last_invoice_datetime = max(last_write_dates) if last_write_dates else None\n\n def _filter_incoming_sml(ml):\n if ml.state == 'done' and ml.location_id.usage == 'customer' and ml.lot_id:\n if 
last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n def _filter_outgoing_sml(ml):\n if ml.state == 'done' and ml.location_dest_id.usage == 'customer' and ml.lot_id:\n if last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n incoming_sml = stock_move_lines.filtered(_filter_incoming_sml)\n outgoing_sml = stock_move_lines.filtered(_filter_outgoing_sml)\n\n # Prepare and return lot_values\n qties_per_lot = defaultdict(lambda: 0)\n if self.type == 'out_refund':\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n else:\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n lot_values = []\n for lot_id, qty in qties_per_lot.items():\n if float_is_zero(qty, precision_rounding=lot_id.product_id.uom_id.rounding):\n continue\n lot_values.append({\n 'product_name': lot_id.product_id.display_name,\n 'product_color': lot_id.x_studio_color.x_name,\n 'quantity': qty,\n 'uom_name': lot_id.product_uom_id.name,\n 'lot_name': lot_id.name\n })\n # Here I sort all the lots remaining in lot_values by color\n lot_values.sort(key=lambda r: r['product_color'], reverse=False)\n return lot_values", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def get_customer_segments(self, date):\n date = current_date_to_day().isoformat() if date is None else date\n self.products = pd.merge(self.products,\n self.cs.fetch(start_date=convert_dt_to_day_str(date))[['client', 'segments']],\n on='client', how='left')", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in 
line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"\n Loop through rental_items file and append each row to curried invoice_file with same\n customer_name\n \"\"\"\n customer = partial(add_furniture, invoice_file=invoice_file, customer_name=customer_name)\n with open(rental_items, \"r\") as rental_csv:\n for row in csv.reader(rental_csv):\n customer(item_code=row[0], item_description=row[1], item_monthly_price=row[2])\n return customer_rental", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def onchange_invoice_id(self):\n # self.invoice_id = False\n # self.base_amount = 0.0\n # self.wh_src_rate = 5.0\n if self._context is None:\n context = {}\n res = {}\n inv_obj = self.env['account.invoice']\n if not self.invoice_id:\n return {'value': {\n 'invoice_id': False,\n 'base_amount': 0.0,\n 'wh_src_rate': 0.0,\n 'wh_amount': 0.0, }\n }\n\n inv_brw = inv_obj.browse(self.invoice_id.id)\n base_amount = self.base_amount or inv_brw.amount_untaxed\n wh_src_rate = self.wh_src_rate or inv_brw.wh_src_rate or 5.0\n wh_amount = base_amount * wh_src_rate / 100.0\n res = {'value': {\n 'base_amount': base_amount,\n 
'wh_src_rate': wh_src_rate,\n 'wh_amount': wh_amount,\n }\n }\n return res", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:\n customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4\n return customer_id", "def _get_customer_ID(self, file):\n tree = ET.parse(file)\n root = tree.getroot()\n \n try:\n customer_ID = root.find('JournalReport').find('SaleEvent').find('TransactionDetailGroup').find('TransactionLine').find('CustomerID').find('PersonalID').text\n except:\n time.sleep(1)\n customer_ID = self._get_customer_ID(file)\n\n return customer_ID", "def line_segment_intersection(line1,\n line2):\n a = float(line1[0][0]*line1[1][1] - line1[0][1]*line1[1][0])\n b = float(line1[0][1] - line1[1][1])\n c = float(line1[1][0] - line1[0][0])\n\n d = float(line2[0][0]*line2[1][1] - line2[0][1]*line2[1][0])\n e = float(line2[0][1] - line2[1][1])\n f = float(line2[1][0] - line2[0][0])\n\n prod = b*f - c*e\n if abs(prod) < 1e-10:\n return (np.inf, np.inf)\n\n xc = (d*c - a*f) / prod\n yc = (a*e - b*d) / prod\n\n sign_x1 = (xc - line1[0][0])*(xc - line1[1][0])\n sign_y1 = (yc - line1[0][1])*(yc - line1[1][1])\n\n if sign_x1 > 1e-10:\n return (np.inf, np.inf)\n if sign_x1 < 1e-10:\n if sign_y1 > 1e-10:\n return (np.inf, np.inf)\n\n sign_x2 = (xc - line2[0][0])*(xc - line2[1][0])\n sign_y2 = (yc - line2[0][1])*(yc - line2[1][1])\n\n if sign_x2 > 1e-10:\n return (np.inf, np.inf)\n if sign_x2 == 1e-10:\n if sign_y2 > 1e-10:\n return (np.inf, np.inf)\n return (int(xc), int(yc))", "def createCustomerID(self):\n\n customerID = self._df_invoice_original.CustomerID.max()\n customerID += 1\n return int(customerID)", "def get_document_number(self, txt_line, inv_type):\n number = 0\n if txt_line.invoice_id.type in ['in_invoice', 'in_refund']:\n if not txt_line.invoice_id.supplier_invoice_number:\n raise exceptions.except_orm(\n _('Invalid action !'),\n _(\"Unable to make txt file, because the bill has no\"\n \" reference number free!\"))\n else:\n number = self.get_number(\n txt_line.invoice_id.supplier_invoice_number.strip(),\n inv_type, 20)\n elif txt_line.invoice_id.number:\n number = 
self.get_number(\n txt_line.invoice_id.number.strip(), inv_type, 20)\n return number", "def single_customer(customer_name, invoice_file):\n def single_customer_rentals(rental_items):\n add_item = partial(add_furniture, customer_name=customer_name,\n invoice_file=invoice_file)\n with open(rental_items, \"r\") as file:\n for row in csv.reader(file):\n add_item(item_code=row[0], item_description=row[1],\n item_monthly_price=row[2])\n return single_customer_rentals", "def get_all_sales_ids_for_customer_ids():\n\n # your code", "def discretize_line(p0, p1, segments):\n p0, p1 = Point(p0), Point(p1)\n dx, dy = p1.x - p0.x, p1.y - p0.y\n vtx = [Point(p0).as_tuple()]\n if isinstance(segments, list):\n for ds in segments:\n x0 = p0.x + ds * dx\n y0 = p0.y + ds * dy\n vtx.append((x0, y0))\n return vtx\n for i in range(segments):\n ds = (i + 1) / segments\n x0 = p0.x + ds * dx\n y0 = p0.y + ds * dy\n vtx.append((x0, y0))\n return vtx", "def predict_if_customer_purchases_next_quarter(self, customer_id: Union[float, list], ignore_if_not_exists=False) -> Union[int, pd.Series]:\n\n if isinstance(customer_id, float):\n\n \n if self._load_model:\n y_labels = self._predict_labels()\n\n if customer_id in y_labels.index:\n return int(y_labels.loc[customer_id])\n \n elif ignore_if_not_exists:\n return None\n \n else:\n raise ValueError('There is no prediction for the given customer_id, as the customer seems to not exist.')\n else:\n raise NoTrainedModelError('There is no trained model to make predictions with, please call initialize_purchase_predictor() first or set load_existing_model to True.')\n \n elif isinstance(customer_id, list):\n # check if all elements in the list are float\n if all(isinstance(n, float) for n in customer_id):\n pass\n\n else:\n raise TypeError('One or more customer_id elements in the given list have the wrong type (must be of type float).')\n\n if self._load_model:\n y_labels = self._predict_labels()\n\n # for each customer id get the label and store it into a series\n customers = []\n for customer in customer_id:\n if customer in y_labels.index:\n customers.append(int(y_labels.loc[customer]))\n\n elif ignore_if_not_exists:\n customers.append(np.nan)\n \n else:\n raise ValueError('There are no recommendations for the given customer_id, as the customer seems to not exist.')\n \n customer_predictions = pd.Series(data=customers, index=customer_id)\n return customer_predictions\n else:\n raise NoTrainedModelError('There is no trained model to make predictions with, please call initialize_purchase_predictor() first or set load_existing_model to True.')\n\n else:\n raise TypeError(f'customer_id has the wrong type: {type(customer_id).__name__} given but expected float.')", "def invoice_lines(self, invoice_lines):\n if self.local_vars_configuration.client_side_validation and invoice_lines is None: # noqa: E501\n raise ValueError(\"Invalid value for `invoice_lines`, must not be `None`\") # noqa: E501\n\n self._invoice_lines = invoice_lines", "def get_customer_id(self, customer):\n\n\t\t# connect to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept 
sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def get_customer_id(self, customer):\n\n\t\t# connect to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 
'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def get_customer_count(self):\n return 
self._df_invoice_original.CustomerID.unique().shape[0]", "def _get_cus_info(self):\n label_enc = LabelEncoder()\n customer_info = self._inv.drop_duplicates(['customer_code'], keep='last')\n customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',\n 'sales_cen_name', 'sales_region_name', 'province',\n 'city', 'district', 'customer_type', 'is_usable', 'channel_level']]\n customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])\n customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])\n customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])\n customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])\n customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])\n customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])\n customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])\n customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])\n customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])\n customer_info_encoded = customer_info.drop(\n columns=['customer_name', 'sales_cen_code', 'sales_cen_name',\n 'sales_region_name', 'province', 'city', 'district']\n ).set_index('customer_code')\n customer_info.set_index('customer_code', inplace=True)\n customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))\n return customer_info, customer_info_encoded", "def invoice(self, invoice_number):\r\n return inv.Invoice(self, invoice_number)", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n\n res.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return res", "def invoice(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/invoices/invoices/{params['invoice_id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n invoice = result[\"response\"][\"result\"][\"invoice\"]\n invoice_obj = FreshbooksInvoice(\n account_id=invoice['accountid'],\n customerid=invoice['customerid'], \n invoice_id=invoice['invoiceid'],\n currency_code=invoice['currency_code'],\n language=invoice['language'],\n terms=invoice['terms'],\n discount_value=invoice['discount_value'],\n discount_amount=invoice['discount_total']['amount'],\n invoice_number=invoice['invoice_number'],\n po_number=invoice['po_number'],\n amount=invoice['amount']['amount'],\n code=invoice['amount']['code'],\n create_date=invoice['create_date']\n )\n return invoice_obj.__dict__", "def _get_taxes_invoice(self, cursor, user, move_line, type):\n if move_line.purchase_line_id:\n return [x.id for x in move_line.purchase_line_id.order_id.taxes_id]\n return super(stock_picking, self)._get_taxes_invoice(cursor, user, move_line, type)", "def action_create_invoice(self):\n if self.partner_id:\n supplier = self.partner_id\n else:\n supplier = self.partner_id.search(\n [(\"name\", \"=\", \"Salon Default Customer\")])\n lines = []\n product_id = self.env['product.product'].search(\n [(\"name\", \"=\", \"Salon Service\")])\n for records in self.order_line_ids:\n if product_id.property_account_income_id.id:\n income_account = product_id.property_account_income_id.id\n elif product_id.categ_id.property_account_income_categ_id.id:\n income_account = 
product_id.categ_id.\\\n property_account_income_categ_id.id\n else:\n raise UserError(\n _(\"Please define income account for this product: \"\n \"'%s' (id:%d).\") % (product_id.name, product_id.id))\n value = (0, 0, {\n 'name': records.service_id.name,\n 'account_id': income_account,\n 'price_unit': records.price,\n 'quantity': 1,\n 'product_id': product_id.id,\n })\n lines.append(value)\n invoice_line = {\n 'move_type': 'out_invoice',\n 'partner_id': supplier.id,\n 'invoice_user_id': self.env.user.id,\n 'invoice_origin': self.name,\n 'invoice_line_ids': lines,\n }\n inv = self.env['account.move'].create(invoice_line)\n action = self.env.ref('account.action_move_out_invoice_type',\n raise_if_not_found=False)\n result = {\n 'name': action.name,\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'target': 'current',\n 'res_id': inv.id,\n 'res_model': 'account.move',\n }\n self.inv_stage_identifier = True\n self.stage_id = 3\n invoiced_records = self.env['salon.order'].search(\n [('stage_id', 'in', [3, 4]), ('chair_id', '=', self.chair_id.id)])\n total = 0\n for rows in invoiced_records:\n invoiced_date = str(rows.date)\n invoiced_date = invoiced_date[0:10]\n if invoiced_date == str(date.today()):\n total = total + rows.price_subtotal\n self.chair_id.collection_today = total\n self.update_number_of_orders()\n return result", "def invoice_onsettled(invoice):\n\n db = current.db\n s3db = current.s3db\n\n # Look up claim, invoice number, program and billing\n btable = s3db.fin_voucher_billing\n ctable = s3db.fin_voucher_claim\n itable = s3db.fin_voucher_invoice\n ptable = s3db.fin_voucher_program\n join = [ptable.on(ptable.id == ctable.program_id),\n btable.on(btable.id == ctable.billing_id),\n itable.on(itable.id == ctable.invoice_id),\n ]\n query = (ctable.invoice_id == invoice.id) & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.id,\n ctable.program_id,\n ctable.billing_id,\n ctable.pe_id,\n btable.date,\n itable.invoice_no,\n ptable.name,\n ptable.organisation_id,\n join = join,\n limitby = (0, 1),\n ).first()\n if not row:\n return\n program = row.fin_voucher_program\n billing = row.fin_voucher_billing\n claim = row.fin_voucher_claim\n invoice_no = row.fin_voucher_invoice.invoice_no\n\n error = None\n\n # Look up the provider organisation\n pe_id = claim.pe_id\n otable = s3db.org_organisation\n provider = db(otable.pe_id == pe_id).select(otable.id,\n otable.name,\n limitby = (0, 1),\n ).first()\n\n from .helpers import get_role_emails\n provider_accountants = get_role_emails(\"PROVIDER_ACCOUNTANT\", pe_id)\n if not provider_accountants:\n error = \"No provider accountant found\"\n\n if not error:\n # Lookup the template variables\n base_url = current.deployment_settings.get_base_public_url()\n appname = current.request.application\n data = {\"program\": program.name,\n \"date\": btable.date.represent(billing.date),\n \"invoice\": invoice_no,\n \"organisation\": provider.name,\n \"url\": \"%s/%s/fin/voucher_claim/%s\" % (base_url, appname, claim.id),\n }\n\n # Send the email notification\n from .notifications import CMSNotifications\n error = CMSNotifications.send(provider_accountants,\n \"InvoiceSettled\",\n data,\n module = \"fin\",\n resource = \"voucher_invoice\",\n )\n if error:\n msg = \"%s could not be notified about invoice settlement: %s\"\n current.log.error(msg % (provider.name, error))\n else:\n msg = \"%s notified about invoice settlement\"\n current.log.debug(msg % provider.name)", "def action_invoice_create(self, cr, uid, ids, grouped=False, 
states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id", "def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,", "def _compute_tax_id(self):\n for inv in self:\n for line in inv.invoice_line_ids:\n line.tax_ids = line._get_computed_taxes()\n\n inv._recompute_dynamic_lines(recompute_all_taxes=True, recompute_tax_base_amount=True)", "def linestringToKicad(linestring):\n lineChain = pcbnew.SHAPE_LINE_CHAIN()\n lineChain.SetClosed(True)\n for c in linestring.coords:\n lineChain.Append(int(c[0]), int(c[1]))\n return lineChain", "def invoice_line_create(self, invoice_id, qty):\n invoice_lines = self.env['account.invoice.line']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'purchase_line_id': line.id})\n invoice_lines |= self.env['account.invoice.line'].create(vals)\n return invoice_lines", "def test_invoice_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n self._create_model(\"invoice\", data, [])\n self.assertIsNotNone(id)", "def draw_line_set(canvas, position, nib_width, partitions, pagesize):\n offset = position\n canvas.line(1*mm, offset, pagesize[0], offset)\n for i in (float(x) for x in partitions.split(\",\")):\n offset += i * nib_width * mm\n canvas.line(1*mm, offset, pagesize[0], offset)\n return offset", "def generate_new_visit(self):\n if self.consecutive:\n customer_id = np.random.choice(\n self.customerIds, 1\n ) # choose a customer at random\n insured = 
self.Customers[self.Customers[\"customer_id\"] == customer_id[0]][\n \"insurance\"\n ].values[\n 0\n ] # does the customer have insurance?\n experiment_id = self.Customers[\n self.Customers[\"customer_id\"] == customer_id[0]\n ][\"experiment_id\"].values[\n 0\n ] # does the customer have insurance?\n\n event_list = (\n self.billing_choose_dates()\n ) # generate dates associated with this invoice\n cpt_code = random.sample(self.CPTCodes, 1)[0]\n date_of_service = str(event_list.values[0][0])\n created_on = str(event_list.values[1][0])\n date_of_eob = str(event_list.values[2][0])\n date_of_provider_adjustment = str(event_list.values[3][0])\n date_of_patient_payment = str(event_list.values[4][0])\n # generate a new invoice\n (invoice_id, charge_amount) = self.generate_new_invoice(\n created_on, date_of_service, customer_id, cpt_code\n )\n # generate subsequent EOB (i.e. copay, EOB adjustment, EOB payment)\n remaining_amount = self.generate_eob(\n date_of_service,\n date_of_eob,\n insured,\n invoice_id,\n cpt_code,\n charge_amount,\n )\n # generate provider adjustments\n remaining_amount = self.generate_provider_adjustment(\n date_of_provider_adjustment, invoice_id, cpt_code, remaining_amount\n )\n # generate a possible payment from the patient\n remaining_amount = self.generate_patient_payment(\n date_of_patient_payment,\n invoice_id,\n cpt_code,\n remaining_amount,\n experiment_id,\n )\n # record the remaining amounts in a separate table.\n self.record_remaining_amount(\n date_of_patient_payment, invoice_id, cpt_code, remaining_amount\n )\n return True\n else:\n print(\"Error generating new invoice- customerIds aren't consecutive\")", "def get_customer_id_by_sale_id_from_table(table, sale_id):\n\n # your code", "def get_customer_id_by_sale_id(sale_id):\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_customer_id_by_sale_id_from_table(sales_data, sale_id)", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': self.product_id.product_tmpl_id._get_product_accounts()['stock_input'].id,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.taxes_id.ids)],\n 'account_analytic_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n }\n return res", "def get_invoice(self):\n\n # Check if unclosed invoice for the client exists\n old_inv = connection.Kinko.find_one({'cl': self.cl, 'tid': None,\n 'typ': TYPE_MAP[self.tab_type]})\n\n inv_num = None\n # If it does, update its values and update packages\n if old_inv:\n old_inv.dt = datetime.datetime.today()\n old_inv.range.lt = self.q_dict[\"cs.sd\"].get(\"$lt\", None)\n old_inv.save()\n\n inv_num = old_inv.num\n\n else:\n #kinko dict to be updated in Kinko Collection.\n kdict = {\n \"amt\": 0.0,\n \"cl\": unicode(self.cl),\n \"dt\": datetime.datetime.today(),\n \"typ\": TYPE_MAP[self.tab_type],\n \"range\": {\"lt\": self.q_dict[\"cs.sd\"].get(\"$lt\", None),\n \"gt\": self.q_dict[\"cs.sd\"].get(\"$gte\", None),\n }\n }\n\n k = Kinko(kdict)\n\n k_count = 1\n\n #the get num method of Kinko model generates the unique no for new kinko\n k[\"num\"] = self.get_knum(1)\n while connection.Kinko.collection.find({\"num\": k.num}).count() > 0:\n k[\"num\"] = self.get_knum(k_count+1)\n k_count += k_count\n\n connection.Kinko(k).save()\n\n inv_num = k['num']\n\n if 
inv_num:\n #after creating a new document in Kinko all packages are updated.\n connection.Package.collection.update(self.q_dict, {'$set': {'inv.num': inv_num}}, safe=True, multi=True)\n \n #Aggrigation of remitted amount for requested client\n non_invoiced = kinko_map_reduce(inv_num, TYPE_MAP[self.tab_type])\n\n if len(non_invoiced) == 0:\n return False\n else:\n inv = connection.Kinko.find_one({'num': inv_num})\n if inv:\n inv.amt = non_invoiced[0]['value']['amt']\n inv.save()\n return inv\n else:\n return False\n else:\n return False", "def invoice(self, start, end):\n\n if self.invoice_type is None:\n invoice_type = self.conn.config[\"main\"][\"invoice:object\"]\n if \":\" not in invoice_type:\n raise AttributeError(\"Invoice configuration incorrect! %s\" % invoice_type)\n module, call = invoice_type.split(\":\")\n _package = __import__(module, globals(), locals(), [ call ])\n\n funct = getattr(_package, call)\n self.invoice_type = funct\n config = self.conn.config[\"invoice_object\"]\n invoice = self.invoice_type(self, config)\n return invoice", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context)\n\n invoice_vals.update({\n 'partner_shipping_id': order.partner_shipping_id.id,\n })\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or 
purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None):\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None)\n \n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result", "def get_all_customer_ids():\n\n # your code", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. 
\n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)", "def cinters_segment(self, s):\r\n if self.contains_point(s.start[0], s.start[1]) == self.contains_point(s.end[0], s.end[1]):\r\n # The segment doesn't cross the contour of the polygon\r\n return None\r\n else:\r\n if self.__segments == None:\r\n self.__load_segments()\r\n \r\n for segment in self.__segments:\r\n p = segment.inters_segment(s)\r\n if p != None:\r\n return p\r\n \r\n return None", "def createLineSegment(self):\n return _libsbml.Curve_createLineSegment(self)", "def create_or_find_b2b_invoices_and_process_ept(self, row, sale_order, invoice_date, tax):\n\n vat_number = row.get('Buyer Tax Registration', False)\n invoice_number = row.get('VAT Invoice Number', False)\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n if not invoice.partner_id.vat:\n invoice.partner_id.vat = vat_number\n\n payments_lines = []\n if invoice.invoice_payments_widget != 'false':\n payments_dict = json.loads(invoice.invoice_payments_widget)\n payments_content = payments_dict.get('content', [])\n for line in payments_content:\n payments_lines.append(line.get('payment_id', False))\n\n invoice_line = invoice.mapped('invoice_line_ids').filtered(\\\n lambda line: line.tax_ids != tax)\n if invoice_line:\n invoice.button_draft()\n invoice.write({'ref': invoice_number, 'date': invoice_date})\n\n if len(invoice_line) > 1:\n for line in invoice_line:\n line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n else:\n invoice_line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n\n invoice.with_context({'check_move_validity': False})._recompute_tax_lines( \\\n recompute_tax_base_amount=True)\n invoice.action_post()\n for line in payments_lines:\n invoice.js_assign_outstanding_line(line)\n\n return True", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n 
#-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def getNumOfInvoice(self,id,start,finish):\n self.calls += 1\n invoice = self.getResponse(self.buildParams(id,start,finish))\n if not self.isNumeric(invoice):\n middle = self.diveDates(start,finish)\n plusMiddle = middle + timedelta(days = 1)\n middle = self.removeHours(middle)\n plusMiddle = self.removeHours(plusMiddle)\n invoice = self.getNumOfInvoice(id,start,middle)+\\\n self.getNumOfInvoice(id,plusMiddle,finish)\n return invoice", "def customer_id(self):\n return self._customer_id", "def IRIS_ARC_IC(input, clients):\n \n if input[clients + '_ic_auto'] == 'Y':\n global events \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n address = eventpath\n elif input[clients + '_ic'] != 'N':\n address = input[clients + '_ic']\n \n events, address_events = quake_info(address, 'info')\n \n for i in range(0, len(events)):\n sta_ev = read_station_event(address_events[i])\n ls_saved_stas = []\n \n for j in range(0, len(sta_ev[0])):\n if clients == sta_ev[0][j][13]:\n station_id = sta_ev[0][j][0] + '.' + sta_ev[0][j][1] + '.' + \\\n sta_ev[0][j][2] + '.' 
+ sta_ev[0][j][3]\n ls_saved_stas.append(os.path.join(address_events[i], 'BH_RAW',\\\n station_id))\n \n print 'event: ' + str(i+1) + '/' + str(len(events)) + \\\n ' -- ' + clients\n print '------------------------------------'\n inst_correct(input, ls_saved_stas, address_events[i], clients) \n \n print \"**********************************\"\n print clients.upper() + ' Instrument Correction is DONE'\n print \"**********************************\"", "def getInvoice(self):\n return self.base.get(\"invoice\", [])", "def customer_acccounting(customer_orders):", "def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def customer_id(self) -> str:\n return self._customer_id", "def customer_id(self, customer_id: str):\n self._customer_id = customer_id", "def _compute_fp_line_ids(self): \n #Partie produit final\n list_val_fp = []\n for fp in self.wo_id.fp_draft_ids:\n if fp.type_qty == 'variable':\n need_qty = fp.efficient_unit_qty * self.quantity or self.quantity\n else:\n need_qty = fp.uom_qty\n \n vals = {\n 'wo_id': self.wo_id.id,\n 'rm_fp_draft_id': fp.id,\n 'product_id': fp.product_id.id,\n 'need_qty': need_qty,\n 'type_rm_fp': 'fp',\n 'wiz_qty': self.quantity,\n 'date': self.date,\n }\n list_val_fp.append((0, 0, vals))\n \n self.fp_line_ids = list_val_fp", "def single_customer(customer_name, customer_file):\n def add_rentals(rental_file):\n with open(customer_file) as rental_csv:\n reader = csv.reader(rental_csv)\n\n add_item = partial(add_furniture, customer_name=customer_name, invoice_file=rental_file)\n\n for row in reader:\n add_item(item_code=row[1], item_description=row[2], item_monthly_price=row[3])\n\n return add_rentals", "def _get_line_no_(obj, line):\n \n iNo = 0\n for item in obj.order_line:\n iNo += 1\n if (item.id == line.id):\n break\n \n return iNo", "def classify_incidents(in_features, date_field, report_location, repeatdist,\n spatial_bands, temporal_bands, out_lines_dir,\n out_lines_name, *args):\n try:\n # Fix for potential issue with xlsx files as 
report locations\n if not path.isdir(report_location):\n report_location = path.dirname(report_location)\n\n # Build sorted lists of band values\n spatial_bands = [float(b) for b in spatial_bands.split(';')]\n temporal_bands = [float(b) for b in temporal_bands.split(';')]\n\n repeatdist = float(repeatdist)\n spatial_bands.append(repeatdist)\n\n spatial_bands = list(set(spatial_bands))\n temporal_bands = list(set(temporal_bands))\n\n spatial_bands.sort()\n temporal_bands.sort()\n\n arcpy.env.overwriteOutput = True\n\n # Report run time used for file names\n now = dt.strftime(dt.now(), \"%Y-%m-%d_%H-%M-%S\")\n now_nice = dt.strftime(dt.now(), \"%Y-%m-%d %H:%M:%S\")\n\n # Check for and delete existing fields necessary for classification\n reset_fields(in_features)\n\n # Get name of OID field\n oidname = arcpy.Describe(in_features).oidFieldName\n\n # Get sorted list of unique incident date values\n with arcpy.da.SearchCursor(in_features, date_field) as rows:\n date_vals = [row[0] for row in rows]\n\n date_vals = list(set(date_vals))\n date_vals.sort()\n\n # Range of incident dates\n min_date = date_vals[0]\n max_date = date_vals[-1]\n\n # Keep track of origins and nrs\n oids = []\n nrids = []\n rids = []\n\n # Connecting line segments and table rows\n new_lines = []\n new_rows = []\n\n # Build empty dictionary to hold type tallies\n type_counts = {}\n for sband in spatial_bands:\n type_counts[sband] = {}\n for tband in temporal_bands:\n type_counts[sband][tband] = {'oids': [],\n 'nrids': [],\n 'rids': []}\n\n # Value lists for half life calculations\n all_distances = {}\n for sband in spatial_bands:\n all_distances[sband] = []\n\n all_lives = {}\n for tband in temporal_bands:\n all_lives[tband] = []\n\n found_connections = []\n\n # Build table of all records within the max spatial band of anther feature\n near_table = arcpy.GenerateNearTable_analysis(in_features, in_features, search_radius=temporal_bands[-1], closest='ALL', method='GEODESIC')\n\n # Identify and process relevent near features\n with arcpy.da.SearchCursor(near_table, field_names=['IN_FID', 'NEAR_FID', 'NEAR_DIST']) as nearrows:\n\n # Process each identified connection within the spatial bands\n for nearrow in nearrows:\n dist = nearrow[2]\n if not dist <= spatial_bands[-1]:\n continue\n\n links= []\n\n # Find the two features that are part of the connection\n where_clause = \"\"\"{} in ({},{})\"\"\".format(oidname, nearrow[0], nearrow[1])\n fields = [oidname, date_field, z_value_field, 'SHAPE@X','SHAPE@Y']\n with arcpy.da.UpdateCursor(in_features, field_names=fields, where_clause=where_clause) as cur_link:\n for feat in cur_link:\n # Calculate the z values of each incident in the pair\n zval = feat[1] - min_date\n feat[2] = zval.days\n cur_link.updateRow(feat)\n links.append([feat[0], feat[1], feat[3], feat[4], feat[2]])\n\n # Identify which feature is the oldest and id it as the source\n if links[0][1] > links[1][1]:\n oid, odate, ox, oy, oz = links[1]\n fid, fdate, fx, fy, fz = links[0]\n\n else:\n oid, odate, ox, oy, oz = links[0]\n fid, fdate, fx, fy, fz = links[1]\n\n # test for new connection\n if (oid, fid) in found_connections:\n continue\n\n # Calculate the days between the two dates\n datediff = fdate - odate\n daydiff = datediff.days\n\n # only process rows within defined temporal bands\n if daydiff > temporal_bands[-1]:\n continue\n\n # Identify the spatial bands that are covered by this relationship and create a connecting line feature\n link_found = False\n for sband in spatial_bands:\n if dist <= sband:\n for 
tband in temporal_bands:\n if daydiff <= tband:\n if not link_found:\n # track distances and lives for half measures\n all_distances[sband].append(dist)\n all_lives[tband].append(daydiff)\n incident_sband = sband\n incident_tband = tband\n\n link_found = True\n\n # id classification\n if oid not in type_counts[sband][tband]['oids']:\n type_counts[sband][tband]['oids'].append(oid)\n if dist <= spatial_bands[0]:\n if fid not in type_counts[sband][tband]['rids']:\n type_counts[sband][tband]['rids'].append(fid)\n elif fid not in type_counts[sband][tband]['nrids']:\n type_counts[sband][tband]['nrids'].append(fid)\n\n if link_found:\n found_connections.append((oid, fid))\n\n # create connecting line from x, y, z values of two pts\n end = arcpy.Point(X=fx, Y=fy, Z=fz)\n start = arcpy.Point(X=ox, Y=oy, Z=oz)\n vertices = arcpy.Array([start, end])\n feature = arcpy.Polyline(vertices, None, True, False)\n new_lines.append([fid, oid, dist, daydiff, incident_sband, incident_tband, feature])\n\n # Delete near table\n arcpy.Delete_management(near_table)\n\n # Create feature class for connecting lines\n sr = arcpy.Describe(in_features).spatialReference\n connectors = arcpy.CreateFeatureclass_management(out_lines_dir,\n out_lines_name,\n 'POLYLINE',\n has_z='ENABLED',\n spatial_reference=sr)\n arcpy.AddField_management(connectors, 'FEATUREID', \"LONG\")\n arcpy.AddField_management(connectors, origin_feat_field, \"LONG\")\n arcpy.AddField_management(connectors, dist_orig_field, \"FLOAT\")\n arcpy.AddField_management(connectors, 'RPTDAYS', \"FLOAT\")\n arcpy.AddField_management(connectors, spatial_band_field, \"FLOAT\")\n arcpy.AddField_management(connectors, temporal_band_field, \"FLOAT\")\n\n # Insert connecting line features from the array of values\n fields = ['FEATUREID', origin_feat_field, dist_orig_field, 'RPTDAYS', spatial_band_field, temporal_band_field, 'SHAPE@']\n with arcpy.da.InsertCursor(connectors, fields) as rows:\n for new_line in new_lines:\n rows.insertRow(new_line)\n\n # Manage classification fields\n fieldnames = []\n for sband in spatial_bands:\n for tband in temporal_bands:\n fieldnames.append('s{}t{}'.format(int(sband), int(tband)))\n\n cur_fields = [f.name for f in arcpy.ListFields(in_features)]\n for fieldname in fieldnames:\n if fieldname in cur_fields:\n arcpy.DeleteField_management(in_features, fieldname)\n arcpy.AddField_management(in_features, fieldname, 'TEXT', field_length=2)\n\n # Classify & count incidents by type\n for sband in spatial_bands:\n for tband in temporal_bands:\n band = type_counts[sband][tband]\n type_counts[sband][tband]['oids'] = [id for id in band['oids'] if id not in band['nrids'] and id not in band['rids']]\n type_counts[sband][tband]['nrids'] = [id for id in band['nrids'] if id not in band['rids']]\n\n fields = [\"OID@\", date_field, z_value_field]\n fields.extend(fieldnames)\n\n with arcpy.da.UpdateCursor(in_features, fields) as rows:\n inc_count = 0\n for row in rows:\n inc_count += 1\n\n # calc z value if missing\n if not row[2]:\n zval = row[1] - min_date\n row[2] = zval.days\n\n classifications = []\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if row[0] in type_counts[sband][tband]['nrids']:\n classifications.append('NR')\n elif row[0] in type_counts[sband][tband]['rids']:\n classifications.append('R')\n elif row[0] in type_counts[sband][tband]['oids']:\n classifications.append('O')\n else:\n classifications.append(None)\n row[3:] = classifications\n\n rows.updateRow(row)\n\n # Build empty dictionary to hold spatial and temporal 
band tallies\n band_counts = {}\n for sband in spatial_bands:\n band_counts[sband] = {}\n for tband in temporal_bands:\n band_counts[sband][tband] = 0\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if sband == spatial_bands[0]:\n band_counts[sband][tband] = len(type_counts[sband][tband]['rids'])\n else:\n band_counts[sband][tband] = len(type_counts[sband][tband]['nrids'])\n\n # Get unit of feature class spatial reference system\n try:\n unit = units[sr.linearUnitName]\n except KeyError:\n unit = ''\n\n # Get half-life and half-distance\n test_distances = []\n half_distances = {}\n for sband in spatial_bands:\n test_distances.extend(all_distances[sband])\n test_distances.sort()\n if len(test_distances) > 0:\n half_distances[sband] = test_distances[int(len(test_distances)/2)]\n else:\n half_distances[sband] = 'Not Calculated'\n\n test_lives = []\n half_lives = {}\n for tband in temporal_bands:\n test_lives.extend(all_lives[tband])\n test_lives.sort()\n if len(test_lives) > 0:\n half_lives[tband] = test_lives[int(len(test_lives)/2)]\n else:\n half_lives[tband] = 'Not Calculated'\n\n # Build report content\n report_header = ('Repeat and Near Repeat Incident Summary\\n'\n 'Created {}\\n'.format(now_nice))\n\n data_info = ('Data Source: {}\\n'\n 'Incident Date Range: {} - {}\\n'\n '# Incidents Processed: {}'.format(in_features, min_date, max_date, inc_count))\n\n## inc_type_reports = ''\n## console_type_rpts = ''\n##\n## for sband in spatial_bands:\n## for tband in temporal_bands:\n## cnt_o = len(type_counts[sband][tband]['oids'])\n## cnt_n = len(type_counts[sband][tband]['nrids'])\n## cnt_r = len(type_counts[sband][tband]['rids'])\n##\n## perc_o = \"{:.1f}\".format(100.0*float(cnt_o)/inc_count)\n## perc_n = \"{:.1f}\".format(100.0*float(cnt_n)/inc_count)\n## perc_r = \"{:.1f}\".format(100.0*float(cnt_r)/inc_count)\n##\n## inc_type_reports += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ', Count, Percentage\\n'\n## 'All Incidents,{}, 100\\n'\n## 'Originators,{},{}\\n'\n## 'Near Repeats,{},{}\\n'\n## 'Repeats,{},{}\\n\\n'.format(sband, unit, tband,\n## inc_count,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n## console_type_rpts += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ' Count Percentage\\n'\n## 'All Incidents {:^10} {:^13}\\n'\n## 'Originators {:^10} {:^13}\\n'\n## 'Near Repeats {:^10} {:^13}\\n'\n## 'Repeats {:^10} {:^13}\\n\\n'.format(sband, unit, tband,\n## inc_count, 100,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n\n half_lives_str = 'Estimated incident half-life\\n'\n half_lives_str_console = 'Estimated incident half-life\\n'\n for tband in temporal_bands:\n half_lives_str += '{} days temporal band, {:.1f} days\\n'.format(tband, half_lives[tband])\n half_lives_str_console += '{} days temporal band: {:.1f} days\\n'.format(tband, half_lives[tband])\n\n half_distance_str = 'Estimated incident half-distance\\n'\n half_distance_str_console = 'Estimated incident half-distance\\n'\n for sband in spatial_bands[1:]:\n half_distance_str += '{0} {1} spatial band, {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n half_distance_str_console += '{0} {1} spatial band: {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n\n temp_band_strs = [\"<={} days\".format(b) for b in temporal_bands]\n temporal_band_labels = ','.join(temp_band_strs)\n console_tband_labels = ' '.join(['{:^12}'.format(bnd) for bnd in 
temp_band_strs])\n\n counts_title = 'Number of Repeat and Near-Repeat incidents per spatial and temporal band\\n'\n percent_title = 'Percentage of all incidents classified as Repeat or Near-Repeat and appearing in each spatial and temporal band\\n'\n\n counts_header = ',{}\\n'.format(temporal_band_labels)\n console_counts_header = ' {}'.format(console_tband_labels)\n\n percent_header = ',{}\\n'.format(temporal_band_labels)\n console_perc_header = ' {}'.format(console_tband_labels)\n\n counts_table = \"\"\n percent_table = \"\"\n console_count = \"\"\n console_perc = \"\"\n\n row_sum = [0 for tband in temporal_bands]\n\n for sband in spatial_bands:\n\n # get temporal bands and their incident counts\n vals = [band_counts[sband][tband] for tband in temporal_bands]\n\n # Get spatial band count in each temporal band\n # Sums include counts from smaller bands\n## row_counts = [vals[tband] for tband in temporal_bands]\n## try:\n## row_sums = [sum(row_counts[0:i]) for i in xrange(1,len(row_counts)+1)]\n## except:\n## row_sums = [sum(row_counts[0:i]) for i in range(1,len(row_counts)+1)]\n##\n## row_sum = [x + y for (x, y) in zip(row_sums, row_sum)]\n row_perc = [100.0 * float(val)/inc_count for val in vals]\n\n # append counts & percentages to the table\n if sband == spatial_bands[0]:\n counts_table += '<={} {},{}\\n'.format(sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '<={} {},{}\\n'.format(sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n else:\n counts_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n\n # Write report\n reportname = path.join(report_location, \"{}_{}.csv\".format('Summary', now))\n with open(reportname, 'w') as report:\n\n report.write(report_header)\n report.write('\\n')\n report.write(data_info)\n report.write('\\n')\n report.write(half_distance_str)\n report.write('\\n')\n report.write(half_lives_str)\n report.write('\\n')\n## report.write(inc_type_reports)\n report.write(counts_title)\n report.write(counts_header)\n report.write(counts_table)\n report.write('\\n')\n report.write(percent_title)\n report.write(percent_header)\n report.write(percent_table)\n\n arcpy.SetParameterAsText(9, path.join(out_lines_dir, out_lines_name))\n arcpy.AddMessage(\"\\nView incident summary report: {}\\n\".format(reportname))\n\n arcpy.AddMessage(report_header)\n arcpy.AddMessage('')\n arcpy.AddMessage(data_info)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_distance_str_console)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_lives_str_console)\n arcpy.AddMessage('')\n## arcpy.AddMessage(console_type_rpts)\n arcpy.AddMessage(counts_title)\n arcpy.AddMessage(console_counts_header)\n arcpy.AddMessage(console_count)\n arcpy.AddMessage('')\n arcpy.AddMessage(percent_title)\n 
arcpy.AddMessage(console_perc_header)\n arcpy.AddMessage(console_perc)\n\n## print(\"\\nView incident summary report: {}\\n\".format(reportname))\n##\n## print(report_header)\n## print('')\n## print(data_info)\n## print('')\n## print(half_distance_str_console)\n## print('')\n## print(half_lives_str_console)\n## print('')\n#### arcpy.AddMessage(console_type_rpts)\n## print(counts_title)\n## print(console_counts_header)\n## print(console_count)\n## print('')\n## print(percent_title)\n## print(console_perc_header)\n## print(console_perc)\n\n except arcpy.ExecuteError:\n # Get the tool error messages\n msgs = arcpy.GetMessages()\n arcpy.AddError(msgs)\n print(msgs)\n\n except:\n # Return error messages for use in script tool or Python Window\n arcpy.AddError(str(sys.exc_info()[1]))\n\n # Print Python error messages for use in Python / Python Window\n print(str(sys.exc_info()[1]) + \"\\n\")", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def _get_point_line(self):\n user = self.env.user\n user_company = user.partner_id.parent_id\n if 
user.has_group('loyalty.group_merchant_admin') or user.has_group('loyalty.group_merchant_user'):\n \n # Get Other merchant points which are grouped (i.e is_grouped=True)\n group_merchant_ids = self.env['loyalty.group']._get_merchants_group_partners(merchant=user_company)\n other_merchant_group_earned_point_line_ids = self.env['loyalty.points.history.purchase.lines'].search([('is_group','=',True),('merchant_id','in',group_merchant_ids)])\n other_merchant_point_history_ids = [point_line.merchant_point_history_id.id for point_line in other_merchant_group_earned_point_line_ids]\n \n # Get current merchants all points\n current_merchant_point_history_ids = self.env['loyalty.points.history'].search([('merchant_id','=',user_company.id)]).ids\n\n all_point_history_ids = list(set(other_merchant_point_history_ids + current_merchant_point_history_ids))\n return [('id','in',all_point_history_ids)]\n\n else: \n # If not Merchant admin or merchant user then show all points \n return []", "def __init__(self, currency_code=None, client_id=None, show_history=False, show_outstanding_invoices=False, response_limit=10, end_date=None, history_frequency_interval='month', currency_conversion=None, start_date=None, future_due_periods=None, overdue_periods=None, business_id=None, invoice_ids=None, customer_id=None, _configuration=None): # noqa: E501 # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._currency_code = None\n self._client_id = None\n self._show_history = None\n self._show_outstanding_invoices = None\n self._response_limit = None\n self._end_date = None\n self._history_frequency_interval = None\n self._currency_conversion = None\n self._start_date = None\n self._future_due_periods = None\n self._overdue_periods = None\n self._business_id = None\n self._invoice_ids = None\n self._customer_id = None\n self.discriminator = None\n\n if currency_code is not None:\n self.currency_code = currency_code\n if client_id is not None:\n self.client_id = client_id\n if show_history is not None:\n self.show_history = show_history\n if show_outstanding_invoices is not None:\n self.show_outstanding_invoices = show_outstanding_invoices\n if response_limit is not None:\n self.response_limit = response_limit\n if end_date is not None:\n self.end_date = end_date\n if history_frequency_interval is not None:\n self.history_frequency_interval = history_frequency_interval\n if currency_conversion is not None:\n self.currency_conversion = currency_conversion\n if start_date is not None:\n self.start_date = start_date\n if future_due_periods is not None:\n self.future_due_periods = future_due_periods\n if overdue_periods is not None:\n self.overdue_periods = overdue_periods\n if business_id is not None:\n self.business_id = business_id\n if invoice_ids is not None:\n self.invoice_ids = invoice_ids\n if customer_id is not None:\n self.customer_id = customer_id", "def get_line_nr(view, point):\n return view.rowcol(point)[0] + 1", "def fetch_customer_info_identities(self, client_id):\n\n try:\n return self._make_private_api_request(\n method=PyttributionIo.GET_REQUEST,\n endpoint='customers',\n subject_id=client_id,\n show_identities='true'\n ).get('customer')\n except RequestException as e:\n logger.error('Pyttribution.io: Retrieval of customer identities failed with HTTP status {exception}'.format(\n exception=e))", "def _IC(df): # Balance\n # No adjustments for cash-flow and off-balance sheet yet\n return Invested_Book_Capital(df)", "def 
from_invoice_and_line_item(cls, invoice: InvoiceModel, line_item: LineItemModel, line_number: int,\n distribution: str):\n # Note the invoice_date should be the payment_date in the future.\n return cls(total=line_item.total, invoice_number=invoice.id,\n line_number=line_number,\n is_reversal=invoice.invoice_status_code in\n [InvoiceStatus.REFUNDED.value, InvoiceStatus.REFUND_REQUESTED.value],\n distribution=distribution)", "def intersection_line_line(ab, cd):\n a, b = ab\n c, d = cd\n\n line_vector_1 = vector_from_points(a, b)\n line_vector_2 = vector_from_points(c, d)\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n\n normal_1 = cross_vectors(line_vector_1, d_vector)\n normal_2 = cross_vectors(line_vector_2, d_vector)\n plane_1 = (a, normal_1)\n plane_2 = (c, normal_2)\n\n intx_point_line_1 = intersection_line_plane(ab, plane_2)\n intx_point_line_2 = intersection_line_plane(cd, plane_1)\n\n return [intx_point_line_1, intx_point_line_2]", "def find_line_model(points):\n\n # [WARNING] vertical and horizontal lines should be treated differently\n # here we just add some noise to avoid division by zero\n\n # find a line model for these points\n m = (points[1, 1] - points[0, 1]) / (\n points[1, 0] - points[0, 0] + sys.float_info.epsilon) # slope (gradient) of the line\n c = points[1, 1] - m * points[1, 0] # y-intercept of the line\n\n return m, c", "def invoice(self, id):\r\n return Invoice(self, id)", "def parallelogram_vertices_from_grouped_lines(lines):\n if len(lines) > 2:\n raise Exception(\"parallelogram finder \\\n called with too many lines\")\n c_1 = lines[0]\n c_2 = lines[1]\n intercepts = None\n for l1, l2 in list(zip(c_1, c_2)) + list(zip(c_1, c_2[::-1])):\n x = solve_for_intersection(np.array([l1, l2]))\n if intercepts is None:\n intercepts = np.array([x])\n else:\n intercepts = np.vstack((intercepts, x))\n return intercepts", "def test_invoice_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performing detail\n self._detail_model(\"invoice\", self.invoice_data, id, [])\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performe delete\n self._delete_model(\"invoice\", id_inv)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def draw_header(canvas, invoice):\n\n canvas.setLineWidth(2)\n canvas.line(2 * cm, -4 * cm, 19 * cm, -4 * cm)\n \"\"\" Draws the business address \"\"\"\n business_details = settings.BUSINESS_DETAIL\n business_data = []\n for line in business_details:\n business_data.append([line])\n\n table = Table(business_data, colWidths=[17 * cm], rowHeights=[15, 17, 11, 11, 11, 11, 11])\n table.setStyle([\n ('FONT', (0, 0), (-1, -1), 'Helvetica-Oblique'),\n ('FONTSIZE', (0, 0), (0, 0), 14),\n ('FONTSIZE', (0, 1), (0, -1), 6),\n ('TEXTCOLOR', (0, 0), (-1, -1), (0.2, 0.2, 0.2)),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('BACKGROUND', (0, 0), (-1, -1), (0.95, 0.95,0.95)),\n ])\n tw, th, = table.wrapOn(canvas, 2 * cm, 19 * cm)\n table.drawOn(canvas, 2 * cm, 
-4 * cm)", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers" ]
[ "0.7056929", "0.62926954", "0.61490464", "0.6014562", "0.59545195", "0.5511189", "0.54317766", "0.5338872", "0.53378683", "0.53256035", "0.5227046", "0.5198791", "0.5182063", "0.51410466", "0.5138717", "0.51151025", "0.51103526", "0.51066583", "0.5078583", "0.5048637", "0.50242376", "0.5010777", "0.49733216", "0.49539632", "0.4936336", "0.49357", "0.49330315", "0.49115035", "0.49027282", "0.49017695", "0.48389542", "0.48368657", "0.48345527", "0.48275456", "0.48229194", "0.4799105", "0.47872075", "0.4782573", "0.4782573", "0.4775893", "0.47524828", "0.4732034", "0.47293898", "0.4704198", "0.46989033", "0.46829024", "0.4677983", "0.46760103", "0.46715716", "0.46690926", "0.46684384", "0.46370918", "0.46364218", "0.46264634", "0.4616163", "0.4612672", "0.46035504", "0.46025205", "0.45997328", "0.4597682", "0.45976076", "0.45975327", "0.45960402", "0.45859236", "0.45839322", "0.45681518", "0.4548424", "0.45418814", "0.45380357", "0.4537519", "0.45349932", "0.45300868", "0.45225722", "0.4513522", "0.45095465", "0.45087594", "0.45050418", "0.45017108", "0.4500596", "0.44974196", "0.44854745", "0.4475004", "0.44715238", "0.4465836", "0.4462766", "0.4461851", "0.44521704", "0.4451742", "0.44501662", "0.44497222", "0.44494802", "0.44487467", "0.44459993", "0.44438118", "0.44392478", "0.4433689", "0.44262156", "0.4425119", "0.4417406", "0.44105333" ]
0.81526893
0
This function creates an invoice by compounding invoice lines from the data given as parameters. Once done, it computes the market segment the customer belongs to. If customerID is None, a new customer identifier is created before the order is processed.
def order_process(self, customerID, list_stockCode, list_quantity\ , orderDate=None): segmentID = -1 #------------------------------------------------------------------------- # A new customer is created and inserted into data-set. #------------------------------------------------------------------------- if customerID is None: customerID = int(self.createCustomerID()) else: pass #------------------------------------------------------------------------- # A new dataframe with new invoice lines are created. #------------------------------------------------------------------------- df_invoice_line = self.create_customer_df_invoice_line(customerID\ , list_stockCode, list_quantity, orderDate) #------------------------------------------------------------------------- # Original dataframe is updated with customer invoices lines. #------------------------------------------------------------------------- print("order_process : shape before concat= "+str(self._df_invoice_original.shape)) self._df_invoice_original \ = pd.concat([self._df_invoice_original, df_invoice_line], axis=0) print("order_process : shape after concat= "+str(self._df_invoice_original.shape)) #------------------------------------------------------------------------- # All invoices lines (including new one) related to customer is retrieved # from original dataframe. #------------------------------------------------------------------------- df_invoice_line_customer \ = self.get_customer_history_df_invoice_line(customerID) #------------------------------------------------------------------------- # When calling get_customer_marketSegment(), df_invoice_line_customer is # concatened to the original dataframe. #------------------------------------------------------------------------- segmentID = self.get_customer_marketSegment(df_invoice_line_customer) return segmentID, customerID
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # 
Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID", "def action_invoice_create(self, grouped=False, final=False):\n if self.invoice_option == 'before_delivery':\n inv_obj = self.env['account.invoice']\n for order in self:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n for inv_line in order.order_line:\n inv_line.invoice_line_create(invoice.id, inv_line.product_uom_qty)\n\n else:\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n\n # Keep track of the sequences of the lines\n # To keep lines under their section\n inv_line_sequence = 0\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n\n # We only want to create sections that have at least one invoiceable line\n pending_section = None\n\n # Create lines in batch to avoid performance problems\n line_vals_list = []\n # sequence is the natural order of order_lines\n for line in order.order_line:\n if line.display_type == 'line_section':\n pending_section = line\n continue\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.client_order_ref and order.client_order_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.client_order_ref)\n\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final):\n if pending_section:\n section_invoice = pending_section.invoice_line_create_vals(\n invoices[group_key].id,\n pending_section.qty_to_invoice\n )\n inv_line_sequence += 1\n section_invoice[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(section_invoice)\n pending_section = None\n\n inv_line_sequence += 1\n inv_line = line.invoice_line_create_vals(\n 
invoices[group_key].id, line.qty_to_invoice\n )\n inv_line[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(inv_line)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n self.env['account.invoice.line'].create(line_vals_list)\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n sale_orders = references[invoices[group_key]]\n if len(sale_orders) == 1:\n invoices[group_key].reference = sale_orders.reference\n\n if not invoices:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n\n for invoice in invoices.values():\n invoice.compute_taxes()\n if not invoice.invoice_line_ids:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n # Idem for partner\n so_payment_term_id = invoice.payment_term_id.id\n fp_invoice = invoice.fiscal_position_id\n invoice._onchange_partner_id()\n invoice.fiscal_position_id = fp_invoice\n # To keep the payment terms set on the SO\n invoice.payment_term_id = so_payment_term_id\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': 
order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n line.invoice_line_create(invoices[group_key].id, 
line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def invoice(customer_id):\n encoder = request.url_rule.endpoint\n template = \"{{ encoder }}#{{ customer_id|%s }}\" % encoder\n return render_template_string(template, **locals())", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and 
journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def invoice_line_create(self, invoice_id, qty):\n invoice_lines = self.env['account.invoice.line']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'purchase_line_id': line.id})\n invoice_lines |= self.env['account.invoice.line'].create(vals)\n return invoice_lines", "def generate_eob(\n self, date_of_service, date_of_eob, insured, invoice_id, cpt_code, charge_amount\n ):\n if insured == \"insured\":\n # first copayments\n copay_amount = np.random.choice(\n self.distributions[\"copay_amounts\"],\n 1,\n p=self.distributions[\"copay_distribution\"],\n )[0]\n if copay_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_service],\n \"copay_amount\": [copay_amount],\n \"adjustment_amount\": [0],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = charge_amount - copay_amount\n else:\n remaining_charge = charge_amount\n # next eob discounts\n eob_discount_percent = np.random.choice(\n self.distributions[\"eob_discount_percentages\"],\n 1,\n p=self.distributions[\"eob_discount_distribution\"],\n )[0]\n if eob_discount_percent > 0:\n insurance_adjustment = remaining_charge * eob_discount_percent / 100\n remaining_charge = remaining_charge - insurance_adjustment\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [insurance_adjustment],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n # next handle eob payments where 
relevant\n eob_payment_percentage = np.random.choice(\n self.distributions[\"eob_payment_percentages\"],\n 1,\n p=self.distributions[\"eob_payment_distribution\"],\n )[0]\n eob_payment_amount = remaining_charge * (eob_payment_percentage / 100.0)\n if eob_payment_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [0],\n \"paid_amount\": [eob_payment_amount],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = remaining_charge - eob_payment_amount\n else:\n remaining_charge = charge_amount\n return remaining_charge", "def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': 
order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def action_create_invoice(self):\n if self.partner_id:\n supplier = self.partner_id\n else:\n supplier = self.partner_id.search(\n [(\"name\", \"=\", \"Salon Default Customer\")])\n lines = []\n product_id = self.env['product.product'].search(\n [(\"name\", \"=\", \"Salon Service\")])\n for records in self.order_line_ids:\n if product_id.property_account_income_id.id:\n income_account = product_id.property_account_income_id.id\n elif product_id.categ_id.property_account_income_categ_id.id:\n income_account = product_id.categ_id.\\\n property_account_income_categ_id.id\n else:\n raise UserError(\n _(\"Please define income account for this product: \"\n \"'%s' (id:%d).\") % (product_id.name, product_id.id))\n value = (0, 0, {\n 'name': records.service_id.name,\n 'account_id': income_account,\n 'price_unit': records.price,\n 'quantity': 1,\n 'product_id': product_id.id,\n })\n lines.append(value)\n invoice_line = {\n 'move_type': 'out_invoice',\n 'partner_id': supplier.id,\n 'invoice_user_id': self.env.user.id,\n 'invoice_origin': self.name,\n 'invoice_line_ids': lines,\n }\n inv = self.env['account.move'].create(invoice_line)\n action = self.env.ref('account.action_move_out_invoice_type',\n raise_if_not_found=False)\n result = {\n 'name': action.name,\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'target': 'current',\n 'res_id': inv.id,\n 'res_model': 'account.move',\n }\n self.inv_stage_identifier = True\n self.stage_id = 3\n invoiced_records = 
self.env['salon.order'].search(\n [('stage_id', 'in', [3, 4]), ('chair_id', '=', self.chair_id.id)])\n total = 0\n for rows in invoiced_records:\n invoiced_date = str(rows.date)\n invoiced_date = invoiced_date[0:10]\n if invoiced_date == str(date.today()):\n total = total + rows.price_subtotal\n self.chair_id.collection_today = total\n self.update_number_of_orders()\n return result", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n\n res.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return res", "def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n 
move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line", "def generate_new_visit(self):\n if self.consecutive:\n customer_id = np.random.choice(\n self.customerIds, 1\n ) # choose a customer at random\n insured = self.Customers[self.Customers[\"customer_id\"] == customer_id[0]][\n \"insurance\"\n ].values[\n 0\n ] # does the customer have insurance?\n experiment_id = 
self.Customers[\n self.Customers[\"customer_id\"] == customer_id[0]\n ][\"experiment_id\"].values[\n 0\n ] # does the customer have insurance?\n\n event_list = (\n self.billing_choose_dates()\n ) # generate dates associated with this invoice\n cpt_code = random.sample(self.CPTCodes, 1)[0]\n date_of_service = str(event_list.values[0][0])\n created_on = str(event_list.values[1][0])\n date_of_eob = str(event_list.values[2][0])\n date_of_provider_adjustment = str(event_list.values[3][0])\n date_of_patient_payment = str(event_list.values[4][0])\n # generate a new invoice\n (invoice_id, charge_amount) = self.generate_new_invoice(\n created_on, date_of_service, customer_id, cpt_code\n )\n # generate subsequent EOB (i.e. copay, EOB adjustment, EOB payment)\n remaining_amount = self.generate_eob(\n date_of_service,\n date_of_eob,\n insured,\n invoice_id,\n cpt_code,\n charge_amount,\n )\n # generate provider adjustments\n remaining_amount = self.generate_provider_adjustment(\n date_of_provider_adjustment, invoice_id, cpt_code, remaining_amount\n )\n # generate a possible payment from the patient\n remaining_amount = self.generate_patient_payment(\n date_of_patient_payment,\n invoice_id,\n cpt_code,\n remaining_amount,\n experiment_id,\n )\n # record the remaining amounts in a separate table.\n self.record_remaining_amount(\n date_of_patient_payment, invoice_id, cpt_code, remaining_amount\n )\n return True\n else:\n print(\"Error generating new invoice- customerIds aren't consecutive\")", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def test_invoice_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n self._create_model(\"invoice\", data, [])\n self.assertIsNotNone(id)", "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name 
in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in 
rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def predict_segment(self, df_invoice_line=None):\n if df_invoice_line is not None:\n self.data_transform(df_invoice_line) \n self.df_customers_features_build() \n else:\n pass\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n return y_pred[0]", "def invoices(self, account_id):\n from pureport_client.commands.accounts.invoices import Command\n return Command(self.client, account_id)", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = 
po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def _prepare_invoice(self):\n self.ensure_one()\n # journal_id = self.env['account.invoice'].with_context(force_company=self.env.user.company_id.id).default_get(['journal_id'])['journal_id']\n journal_id = self.company_id.journal_id.id\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id\n }\n return invoice_vals", "def _prepare_invoice(self):\n self.ensure_one()\n result = super(SaleOrder, self)._prepare_invoice()\n result.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return result", "def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_id:\n raise UserError(_('Please define an accounting purchase journal for this company.'))\n 
invoice_vals = {\n 'name': self.partner_ref or '',\n 'origin': self.name,\n 'type': 'in_invoice',\n 'account_id': self.partner_id.property_account_payable_id.id,\n 'partner_id': self.partner_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.currency_id.id,\n 'comment': self.notes,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'purchase_id': self.id,\n 'date_invoice':pytz.utc.localize(datetime.datetime.now()).astimezone(local).strftime('%Y-%m-%d'),\n }\n return invoice_vals", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"Closure to add single customer details\"\"\"\n with open(rental_items, 'r', newline='') as rentals:\n reader = csv.reader(rentals)\n add_invoice_items = partial(add_furniture, invoice_file, customer_name)\n for row in reader:\n add_invoice_items(item_code=row[0],\n item_description=row[1],\n item_monthly_price=row[2])\n return customer_rental", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"\n Loop through rental_items file and append each row to curried invoice_file with same\n customer_name\n \"\"\"\n customer = partial(add_furniture, invoice_file=invoice_file, customer_name=customer_name)\n with open(rental_items, \"r\") as rental_csv:\n for row in csv.reader(rental_csv):\n customer(item_code=row[0], item_description=row[1], item_monthly_price=row[2])\n return customer_rental", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': self.product_id.product_tmpl_id._get_product_accounts()['stock_input'].id,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.taxes_id.ids)],\n 'account_analytic_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n }\n return res", "def create_or_find_b2b_invoices_and_process_ept(self, row, sale_order, invoice_date, tax):\n\n vat_number = row.get('Buyer Tax Registration', False)\n invoice_number = row.get('VAT Invoice Number', False)\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n if not invoice.partner_id.vat:\n invoice.partner_id.vat = vat_number\n\n payments_lines = []\n if invoice.invoice_payments_widget != 'false':\n payments_dict = json.loads(invoice.invoice_payments_widget)\n payments_content = payments_dict.get('content', [])\n for line in payments_content:\n payments_lines.append(line.get('payment_id', False))\n\n invoice_line = invoice.mapped('invoice_line_ids').filtered(\\\n lambda line: line.tax_ids != tax)\n if invoice_line:\n invoice.button_draft()\n invoice.write({'ref': invoice_number, 'date': invoice_date})\n\n if len(invoice_line) > 1:\n for line in invoice_line:\n line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n else:\n invoice_line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n\n 
invoice.with_context({'check_move_validity': False})._recompute_tax_lines( \\\n recompute_tax_base_amount=True)\n invoice.action_post()\n for line in payments_lines:\n invoice.js_assign_outstanding_line(line)\n\n return True", "def create_invoice(self):\n sales_tax = 0.06\n item_sum = 0\n inv = f'Invoice#: {self.invoice_id}\\n'\n for key, value in self.items_with_price.items():\n item_sum += value\n inv += f'{key}.....${value:.2f}\\n'\n\n tax = item_sum * sales_tax\n inv += f'Tax.....${tax:.2f}\\n'\n inv += f'Total.....${tax + item_sum:.2f}'\n # print(inv)\n # returning for unit testing purposes\n return inv", "def add_invoice(self, sys_department_id, contact_id=None, company_id=None, for_attention_of=None,\n payment_term=None, invoice_lines=None, draft_invoice=False, layout_id=None, date=None,\n po_number=None, direct_debit=False, comments=None, force_set_number=None, custom_fields=None):\n\n # get all arguments\n data = self._clean_input_to_dict(locals())\n\n # argument validation\n if contact_id is None and company_id is None:\n raise InvalidInputError(\"One of contact_id or company_id is required.\")\n\n if contact_id is not None and company_id is not None:\n raise InvalidInputError(\"Only one of contact_id or company_id is can be set.\")\n\n if payment_term is not None:\n if payment_term not in self._valid_payment_terms:\n raise InvalidInputError(\"Invalid contents of argument payment_term.\")\n\n invoice_lines = self._validate_type(invoice_lines, list)\n for line in invoice_lines:\n if not {'description', 'amount', 'vat', 'price'}.issubset(line.keys()):\n raise InvalidInputError(\"Fields description, amount, vat and price are required for each line.\")\n\n if line['vat'] not in ['00', '06', '12', '21', 'CM', 'EX', 'MC', 'VCMD']:\n raise InvalidInputError(\"Invalid contents of argument vat.\")\n\n if date is not None and type(date) != datetime.date:\n raise InvalidInputError(\"Invalid contents of argument date.\")\n\n custom_fields = self._validate_type(custom_fields, dict)\n\n # convert data elements that need conversion\n self._convert_custom_fields(data)\n\n if contact_id is not None:\n data['contact_or_company'] = 'contact'\n data['contact_or_company_id'] = data.pop('contact_id')\n else:\n data['contact_or_company'] = 'company'\n data['contact_or_company_id'] = data.pop('company_id')\n\n i = 1\n for line in invoice_lines:\n data['description_' + str(i)] = line['description']\n data['price_' + str(i)] = line['price']\n data['amount_' + str(i)] = line['amount']\n data['vat_' + str(i)] = line['vat']\n\n if 'product_id' in data:\n data['product_id_' + str(i)] = line['product_id']\n if 'account' in data:\n data['account_' + str(i)] = line['account']\n if 'subtitle' in data:\n data['subtitle_' + str(i)] = line['subtitle']\n\n i += 1\n\n del(data['invoice_lines'])\n\n if date is not None:\n data['date'] = data.pop('date').strftime('%d/%m/%Y')\n\n return self._request('addInvoice', data)", "def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n 
('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True", "def add_invoice() -> str:\r\n invoice_details = []\r\n #Catching values user has entered in UI\r\n invoice_number = request.args.get(\"invoice_number\")\r\n invoice_details.append(invoice_number)\r\n customer = request.args.get(\"customer\")\r\n invoice_details.append(customer)\r\n date_required = request.args.get(\"date_required\")\r\n invoice_details.append(date_required)\r\n recipe = request.args.get(\"recipe\")\r\n invoice_details.append(recipe)\r\n gyle_number = request.args.get(\"gyle_number\")\r\n invoice_details.append(gyle_number)\r\n quantity_ordered = request.args.get(\"quantity_ordered\")\r\n invoice_details.append(quantity_ordered)\r\n #Passing list to function which writes list to CSV file\r\n data_add(invoice_details)\r\n invoice_message = \"INVOICE ADDED\"\r\n return render_template(\"singular_message.html\",\r\n user_display=invoice_message)", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context)\n\n invoice_vals.update({\n 'partner_shipping_id': order.partner_shipping_id.id,\n })\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def create_order(cls, invoice):\n order = cls(\n order_id=str(uuid.uuid4().int),\n invoice=invoice\n ).save()\n\n invoice_line_items = InvoiceLineItem.objects.filter(invoice=invoice, type=\"item\").all()\n\n for invoice_line_item in invoice_line_items:\n OrderLineItem.create_order_line_item(order=order, invoice_line_item=invoice_line_item)\n\n return order", "def create_invoice(invoice: Invoice, callback_url: Optional[HttpUrl] = None):\n # Send the invoice, collect the money, send the notification (the callback)\n return {\"msg\": \"Invoice received\"}", "def invoice(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/invoices/invoices/{params['invoice_id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n invoice = result[\"response\"][\"result\"][\"invoice\"]\n invoice_obj = FreshbooksInvoice(\n account_id=invoice['accountid'],\n customerid=invoice['customerid'], \n invoice_id=invoice['invoiceid'],\n currency_code=invoice['currency_code'],\n language=invoice['language'],\n terms=invoice['terms'],\n discount_value=invoice['discount_value'],\n discount_amount=invoice['discount_total']['amount'],\n invoice_number=invoice['invoice_number'],\n po_number=invoice['po_number'],\n amount=invoice['amount']['amount'],\n code=invoice['amount']['code'],\n create_date=invoice['create_date']\n )\n return invoice_obj.__dict__", "def __init__(self, currency_code=None, client_id=None, 
show_history=False, show_outstanding_invoices=False, response_limit=10, end_date=None, history_frequency_interval='month', currency_conversion=None, start_date=None, future_due_periods=None, overdue_periods=None, business_id=None, invoice_ids=None, customer_id=None, _configuration=None): # noqa: E501 # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._currency_code = None\n self._client_id = None\n self._show_history = None\n self._show_outstanding_invoices = None\n self._response_limit = None\n self._end_date = None\n self._history_frequency_interval = None\n self._currency_conversion = None\n self._start_date = None\n self._future_due_periods = None\n self._overdue_periods = None\n self._business_id = None\n self._invoice_ids = None\n self._customer_id = None\n self.discriminator = None\n\n if currency_code is not None:\n self.currency_code = currency_code\n if client_id is not None:\n self.client_id = client_id\n if show_history is not None:\n self.show_history = show_history\n if show_outstanding_invoices is not None:\n self.show_outstanding_invoices = show_outstanding_invoices\n if response_limit is not None:\n self.response_limit = response_limit\n if end_date is not None:\n self.end_date = end_date\n if history_frequency_interval is not None:\n self.history_frequency_interval = history_frequency_interval\n if currency_conversion is not None:\n self.currency_conversion = currency_conversion\n if start_date is not None:\n self.start_date = start_date\n if future_due_periods is not None:\n self.future_due_periods = future_due_periods\n if overdue_periods is not None:\n self.overdue_periods = overdue_periods\n if business_id is not None:\n self.business_id = business_id\n if invoice_ids is not None:\n self.invoice_ids = invoice_ids\n if customer_id is not None:\n self.customer_id = customer_id", "def merge_invoice(self, cr, uid, invoices, context=None):\n order_ids = []\n pick_ids = []\n if len(invoices) <= 1:\n return False\n parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id'])\n for inv in invoices:\n if parent.partner_id != inv.partner_id:\n raise osv.except_osv(_(\"Partners don't match!\"), _(\"Can not merge invoice(s) on different partners or states !.\"))\n\n if inv.state != 'draft':\n raise osv.except_osv(_(\"Invalid action !\"), _(\"You can merge only invoices in draft state.\"))\n\n # Merge invoices that are in draft state\n inv_line_obj = self.pool.get('account.invoice.line')\n name = parent.name\n comment = parent.comment\n origin = parent.origin\n for inv in invoices:\n if inv.id == parent.id:\n continue\n\n # check if a line with the same product already exist. if so add quantity. 
else hang up invoice line to first invoice head.\n if inv.name:\n # Find if the same name already exist, if yes, skip to add.\n name_list = name.replace(' ', '').split(',')\n if inv.name not in name_list:\n name += ', %s' % inv.name\n if inv.comment:\n comment = comment and comment + ', %s' % inv.comment or inv.comment\n if inv.origin:\n origin += ', %s' % inv.origin\n line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)])\n for inv_lin in inv_line_obj.browse(cr, uid, line_ids):\n mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id),\n ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same.\n ])\n if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity\n inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)})\n inv_line_obj.unlink(cr, uid, inv_lin.id)\n elif inv.type == parent.type:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id})\n else:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity})\n\n if inv.sale_order_ids:\n order_ids += [order.id for order in inv.sale_order_ids]\n if inv.picking_ids:\n pick_ids += [picking.id for picking in inv.picking_ids]\n\n self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment})\n\n #Remove By DRB\n #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n\n self.unlink(cr, uid, [inv.id])\n #Distinct List\n order_ids = list(set(order_ids))\n pick_ids = list(set(pick_ids))\n\n self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]})\n self.button_reset_taxes(cr, uid, [parent.id])\n return parent.id", "def process_b2c_amazon_order_ept(self, row, sale_order, invoice_date):\n invoice_number = row.get('VAT Invoice Number', False)\n invoice_url = row.get('Invoice Url', '')\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order.with_context({'vcs_invoice_number': invoice_number})._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n invoice_vals = {}\n invoice_vals.update({'date': invoice_date, 'invoice_url': invoice_url})\n if invoice.state == 'draft' and \\\n sale_order.amz_seller_id.is_invoice_number_same_as_vcs_report:\n invoice_vals.update({'name': invoice_number})\n invoice.write(invoice_vals)\n return True", "def invoice_create_onaccept(form):\n\n # Get record ID\n form_vars = form.vars\n if \"id\" in form_vars:\n record_id = form_vars.id\n elif hasattr(form, \"record_id\"):\n record_id = form.record_id\n else:\n return\n\n # Look up the billing ID\n table = current.s3db.fin_voucher_invoice\n query = (table.id == record_id)\n invoice = current.db(query).select(table.billing_id,\n limitby = (0, 1),\n ).first()\n\n if invoice:\n # Assign the invoice\n from .helpers import assign_pending_invoices\n assign_pending_invoices(invoice.billing_id,\n invoice_id = record_id,\n )", "def create_landlord_invoice(self):\n if 
self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n inv_lines_values = {\n # 'origin': 'tenancy.rent.schedule',\n 'name': 'Rent Cost for' + self.tenancy_id.name,\n 'quantity': 1,\n 'price_unit': self.amount or 0.00,\n 'account_id':\n self.tenancy_id.property_id.account_depreciation_expense_id.id or False,\n 'analytic_account_id': self.tenancy_id.id or False,\n }\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {\n 'partner_id': self.tenancy_id.property_owner_id.id or False,\n 'type': 'in_invoice',\n 'invoice_line_ids': [(0, 0, inv_lines_values)],\n 'property_id': self.tenancy_id.property_id.id or False,\n 'invoice_date': self.start_date or False,\n # 'account_id': owner_rec.property_account_payable_id.id,\n # 'schedule_id': self.id,\n 'new_tenancy_id': self.tenancy_id.id,\n 'journal_id': account_jrnl_obj.id or False\n }\n\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }", "def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None):\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None)\n \n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result", "def invoice(self, start, end):\n\n if self.invoice_type is None:\n invoice_type = self.conn.config[\"main\"][\"invoice:object\"]\n if \":\" not in invoice_type:\n raise AttributeError(\"Invoice configuration incorrect! 
%s\" % invoice_type)\n module, call = invoice_type.split(\":\")\n _package = __import__(module, globals(), locals(), [ call ])\n\n funct = getattr(_package, call)\n self.invoice_type = funct\n config = self.conn.config[\"invoice_object\"]\n invoice = self.invoice_type(self, config)\n return invoice", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def generate_orders(self, cr, uid, ids, context=None):\n voucher_pool = self.pool.get('account.voucher')\n payment_term_obj = self.pool.get('account.payment.term')\n account_budget_confirmation_obj = self.pool.get('account.budget.confirmation')\n period_obj = self.pool.get('account.period')\n if context is None:\n context = {}\n for order in self.browse(cr, uid, ids, context=context):\n #################################to remind\n total_fixed = total_percent = 0\n for line in order.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (order.amount or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Can not create the payments !\\n\\\n The related payment term is probably miss configured as it gives a computed amount greater than the total permanent payment amount. \\\n The latest line of your payment term must be of type 'balance' to avoid rounding issues.\"))\n # create one move line for the total and possibly adjust the other lines amount\n totlines1 = []\n for o in order.line_ids:\n totlines1 += payment_term_obj.compute(cr, uid, order.payment_term.id, o.amount, order.date or False, context=context)\n \n d = {}\n for k, v in totlines1:\n d.setdefault(k, [k]).append(v)\n totlines = map(tuple, d.values())\n\n for t in totlines :\n #to substract date from the interval number \n order_date = t[0]\n entered_date = datetime.datetime.strptime(order_date, '%Y-%m-%d')\n entered_date = entered_date.date()\n account_id = (order.partner_id.property_account_payable and order.partner_id.property_account_payable.id) or \\\n (order.journal_id.default_credit_account_id and order.journal_id.default_credit_account_id.id)\n period_id = period_obj.find(cr, uid, t[0], context=context)[0]\n\n list_confirm = [conf.id for conf in o.confirmation_ids]\n confirmations = account_budget_confirmation_obj.search(cr, uid, [('id','in', list_confirm),('period_id','=', period_id)], context=context) #('date','=',t[0]),\n\n for confirm in confirmations:\n confirm_id = confirm\n\n voucher_lines = [(0, 0, {'name':ol.name, 'account_id':ol.account_id.id, 'type':'dr',\n 'amount':t[count + 1], 'account_analytic_id':ol.account_analytic_id.id, 'budget_confirm_id': confirm_id })\n for count, ol in enumerate(order.line_ids)]\n res = voucher_pool.onchange_price(cr, uid, 0, voucher_lines, [], partner_id=order.partner_id.id, context=context).get(\"value\", {})\n voucher_dict = {\n 'partner_id' : order.partner_id.id,\n 'account_id': account_id,\n 'company_id' : order.company_id.id,\n 'journal_id' : order.journal_id.id,\n 'period_id': order.period_id.id,\n 'type':'purchase',\n 'date' : t[0],\n 'reference': order.name,\n 'payment_permanent_voucher_id': order.id,\n 'line_ids':voucher_lines,\n 'amount':res.get(\"amount\", 0.0)\n }\n voucher_pool.create(cr, uid, voucher_dict, context=context)\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def 
create_order_invoice(sender, instance, created, using, **kwargs):\n\n # Create invoice if it doesn't already exist\n if (\n created\n and not Invoice.objects.filter(\n order__order_number=instance.order_number\n ).exists()\n ):\n invoice = Invoice(order=instance)\n # Saving it in reverse to avoid having this signal called again\n invoice.save()\n\n for slug, cls in discount_rules.get_all_discount_rules():\n if cls.can_user_have_access(instance.user, invoice):\n cls.apply_discount(instance.user, invoice)", "def test_invoice_item_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n self._create_model(\"invoiceitem\", data, [\"quantity\", \"quote_price\"])\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_invoice_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # create another customer\n id_other = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id_other:\n # then performe update\n data = self.invoice_data\n data[\"customer_id\"] = id_other\n self._update_model(\"invoice\", id, data, [])\n self.assertIsNotNone(id_other)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def create_invoice(sender, invoice, issuer_details, **kwargs):\n if not invoice.items:\n return\n\n price = sum([item.price for item in invoice.items.all()])\n\n if not price:\n return\n\n paypal_invoice = models.Invoice(\n customer=invoice.customer,\n year=invoice.year,\n month=invoice.month,\n invoice_date=invoice.invoice_date,\n end_date=invoice.due_date,\n tax_percent=invoice.tax_percent,\n issuer_details=issuer_details,\n )\n\n paypal_invoice.payment_details = {\n 'name': invoice.customer.name,\n 'address': invoice.customer.address,\n 'country': invoice.customer.country,\n 'country_name': invoice.customer.get_country_display(),\n 'email': invoice.customer.email,\n 'postal': invoice.customer.postal,\n 'phone_number': invoice.customer.phone_number,\n 'bank_name': invoice.customer.bank_name,\n 'bank_account': invoice.customer.bank_account,\n }\n\n paypal_invoice.save()\n\n for item in invoice.items.all():\n models.InvoiceItem.objects.create(\n invoice=paypal_invoice,\n price=item.price,\n tax=item.tax,\n quantity=item.quantity,\n unit_price=item.unit_price,\n unit_of_measure=helpers.convert_unit_of_measure(item.unit),\n name=item.name,\n start=item.start,\n end=item.end,\n )", "def create_order(self, serializer):\n data = serializer.validated_data\n service: Service = data['service']\n customer: Customer = Customer.objects.get_or_create(\n email=data['email'])[0]\n invoice: Invoice = Invoice(\n charged_amount=service.price.amount,\n currency=service.price.currency,\n timestamp=now(),\n customer=customer,\n service=service\n )\n 
invoice.save()\n serializer.validated_data['invoice_id'] = invoice.id\n serializer.save()\n\n self.send_order_email(invoice, serializer.instance)", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def get_customer_segments(self, date):\n date = current_date_to_day().isoformat() if date is None else date\n self.products = pd.merge(self.products,\n self.cs.fetch(start_date=convert_dt_to_day_str(date))[['client', 'segments']],\n on='client', how='left')", "def createIntersectorVertices(self):\n for i in self.inter1 + self.inter2:\n self.result.addVertex(self.newVertex(i))", "def prepare_invoice(self):\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define sales journal for this company: \"%s\" (id:%d).') % (self.company_id.name, self.company_id.id))\n invoice_vals = {\n 'order_id': self.id,\n 'name': self.order_no,\n 'origin': self.order_no,\n 'type': 'out_invoice',\n 'reference': self.patient_id.name + ':' + self.name,\n 'account_id': self.patient_id.partner_id.property_account_receivable_id.id,\n 'partner_id': self.patient_id.partner_id.id,\n 'journal_id': journal_id,\n 'comment': self.note,\n 'doctor_id': self.doctor_id.id,\n 'payment_term': False,\n 'user_id': False,\n }\n return invoice_vals", "def get_invoiced_lot_values(self):\n self.ensure_one()\n\n if self.state == 'draft':\n return []\n\n sale_orders = self.mapped('invoice_line_ids.sale_line_ids.order_id')\n stock_move_lines = sale_orders.mapped('picking_ids.move_lines.move_line_ids')\n\n # Get the other customer invoices and refunds.\n ordered_invoice_ids = sale_orders.mapped('invoice_ids') \\\n .filtered(lambda i: i.state not in ['draft', 'cancel']) \\\n .sorted(lambda i: (i.invoice_date, i.id))\n\n # Get the position of self in other customer invoices and refunds.\n self_index = None\n i = 0\n for invoice in ordered_invoice_ids:\n if invoice.id == self.id:\n self_index = i\n break\n i += 1\n\n # Get the previous invoice if any.\n previous_invoices = ordered_invoice_ids[:self_index]\n last_invoice = previous_invoices[-1] if len(previous_invoices) else None\n\n # Get the incoming and outgoing sml between self.invoice_date and the previous invoice (if any).\n write_dates = [wd for wd in self.invoice_line_ids.mapped('write_date') if wd]\n self_datetime = max(write_dates) if write_dates else None\n last_write_dates = last_invoice and [wd for wd in last_invoice.invoice_line_ids.mapped('write_date') if wd]\n last_invoice_datetime = max(last_write_dates) if last_write_dates else None\n\n def _filter_incoming_sml(ml):\n if ml.state == 'done' and ml.location_id.usage == 'customer' and ml.lot_id:\n if last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n def _filter_outgoing_sml(ml):\n if ml.state == 'done' and ml.location_dest_id.usage == 'customer' and ml.lot_id:\n if last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= 
self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n incoming_sml = stock_move_lines.filtered(_filter_incoming_sml)\n outgoing_sml = stock_move_lines.filtered(_filter_outgoing_sml)\n\n # Prepare and return lot_values\n qties_per_lot = defaultdict(lambda: 0)\n if self.type == 'out_refund':\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n else:\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n lot_values = []\n for lot_id, qty in qties_per_lot.items():\n if float_is_zero(qty, precision_rounding=lot_id.product_id.uom_id.rounding):\n continue\n lot_values.append({\n 'product_name': lot_id.product_id.display_name,\n 'product_color': lot_id.x_studio_color.x_name,\n 'quantity': qty,\n 'uom_name': lot_id.product_uom_id.name,\n 'lot_name': lot_id.name\n })\n #AQUI ORDENO TODOS LOS LOTES QUE ME QUEDAN EN lot_values POR EL COLOR\n lot_values.sort(key=lambda r: r['product_color'], reverse=False)\n return lot_values", "def _get_account_analytic_invoice(self, cursor, user, picking, move_line):\n if move_line.purchase_line_id:\n return move_line.purchase_line_id.order_id.account_analytic_id.id\n return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)", "def IRIS_ARC_IC(input, clients):\n \n if input[clients + '_ic_auto'] == 'Y':\n global events \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n address = eventpath\n elif input[clients + '_ic'] != 'N':\n address = input[clients + '_ic']\n \n events, address_events = quake_info(address, 'info')\n \n for i in range(0, len(events)):\n sta_ev = read_station_event(address_events[i])\n ls_saved_stas = []\n \n for j in range(0, len(sta_ev[0])):\n if clients == sta_ev[0][j][13]:\n station_id = sta_ev[0][j][0] + '.' + sta_ev[0][j][1] + '.' + \\\n sta_ev[0][j][2] + '.' 
+ sta_ev[0][j][3]\n ls_saved_stas.append(os.path.join(address_events[i], 'BH_RAW',\\\n station_id))\n \n print 'event: ' + str(i+1) + '/' + str(len(events)) + \\\n ' -- ' + clients\n print '------------------------------------'\n inst_correct(input, ls_saved_stas, address_events[i], clients) \n \n print \"**********************************\"\n print clients.upper() + ' Instrument Correction is DONE'\n print \"**********************************\"", "def invoice(self, id):\r\n return Invoice(self, id)", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def draw_header(canvas, invoice):\n\n canvas.setLineWidth(2)\n canvas.line(2 * cm, -4 * cm, 19 * cm, -4 * cm)\n \"\"\" Draws the business address \"\"\"\n business_details = settings.BUSINESS_DETAIL\n business_data = []\n for line in business_details:\n business_data.append([line])\n\n table = Table(business_data, colWidths=[17 * cm], rowHeights=[15, 17, 11, 11, 11, 11, 11])\n table.setStyle([\n ('FONT', (0, 0), (-1, -1), 'Helvetica-Oblique'),\n ('FONTSIZE', (0, 0), (0, 0), 14),\n ('FONTSIZE', (0, 1), (0, -1), 6),\n ('TEXTCOLOR', (0, 0), (-1, -1), (0.2, 0.2, 0.2)),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('BACKGROUND', (0, 0), (-1, -1), (0.95, 0.95,0.95)),\n ])\n tw, th, = table.wrapOn(canvas, 2 * cm, 19 * cm)\n table.drawOn(canvas, 2 * cm, -4 * cm)", "def create(self, validated_data):\n orderlines = validated_data.pop('orderlines', None)\n if not (orderlines and len(orderlines)):\n raise EmptyOrderException\n\n # Create order and associated orderlines\n order = models.Order.objects.create(**validated_data)\n for orderline in orderlines:\n order.orderlines.create(**orderline)\n\n return order", "def register_prepayment(self, prepayment_line, writeoff_acc_id=False, writeoff_journal_id=False):\n\n #TODO: CREATE THE BRIDGE MOVE\n\n line_to_reconcile = self.env['account.move.line']\n for inv in self:\n 
line_to_reconcile += inv.move_id.line_ids.filtered(\n lambda r: not r.reconciled and r.account_id.internal_type in ('payable', 'receivable')).sorted(key=lambda r: r.date_maturity)\n\n #CREATING BRIDGE\n for lr in line_to_reconcile:\n\n new_move = inv._create_bridge_move(lr, prepayment_line)\n\n bridge_move_line = False\n payment_line = False\n\n for line in new_move.line_ids:\n if line.account_id.id == lr.account_id.id:\n payment_line = line\n elif line.account_id.id == prepayment_line.account_id.id:\n bridge_move_line = line\n\n (bridge_move_line + prepayment_line).reconcile(writeoff_acc_id, writeoff_journal_id)\n break\n\n return (line_to_reconcile + payment_line).reconcile(writeoff_acc_id, writeoff_journal_id)", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id.sequence_id:\n raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))\n if not inv.invoice_line:\n raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):\n raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise osv.except_osv(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. 
In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n entry_type = 'journal_pur_voucher'\n if inv.type == 'in_refund':\n entry_type = 'cont_voucher'\n else:\n ref = self._convert_ref(cr, uid, inv.number)\n entry_type = 'journal_sale_vou'\n if inv.type == 'out_refund':\n entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n # kittiu\n #if inv.payment_term:\n if inv.payment_term and not inv.date_due:\n # --\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t[1],\n 'account_id': acc_id,\n 'date_maturity': t[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n part = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)\n\n line = self.group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise osv.except_osv(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. 
Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n self._log_event(cr, uid, ids)\n return True", "def test_invoice_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performing detail\n self._detail_model(\"invoice\", self.invoice_data, id, [])\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def single_customer(customer_name, invoice_file):\n def single_customer_rentals(rental_items):\n add_item = partial(add_furniture, customer_name=customer_name,\n invoice_file=invoice_file)\n with open(rental_items, \"r\") as file:\n for row in csv.reader(file):\n add_item(item_code=row[0], item_description=row[1],\n item_monthly_price=row[2])\n return single_customer_rentals", "def abc_confirm_invoice(self, lines, packages, data, params, res):\n invoice = params.get('invoice')\n if invoice and invoice.state == 'draft':\n self.env.cr.commit()\n env = None\n try:\n # Ne cursor doesn't time out when requesting lock.\n # Could be bad I guess? 
Works for now.\n # TODO: Look into setting a more reasonable lock wait time.\n new_cr = Registry(self.env.cr.dbname).cursor()\n new_cr.autocommit(True)\n env = api.Environment(new_cr, self.env.uid, self.env.context)\n # Validate invoice\n invoice.signal_workflow('invoice_open')\n res['invoice']['name'] = invoice.number\n res['messages'].append(u\"Created and confirmed invoice %s.\" % invoice.number)\n res['results']['invoice'] = 'confirmed'\n # Commit to unlock the invoice sequence\n env.cr.commit()\n except Exception as e:\n res['warnings'].append((\n _(u\"Failed to confirm invoice %s!\") % (invoice and (invoice.number or invoice.name) or 'Unknown'),\n '%s\\n\\nTraceback:\\n%s' % (e.message or 'Unknown Error', traceback.format_exc())))\n finally:\n if env:\n env.cr.close()", "def invoice_lines(self, invoice_lines):\n if self.local_vars_configuration.client_side_validation and invoice_lines is None: # noqa: E501\n raise ValueError(\"Invalid value for `invoice_lines`, must not be `None`\") # noqa: E501\n\n self._invoice_lines = invoice_lines", "def build_invoice(payment_object):\n # Fill html template with the domain orders and user profile info\n html_template = get_template('billing/billing_invoice.html')\n rendered_html = html_template.render({\n 'payment': payment_object,\n 'user_profile': payment_object.owner.profile,\n })\n # Create pdf file from a html file\n pdfkit.from_string(rendered_html, '/tmp/out.pdf')\n with open(\"/tmp/out.pdf\", \"rb\") as pdf_file:\n pdf_raw = pdf_file.read()\n os.remove(\"/tmp/out.pdf\")\n return {\n 'body': pdf_raw,\n 'filename': 'invoice_{}.pdf'.format(payment_object.transaction_id),\n }", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id:\n raise orm.except_orm(_('Error!'),\n _('Journal not defined for this invoice!'))\n if not inv.journal_id.iva_registry_id:\n raise orm.except_orm(_('Error!'),\n _('You must link %s with a VAT registry!') % (inv.journal_id.name))\n if not inv.journal_id.sequence_id:\n raise orm.except_orm(_('Error!'),\n _('Please define sequence on the journal related to this invoice.')) \n if not inv.invoice_line:\n raise orm.except_orm(_('No Invoice Lines!'),\n _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id],\n {'date_invoice': fields.date.context_today(self,\n cr,\n uid,\n context=context)},\n context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid,\n inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n # iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n iml = super(account_invoice_makeover, self)._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n # self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n super(account_invoice_makeover, self).check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = 
self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid,\n group_check_total_id,\n context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0)):\n raise orm.except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n# entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n# entry_type = 'journal_pur_voucher'\n# if inv.type == 'in_refund':\n# entry_type = 'cont_voucher'\n else:\n # ref = self._convert_ref(cr, uid, inv.number)\n ref = super(account_invoice_makeover, self)._convert_ref(cr, uid, inv.number)\n# entry_type = 'journal_sale_vou'\n# if inv.type == 'out_refund':\n# entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n # total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n total, total_currency, iml = super(account_invoice_makeover, self).compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n if inv.payment_term:\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t_line in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t_line[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t_line[1],\n 'account_id': acc_id,\n 'date_maturity': t_line[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 'payment_type': t_line[2]\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 'payment_type': None\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n 
part = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0, 0, self.line_get_convert(cr, uid, x, part.id, date, context=ctx)), iml)\n\n # line = self.group_lines(cr, uid, iml, line, inv)\n line = super(account_invoice_makeover, self).group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise orm.except_orm(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.registration_date, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id, 'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n # self._log_event(cr, uid, ids)\n super(account_invoice_makeover, self)._log_event(cr, uid, ids)\n return True", "def _create_payments(self, invoice):\n self.ensure_one()\n if self.schedule_id and self.schedule_id.occurences > 0:\n # TODO: make more intelligent price cut\n amount = invoice.amount_total\n amount_per_occurence = amount / self.schedule_id.occurences\n for day in self.schedule_id.day_ids:\n payment = self.env['account.payment'].new({\n 'payment_type': 'inbound',\n 'partner_type': 'customer',\n 'partner_id': self.member_id.partner_id.id,\n 'amount': amount_per_occurence,\n 'payment_date': day.day,\n 'journal_id': self.journal_id.id,\n })\n payment._onchange_journal()\n payment_values = dict(payment._cache)\n payment = self.env['account.payment'].create(payment_values)\n payment.invoice_ids = [(4, invoice.id, False)]", "def from_invoice_and_line_item(cls, invoice: InvoiceModel, line_item: LineItemModel, line_number: int,\n distribution: str):\n # Note the invoice_date should be the payment_date in the future.\n return cls(total=line_item.total, invoice_number=invoice.id,\n line_number=line_number,\n is_reversal=invoice.invoice_status_code in\n [InvoiceStatus.REFUNDED.value, InvoiceStatus.REFUND_REQUESTED.value],\n distribution=distribution)", "def duplicate_invoice(invoice):\n from invoicer.models import Invoice\n from invoicer.models import LineItem\n\n # copy main attributes\n new_invoice = Invoice(\n company=invoice.company,\n invoice_date=datetime.now(),\n client=invoice.client,\n location=invoice.location,\n tax_rate=invoice.tax_rate,\n left_address=invoice.left_address,\n right_address=invoice.right_address,\n terms=invoice.terms,\n footer=invoice.footer\n )\n new_invoice.save()\n\n # now line items\n 
for line_item in invoice.line_items.all():\n new_invoice.line_items.add(LineItem(\n name=line_item.name,\n description=line_item.description,\n price=line_item.price,\n taxable=line_item.taxable,\n item=line_item.item,\n quantity=line_item.quantity\n ))\n\n return new_invoice", "def create_invoice(self, order): # noqa:max-complexity=18\n\n if len(order['order_lines']) == 0:\n raise RuntimeError(\n \"Expected 1 order_lines in order {}, got: {}\".format(\n order['order_id'],\n order['order_lines']\n )\n )\n\n order_id = order['order_id']\n\n refund = False\n if order['state'] == 'REFUND':\n refund = True\n self.stdout.write(self.style.WARNING(\"Refunded order: {}\".format(order_id)))\n elif order['state'] == 'PAID':\n pass\n else:\n self.stdout.write(self.style.WARNING(\"Not processing unknown order state {} for: {}\".format(order['state'], order_id)))\n return\n\n if self.only_known and order_id not in billy.TICKETBUTLER_IGNORE_LIST:\n self.stdout.write(self.style.WARNING(\"Only processing known invoices, skipping {}\".format(order_id)))\n return\n\n # Object containing all created tickets, to have an invoice relation\n # appended later\n ticketbutler_tickets = []\n\n for ticket in order['tickets']:\n\n sprints = list(filter(\n lambda q: q['question'] == 148,\n ticket['answers']\n ))[0]\n\n if any(filter(lambda c: c['choice_heading'].lower() == 'no', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_NO\n elif any(filter(lambda c: c['choice_heading'].lower() == 'maybe', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_MAYBE\n elif any(filter(lambda c: c['choice_heading'].lower() == 'yes', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_YES\n\n ticketbutler_ticket = models.TicketbutlerTicket.get_or_create(\n ticket['email'],\n ticket['full_name'],\n order_id,\n sprints,\n ticket['ticket_type_name'],\n )\n if refund:\n self.stdout.write(self.style.WARNING(\"This ticket was marked refunded: {}\".format(order_id)))\n ticketbutler_ticket.refunded = True\n ticketbutler_ticket.save()\n else:\n ticketbutler_ticket.refunded = False\n ticketbutler_ticket.save()\n\n ticketbutler_tickets.append(ticketbutler_ticket)\n\n if refund:\n self.stdout.write(self.style.WARNING(\"Skipping refunded order: {}\".format(order_id)))\n return\n\n # If an email is changed on a TicketButler ticket and an old user exists without any other tickets,\n # then disable this user's account and delete the ticket from the system\n all_order_tickets = models.TicketbutlerTicket.objects.filter(ticketbutler_orderid=order_id)\n\n for ticket in order['tickets']:\n\n for verify_ticket in all_order_tickets:\n # Check if the ticket is active in the current order, if it is\n # then skip it.\n if any(active.id == verify_ticket.id for active in ticketbutler_tickets):\n continue\n # Yeah, it's not active anymore, so delete it and potentially\n # disable the user account\n inactive_ticket = verify_ticket\n self.stdout.write(self.style.WARNING(\"Going to remove ticket for {}, order_id: {}\".format(inactive_ticket.user.email, order_id)))\n if inactive_ticket.user.tickets.all().exclude(id=inactive_ticket.id).exists():\n # Just remove the ticket\n self.stdout.write(self.style.WARNING(\"Found another ticket for user {} and deleted the inactive ticket in question but not the user\".format(inactive_ticket.user.email)))\n if inactive_ticket.pk:\n inactive_ticket.delete()\n continue\n else:\n # Remove the user account too if there are no submissions and it's not a 
superuser\n if not inactive_ticket.user.is_superuser and not inactive_ticket.user.submissions.all().exists():\n if inactive_ticket.user.is_active:\n self.stdout.write(self.style.WARNING(\"Also disabling user account for: {}\".format(inactive_ticket.user.email)))\n inactive_ticket.user.is_active = False\n inactive_ticket.user.save()\n else:\n self.stdout.write(self.style.WARNING(\"User was already inactive: {}\".format(inactive_ticket.user.email)))\n # In case the user had several tickets, and one of them was already deleted\n if inactive_ticket.pk:\n inactive_ticket.delete()\n\n if 'discount' in order:\n if order['discount']['amount'] == 100:\n\n for ticket in ticketbutler_tickets:\n ticket.free_ticket = True\n ticket.save()\n\n self.stdout.write(self.style.SUCCESS(\"Skipping invoice for free ticket for order id: {}\".format(order_id)))\n return\n else:\n self.stdout.write(self.style.ERROR(\"!!! Order id {} will have an invoice generated with missing information, Ticketbutler said the discount was: {}\".format(order_id, order['discount']['amount'])))\n\n for ticketbutler_order_line_no, order_line in enumerate(order['order_lines']):\n\n self.process_order_line(order, order_line, ticketbutler_tickets, ticketbutler_order_line_no=ticketbutler_order_line_no)", "def invoice(self, reference_no=None, with_vat=True):\n\n return self.invoice_class(apiobj=self, reference_no=reference_no)", "def invoice(self, invoice_number):\r\n return inv.Invoice(self, invoice_number)", "def _generate_valuation_lines_data(self, partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id):\n self.ensure_one()\n\n rslt = super(StockMove, self)._generate_valuation_lines_data(partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id)\n if self.subcontract_line_id:\n subcontract_currency = self.subcontract_line_id.currency_id\n if subcontract_currency != self.company_id.currency_id:\n subcontract_price_unit = self.subcontract_line_id.price_unit\n currency_move_valuation = subcontract_currency.round(subcontract_price_unit * abs(qty))\n rslt['credit_line_vals']['amount_currency'] = rslt['credit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['credit_line_vals']['currency_id'] = subcontract_currency.id\n rslt['debit_line_vals']['amount_currency'] = rslt['debit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['debit_line_vals']['currency_id'] = subcontract_currency.id\n return rslt", "def _get_query(self, type, date_from=False, date_to=False, users=None, products=None):\n # TODO: Revisar def _create_invoice(self, order, so_line, amount):...\n # so.user_id AS id_salesman\n # AND so.user_id IN (%s)\n # AND pp.id IN (%s)\n # GROUP BY salesman\n\n if type == 'most_sold':\n sql = \"\"\"\n SELECT min(sol.id) AS id, \n so.user_id AS salesman, \n sol.product_id AS product,\n AVG(sol.price_reduce_taxexcl) AS price, \n pp.product_tmpl_id AS product_template,\n so.company_id AS company,\n SUM(sol.product_uom_qty) AS qty,\n SUM(sol.price_subtotal) AS subtotal\n FROM sale_order_line sol\n LEFT JOIN sale_order so ON so.id = sol.order_id\n LEFT JOIN product_product pp ON pp.id = sol.product_id\n LEFT JOIN product_template pt ON pt.id = pp.product_tmpl_id\n WHERE so.state NOT IN ('draft', 'sent', 'cancel')\n AND so.date_order BETWEEN '%s' AND '%s'\n AND so.user_id IN (%s)\n AND pp.id IN (%s)\n GROUP BY salesman, sol.product_id, pp.product_tmpl_id, so.company_id\n ORDER BY qty DESC;\n \"\"\" % (date_from, date_to, ', 
'.join(str(u) for u in users), ', '.join(str(p) for p in products))\n else:\n sql = \"\"\" \n \"\"\"\n self.env.cr.execute(sql)\n return self.env.cr.dictfetchall()", "def prep_incorp_filing(session, identifier, payment_id, option, legal_type=None):\n business = create_business(identifier, legal_type=legal_type, legal_name=LEGAL_NAME)\n filing_template = copy.deepcopy(INCORPORATION_FILING_TEMPLATE)\n filing_template['filing']['business'] = {'identifier': business.identifier}\n if business.legal_type:\n filing_template['filing']['business']['legalType'] = business.legal_type\n filing_template['filing']['incorporationApplication']['nameRequest']['legalType'] = business.legal_type\n for party in filing_template['filing']['incorporationApplication']['parties']:\n for role in party['roles']:\n if role['roleType'] == 'Completing Party':\n party['officer']['email'] = '[email protected]'\n filing_template['filing']['incorporationApplication']['contactPoint']['email'] = '[email protected]'\n\n temp_identifier = 'Tb31yQIuBw'\n temp_reg = RegistrationBootstrap()\n temp_reg._identifier = temp_identifier\n temp_reg.save()\n filing = create_filing(token=payment_id, filing_json=filing_template,\n business_id=business.id, bootstrap_id=temp_identifier)\n filing.payment_completion_date = filing.filing_date\n filing.save()\n if option in ['COMPLETED', 'bn']:\n uow = versioning_manager.unit_of_work(session)\n transaction = uow.create_transaction(session)\n filing.transaction_id = transaction.id\n filing.save()\n return filing", "def make_invoices(self):\n for invoice in self.policy.invoices:\n db.session.delete(invoice)\n db.session.commit()\n\n billing_schedules = {'Annual': None, 'Semi-Annual': 3, 'Quarterly': 4, 'Monthly': 12}\n\n invoices = []\n first_invoice = Invoice(self.policy.id,\n self.policy.effective_date, # bill_date\n self.policy.effective_date + relativedelta(months=1), # due\n self.policy.effective_date + relativedelta(months=1, days=14), # cancel\n self.policy.annual_premium)\n invoices.append(first_invoice)\n\n if self.policy.billing_schedule == \"Annual\":\n pass\n elif self.policy.billing_schedule == \"Two-Pay\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*6\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Quarterly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*3\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Monthly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date 
= i\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n else:\n print \"You have chosen a bad billing schedule.\"\n\n logger.info(str(len(invoices)) + \" invoices generated for policy %s\" % self.policy.id)\n\n for invoice in invoices:\n db.session.add(invoice)\n db.session.commit()", "def createLineSegment(self):\n return _libsbml.Curve_createLineSegment(self)", "def onchange_invoice_id(self):\n # self.invoice_id = False\n # self.base_amount = 0.0\n # self.wh_src_rate = 5.0\n if self._context is None:\n context = {}\n res = {}\n inv_obj = self.env['account.invoice']\n if not self.invoice_id:\n return {'value': {\n 'invoice_id': False,\n 'base_amount': 0.0,\n 'wh_src_rate': 0.0,\n 'wh_amount': 0.0, }\n }\n\n inv_brw = inv_obj.browse(self.invoice_id.id)\n base_amount = self.base_amount or inv_brw.amount_untaxed\n wh_src_rate = self.wh_src_rate or inv_brw.wh_src_rate or 5.0\n wh_amount = base_amount * wh_src_rate / 100.0\n res = {'value': {\n 'base_amount': base_amount,\n 'wh_src_rate': wh_src_rate,\n 'wh_amount': wh_amount,\n }\n }\n return res", "def test_invoice_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performe delete\n self._delete_model(\"invoice\", id_inv)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def get_invoice(self):\n\n # Check if unclosed invoice for the client exists\n old_inv = connection.Kinko.find_one({'cl': self.cl, 'tid': None,\n 'typ': TYPE_MAP[self.tab_type]})\n\n inv_num = None\n # If it does, update its values and update packages\n if old_inv:\n old_inv.dt = datetime.datetime.today()\n old_inv.range.lt = self.q_dict[\"cs.sd\"].get(\"$lt\", None)\n old_inv.save()\n\n inv_num = old_inv.num\n\n else:\n #kinko dict to be updated in Kinko Collection.\n kdict = {\n \"amt\": 0.0,\n \"cl\": unicode(self.cl),\n \"dt\": datetime.datetime.today(),\n \"typ\": TYPE_MAP[self.tab_type],\n \"range\": {\"lt\": self.q_dict[\"cs.sd\"].get(\"$lt\", None),\n \"gt\": self.q_dict[\"cs.sd\"].get(\"$gte\", None),\n }\n }\n\n k = Kinko(kdict)\n\n k_count = 1\n\n #the get num method of Kinko model generates the unique no for new kinko\n k[\"num\"] = self.get_knum(1)\n while connection.Kinko.collection.find({\"num\": k.num}).count() > 0:\n k[\"num\"] = self.get_knum(k_count+1)\n k_count += k_count\n\n connection.Kinko(k).save()\n\n inv_num = k['num']\n\n if inv_num:\n #after creating a new document in Kinko all packages are updated.\n connection.Package.collection.update(self.q_dict, {'$set': {'inv.num': inv_num}}, safe=True, multi=True)\n \n #Aggrigation of remitted amount for requested client\n non_invoiced = kinko_map_reduce(inv_num, TYPE_MAP[self.tab_type])\n\n if len(non_invoiced) == 0:\n return False\n else:\n inv = connection.Kinko.find_one({'num': inv_num})\n if inv:\n inv.amt = non_invoiced[0]['value']['amt']\n inv.save()\n return inv\n else:\n return False\n else:\n return False", "def _generate_invoice_report(self, request, queryset):\n logger.info('Generating invoice report for model {}'.format(\n queryset.model\n ))\n 
data = self._get_report_data(request, queryset)\n content = self._get_pdf_content(data)\n file_name = '{}-{}.pdf'.format(\n self._invoice_report_name, data['id'],\n )\n return generate_pdf_response(content, file_name)", "def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string", "def parse_curves_line(L):\n data = L.split()\n if len(data) != len(column_names['curves']):\n print(\"curves line {} does not have 12 fields, skipping\".format(L))\n return\n label, record = parse_line_label_cols(L)\n\n record['conductor_ideal'] = data[4]\n record['conductor_norm'] = N = ZZ(data[5])\n record['conductor_norm_factors'] = N.support()\n\n record['ainvs'] = data[6]\n record['jinv'] = data[7]\n record['disc'] = disc = data[8]\n if \".\" in disc:\n print(\"Old disc: {}\".format(disc))\n disc = \"({})\".format(ZZ(RR(disc[1:-1])))\n print(\"New disc: {}\".format(disc))\n record['disc'] = disc\n record['normdisc'] = ZZ(data[9])\n from sage.all import sqrt\n record['root_analytic_conductor'] = sqrt(0.00798504020212804*float(N)**(1.0/float(record['degree']))*float(record['abs_disc']))\n #print('root_analytic_conductor = {}'.format(record['root_analytic_conductor']))\n\n eqn = data[10]\n # the reason for doing the following is for the unique field\n # 2.2.5.1 where the field generator is not a single character such\n # as 'a' or 'i' but is '\\phi', and we don't want to have '\\phix'\n # in a latex string (and also do not want any whitespace).\n if \"{x}\" not in eqn:\n eqn = eqn.replace('x', '{x}').replace('y', '{y}')\n record['equation'] = eqn\n\n record['cm'] = cm = ZZ(data[11]) if data[11] != '?' 
else '?'\n # The 'cm_type' column holds +1 for a curve with rational, -1 for\n # potential, 0 if no CM\n if cm:\n if 'CM' in label:\n record['cm_type'] = +1\n else:\n record['cm_type'] = -1\n else:\n record['cm_type'] = 0\n bc = data[12][1:-1]\n record['base_change'] = [str(lab) for lab in bc.split(\",\")] if bc else []\n record['q_curve'] = (data[13] == '1')\n return label, record", "def customerReport(self):\n self._setFormat()\n for cust in self.getCustomerAccountData():\n self.output.write(self.form_line(cust))", "def submit_invoices(self, **kwargs) -> ApiResponse:\n \n return self._request(kwargs.pop('path'), data=kwargs)", "def single_customer(customer_name, customer_file):\n def add_rentals(rental_file):\n with open(customer_file) as rental_csv:\n reader = csv.reader(rental_csv)\n\n add_item = partial(add_furniture, customer_name=customer_name, invoice_file=rental_file)\n\n for row in reader:\n add_item(item_code=row[1], item_description=row[2], item_monthly_price=row[3])\n\n return add_rentals" ]
[ "0.65959746", "0.6386151", "0.6226632", "0.5918294", "0.58827716", "0.5865145", "0.5850917", "0.58120507", "0.57048744", "0.57040673", "0.563682", "0.56352204", "0.5545573", "0.55451196", "0.5510552", "0.5419621", "0.54099417", "0.5400348", "0.53597164", "0.5323142", "0.5315464", "0.5270124", "0.52435124", "0.5240882", "0.52162296", "0.5213667", "0.51926345", "0.517341", "0.51313394", "0.5125631", "0.51112115", "0.5081939", "0.5074783", "0.50673246", "0.50429326", "0.5010643", "0.5000282", "0.49984717", "0.498137", "0.49669737", "0.49606034", "0.49529028", "0.49499092", "0.49228176", "0.4901383", "0.48925513", "0.48765314", "0.48706093", "0.48660222", "0.48381594", "0.48380128", "0.48314586", "0.48243836", "0.48212773", "0.4818863", "0.48081586", "0.4806043", "0.48054406", "0.4793139", "0.47893658", "0.4788507", "0.47757956", "0.4774608", "0.47625008", "0.47567046", "0.47472692", "0.4728076", "0.4709795", "0.4709514", "0.46983492", "0.46938935", "0.46921188", "0.46803868", "0.46799254", "0.46613503", "0.4659308", "0.46527034", "0.46518964", "0.4651329", "0.46348608", "0.46272516", "0.46227017", "0.46178687", "0.45933616", "0.45844883", "0.45786485", "0.45783615", "0.4566699", "0.45534295", "0.4543654", "0.45315665", "0.45271796", "0.45072776", "0.45020026", "0.4485181", "0.44820723", "0.4472809", "0.44694978", "0.44682765", "0.4468009" ]
0.68115205
0
Performs data processing in order to feed data to the prediction algorithm.
def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\ , UnitPrice ): dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\ , 'Quantity':Quantity, 'UnitPrice':UnitPrice} dict_invoice['CustomerID'] = CustomerID dict_invoice['InvoiceNo'] = InvoiceNo df_invoice_line \ = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0]) self.data_transform(df_invoice_line) #self.feature_rfm_encode() self.feature_scale() self.list_feature_drop() self.feature_description_nlp() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, data):\n return self.estimator.predict(data)", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def predict(self, first_preprocessed_inputs,second_preprocessed_inputs,third_preprocessed_inputs):\r\n pass", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def postprocess(self, prediction_dict, **params):\r\n pass", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _predict_all(self, data):\n preds = np.zeros(len(data))\n for row in data.itertuples():\n index, item, _, user = row\n preds[index] = self.predict(user, item)\n return preds", "def predict(self, data):\n\t\traise NotImplementedError", "def preprocess_dataset(self, dataset, params=None):\n if params is None:\n assert self.params_loaded, (\n \"You must either provide parameters or load the model params before preprocessing.\")\n params = self.params\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\"):\n if params.whiten_method == \"FT\": # other methods require patching first\n if hasattr(params, \"whiten_batch_size\"):\n batch_size = params.whiten_batch_size\n else:\n batch_size = None\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data_batch(dataset[key].images, method=params.whiten_method,\n batch_size=batch_size)\n print(\"INFO:preprocessing:FT Whitened \"+key+\" data\")\n if hasattr(params, \"lpf_data\") and params.lpf_data:\n dataset[key].images, dataset[key].data_mean, dataset[key].lpf_filter = \\\n dp.lpf_data(dataset[key].images, cutoff=params.lpf_cutoff)\n print(\"INFO:preprocessing:Low pass filtered \"+key+\" data\")\n if hasattr(params, \"contrast_normalize\") and params.contrast_normalize:\n if hasattr(params, \"gauss_patch_size\"):\n dataset[key].images = dp.contrast_normalize(dataset[key].images,\n params.gauss_patch_size)\n else:\n dataset[key].images = dp.contrast_normalize(dataset[key].images)\n print(\"INFO:preprocessing:Contrast normalized \"+key+\" data\")\n if hasattr(params, \"standardize_data\") and params.standardize_data:\n if params.data_type == \"mnist\":\n eps = 1e-5\n else:\n eps = None\n dataset[key].images, dataset[key].data_mean, dataset[key].data_std = \\\n dp.standardize_data(dataset[key].images, eps)\n self.data_mean = dataset[key].data_mean\n self.data_std = dataset[key].data_std\n print(\"INFO:preprocessing:Standardized \"+key+\" data\")\n if hasattr(params, \"extract_patches\") and params.extract_patches:\n assert all(key in params.__dict__.keys()\n for key in [\"num_patches\", \"patch_edge_size\", \"overlapping_patches\",\n \"randomize_patches\"]), (\"Insufficient params for patches.\")\n out_shape = (int(params.num_patches), int(params.patch_edge_size),\n int(params.patch_edge_size), dataset[key].num_channels)\n dataset[key].num_examples = out_shape[0]\n dataset[key].reset_counters()\n if hasattr(params, \"patch_variance_threshold\"):\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n params.patch_variance_threshold, dataset[key].rand_state)\n 
else:\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n var_thresh=0, rand_state=dataset[key].rand_state)\n dataset[key].shape = dataset[key].images.shape\n dataset[key].num_rows = dataset[key].shape[1]\n dataset[key].num_cols = dataset[key].shape[2]\n dataset[key].num_channels = dataset[key].shape[3]\n dataset[key].num_pixels = np.prod(dataset[key].shape[1:])\n print(\"INFO:preprocessing:Extracted patches from \"+key+\" data\")\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\") and params.whiten_method != \"FT\":\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data(dataset[key].images, method=params.whiten_method)\n print(\"INFO:preprocessing:Whitened \"+key+\" data\")\n if hasattr(params, \"norm_data\") and params.norm_data:\n dataset[key].images, dataset[key].data_max = dp.normalize_data_with_max(dataset[key].images)\n self.data_max = dataset[key].data_max\n print(\"INFO:preprocessing:Normalized \"+key+\" data with maximum\")\n if hasattr(params, \"rescale_data\") and params.rescale_data:\n dataset[key].images, dataset[key].data_min, dataset[key].data_max = dp.rescale_data_to_one(dataset[key].images)\n self.data_max = dataset[key].data_max\n self.data_min = dataset[key].data_min\n print(\"INFO:preprocessing:Rescaled each \"+key+\" datapoint to one\")\n if hasattr(params, \"center_data\") and params.center_data:\n dataset[key].images, dataset[key].data_mean = dp.center_data(dataset[key].images,\n use_dataset_mean=True)\n self.data_mean = dataset[key].data_mean\n print(\"INFO:preprocessing:Centered \"+key+\" data\")\n return dataset", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)", "def predict(self, data: List):", "def doPredict(self, data: StockData) -> float:\r\n pass", "def prediction_data(median_split,mean_split,std_split,degrees_split,weight_split,export_file):\n DATA_TEST_PATH = '../data/test.csv' # Download train data and supply path here \n print('\\nIMPORTING TESTING DATA :',end=\" \")\n y_test, tX_test, ids_test = load_csv_data(DATA_TEST_PATH)\n print('DONE')\n \n #5.a. Splitting the testing data\n print('SPLITTING TESTING DATA :',end=\" \")\n y_test_split,tx_test_split,id_test_split = split_dataset(y_test,tX_test,ids_test) \n print('DONE') \n #5.b. prediction on each model\n y_pred = list()\n \n for split,(y_test_s,tx_test_s,id_test_s) in enumerate(zip(y_test_split,tx_test_split,id_test_split)): \n print('PREDICTION FOR TESTING DATA SPLIT NUMBER',split)\n \n #Formatting to the correct datatype\n y_test_s = np.squeeze(y_test_s)\n tx_test_s = np.squeeze(tx_test_s)\n id_test_s = np.squeeze(id_test_s)\n print('Size of the vectors',y_test_s.shape,tx_test_s.shape) \n #Formatting the data themselves\n print('Counting NaN',end='. ')\n tx_test_s = count_NaN(tx_test_s)\n print('Sanitizing',end = ' . 
')\n tx_test_s,median_vec = sanitize_NaN(tx_test_s,median_split[split])\n print('Standardizing',end = ' .')\n tx_test_s,mean_te,std_te = standardize(tx_test_s,mean_split[split],std_split[split])\n print('Building polynomial basis') \n tx_test_s = build_poly(tx_test_s, degrees_split[split])\n \n #Prediction\n y_pred.append(predict_labels(np.array(weight_split[split]), np.array(tx_test_s))) \n \n print('MERGING TESTING DATA',end=\"\")\n y_pred_merged, ids_merged = merge_dataset(y_pred,id_test_split)\n print('DONE')\n \n OUTPUT_PATH = 'results/output_sanitized_normalization_'+export_file+'.csv' \n print('EXPORTING TESTING DATA WITH PREDICTIONS :',end=\" \")\n \n create_csv_submission(ids_merged, y_pred_merged, OUTPUT_PATH)\n print('DONE')", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)", "def predict(self, datafile):", "def preprocess(data):\n raise NotImplementedError", "def postprocess(self, data):\n if self.error is not None:\n return [self.error]\n\n # Iterating over inference results to render the normalized probabilities\n response = []\n for inference_result in data:\n softmax_result = inference_result.softmax().asnumpy()\n for idx, label in enumerate(self.labels):\n response.append({label: float(softmax_result[0][idx])})\n return [response]", "def preprocessing(dataset):\r\n # upload the processed time series data to its distinct numpy arrays\r\n print('')\r\n training_input = []\r\n training_output = []\r\n validation_input = []\r\n validation_output = []\r\n loop = tqdm.tqdm(total = len(dataset), position = 0, leave = False)\r\n for d in range(len(dataset)):\r\n loop.set_description('Packaging all processed time series data... ' .format(len(dataset)))\r\n time_series = dataset[d]\r\n if time_series.get_dataset_label() == \"TRAINING\":\r\n training_input.append(time_series.sampled_matrix())\r\n training_output.append(time_series.get_close_value())\r\n else:\r\n validation_input.append(time_series.sampled_matrix())\r\n validation_output.append(time_series.get_close_value())\r\n loop.update(1)\r\n\r\n training_input, training_output = np.array(training_input), np.array(training_output)\r\n training_input = np.reshape(training_input, (training_input.shape[0], training_input.shape[1], 1))\r\n validation_input, validation_output = np.array(validation_input), np.array(validation_output)\r\n validation_input = np.reshape(validation_input, (validation_input.shape[0], validation_input.shape[1], 1))\r\n print('\\n')\r\n loop.close()\r\n return training_input, training_output, validation_input, validation_output", "def pre_process(self, dataset):\n\n # np.empty creates an empty array only. 
You have to replace this with your code.\n X = np.empty((0,0))\n y = np.empty((0))\n\n if dataset == 0:\n # Implement for the abalone dataset\n df = pd.DataFrame(columns=['sex', 'length', 'diameter', 'height', 'whole_weight', 'shucked_weight', 'viscera_weight', 'shell_weight', 'rings'])\n count = 0\n\n with open('Dataset.data') as file: # reading data from file\n data = file.read()\n\n data = data.split('\\n') # split data into different rows\n data = data[:-1] # last one is empty\n for row in data:\n row = row.split()\n df.loc[count] = row # add in dataframe\n count += 1\n\n df['M'] = np.where(df.sex=='M', 1,0) # genders are turned to a one hot encoding\n df['F'] = np.where(df.sex=='F', 1,0)\n df['I'] = np.where(df.sex=='I', 1,0)\n df = df.drop(['sex'], axis=1)\n df = df.dropna()\n\n df = df.sample(frac=1).reset_index(drop=True) # shuffle dataframe\n\n X = df.drop(['rings'], axis=1)\n X = X.values\n X = X.astype(float)\n y = df['rings'].values\n y = y.astype(float)\n\n elif dataset == 1:\n # Implement for the video game dataset\n df = pd.read_csv('VideoGameDataset - Video_Games_Sales_as_at_22_Dec_2016.csv') # read csv directly into a dataframe\n df1 = df[['Critic_Score', 'User_Score', 'Global_Sales']]\n df1 = df1.dropna()\n df1 = df1[df1.User_Score != 'tbd']\n\n df1 = df1.sample(frac=1).reset_index(drop=True) # shuffle rows\n\n X = df1.drop(['Global_Sales'], axis=1)\n X = X.values\n X = X.astype(float)\n y = df1['Global_Sales'].values\n y = y.astype(float)\n\n elif dataset == 2:\n # Implement for the banknote authentication dataset\n df = pd.DataFrame(columns=['variance', 'skewness', 'curtosis', 'entropy', 'class'])\n count = 0\n\n with open('data_banknote_authentication.txt') as file: # reading file \n data = file.read()\n data = data.split('\\n')\n data = data[:-1]\n for row in data:\n row = row.split(',')\n df.loc[count] = [float(elt) for elt in row[:-1]] + [int(row[-1])] # last column has class so it is int rest are float\n count += 1\n\n df = df.sample(frac=1).reset_index(drop=True) # shuffle dataset\n\n X = df.drop(['class'], axis=1)\n X = X.values\n y = df['class'].values\n y = y.astype(int)\n\n return X, y", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if 
self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def _preprocess(self, data):\n\n # pipeline: first call the previous statistics:\n if self.previous_statistics is not None:\n data = self.previous_statistics.statistics(data)\n # the first of the statistics need to take list as input, in order to match the API. Then actually the\n # transformations work on np.arrays. In fact the first statistic transforms the list to array. Therefore, the\n # following code needs to be called only if the self statistic is the first, i.e. it does not have a\n # previous_statistic element.\n else:\n data = self._check_and_transform_input(data)\n\n return data", "def predict(self, corpus, preprocessed_information, timeline_properties, params):\n sents = [sent for doc in corpus for sent in doc]\n\n random.shuffle(sents)\n\n post_processed = post_processing.post_process(\n sents,\n None,\n timeline_properties.daily_summary_length,\n timeline_properties.num_dates,\n timeline_properties.start,\n timeline_properties.end\n )\n\n return post_processed", "def process_data(data, labels):\n\t\n\t# Split the dataset of string into train, validation, and test \n\t# Use a 70/15/15 split\n\t# train_test_split shuffles the data before splitting it \n\t# Stratify keeps the proportion of labels the same in each split\n\n\t# -- WRITE THE SPLITTING CODE HERE --\n\t# Split the data into 70 percent train and 30 percent test and validate data\n\ttrain_X, test_X_split, train_Y, test_Y_split = train_test_split(data, labels, test_size=0.30, stratify=labels,random_state= 1)\n\t# Split the remaining 30 percent data into 15 percent test and validate data each\n\ttest_X, val_X, test_Y, val_Y = train_test_split(test_X_split, test_Y_split, test_size=0.50, stratify=test_Y_split, random_state= 1)\n\n\t# Preprocess each dataset of strings into a dataset of feature vectors\n\t# using the CountVectorizer function. 
\n\t# Note, fit the Vectorizer using the training set only, and then\n\t# transform the validation and test sets.\n\n\t# -- WRITE THE PROCESSING CODE HERE --\n\t# Preprocess dataset using CountVectorizer from ngram range of 1 to 3\n\tvector = CountVectorizer(ngram_range=(1,3))\n\t# Fit data on train dataset\n\ttrain_X = vector.fit_transform(train_X)\n\t# Transform data on test dataset\n\ttest_X = vector.transform(test_X)\n\t# Transform data on validate dataset.\n\tval_X = vector.transform(val_X)\n\t# Return the training, validation, and test set inputs and labels\n\treturn train_X, train_Y, val_X, val_Y, test_X, test_Y\n\t# -- RETURN THE ARRAYS HERE -- ", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def run_prediction(data, sess, placeholders, scores): \r\n \r\n X, Y = data\r\n source_pl, target_pl, training_pl = placeholders\r\n split_set = int(math.floor(0.9*len(X))) # Value for spliting the set in 90% training / 10% test \r\n \r\n X_batch, Y_batch = batch(X[:split_set:], Y[:split_set:]) # Take a batch of sentences from the training set\r\n feed_dict = {source_pl: X_batch, target_pl: Y_batch, training_pl: False}\r\n prediction = sess.run(scores, feed_dict=feed_dict) \r\n X_batch = np.squeeze(X_batch) \r\n prediction = np.squeeze(prediction)\r\n \r\n source_sentences = treatment.sentences_from_one_hot(X_batch, CORPUS_ENGLISH)\r\n target_sentences = treatment.sentences_from_one_hot(prediction, CORPUS_FRENCH)\r\n\r\n\t# Print the pairs source -> target\r\n for source, target in zip(source_sentences, target_sentences): \r\n print(source + \" --> \" + target)\r\n \r\n print(\"================================================================\")\r\n \r\n X_batch, Y_batch = batch(X[split_set+1:], Y[split_set+1:]) # Take a batch of sentences from the test set\r\n feed_dict = {source_pl: X_batch, target_pl: Y_batch, training_pl: False}\r\n prediction = sess.run(scores, feed_dict=feed_dict) \r\n \r\n X_batch = np.squeeze(X_batch) \r\n prediction = np.squeeze(prediction)\r\n \r\n \r\n source_sentences = treatment.sentences_from_one_hot(X_batch, CORPUS_ENGLISH)\r\n target_sentences = treatment.sentences_from_one_hot(prediction, CORPUS_FRENCH)\r\n\r\n\t# Print the pairs source -> target\r\n for source, target in zip(source_sentences, target_sentences): \r\n print(source + \" --> \" + target)", "def update_predictions(data):\n # TODO: Priority 1 - update predictions with inference results\n # TODO: Understand from a research team exactly what the data is going to look like\n trackID = data[0]\n prediction = data[1]\n confidence = data[2]\n to_Insert_Array = [trackID, prediction, confidence]\n OutPut_Data[trackID] = to_Insert_Array", "def process(self, data):\n allocating = (self._output is None)\n ind = 0\n for i, (name, feature) in enumerate(self.features):\n if allocating:\n x = feature.compute(data)\n self.feature_indices[name] = (ind, ind+x.size)\n ind += x.size\n\n if self._output is None:\n self._output = x\n else:\n self._output = np.hstack([self._output, x])\n else:\n self._output[self.feature_indices[name][0]:\n self.feature_indices[name][1]] = \\\n feature.compute(data)\n\n return self._output", "def predict(self, data, **kwargs):\n # Todo: if local_search = true then use optimized pipe here?\n if self._pipe:\n return self.optimum_pipe.predict(data, **kwargs)", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def 
predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def preprocess(self, data, label):\n\t\traise NotImplementedError", "def algo(self):\n audio = np.array([self.audio.popleft() for _ in range(self.count)])\n # Run Classifier\n wav_data = np.abs(np.fft.rfft(audio.flatten()))\n if len(wav_data) > 0:\n pred = self.clf.predict(np.expand_dims(wav_data, 0))\n if self.verbose > 1:\n print('The prediction is : ' + str(pred))\n self.finished.emit(int(pred[-1]))\n else:\n self.finished.emit(0)", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def _prediction_loop(self, dataloader: DataLoader, description: str) -> PredictionOutput:\n\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", len(dataloader.dataset))\n logger.info(\" Batch size = %d\", dataloader.batch_size)\n eval_losses: List[float] = []\n preds: np.ndarray = None\n label_ids: np.ndarray = None\n\n for inputs in tqdm(dataloader, desc=description):\n has_labels = any(inputs.get(k) is not None for k in [\"labels\", \"masked_lm_labels\"])\n\n for k, v in inputs.items():\n inputs[k] = v.to(self.args.device)\n\n with torch.no_grad():\n outputs = self.model.eval_step(**inputs)\n\n if has_labels:\n step_eval_loss, logits = outputs[:2]\n eval_losses += [step_eval_loss.mean().item()]\n else:\n logits = outputs[0]\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n if inputs.get(\"labels\") is not None:\n if label_ids is None:\n label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n label_ids = np.append(label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n if len(eval_losses) > 0:\n metrics[\"loss\"] = np.mean(eval_losses)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def _predict(self):\n \n # Initialization\n x_k = np.random.rand(4,0)\n w_k = np.ones((1,0))\n \n self.reset(phd_only=True)\n \n # Prediction of the targets' positions\n for k in range(1, self.n_time_steps+1):\n\n if k in self.observed_data.keys():\n\n # perform phd filter\n\n x_k1, w_k1, estimated_x_k1, n_targ_pred = self.PHDfilter(x_k, w_k, self.observed_data[k], k)\n\n # save predicted positions and update parameters\n self.phd_filter['n_targets_predicted'][k] = n_targ_pred\n self.phd_filter['particles_positions'][k] = x_k1\n if estimated_x_k1 is not None:\n self.phd_filter['estimated_positions'][k] = estimated_x_k1\n x_k, w_k = np.copy(x_k1), np.copy(w_k1)\n\n else:\n self.phd_filter['n_targets_predicted'][k] = 0", "def predict(self, data_in):\n 
pass", "def postprocess(self, data):\n all_predictions, all_nbest_json, scores_diff_json = predictions(self._dev_dataset,\n data,\n self._tokenizer)\n\n if len(all_nbest_json) == 0 or len(all_nbest_json[0]) == 0:\n return [{'predicted': '',\n 'confidence': 0}]\n\n return [{'predicted': all_nbest_json[0][0]['text'],\n 'confidence': all_nbest_json[0][0]['probability']}]", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def main(**kwargs):\n data_file = kwargs.get('data_file', None)\n predict_unlabelled = kwargs.get('predict_unlabelled', False)\n output_preds = kwargs.get('output_preds', True)\n eval_results = kwargs.get('eval_results', True)\n\n # Prepare run_str\n run_str = datetime.now().strftime('%Y%m%d%H%M')\n\n initialise_print_logger('logs/prediction-' + run_str + '.log')\n\n print('Starting sharecast prediction:', run_str)\n\n # Load and divide data\n share_data = load_data(data_file)\n gc.collect()\n\n print('Number of \"NA\" symbols:',\n share_data[share_data['symbol'] == 'NA'].shape[0])\n\n # Divide data into symbols and general data for training an testing\n if predict_unlabelled:\n # Only return x values\n df_all_x, df_symbol_date = prepare_data_for_model(share_data, False)\n else:\n # Return x and y values\n df_all_x, df_all_y, df_all_actuals, df_symbol_date = prepare_data_for_model(\n share_data, True)\n\n del df_all_y\n\n del share_data\n gc.collect()\n\n print('Number of \"NA\" symbols:',\n df_symbol_date[df_symbol_date['symbol'] == 'NA'].shape[0])\n\n # Retain model names for train and test\n print('Retaining model name data. Number of rows:', len(df_all_x))\n model_names = df_all_x['model'].values\n gics_sectors = df_all_x['GICSSector'].values\n gics_industry_groups = df_all_x['GICSIndustryGroup'].values\n gics_industries = df_all_x['GICSIndustry'].values\n\n # Fix the names used in the GICS data - remove '&' ',' and ' '\n gics_sectors = fix_categorical(gics_sectors)\n gics_industry_groups = fix_categorical(gics_industry_groups)\n gics_industries = fix_categorical(gics_industries)\n\n # Drop model names and GICS values\n df_all_x = df_all_x.drop(\n ['model', 'GICSSector', 'GICSIndustryGroup', 'GICSIndustry'], axis=1)\n\n print('Loading pre-processing models')\n # Load pre-processing models\n symbol_encoder = load('models/se.pkl.gz')\n imputer = load('models/imputer.pkl.gz')\n scaler = load('models/scaler.pkl.gz')\n\n print('Executing pre-processing. Number of rows:', len(df_all_x))\n # Execute pre-processing\n df_all_x = execute_preprocessor(df_all_x, symbol_encoder, imputer, scaler)\n\n print('Loading keras models. 
Number of rows:', len(df_all_x))\n # Load keras models\n keras_models = {\n 'mape_model': load_model('models/keras-mape-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n 'mae_model': load_model('models/keras-mae-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n 'mae_intermediate_model': load_model('models/keras-mae-intermediate-model.h5',\n custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n }\n\n print('Loading xgboost model list')\n xgb_models = load_xgb_models()\n\n print('Loading xgboost industry model list')\n xgb_industry_models = load_xgb_models('industry')\n\n predictions = execute_model_predictions(\n df_all_x, model_names, gics_industry_groups, xgb_models, xgb_industry_models, keras_models)\n\n print('Loading bagging models')\n bagging_model = load_model('models/keras-bagging-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n })\n bagging_scaler = load('models/deep-bagging-scaler.pkl.gz')\n deep_bagged_predictions = execute_deep_bagging(\n bagging_model, bagging_scaler, predictions)\n predictions['deep_bagged_predictions'] = deep_bagged_predictions\n\n if eval_results:\n assess_results(predictions, model_names, df_all_actuals, run_str)\n\n if output_preds:\n output_predictions(predictions, df_symbol_date, run_str)\n\n print('Prediction completed')", "def __predict(self):\n frame_ind = 0\n while True:\n if not self.__queue_frame.empty():\n frame_ind += 1\n frame = self.__queue_frame.get()\n self.detector_lock.acquire()\n rects, probs, classesID = self.detect_frame(frame)\n valid_rects, valid_scores, valid_distances = self.check_valid_detection(rects, probs, classesID)\n\n cnt_in, cnt_out = self.tracker.update(frame_ind, valid_rects, valid_distances)\n if self.direction:\n self.total_in += cnt_in\n self.total_out += cnt_out\n else:\n self.total_in += cnt_out\n self.total_out += cnt_in\n\n self.detector_lock.release()\n self.__queue_predict.put((valid_rects, valid_scores, valid_distances, frame))", "def predict(self, data_dict, label_dict, phases=[\"test\"]):\n loaders = self.init_loaders_predict(data_dict, label_dict)\n loss_dict = self.init_loss_dict(phases=phases)\n performance_dict = self.init_performance_dict(phases=phases)\n self.model.train(False)\n with torch.no_grad():\n output_dict_dict = {}\n for phase in phases:\n i = 0\n running_loss_dict = self.init_running_loss_dict(\n list(loss_dict[phase].keys())\n )\n output_dict = self.init_output_dict()\n for the_data in loaders[phase]:\n i += 1\n inputs, labels = self.transform_batch(the_data)\n outputs = self.model(inputs)\n output_dict = self.update_output_dict(output_dict, outputs, labels)\n loss_dict[\"loss\"] = self.criterion(outputs, labels)\n running_loss_dict[\"loss\"] += loss_dict[\"loss\"].item()\n\n # Compute epoch losses and update loss dict\n epoch_loss_dict = {\n key: running_loss_dict[key] / i for key in running_loss_dict.keys()\n }\n loss_dict[phase] = self.update_metric_dict(\n loss_dict[phase], epoch_loss_dict\n )\n # Compute epoch performance and update performance dict\n epoch_statistics = self.compute_epoch_statistics(output_dict)\n performance_dict[phase] = self.update_metric_dict(\n performance_dict[phase], epoch_statistics\n )\n output_dict_dict[phase] = 
self.finalize_output_dict(output_dict)\n result_dict = {\n phase: {**performance_dict[phase], **loss_dict[phase]}\n for phase in performance_dict.keys()\n }\n return output_dict_dict, result_dict", "def run(self, images):\n\n # Apply filtering\n if len(self.preprocessing) > 0: \n print('Applying', len(self.preprocessing), 'filter(s) to input images')\n for filter in self.preprocessing:\n for i in range(len(images)):\n images[i] = filter(images[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from input images')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in range(len(images)):\n features = []\n for feature in self.features:\n features.append(feature(images[i]))\n images[i] = np.hstack(features)\n images = scaler.fit_transform(images)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Run predictions\n print('Predicting presence of parasites in', len(images), 'images\\n')\n return self.classifier.predict(images)", "def prepare_reg_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n\r\n feature_X_user,affect_dataframe, affect_index_dataframe = df, df, df\r\n emo_X_test_dict = {}\r\n affect_index_dict ={}\r\n\r\n for emotion, model_prop in model_dict.items():\r\n #Get the data with the emotion class\r\n if user_keyword == 'validation':\r\n affect_dataframe = dataframe[dataframe['Affect Dimension'] == 1]\r\n affect_index_list = dataframe.index[dataframe['Affect Dimension'] == 1].tolist()\r\n else:\r\n affect_dataframe = dataframe[dataframe[emotion] == 1]\r\n affect_index_list = dataframe.index[dataframe[emotion] == 1].tolist()\r\n test_tweets = affect_dataframe.iloc[:, [0, 1, 2]]\r\n\r\n #Perform preprocessing, feature extraction and transformation for the tweets to be predicted\r\n print(emotion, test_tweets.shape)\r\n if test_tweets.empty == False:\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_r_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_r_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n print(emotion, 'train-shape', train_vect_df.shape, sep='\\n')\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.DataFrame(pd.concat([test_vect_df, feature_X_user], axis=1)) #####?\r\n emo_X_test_dict[emotion] = X_test\r\n affect_index_dict[emotion] = affect_index_list\r\n else:\r\n 
emo_X_test_dict[emotion] = pd.DataFrame\r\n affect_index_dict[emotion] = []\r\n\r\n return emo_X_test_dict, affect_index_dict", "def process(self):\n # check already processed\n proc_dir = os.path.join(self.root, self.processed_dir)\n train_path = os.path.join(proc_dir, self.train_fn)\n test_path = os.path.join(proc_dir, self.test_fn)\n if os.path.exists(train_path) and os.path.exists(test_path):\n # already exists => load process file\n print(\"processed dataset already exists; load it\")\n self.train_data = torch.load(train_path)\n self.test_data = torch.load(test_path)\n return\n\n # read and process raw data\n print(\"read and process raw dataset ...\")\n label_path = os.path.join(self.root, self.raw_dir, \"labels.txt\")\n image_path_format = os.path.join(self.root, self.raw_dir, \"img_{}.png\")\n \n with open(label_path) as f:\n for line in f:\n if not line.strip():\n break\n \n idx, label = map(int, line.strip().split('\\t'))\n image_path = image_path_format.format(idx)\n image = load_image(image_path)\n \n if idx <= self.split:\n self.train_data.append((image, label))\n elif idx > self.split:\n self.test_data.append((image, label))\n\n # write processed file\n if not os.path.exists(proc_dir):\n os.mkdir(proc_dir)\n\n with open(train_path, 'wb') as f:\n torch.save(self.train_data, f)\n with open(test_path, 'wb') as f:\n torch.save(self.test_data, f)\n\n print(\"Done!\")", "def predict_data(self, data, context = {}):\n datapoints = policy_model.policy2datapoint(data)\n result = self.predict_datapoint(datapoints, context)\n return result", "def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities", "def post_processing(predictions, indexes, k=5):\n\n processed_predictions = []\n all_scores = []\n for idx, prediction in tqdm(enumerate(predictions)):\n # line_input = np.asarray(line_input,np.str)\n scores = np.asarray(np.reshape(prediction, newshape=(-1,)))\n orders = np.argsort(scores, axis=-1)[::-1]\n scores = scores[orders]\n sents_indexes = np.asarray(indexes[idx])\n processed_predictions.append(sents_indexes[orders][:k])\n all_scores.append(scores[:k])\n return processed_predictions, all_scores", "def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def process(dataset, result):\n global AR_TYPE\n if AR_TYPE.startswith('fixed'):\n ar_authors = loader.get_fixed_authors()\n else:\n ar_authors = dataset.authors[0:40]\n\n tups = []\n for unknown in ar_authors:\n tups.append((unknown, dataset))\n\n pool = Pool(processes=NUMBER_OF_CORES)\n it = pool.imap(process_distance_unknown, tups)\n pool.close()\n pool.join()\n\n for unknown in ar_authors:\n distance_results = it.next()\n for distance_result in distance_results:\n 
[ar_size, position, distance] = distance_result\n result.add(ar_size, unknown, position, distance)\n return", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in 
{\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def _predict(self, testX):\n pass", "def predict(self, data):\r\n return self.sess.run([self.predict_op, self.Mu], feed_dict={self.X: data})", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. 
start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def predict(self, model, context, data):\n pass", "def _process(self):\n # choose the correct transform model before processing TI data\n self._select_transform()\n\n # process type first, fail early\n self._process_type()\n\n # process type specific data\n if isinstance(self.transform, GroupTransformModel):\n self._process_group()\n elif isinstance(self.transform, IndicatorTransformModel):\n self._process_indicator()\n\n # self.process_associations(self.transform.associations)\n self._process_associated_group(self.transform.associated_groups)\n self._process_attributes(self.transform.attributes or [])\n self._process_security_labels(self.transform.security_labels or [])\n self._process_tags(self.transform.tags or [])\n\n # date added\n self._process_metadata_datetime('dateAdded', self.transform.date_added)\n\n # last modified\n self._process_metadata_datetime('lastModified', self.transform.last_modified)\n\n # xid\n self._process_metadata('xid', self.transform.xid)", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def walk_forward_prediction(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n predictions_by_model = {}\r\n pred_metadata_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.pred_indices = self.pred_indices\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.svm_optimal_params = self.optimal_params_by_output[output_name]['SVM']\r\n svm.run_svm_prediction()\r\n predictions_by_model['SVM'] = svm.svm_predictions\r\n pred_metadata_by_model['SVM'] = svm.metadata\r\n \r\n self.predictions_by_output[output_name] = predictions_by_model\r\n self.pred_metadata_by_output[output_name] = pred_metadata_by_model", "def train(self, data):\n pass", "def predict(self, X):\n N, D = X.shape\n\n # init prediction array\n prediction = np.array([-1] * N)\n\n # retrieve the probability of predicting fraud for each model (K models)\n predict_proba_fraud = [-1] * self.K\n\n # we do the computation for all input test examples\n for i, instance in enumerate(X):\n sum_weight = 0\n F_k = 0\n\n # for k in= {1,2.....K} do\n k = -1\n for model in self.models.islice(start=0, stop=self.K, reverse=True):\n k += 1\n clf = model.clf\n sum_weight += model.weight\n\n # (1) compute the corresponding Fk(x)\n # compute one part of Fk(y) with the weights (be careful: sum_weight may be 0)\n F_k = (F_k * sum_weight) / sum_weight if sum_weight != 0 else 0\n\n # if the probability is not initialized we call the predict proba 
method\n if (type(predict_proba_fraud[k]) is int and predict_proba_fraud[k] == -1) \\\n or (predict_proba_fraud[k].shape[0] != self.S):\n predict_proba_fraud[k] = clf.predict_proba(self.X_chunk)\n\n # if we don't have the probability of predicting fraud --> p = 0, do nothing\n if len(predict_proba_fraud[k][i]) == 2:\n F_k += (model.weight * predict_proba_fraud[k][i][1]) / sum_weight\n\n # (2) we assign Fk value to a bin j\n t_y = instance[-1] # amount of the transaction (in the last column of the features)\n found = False # found: if a label has been decided (deal with 2 for's break)\n j = 0\n eps = len(self.bins)\n\n # while we haven't found the bin AND no prediction has not yet been given\n while j < eps and not found:\n stat = self.bins[j][k]\n\n # find the bin i y belongs to\n if (j / eps) <= F_k < ((j + 1) / eps):\n # (3) apply rule (10) for this bin (What if the amount is 0 ?)\n if t_y != 0:\n if F_k - stat['mean'] - self.t * stat['var'] > (self.cost / t_y): # FRAUD\n found = True\n prediction[i] = 1\n elif F_k + stat['mean'] + self.t * stat['var'] <= (self.cost / t_y): # NON-FRAUD\n found = True\n prediction[i] = 0\n else:\n found = True\n prediction[i] = 0\n\n j = j + 1\n\n if found: # if we found a value we go to the next example\n break\n\n # (4) if no classifier left i.e. we have consulted every classifier without having an answer\n # --> prediction[i] is not yet given\n if prediction[i] == -1:\n if instance[-1] != 0 and F_k > self.cost / instance[-1]: # instance[-1] is just t(y)\n prediction[i] = 1\n else:\n prediction[i] = 0\n\n return prediction", "def do_instance_pruning(self):\n\n # retrieve the probability of predicting fraud for each model (K models)\n # size: K x ChunkSize x 2 (2 for binary labels)\n predict_proba_fraud = [-1] * self.K\n\n # for each instance in the data chunk\n for i, instance in enumerate(self.y_chunk):\n sum_weight = 0\n current_F = 0\n F_vect = np.zeros(self.K) # Fk at each stage\n\n # compute F_k(y) for k = 1...K - the classifiers are sorted in DESCENDING order of weights\n k = -1\n for model in self.models.islice(start=0, stop=self.K, reverse=True):\n k += 1\n clf = model.clf\n sum_weight += model.weight\n\n # compute the current probability\n # if the probability is not initialized we call the `predict_proba` method\n if (type(predict_proba_fraud[k]) is int and predict_proba_fraud[k] == -1) \\\n or (predict_proba_fraud[k].shape[0] != self.S):\n predict_proba_fraud[k] = clf.predict_proba(self.X_chunk)\n\n # check if we have the probabilities of 2 labels (because we're working with BINARY classification)\n # if we don't have the probability of predicting fraud it will be 0 so we don't do anything\n if len(predict_proba_fraud[k][i]) == 2:\n current_F += model.weight * predict_proba_fraud[k][i][1]\n\n # (2) compute the Fk for each example seen at each stage\n F_k = current_F / sum_weight\n F_vect[k] = F_k\n\n # (3) compute the error\n err_x = F_vect - F_vect[-1]\n\n # (4) update the mean and the variance of the error of these training examples for each bin (i,k)\n # we look at the error at each step for the given example\n for k, err in enumerate(err_x):\n # 1 --> we assign Fk to the corresponding bin (i,k) or (j,k)here because we used i index before\n eps = len(self.bins)\n\n for j in range(0, eps):\n if (j / eps) <= F_vect[k] < ((j + 1) / eps):\n self.bins[j][k]['num'] += 1\n\n # 2--> we compute the mean error in this bin\n self.bins[j][k]['mean'] += err\n\n # 2--> we compute the variance of the error in this bin\n # (basically we will just compute 
the squared error and do the division later)\n self.bins[j][k]['var'] += err ** 2\n\n # if we've assigned it to a bin, break and go to the next stage\n break\n\n # after computing everything we do the division by the total number assigned to a bin\n for i in range(0, len(self.bins)):\n # a bit tricky because sometimes we have bins that don't have any input example --> remains at 0\n for k in range(self.K):\n if self.bins[i][k]['num'] > 0:\n # divide the sum of error by the number of examples in the bin\n self.bins[i][k]['mean'] = self.bins[i][k]['mean'] / self.bins[i][k]['num']\n\n # compute the variance\n self.bins[i][k]['var'] = (self.bins[i][k]['var'] / self.bins[i][k]['num']) - \\\n (self.bins[i][k]['mean']) ** 2", "def predict(self, data, *args, **kwargs):\n return self._predict(data, async=False, *args, **kwargs)", "def prediction_processing(dataset_path, predictions):\n\n final_predictions = []\n jsr = JSONLineReader()\n\n with open(dataset_path, \"r\") as f:\n lines = jsr.process(f)\n prediction_processing_no_reload(lines, predictions)\n\n return final_predictions", "def __process_element(data):\n print('prosessing {}'.format(data))\n x_i = data[0]\n y_i = data[1]\n\n file_name = FeatureExtractor.get_file_name(x_i, feature_name)\n try:\n # try to load if file already exist\n np.load(out_path / file_name, allow_pickle=True)\n print('info: {} loaded from .npy !'.format(file_name))\n new_labels.append([file_name, y_i])\n except FileNotFoundError or OSError or EOFError:\n # OSError and EOFError are raised if file are inconsistent\n voice_activation = np.load(source_path / x_i, allow_pickle=True)\n mean_voice_activation = FeatureExtractor.get_mean_voice_activation(voice_activation[0])\n # this is kind-of standard\n FeatureExtractor.save_feature([voice_activation[0], mean_voice_activation], feature_name, out_path, x_i,\n y_i, new_labels)", "def _predict(self, X):\n raise NotImplementedError", "def prepare_class_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n test_tweets = dataframe.iloc[:, [0, 1, 2]]\r\n\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n feature_X_user = pd.DataFrame\r\n emo_X_test_dict = {}\r\n\r\n\r\n for emotion, model_prop in model_dict.items():\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n print(emotion + 'TRAIN', train_vect.shape)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = 
pd.concat([test_vect_df, feature_X_user], axis=1)\r\n emo_X_test_dict[emotion] = X_test\r\n print(emotion + 'TEST', test_vect_df.shape, X_test.shape)\r\n return emo_X_test_dict", "def predict_all():\n\n # need train dir to list category names\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n eval_type = cfg.get('args', 'eval_type')\n train_xml_dir = os.path.join(base, cfg.get('data', 'train_xml_dir'))\n\n if eval_type == 'sparse':\n predict_sparse(train_xml_dir)\n else:\n predict_dense(train_xml_dir)", "def post_process_datasets(self, train_data, val_data, test_data, info=None):\n return train_data, val_data, test_data, info", "def _data_preproc(self, X, y, X_test, y_test=None):\n \n X = np.array(X)\n y = np.array(y)\n X_test = np.array(X_test)\n y_test = np.array(y_test) \n\n # y need to be a column:\n if y.shape == y.flatten().shape:\n y = y.reshape(-1, 1)\n\n # Scale the data\n stda = StandardScaler()\n stda.fit(np.vstack([X, X_test]))\n\n X_test = stda.transform(X_test)\n X = stda.transform(X)\n\n # Stack target to X (train)\n X = np.column_stack((y, X))\n\n # Stack id to X_test\n #X_test = np.column_stack((ids, X_test))\n\n # Export to txt files (, del.)\n np.savetxt(self._train_file, X, delimiter=\",\", fmt='%.5f')\n np.savetxt(self._test_file, X_test, delimiter=\",\", fmt='%.5f')", "def __data_generation(self, batch_data):\n X = np.zeros((self.batch_size, self.num_features), dtype=float)\n y = np.zeros((self.batch_size, self.num_outputs), dtype=float)\n\n for i, sample in batch_data.iterrows():\n # Get lat/long of pickup and dropoff locations\n PULocation = self.taxizone_data.loc[sample['PULocationID']].centroids\n PULocationLong, PULocationLat = PULocation.x, PULocation.y\n DOLocation = self.taxizone_data.loc[sample['DOLocationID']].centroids\n DOLocationLong, DOLocationLat = DOLocation.x, DOLocation.y\n\n # Get month date, day of week and hours/mins for pickup\n PUDateTime = datetime.strptime(sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S')\n PUDate = PUDateTime.strftime('%Y-%m-%d')\n PUYear, PUMonth, PUMonthDate = PUDate.split('-')\n # TODO - Add this to pre-processing of trip data! 
Some random months in the data!!\n if PUYear != '2018' or PUMonth != '06':\n continue\n PUDayOfWeek = PUDateTime.weekday()\n PUTimeHour, PUTimeMinute = datetime.strptime(\n sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S'\n ).strftime('%H:%M').split(':')\n\n # Get precipitation for that day\n Precipitation = self.weather_data[self.weather_data['DATE'] == PUDate]['PRCP'].values[0]\n\n X[i] = np.concatenate((np.array([\n\n PULocationLat,\n PULocationLong,\n DOLocationLat,\n DOLocationLong,\n abs((PULocationLat - DOLocationLat) ** 2 + abs(PULocationLong - DOLocationLong) ** 2) ** 0.5,\n Precipitation\n ]),\n to_categorical(PUDayOfWeek, 7),\n to_categorical(PUMonthDate, 31),\n to_categorical(PUTimeHour, 24)\n ))\n\n y[i] = [sample['duration']] if self.generator_type == 'duration' \\\n else [sample['total_amount'] - sample['tip_amount']]\n\n return X, y", "def getPreProcessData(self, data:np.ndarray, dataIncludesLabels:bool) -> DataProcessorWithVisitor:\n scaledDataCategoryVisitor = ScaledDataCategoryVisitor()\n train_data = pd.read_csv(\"titanic/train.csv\")\n train_data = train_data.to_numpy().tolist()\n dataProcessorWithVisitor = DataProcessorWithVisitor(train_data, True, scaledDataCategoryVisitor)\n _, dataToCompareTo = dataProcessorWithVisitor.getProcessedData()\n \n dataProcessor = DataProcessorGaussAndCosine(data, dataIncludesLabels, dataCategoryVisitor=scaledDataCategoryVisitor, sigma=1.0, dataToCompareTo=dataToCompareTo)\n return dataProcessor", "async def predict(input_data: schemas.MultipleUserDataInputs) -> Any:\n print(input_data.inputs)\n input_df = jsonable_encoder(input_data.inputs)[0][\"user_handle\"]\n print(input_df)\n logger.info(f\"Making prediction on inputs: {input_data.inputs}\")\n results = make_prediction(input_df)\n\n logger.info(f\"Prediction results: {results.get('predictions')}\")\n\n return results", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def predict(self, instances):\r\n raise NotImplementedError", "def predict(data, model_predict):\n # Execute any steps you need to do before scoring\n\n # This method makes predictions against the raw, deserialized model\n #predictions = model_predict(data)\n\n data.to_csv(\"/opt/code/chemprop_folder/for_scoring.csv\", index=False)\n\n args = PredictArgs().parse_args([\n '--test_path', '/opt/chemprop_folder/for_scoring.csv',\n '--checkpoint_path', '/opt/code/model.pth',\n '--preds_path', '/opt/chemprop_folder/preds.csv'\n ])\n\n make_predictions(args)\n\n preds_df = pds.read_csv(\"/opt/chemprop_folder/preds.csv\")\n sh = str(preds_df.shape)\n print(sh)\n\n preds_df = preds_df.rename(columns = {\"p_np\": \"positive_class_label\"})\n preds_df = preds_df.drop(columns=['smiles'])\n preds_df[\"negative_class_label\"] = 1 - preds_df[\"positive_class_label\"]\n\n print(preds_df.head())\n\n # Execute any steps you need to do after scoring\n # Note: To properly send predictions back to DataRobot, the returned DataFrame should contain a\n # column for each output label for classification or a single value column for regression\n return preds_df", "def training_data_preprocessing(raw_data, num_passed_rows=72):\r\n # some samples have errors\r\n raw_data = raw_data[num_passed_rows:].reset_index(drop=True) \r\n \r\n # get data output\r\n data_output = raw_data[['Submitby Date Time', 'Challenge Manager', 'Challenge Copilot', 'Posting Date 
Date', 'Track',\r\n 'Technology List', 'First Place Prize', 'Num Registrations', 'Total Prize']]\r\n data_output, extended_columns = class_binaryzation(data_output)\r\n \r\n # save extended columns to cache\r\n extended_columns_filepath = 'cache/extended_columns.pkl'\r\n with open(extended_columns_filepath, 'wb') as f:\r\n pickle.dump(extended_columns, f)\r\n\r\n num_date_columns_filepath = 'cache/num_date_columns.pkl'\r\n try:\r\n data_output = date_separation1(data_output) \r\n with open(num_date_columns_filepath, 'wb') as f:\r\n pickle.dump(6, f)\r\n\r\n except:\r\n data_output = date_separation2(data_output)\r\n with open(num_date_columns_filepath, 'wb') as f:\r\n pickle.dump(5, f)\r\n\r\n data_output = money_digitalization(data_output)\r\n data_output = get_date_in_days(data_output)\r\n data_output['Days from Posting to Submit'] = data_output['Submitby Date Time Days from 2016'] \\\r\n - data_output['Posting Date Date Days from 2016'] \r\n \r\n # get other output\r\n label_output = pd.DataFrame(columns=['Success'])\r\n success_output = pd.DataFrame(columns=data_output.columns)\r\n failure_output = pd.DataFrame(columns=data_output.columns)\r\n for i in range(len(raw_data)):\r\n if raw_data.loc[i, 'Num Submissions Passed Review'] >= 1:\r\n label_output.loc[i, 'Success'] = 1\r\n success_output.loc[len(success_output)] = data_output.loc[i]\r\n else:\r\n label_output.loc[i, 'Success'] = 0\r\n failure_output.loc[len(failure_output)] = data_output.loc[i]\r\n\r\n return data_output, label_output, success_output, failure_output, extended_columns", "def post_processing(\n cfg: CfgNode, y: torch.Tensor, orig_img_size: torch.Tensor, transformed_labels: torch.Tensor\n) -> Tuple[Tuple[List[np.array], List[np.array]], float]:\n post_processing_start_time = time.time()\n pruned_preds_batch = post_process_prediction(y, orig_img_size, cfg)\n post_processing_end_time = time.time()\n processed_labels_batch = post_process_labels(transformed_labels, orig_img_size, cfg)\n\n return (pruned_preds_batch, processed_labels_batch), (post_processing_end_time - post_processing_start_time)", "def predict_batch(self, model, context, data=None):\n pass", "def preProcess(self, datum):\n pass", "def _postprocess(self, preds):\n ntok = preds.pop(\"ntok\")\n ids = preds.pop(\"input_ids\")[:ntok]\n preds[\"tokens\"] = self._detokenize(ids)\n\n # Decode predicted top-k tokens.\n # token_topk_preds will be a List[List[(word, prob)]]\n # Initialize prediction for 0th token as N/A.\n token_topk_preds = [[(\"N/A\", 1.)]]\n pred_ids = preds.pop(\"top_k_indices\")[:ntok] # <int>[num_tokens, k]\n pred_probs = preds.pop(\"top_k_probs\")[:ntok] # <float32>[num_tokens, k]\n for token_pred_ids, token_pred_probs in zip(pred_ids, pred_probs):\n token_pred_words = self._detokenize(token_pred_ids)\n token_topk_preds.append(list(zip(token_pred_words, token_pred_probs)))\n preds[\"pred_tokens\"] = token_topk_preds\n\n # Process attention.\n for key in preds:\n if not re.match(r\"layer_(\\d+)/attention\", key):\n continue\n # Select only real tokens, since most of this matrix is padding.\n # <float32>[num_heads, max_seq_length, max_seq_length]\n # -> <float32>[num_heads, num_tokens, num_tokens]\n preds[key] = preds[key][:, :ntok, :ntok].transpose((0, 2, 1))\n # Make a copy of this array to avoid memory leaks, since NumPy otherwise\n # keeps a pointer around that prevents the source array from being GCed.\n preds[key] = preds[key].copy()\n\n return preds", "def prediction_processing(predictions, labels, threshold, step_nb):\n new_labels = 
[]\n new_predictions = []\n number_sequences = step_nb//50\n\n for k in range(len(labels)//number_sequences):\n total_prediction = 0\n isLabelTrue = labels[number_sequences*k]\n for i in range(number_sequences):\n total_prediction += (1/predictions[number_sequences*k+i])\n if not(isLabelTrue == (labels[number_sequences*k+i])):\n logger.error('Problem.')\n if total_prediction > threshold:\n total_prediction = False\n else:\n total_prediction = True\n new_labels.append(isLabelTrue)\n new_predictions.append(total_prediction)\n\n recall_1 = recall_score(new_labels, new_predictions)\n recall_0 = recall_score(new_labels, new_predictions, pos_label=0)\n precision_1 = precision_score(new_labels, new_predictions)\n precision_0 = precision_score(new_labels, new_predictions, pos_label=0)\n return((recall_1, recall_0, precision_1, precision_0), new_predictions, new_labels)", "def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r", "def predict(self, data):\n return self.result.predict(data)", "def inference(self):\r\n\t\tfor partition, loader in self.loaders.items():\r\n\t\t\tavg_loss, (y, y_hat), post, attentions, tags = self.eval_loader(\r\n\t\t\t\tloader)\r\n\t\t\tself.preds[partition] = {\r\n\t\t\t\t'tag': tags,\r\n\t\t\t\t'y': y,\r\n\t\t\t\t'y_hat': y_hat,\r\n\t\t\t\t# 'posteriors': post,\r\n\t\t\t\t# 'attentions': attentions\r\n\t\t\t}", "def _process(self, data: np.ndarray) -> np.ndarray:\n\n # Step 1. Reorder the data.\n memory = self._reorder(data)\n\n # Step 2. 
Do the restless classification into counts.\n counts = [defaultdict(int) for _ in range(self._n_circuits)]\n prev_shot = \"0\" * self._num_qubits\n header = {\"memory_slots\": self._num_qubits}\n\n for idx, shot in enumerate(memory):\n shot = format_counts_memory(shot, header)\n\n restless_adjusted_shot = RestlessToCounts._restless_classify(shot, prev_shot)\n\n circuit_idx = idx % self._n_circuits\n\n counts[circuit_idx][restless_adjusted_shot] += 1\n\n prev_shot = shot\n\n return np.array([dict(counts_dict) for counts_dict in counts])", "def predict(self,data):\n results = []\n predict_instances = np.shape(data)[0]\n stored_instances = np.shape(self.data)[0]\n for predict_index in range(predict_instances):\n neighbors = [] # dist, label\n for stored_index in range(stored_instances):\n neighbors.append((self._distance(self.data[stored_index], data[predict_index]), self.data_labels[stored_index][0], data[predict_index]))\n neighbors = sorted(neighbors, key=lambda x: x[0])[:self.k]\n results.append(self._analyze_neighbors(neighbors))", "def predict(image_data):\n PAYLOAD = {}\n PAYLOAD[\"timestamp\"] = str(datetime.now())\n PAYLOAD[\"inference-type\"] = \"image-classification\"\n PAYLOAD[\"inference-description\"] = \"Top {} predictions with score {} or above \".format(\n config_utils.MAX_NO_OF_RESULTS, config_utils.SCORE_THRESHOLD\n )\n PAYLOAD[\"inference-results\"] = []\n\n try:\n # Run DLR to perform inference with DLC optimized model\n model_output = dlr_model.run(image_data)\n config_utils.logger.info(\"pred shape: '{}'.\".format(model_output[0][0].shape)) \n probabilities = softmax(model_output[0][0])\n config_utils.logger.info(\"pred shape softmax: '{}'.\".format(probabilities.shape)) \n sort_classes_by_probability = argsort(probabilities)[::-1]\n\n config_utils.logger.info(\"pred classes: '{}'.\".format(sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS])) \n\n for i in sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS]:\n if probabilities[i] >= config_utils.SCORE_THRESHOLD:\n result = {\"Label\": str(synset[i]), \"Score\": str(probabilities[i])}\n PAYLOAD[\"inference-results\"].append(result)\n\n config_utils.logger.info(dumps(PAYLOAD))\n\n if config_utils.TOPIC.strip() != \"\":\n ipc_utils.IPCUtils().publish_results_to_cloud(PAYLOAD)\n else:\n config_utils.logger.info(\"No topic set to publish the inference results to the cloud.\")\n\n except Exception as e:\n config_utils.logger.error(\"Exception occured during prediction: {}\".format(e))", "def predict(self, data: Union[Any, List[Any]]):\n # predict without labels\n self._engine.eval()\n\n # prepare a list of (data, label) to make it iterable\n # for compatibility with schedule\n simple_dataloader = [(data, None)]\n data_iter = iter(simple_dataloader)\n output, _, _ = self.engine.execute_schedule(data_iter, forward_only=True, return_loss=False)\n return output", "def test_data_preprocessing(raw_data): \r\n\r\n # get data output\r\n data_output = raw_data[['Submitby Date Time', 'Challenge Manager', 'Challenge Copilot', 'Posting Date Date', 'Track',\r\n 'Technology List', 'First Place Prize', 'Num Registrations', 'Total Prize']]\r\n with open('cache/extended_columns.pkl', 'rb') as f:\r\n extended_columns = pickle.load(f)\r\n with open('cache/num_date_columns.pkl', 'rb') as f:\r\n max_date_columns = pickle.load(f)\r\n \r\n data_output = class_binaryzation_for_test(data_output, extended_columns)\r\n try:\r\n data_output = date_separation1(data_output, max_num_columns=NUM_DATE_COLUMNS)\r\n except:\r\n data_output 
= date_separation2(data_output)\r\n data_output = money_digitalization(data_output)\r\n data_output = get_date_in_days(data_output)\r\n data_output['Days from Posting to Submit'] = data_output['Submitby Date Time Days from 2016'] \\\r\n - data_output['Posting Date Date Days from 2016'] \r\n\r\n return data_output", "def process_data(self):\r\n \r\n self.processed_data = dict()\r\n for split,text_data_ in self.text_data.items():\r\n y = text_data_[self.target_col].values\r\n print(\"Vectorizing for split: \"+split)\r\n x = np.array([self.vectorizer(x_) for x_ in text_data_['Text']])\r\n \r\n self.processed_data[split] = {'x':x,'y':y}\r\n \r\n self.set_split(self.split_)", "def getProcessedData(self, data, labels):\n if self.underSamplePercentage != 0:\n data, labels = self.underSample(data, labels)\n if self.beta != 0: \n synData, synLabels = self.adaSynAdd(data, labels)\n if synData is not None:\n data, labels = combineTestSets(data, labels, synData, synLabels)\n return data, labels", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)" ]
[ "0.6911158", "0.68740946", "0.6579429", "0.65545535", "0.65444416", "0.6536913", "0.64836574", "0.6466419", "0.6466419", "0.6391505", "0.63673496", "0.6256279", "0.6249069", "0.6240798", "0.62286144", "0.6211028", "0.6206762", "0.61867833", "0.6183594", "0.6156628", "0.61436504", "0.61085784", "0.6108401", "0.60972655", "0.6089194", "0.60730904", "0.606412", "0.6052729", "0.60329765", "0.6030822", "0.60264057", "0.6024429", "0.60183424", "0.60183424", "0.60183424", "0.6017981", "0.6009821", "0.6005958", "0.59817135", "0.5972372", "0.59554297", "0.5947774", "0.5947117", "0.5940548", "0.5921446", "0.59019846", "0.5901754", "0.5887223", "0.5886989", "0.58766437", "0.5876018", "0.58648676", "0.5864369", "0.58477867", "0.5847454", "0.5844296", "0.58441466", "0.58405983", "0.5840463", "0.5839199", "0.58385587", "0.583708", "0.5830095", "0.582845", "0.58205956", "0.58081925", "0.58008665", "0.5797515", "0.57971084", "0.57937366", "0.57868445", "0.5784014", "0.5781444", "0.577778", "0.5764723", "0.57623756", "0.5756362", "0.5753983", "0.57461107", "0.57402873", "0.5739962", "0.5728303", "0.5725018", "0.572425", "0.572352", "0.5721473", "0.571968", "0.57177097", "0.5716703", "0.5711896", "0.5709266", "0.5708903", "0.5704473", "0.5701758", "0.5695111", "0.5692334", "0.5688223", "0.56845236", "0.5680606", "0.56780374", "0.5676898" ]
0.0
-1
Return the segment identifier a customer is predicted to belong to.
def predict_segment(self, df_invoice_line=None):
    if df_invoice_line is not None:
        self.data_transform(df_invoice_line)
        self.df_customers_features_build()
    else:
        pass
    X_test = self._df_customers.values
    y_pred = self._classifier_model.predict(X_test)
    return y_pred[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID", "def segment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"segment_name\")", "def get_segment_name(self, offset):\n self.ret = idc.get_segm_name(offset)\n return self.ret", "def segment_number(self):\n if hasattr(self, '_m_segment_number'):\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None\n\n self._m_segment_number = self.segment_number_raw.value\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None", "def get_segment(self):\n return self.segment", "def get_segment(self):\n return self.segment", "def segment_counter(self):\n return self._data_writer.get_segment_counter()", "def get_segm_num(*args):\n return _ida_segment.get_segm_num(*args)", "def segment_func1(self):\n # computing neighboors graph\n A = self.normal_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels", "def getSegment(self):\n return self.segment", "def getSentenceId(self):\n return( int(self.id.split('.')[1]) )", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def get_seg(self):\n self.seg = self.render()[4]\n return self.seg", "def identifier(self):\n return self.viztrail.identifier", "def getSegment(self):\n\n segname = self.getSegname()\n if segname is not None:\n return self._hv.getSegment(segname)", "def get_segm_name(*args):\n return _ida_segment.get_segm_name(*args)", "def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels", "def cinters_segment(self, s):\r\n if self.contains_point(s.start[0], s.start[1]) == self.contains_point(s.end[0], s.end[1]):\r\n # The segment doesn't cross the contour of the polygon\r\n return None\r\n else:\r\n if self.__segments == None:\r\n self.__load_segments()\r\n \r\n for segment in self.__segments:\r\n p = segment.inters_segment(s)\r\n if p != None:\r\n return p\r\n \r\n return None", "def get_visible_segm_name(*args):\n return _ida_segment.get_visible_segm_name(*args)", "def inters_segment(self, s):\r\n if (self.m == s.m) and (self.n == s.n):\r\n # The segment s is over this segment. 
Return the middle point\r\n x = (self.start[0] + self.end[0]) / 2\r\n y = (self.start[1] + self.end[1]) / 2\r\n elif self.m == s.m:\r\n # The segments are parallels\r\n return None\r\n elif self.m == None:\r\n x = self.start[0]\r\n y = int(s.m * x + s.n)\r\n elif s.m == None:\r\n x = s.start[0]\r\n y = self.m * x + self.n\r\n else:\r\n x = (s.n - self.n) / (self.m - s.m)\r\n y = self.m * x + self.n \r\n \r\n if self.contains_point(x, y) and s.contains_point(x, y):\r\n return int(x), int(y)\r\n else:\r\n return None", "def customer_id(self) -> str:\n return self._customer_id", "def get_physical_seg_id(self, local_seg_id: int) -> int:\n return 0 if (self.seg_id == local_seg_id) else self.seg_id", "def get_classification(self):\n try:\n receiver = self.cleaned_data[\"customer\"]\n classification = self.CLASSIFICATION_DICT[receiver]\n except KeyError:\n return \"------\"\n except AttributeError:\n return \"------\"\n\n return classification", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def get_customer_id(self):\n return self.machine_config_file_value(\"DEFAULT.CID\").strip('\"')", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def getSegments(points):\n return _identifyStrokes(points)[1]", "def get_identifier(self):\n return 'Sequence SMNIST'", "def predict(self, test_vector):\n return self.find_closest(test_vector)[1].class_id", "def get_segment_index(datadb):\n #match in time!!!!\n if cfg.use_saliency:\n segment_index_tar = util.get_time_for_visual(datadb)\n segment_index_tar_future = OrderedDict()\n for key in segment_index_tar.keys():\n segment_index_tar_future[key] = np.array(segment_index_tar[key])+max_encoder_seq_length\n return segment_index_tar,segment_index_tar_future", "def getSegmentCount(self) -> int:\n ...", "def nit_sin_digito_verificacion(self):\n\n return self.identificacion.split('-')[0]", "def customer_id(self):\n return self._customer_id", "def cen_region_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cen_region_id\")", "def createCustomerID(self):\n\n customerID = self._df_invoice_original.CustomerID.max()\n customerID += 1\n return int(customerID)", "def cen_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cen_id\")", "def getIdentity():\n return Sentience.__IDENTITY.lower()", "def _getSegmentIndexFromClusterIndex(self, cluster, labelMap, clusterIndex=None, chooseRandom=False):\n # Need to either provide the index, or let it be random, but not both\n ## TODO modify to use internal RNG from randUtils\n assert not (clusterIndex is None and chooseRandom is False)\n assert not (clusterIndex is not None and chooseRandom is True)\n # indices of all segments\n indices = np.arange(len(labelMap))\n # indices who belong to this cluster\n eligible = indices[labelMap == cluster]\n # if random, choose now\n if chooseRandom:\n i = randomUtils.randomIntegers(0, len(eligible) - 1, self)\n clusterIndex = eligible[i]\n # global index\n segmentIndex = eligible[clusterIndex]\n return segmentIndex, clusterIndex", "def _graph_segment_str_at_line(self, line: int) -> str:\n if line == 0:\n result_str = self._node_count_segment_str()\n result_str += \" \" * (self._max_segment_columns() - len(result_str))\n return result_str\n if line == 1:\n result_str = self._graph_id_segment_str()\n result_str += \" \" * 
(self._max_segment_columns() - len(result_str))\n return result_str\n if 0 <= line < self._total_rows():\n return \" \" * self._max_segment_columns()\n return \"\"", "def getSerpentId(self):\n raise NotImplementedError", "def get_patient_nr(segment):\n try:\n national_register = str(segment[19])\n except IndexError:\n nr_list = segment[2:5]\n national_register = [nr for nr in nr_list if str(nr) is not \"\"].pop()[0]\n national_register = str(national_register).split(\"^\")[0]\n return national_register", "def get_customer_id(self, customer):\n\n\t\t# connect to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def get_customer_id(self, customer):\n\n\t\t# connect to the database\n\t\twith sqlite3.connect(\"bangazon_cli.db\") as bang:\n\t\t\tcursor = bang.cursor()\n\n\t\t\ttry: \n\t\t\t\t# select customer_id that matches the customer's phone number\n\t\t\t\tcursor.execute(\"SELECT customer_id FROM Customers c WHERE c.phone_number ='{}'\".format(customer.get_phone_number()))\n\n\t\t\t\t# return the data\n\t\t\t\tdata = cursor.fetchall()\n\t\t\t\t\n\t\t\t\tprint(\"Customer_id\", data[0][0])\n\t\t\t\treturn data[0][0]\n\t\t\t\t\n\n\t\t\texcept sqlite3.OperationalError:\n\t\t\t\tprint(\"NOPE.\")", "def segment_counter(self, _):\n raise NotImplementedError(\n \"We do not support externally altering the segment counter\")", "def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:\n customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4\n return customer_id", "def _segment_labels(Yi):\n idxs = [0] + (np.nonzero(np.diff(Yi))[0]+1).tolist() + [len(Yi)]\n Yi_split = np.array([Yi[idxs[i]] for i in range(len(idxs)-1)])\n return Yi_split", "def identifier(self):\n mrn_field = 'Patient Account No'\n if mrn_field in self.data:\n ident = {\"system\": KentPatientAdapter.SITE_SYSTEM, \"value\": self.data[mrn_field]}\n # FHIR keeps lists of identifiers, return as list\n return [ident]", "def get_id(disk):\n\n #TODO\n return \"Unknown\"", "def __segVal(self, string):\r\n return {\r\n \"local\": \"LCL\",\r\n \"argument\": \"ARG\",\r\n \"this\": \"THIS\",\r\n \"that\": \"THAT\",\r\n \"temp\": Consts.SEG_TEMP,\r\n 0: \"THIS\",\r\n 1: \"THAT\"\r\n }[string]", "def get_mica_id(\n pheno_a: str,\n pheno_b: str,\n graph: Graph,\n ic_map:Dict[str, float],\n root: str) -> str:\n predicate = RDFS['subClassOf']\n p1_closure = get_closure(graph, pheno_a, predicate, root)\n p2_closure = get_closure(graph, pheno_b, predicate, root)\n overlap = p1_closure.intersection(p2_closure)\n max_ic = max([ic_map[parent]for parent in overlap])\n mica = ''\n for pheno in overlap:\n if ic_map[pheno] == max_ic:\n mica = pheno\n return mica", "def generate_subsegment_id():\n return uuid.uuid4().hex[:16]", "def ctc_seg_metric(prediction, ground_truth, path_evaluation_software):\n\n # Check for empty predictions\n num_nuclei_prediction = len(get_nucleus_ids(prediction))\n if num_nuclei_prediction == 0:\n return 0\n\n # Clear temporary result directory if exists\n if os.path.exists(path_evaluation_software + 
'/tmp'):\n shutil.rmtree(path_evaluation_software + '/tmp')\n\n # Create new clean result directory\n for directory in ['/tmp', '/tmp/01_GT', '/tmp/01_GT/SEG', '/tmp/01_RES']:\n os.mkdir(path_evaluation_software + directory)\n\n # Chose the executable in dependency of the operating system\n if platform.system() == 'Linux':\n path_seg_executable = path_evaluation_software + '/Linux/SEGMeasure'\n elif platform.system() == 'Windows':\n path_seg_executable = path_evaluation_software + '/Win/SEGMeasure.exe'\n elif platform.system() == 'Darwin':\n path_seg_executable = path_evaluation_software + '/Mac/SEGMeasure'\n else:\n raise ValueError('Platform not supported')\n\n # Check for missing nuclei ids in the prediction. To build the intersection histogram the nuclei_ids should range\n # from 1 to the number of nuclei. Copy the prediction to avoid changing it.\n pred = np.copy(prediction)\n\n if num_nuclei_prediction != pred.max():\n\n hist = np.histogram(prediction, bins=range(1, pred.max() + 2), range=(1, pred.max() + 1))\n\n # Find missing values\n missing_values = np.where(hist[0] == 0)[0]\n\n # Decrease the ids of the nucleus with higher id than the missing. Reverse the list to avoid problems in case\n # of multiple missing objects\n for th in reversed(missing_values):\n pred[pred > th] = pred[pred > th] - 1\n\n # Temporarily save the prediction and the ground truth with the naming convention needed for the evaluation software\n tiff.imsave(path_evaluation_software + '/tmp/01_GT/SEG/man_seg000.tif', ground_truth.astype(np.uint16))\n tiff.imsave(path_evaluation_software + '/tmp/01_RES/mask000.tif', pred.astype(np.uint16))\n\n # Apply the evaluation software to calculate the cell tracking challenge SEG measure\n output = subprocess.Popen([path_seg_executable, path_evaluation_software + '/tmp', '01', '3'],\n stdout=subprocess.PIPE)\n result, _ = output.communicate()\n seg_measure = re.findall(r'\\d\\.\\d*', result.decode('utf-8'))\n seg_measure = float(seg_measure[0])\n\n # Remove the temporary folder\n shutil.rmtree(path_evaluation_software + '/tmp')\n\n return seg_measure", "def sosid(self):\r\n return self.word2idx.get(SOS, 0)", "def cmid(self):\n return self[\"cmid\"]", "def get_train_segmentation_path(self, subject_name):\n \"\"\"\n Arguments:\n subject_name: name of the subject\n Returns:\n full train segmentation path\n \"\"\"\n if (int(subject_name) < 28):\n db_batch = \"/Training Batch 1\"\n else:\n db_batch = \"/Training Batch 2\"\n\n segmentation_path = self.db_path + db_batch + \"/segmentation-\" +\\\n subject_name + \".nii\"\n\n return segmentation_path", "def cisid(self):\n return self._cisid", "def getElementName(self):\n return _libsbml.LineSegment_getElementName(self)", "def getnseg(*args):\n return _ida_segment.getnseg(*args)", "def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)", "def prediction_guid_id(self) -> str:\n return pulumi.get(self, \"prediction_guid_id\")", "def get_label_id(self) -> int:\n pass", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, 
\"cluster_id\")", "def first_segment(self):\n\t\tseg_sort = sorted(self.segments, key=lambda x: stringutil.extract_numbers(x.filename))\n\t\tif seg_sort:\n\t\t\treturn seg_sort[0]\n\t\telse:\n\t\t\treturn None", "def get_segment_by_name(self, name):\n for seg in self.segments:\n if seg.segname == name:\n return seg\n\n return None", "def get_identifier(self):", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def instrID(self):\n return self.query('*IDN?')", "def get_kid(self):\n\n return 'f825ccd5-9b4a-476f-ae12-c1c1ea99e6b2'", "def get_province_number(corr_pixel):\n\tcorr_pixel = str(corr_pixel).strip(\"()\").replace(\", \", \";\") #Reformats the pixel to ensure it can be compared.\n\twith open(os.getcwd()+\"\\\\shatterednippon\\\\map\\\\definition.csv\", \"r\") as definitions:\n\t\tprov_num = 1\n\t\tfor line in definitions:\n\t\t\tif corr_pixel in line:\n\t\t\t\treturn prov_num\n\t\t\tprov_num += 1\n\treturn None", "def get_identifier_string(self):\n return self.identifier", "def get_ident():\n return -1", "def get_student_id(self):\n return self.__student_id", "def _get_det_id(self, source):\n match = re.match(r\"Camp\\.0:pnCCD\\.(\\d)\", source)\n number = str.zfill(match.groups()[0], 4)\n return \"pnccd_\" + number", "def annulus_ident(self) -> int:\n return self._ann_ident", "def get_customer_id_by_sale_id(sale_id):\n\n # your code", "def getseg(*args):\n return _ida_segment.getseg(*args)", "def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list", "def customer_profile_oid(self):\n return self._customer_profile_oid", "def get_student_id(self):\n return self._student_id", "def getTypeCode(self):\n return _libsbml.LineSegment_getTypeCode(self)", "def segment(data):", "def getSN(self):\n return self.spavg.getSN()", "def key(self):\n return self.sentence_idx * (10 ** 6) + self.get_id()", "def cen_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cen_id\")", "def identity(self) -> pulumi.Output['outputs.ClusterIdentity']:\n return pulumi.get(self, \"identity\")", "def virtual_network_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"virtual_network_id\")", "def get_id(self):\n if self.mlat:\n return f'm{-self.mlat}_{self.mlng}'\n else:\n return f'{-self.clat}_{self.clng}'", "def sdcToClassifier(self, sdc):\n vIndexes, vTokens = chunker.tokenize(sdc.verb.text)\n srIndexes, srTokens = chunker.tokenize(sdc.spatialRelation.text)\n vTokens = [x.lower() for x in vTokens]\n srTokens = [x.lower() for x in srTokens]\n \n\n if \"pass\" in vTokens and \"past\" in self.tokenToEngine:\n return self.tokenToEngine[\"past\"]\n else:\n for sp, engine in self.tokenToEngine.iteritems():\n if sp in srTokens:\n if sp == \"to\" and sdc.spatialRelation.text.lower() != \"to\" and len(sdc.spatialRelation.text.split()) > 2:\n # if they say \"with your back to\" or \"to the left of\", don't use \"to.\"\n continue\n else:\n return self.tokenToEngine[sp]\n\n\n if \"stop\" in vTokens:\n if not sdc.landmark.isNull() and \"until\" in 
self.tokenToEngine:\n return self.tokenToEngine[\"until\"]\n\n# if \"exit\" in vTokens or \"leave\" in vTokens:\n# if not sdc.landmark.isNull():\n# return self.tokenToEngine[\"out\"]\n\n return None", "def gee_ic_id(self):\n toa = 'COPERNICUS/S2'\n sr = 'COPERNICUS/S2_SR'\n return toa if self._isTOA() else sr", "def access_point_id(self) -> str:\n return pulumi.get(self, \"access_point_id\")", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def cen_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cen_id\")", "def get_segments(file_name):\n count = 1\n total_num_lines = num_lines_in_file(file_name)\n with open(file_name, 'r') as file_in:\n pre_segment = file_in.readline().split()[0]\n segments = [pre_segment]\n num_lines = []\n for line in file_in:\n line = line.split()\n if line[0].startswith(';;'):\n count += 1\n else:\n if len(line) >= LINE_LEN:\n if line[0] == pre_segment:\n count += 1\n else:\n segments.append(line[0])\n pre_segment = line[0]\n num_lines.append(count)\n count = 1\n else:\n count += 1\n last_num_lines_entry = total_num_lines - sum(num_lines)\n num_lines.append(last_num_lines_entry)\n assert len(segments) == len(num_lines), \"%i != %i\" %(len(segments), len(num_lines))\n return segments, num_lines" ]
[ "0.768172", "0.6201237", "0.61484486", "0.5937784", "0.5902539", "0.5902539", "0.58263624", "0.57347125", "0.5691365", "0.56732774", "0.5623136", "0.55613863", "0.5555856", "0.5535719", "0.5528222", "0.55247766", "0.5446849", "0.54316", "0.5398863", "0.5389348", "0.52877736", "0.52812827", "0.5259461", "0.5229752", "0.5216766", "0.52108204", "0.52103513", "0.5195189", "0.51856345", "0.51749325", "0.5160321", "0.5159441", "0.51439524", "0.5136944", "0.513506", "0.5129404", "0.51230806", "0.51185644", "0.5096263", "0.5073758", "0.5067447", "0.5038181", "0.5038181", "0.5020505", "0.49991977", "0.49954754", "0.49805498", "0.49640843", "0.49587467", "0.49479672", "0.49443632", "0.494", "0.49350664", "0.49268076", "0.49234635", "0.49115327", "0.4901622", "0.48995462", "0.4893655", "0.48888096", "0.4885929", "0.48819983", "0.48819983", "0.48819983", "0.48819983", "0.4880073", "0.4873811", "0.48678014", "0.48594123", "0.48594123", "0.48594123", "0.48594123", "0.48594123", "0.4858305", "0.48557544", "0.48547536", "0.48406398", "0.4838184", "0.4832123", "0.4829912", "0.48253366", "0.4821036", "0.48118046", "0.48114026", "0.4810538", "0.48097286", "0.48061562", "0.48039222", "0.4803906", "0.48004574", "0.4799878", "0.47950396", "0.4782349", "0.47814175", "0.47810593", "0.478074", "0.4775934", "0.4773991", "0.47736108", "0.4773262" ]
0.63881826
1
Returns list of stock codes from list of items descriptions.
def getStockCodeList(self, list_description=None): list_stockCode = list() df = self._df_invoice_original if list_description is None: list_stockCode = list(df.StockCode.unique()) else : for description in list_description: stockCode = df[df.Description==description].StockCode.unique()[0] list_stockCode.append(stockCode) return list_stockCode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCode==stockCode].Description.unique()[0]\n list_description.append(description)\n \n return list_description", "def codes(self):\n return [card.code for card in self.cards]", "def find_item_codes(transaction):\n t = transaction\n item_codes = []\n if t['transaction_type'] in ('SALE', 'REFUND'):\n # Search using line item IDs and order_id\n for oli in (t['order_line_items'] or []):\n li_id = oli['line_item_id']\n item_codes.append(\n get_item_code_for_order(t['order_id'], order_line_item_id=li_id)\n )\n else:\n # Search for ITEM reference\n for ref in (transaction['references'] or []):\n if ref['reference_type'] == 'ITEM_ID':\n item_codes.append(\n get_item_code_for_item_id(ref['reference_id'])\n )\n\n return item_codes", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]", "def codelists():\n return CodelistSet()", "def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities", "def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")", "def get_pcode_list(self) -> List[str]:\n return self.pcodes", "def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]", "def currency_codes():\n return list(settings.CURRENCIES)", "def extract_promocodes(self):\n promocode_description = self.text\n\n sentences: list = self._split_by_sentences(promocode_description)\n\n sentence_with_promocode = promocode_description # no needed\n\n promocodes = ()\n\n for sentence in sentences:\n if any(keyword in sentence.lower()\n for keyword in (\"промокод\", \"купон\", \"промо-код\", )):\n\n sentence_with_promocode = sentence\n\n promocodes: list = \\\n self.get_promocodes(sentence_with_promocode,\n parser_constants.instagram_patterns)\n if promocodes:\n break\n # TODO:\n # make probabilities and do not break\n # continue iter by senteces and search4 promo in every\n # after that (we know that here is 1 promo)\n # we can choose the most suitable coupon\n\n for p in promocodes:\n if p and len(p) >= 3:\n promocode = p\n if self.is_valid_promocode_morph_check(promocode):\n break\n else:\n return []\n\n if any(forbidden_promocode in promocode.lower()\n for forbidden_promocode in\n parser_constants.forbidden_promocodes):\n\n return []\n\n expiration_date = self.parse_date(promocode_description)\n\n for key in parser_constants.replacement_table.keys():\n promocode_description = \\\n promocode_description.replace(\n key, parser_constants.replacement_table[key]\n )\n\n return [data_classes.Promocode(\n coupon=promocode,\n promoCodeDescription=promocode_description,\n estimated_date=expiration_date,\n source=self.source\n )]", "def create_not_included_list(codes):\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += 
'\\\\end{itemize}\\n'\n return string", "def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def list():\n\n return cache.codeTableList()", "def to_iob(text: str, items: List[Instance]) -> List[str]:\n coding = [\"O\"] * len(text)\n for (s, e), label in items:\n b = f\"B-{label}\"\n i = f\"I-{label}\"\n coding[s] = b\n for x in range(s + 1, e):\n coding[x] = i\n\n return coding", "def list_of_langs(data):\n lang_codes = []\n for lang_data in data:\n lang_codes.append(lang_data.get('value'))\n return lang_codes", "def get_item_code_for_order(order_id, order_line_item_id=None, item_id=None):\n\n params = {\n 'order_id': order_id,\n 'order_line_item_id': order_line_item_id,\n 'item_id': item_id\n }\n\n if order_line_item_id and item_id:\n raise ValueError('Supply either order_line_item_id or item_id!')\n\n if order_line_item_id:\n filter_line = \"\"\"sii.ebay_order_line_item_id = %(order_line_item_id)s\"\"\"\n else:\n filter_line = \"\"\"sii.ebay_item_id = %(item_id)s\"\"\"\n\n # Try loading from SINVs first\n records = frappe.db.sql(f\"\"\"\n SELECT sii.item_code\n FROM `tabSales Invoice Item` AS sii\n LEFT JOIN `tabSales Invoice` AS si\n ON sii.parent = si.name\n WHERE si.ebay_order_id = %(order_id)s\n AND {filter_line};\n \"\"\", params, as_dict=True)\n item_code = {x.item_code for x in records}\n if len(item_code) > 1:\n raise ValueError(\n f'Multiple results for order {order_id} line '\n + f'item {order_line_item_id or item_id}!'\n )\n if item_code:\n # We have a single result; return it\n item_code, = item_code\n return item_code\n # We will have to look up the order\n try:\n order = get_order(order_id)\n except eBayRestError as e:\n raise ErpnextEbaySyncError(\n f'Unable to load order to get item code!\\n{e}')\n for li in order['line_items']:\n if order_line_item_id:\n # Check order line ID\n if li['line_item_id'] == order_line_item_id:\n return li['sku']\n else:\n # Check legacy item ID\n if li['legacy_item_id'] == item_id:\n return li['sku']\n # We could not locate correct line item\n if order_line_item_id:\n msg = f'line item {order_line_item_id}'\n else:\n msg = f'item ID {item_id}'\n raise ErpnextEbaySyncError(f'Order {order_id} did not contain {msg}?')", "def currency_code_mappings():\n return [(a, CURRENCIES[a].name) for a in settings.CURRENCIES]", "def get_lock_codes(device: Device) -> Sequence[str]:\n try:\n codes_str = cast(str, device.attributes[ATTR_LOCK_CODES].value)\n codes = loads(codes_str)\n return [codes[id][\"name\"] for id in codes]\n except Exception as e:\n _LOGGER.warn(\"Error getting lock codes for %s: %s\", device, e)\n return []", "def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice", "def lcode(self):\n###############################################################################\n lcode = []\n for M in list(self.estimates.values()):\n if (M.code not in lcode):lcode.append(M.code)\n return(lcode)", "def get_codes(cls, query: Optional[str] = None):\n return search(\n SummaryItemCounts.get_data_frame(cls.table_name()), query=query\n )", "def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = 
\"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def _get_stock_item_ids(cls, *skus):\n return linnapi.inventory.get_stock_item_ids_by_sku(*skus)", "def hs_code_process(si):\n hs_code = re.sub(r'\\W+', '', si.get('hs_code', ''))\n descrip = re.sub(r'\\W+', '', si.get('description_of_goods', ''))\n bl_type = re.sub(r'\\W+', '', si.get('bl_type', ''))\n hs_codes = []\n if hs_code != '' and hs_code in descrip:\n hs_raw = si.pop('hs_code')\n for raw_line in hs_raw.split('\\n'):\n line = re.sub(r'\\W+', '', raw_line).upper()\n if 'HSCODE' in line:\n remain = line.replace('HSCODE', '').replace('\\n', '')\n remain = re.sub(r'[A-Z]+', '', remain)\n if remain.isdigit() and len(remain) > 4:\n hs_codes.append(remain)\n else:\n # CODE in line below\n hs_line_no = hs_raw.split('\\n').index(raw_line)\n for hs_line in hs_raw.split('\\n')[hs_line_no + 1:]:\n if len(re.findall(r'[a-zA-Z]+', hs_line)) < 1:\n for hs_code in re.findall(r'\\d+', hs_line):\n hs_codes.append(hs_code)\n else:\n break\n\n bl_type = si.get('bl_type', '')\n\n elif hs_code != '' and hs_code in bl_type:\n hs_raw = si.pop('hs_code')\n for raw_info in hs_raw.split('/'):\n info = re.sub(r'\\W+', '', raw_info).upper()\n if 'HSCODE' in info:\n hs_code = info.replace('HSCODE', '').replace('\\n', '')\n hs_code = re.sub(r'[A-Z]+', '', hs_code)\n hs_codes.append(hs_code)\n break\n bl_type = hs_raw.split('/')[0]\n\n else:\n hs_code = re.sub(r'[^\\d]+', '', hs_code)\n hs_codes.append(hs_code)\n bl_type = si.get('bl_type', '')\n\n return hs_codes, bl_type", "def product_db() -> List[Text]:\n\n return [\n \"credit\",\n \"forex\",\n \"debit\",\n \"atm\"\n ]", "def get_products(self):\n return [item.code for item in self._products]", "def get_registry_codes( ):\n return _theRegistry.get_codes( )", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def create_identifiers_lists(identifiers):\n issn_list = []\n isbn_list = []\n\n for ident in identifiers:\n if ident[\"scheme\"] == \"ISSN\":\n issn_list.append(ident[\"value\"])\n\n if ident[\"scheme\"] == \"ISBN\":\n isbn_list.append(ident[\"value\"])\n\n return issn_list, isbn_list", "def _expand_expected_codes(codes):\n\n retval = set()\n for code in codes.replace(',', ' ').split(' '):\n code = code.strip()\n\n if not code:\n continue\n elif '-' in code:\n low, hi = code.split('-')[:2]\n retval.update(\n str(i) for i in six.moves.range(int(low), 
int(hi) + 1))\n else:\n retval.add(code)\n return retval", "def getOldCodeList(self):\n if self.modification == 'none':\n old_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines() \\\n if self._getOldCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.old_code_length < self.new_code_length):\n filling = [(None, self.color)] * (self.new_code_length - \\\n self.old_code_length)\n old_code.extend(filling)\n else: # deletion or addition\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines()]\n return old_code", "def get_country_codes(prices):\n # your code here\n \n#_________________# 1. Break the string into a list. \n prices = prices.split('$') # breaks the list into a list of elements.\n \n#_________________# 2. Manipulate the individual elements.\n\n #_________________# A. Remove integers\n# nation = prices[0], prices[1]\n length = len(prices)\n\n for nation in (prices):\n nation == prices[0:]\n print(nation)\n #_________________# B.\n \n nations = []\n for each_char in (0, prices, 2):\n if each_char in prices[0:2]:\n nation = each_char\n nations = list(nations)\n # lastitem = nations.pop()\n print(nations)", "def get_items(self):\n\n items = []\n\n params = self.request.query_params\n\n if 'items[]' in params:\n items = params.getlist('items[]', [])\n elif 'item' in params:\n items = [params.get('item', None)]\n\n if type(items) not in [list, tuple]:\n items = [items]\n\n valid_ids = []\n\n for item in items:\n try:\n valid_ids.append(int(item))\n except (ValueError):\n pass\n\n # List of StockItems which match provided values\n valid_items = StockItem.objects.filter(pk__in=valid_ids)\n\n return valid_items", "def get_location_codes(scanner, input):\n matches = scanner.search_places(input)\n codes = []\n for i in matches[\"Places\"]:\n codes.append(i[\"PlaceId\"])\n return codes", "def encode(self, preprocessed: List[str]) -> List[int]:\n return [\n self.vocab.get(statement, self.unknown_vocab_element)\n for statement in preprocessed\n ]", "def items(self) -> typing.ItemsView[str, Category]:\n return self._primary_code_map.items()", "def opcode_list(self, script):\n opcodes = []\n new_pc = 0\n try:\n for opcode, data, pc, new_pc in self.get_opcodes(script):\n opcodes.append(self.disassemble_for_opcode_data(opcode, data))\n except ScriptError:\n opcodes.append(binascii.hexlify(script[new_pc:]).decode(\"utf8\"))\n\n return opcodes", "def convert_str_encoded_cards_to_int_encoded(cards: List[str]) -> List[int]:\n return [card_ids[card] for card in cards]", "def find_symbols(lst):\n ret = []\n for ii in lst:\n ret += [find_symbol(ii)]\n return ret", "def list(self, body, ordered=True):\n return [[MdStyleInstructionListStart(ordered)]] + body + [[MdStyleInstructionListEnd()]]", "def get_city_codes(cls):\n return [(city.code, city.name) for city in City.query.all()]", "def getAllDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.extend(worldItems[item][DESCWORDS])\r\n return list(set(descWords))", "def read_stock_codes_from_db():\n\n print('connecting to database...')\n Stocks = get_db()['Stocks']\n print('reading...')\n\n stocks = Stocks.find()\n return stocks", "def _get_v2_symbols(self, assets):\n\n v2_symbols = []\n for asset in assets:\n v2_symbols.append(self._get_v2_symbol(asset))\n\n return v2_symbols", "def get_opcodes(old: str, new: str) 
-> List[Opcode]:\n diff = difflib.SequenceMatcher(a=old, b=new)\n return [Opcode(*opcode) for opcode in diff.get_opcodes()]", "def description_to_colors(description):\n\tcolors = set()\n\tfor i, description_string in enumerate(description):\n\t\tfor token in description_string.split(\" \"):\n\t\t\t# Remove punctuation.\n\t\t\ttoken = token.translate(str.maketrans(\"\", \"\", string.punctuation))\n\t\t\t# Only multi-token color descriptor (I think).\n\t\t\tif token.lower().startswith(\"bouton\"):\n\t\t\t\tcolors.add(\"bouton d'or\")\n\t\t\t\tcontinue\n\t\t\tfor color in hm_utils.COLOR_DICT:\n\t\t\t\tif unidecode.unidecode(token.capitalize()) == unidecode.unidecode(color):\n\t\t\t\t\tcolors.add(token.lower())\n\treturn list(colors)", "def serve_recos(ids, ref_catalog):\r\n desc_list = []\r\n for desc_id in ids:\r\n desc_list.append(ref_catalog[ref_catalog['id'] == desc_id].iloc[0]['description'])\r\n return desc_list", "def inventory(self) -> [str]:\r\n inventory_to_use = []\r\n items = [\"Apple\", \"Sword\", \"Shield\", \"Dagger\"]\r\n\r\n for item_in_items in range(2):\r\n if item_in_items <= 2:\r\n index = randint(0, len(items)) - 1\r\n inventory_to_use.append(items[index])\r\n del items[index]\r\n return inventory_to_use", "def getNewCodeList(self):\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines() \\\n if self._getNewCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.new_code_length < self.old_code_length):\n filling = [(None, self.color)] * (self.old_code_length - \\\n self.new_code_length)\n new_code.extend(filling)\n else: # deletion or addition\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()]\n return new_code", "def get_items_from_iob2(annotations, tags):\n # prepare dictionary to store items by tags\n items = {}\n for tag in tags.tags:\n items[tag] = {}\n\n # build items for coders ()\n for i in range(1, len(annotations)):\n # coder name\n coder = 'coder_' + str(i)\n\n for tag in tags.tags:\n items[tag][coder] = []\n\n # initialize for coder\n start = None\n prev_tag = None\n n = 0\n\n # iterate by document iob marks by coder to collect items\n for n, elem in enumerate(annotations[i]):\n tag_prefix, tag = split_tag(elem, tags)\n\n if tag_prefix == tags.iob.begin:\n # close opened items\n add_item(items, coder, prev_tag, start, n)\n # open new item\n start = n\n\n elif not tag_prefix:\n # close opened items\n add_item(items, coder, prev_tag, start, n)\n # no open items\n start = None\n\n prev_tag = tag\n\n # close opened items\n add_item(items, coder, prev_tag, start, n)\n\n # add zero item to the full length of the text for all tags\n start = n + 1\n for tag in tags.tags:\n add_zero_item(items, coder, tag, start)\n\n return items", "def codebook_json_data_factory() -> List[Dict[str, Any]]:\n codebook_data = [\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 0, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_A\"\n },\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 2, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_B\"\n },\n ]\n return codebook_data", "def item_code(self):\n return self._item_code", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = 
[]\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = []\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def traffic_statuscodes_cachecodes(self, **kwargs):\n url_path = 'traffic/statuscodes/cachecodes'\n self.logger.debug(f\"Get list of cache codes\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def get_list_html(self, items):\n html = \"\"\"\n <html>\n\t\t\t<head>\n\t\t\t\t<title>OpenFDA Cool App</title>\n\t\t\t</head>\n\t\t\t<body>\n <ol>\n \"\"\"\n\n for item in items:\n html += \"<li>\" + item + \"</li>\\n\"\n\n html += \"\"\"\n </ol>\n\t\t\t</body>\n </html>\n \"\"\"\n\n return html", "def get_items_to_find(self):\n self.items_to_find = ['sole', 'farina', 'innaffiatoio']", "def lookup (barcode, ID_TYPES=['ISBN', 'UPC','EAN']):\n\n matches = [] # list of {'desc', 'sku', 'type', 'vnd'}\n\n for idtype in ID_TYPES:\n try:\n result = api.item_lookup(barcode, SearchIndex='All', IdType=idtype)\n for item in result.Items.Item:\n if not _is_duplicate(item.ASIN, matches):\n matches.append({'desc': unicode(item.ItemAttributes.Title),\n 'sku': unicode(item.ASIN),\n 'type': idtype,\n 'vnd': 'AMZN:'+AMZLOCALE}) # vendor id\n\n except (errors.InvalidAccount, errors.InvalidClientTokenId, errors.MissingClientTokenId):\n print >>sys.stderr, \"Amazon Product API lookup: bad account credentials\"\n\n except errors.TooManyRequests, toomanyerr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", toomanyerr\n\n except errors.InternalError, awserr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", awserr\n\n except errors.InvalidParameterValue:\n # this simply means the barcode\n # does not exist for the given type,\n # so no need to do anything explicit\n pass\n\n return matches", "def decode_list(self, tokens: list) -> str:\r\n return NotImplementedError", "def codes(self, name):\n return self._get_valuemap(name, non_mapped='codes')", "def aggregated_item_codes( dfs ):\n for (series, regexes) in [\n (\"inversion\" , ac.regexes_for_2_codes() )\n , (\"funcionamiento\" , ac.regexes_for_2_codes() )\n , (\"ingresos\" , ac.regexes_for_ingresos() ) ]:\n df = dfs[series]\n\n # build some columns\n (category, top, child) = regexes\n df[\"item categ\"] = (\n df[\"item code\"]\n . str.extract( category ) )\n df[\"item top\"] = ~ pd.isnull(\n df[\"item code\"]\n . str.extract( top ) )\n df[\"item child\"] = ~ pd.isnull(\n df[\"item code\"]\n . 
str.extract( child ) )\n\n df = ( # keep only rows labeled with top categories\n # or the first generation below the top categories\n df[ (df[\"item top\"])\n | (df[\"item child\"]) ] )\n\n # Verify that codigo-top is the boolean negative of codigo-child.\n # (That's not true before we drop rows categorized deeper than top or child.)\n assert ( len ( df[ ( (df[\"item top\"].astype(int)) +\n (df[\"item child\"]).astype(int) )\n != 1 ] )\n == 0 )\n df = df.drop( columns = [\"item child\"] )\n\n dfs[series] = df\n return dfs", "def extract_symbols(f: TextIO) -> List[str]:\n\n df = pd.read_csv(f, delimiter=\"|\")\n return df[\"Symbol\"].tolist(), df[\"NASDAQ Symbol\"].tolist()", "def to_strs(items) -> List[str]:\n result = []\n for item in items:\n if isinstance(item, str):\n if len(item) > 0:\n if not ((item[0] == '\"' and item[-1] == '\"') or (item[0] == \"'\" and item[-1] == \"'\")):\n result.append('\"' + item + '\"')\n else:\n result.append(item)\n else:\n result.append(item)\n else:\n result.append(str(item))\n return result", "def get_items_for_catalog(catalog_id):\n pass", "def tokenLookup(instrument_df, symbol_list):\n token_list = []\n for symbol in symbol_list:\n token_list.append(int(instrument_df[instrument_df.tradingsymbol == symbol].instrument_token.values[0]))\n return token_list", "def get_library_code(library_ids, api):\n code = []\n for library_id in library_ids:\n source_code = api.get_library(library_id)[\"object\"][\"source_code\"]\n code.append(source_code)", "def tokenLookup(instrument_df,symbol_list):\n token_list = []\n for symbol in symbol_list:\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\n return token_list", "def decode_sequence(sequence, encoding_to_item):\n return_sequence = []\n\n for i, itemset in enumerate(sequence):\n decoded_itemset = set()\n for item in itemset:\n decoded_itemset.add(encoding_to_item[item])\n return_sequence.append(decoded_itemset)\n return return_sequence", "def ISBNs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('isbns', default)\n return [HEP.ISBNObject(i) for i in tmp]", "def decode(self, encoded):\n decoded = []\n for codes in encoded:\n tmp = []\n for code in codes:\n try:\n word = self.vocab[code]\n tmp.append(word)\n except:\n tmp.append(self.unk_token)\n decoded.append(tmp)\n return decoded", "def __init__(self):\n # note: We could have implemented the list as a dictionary, with\n # the barcode as the key, however if the barcode for the item\n # changes we might have problems.\n self.stocklist = [] # a list of stock items", "def get_codecs_list():\n for codec in CODECS_IN_FILE.iterkeys():\n print codec", "def get_skill_list(self):\n return [\n i.strip() for i in\n self.ansi_escape.sub('', check_output([BIN, 'list'])).split('\\n')\n ]", "def _get_symbols(exchange_code: str, token: str) -> List[mtypes.Symbol]:\n _LOG.info(\"Getting symbols list for exchange: '%s'\", exchange_code)\n response = get_client().service.SymbolList(\n Token=token, Exchange=exchange_code\n )\n\n if response.SYMBOLS is None:\n _LOG.error(\"No symbols found for exchange: '%s'\", exchange_code)\n return []\n\n symbols = [\n mtypes.Symbol.from_dict(d=obj)\n for obj in zeep.helpers.serialize_object(response.SYMBOLS[\"SYMBOL\"])\n ]\n\n _LOG.info(\"Got %s symbols for exchange '%s'\", len(symbols), exchange_code)\n return symbols", "def description(self):\n item_counts = [f'{i.quantity}x {i.item.name}' for i in self.items]\n return ','.join(item_counts)", "def getIngredients():\n 
ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients", "def get_share_list():\n url = \"https://www1.nseindia.com/content/equities/EQUITY_L.csv\"\n resp = requests.get(url)\n resp = csv_to_list(resp)[1:-1]\n return create_stock(resp)", "def comunindades():\n data = fetch_url(SIGPAC_SERVER + COMUNINDADES)\n return parse_code_name(data)", "def getAllFirstDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.append(worldItems[item][DESCWORDS][0])\r\n return list(set(descWords))", "def parse_items(lines):\n # pozice bodu\n anchors = [idx for idx, line in enumerate(lines) if PROGRAMME_ITEM_RE.match(line)]\n\n # syrove bloky po jednotlivych bodech\n blocks = []\n for idx in range(len(anchors)-1):\n blocks.append(lines[anchors[idx]:anchors[idx+1]])\n blocks.append(lines[anchors[idx+1]:])\n\n # preciznejsi vyparsovani udaju z bloku\n out = []\n for block in blocks:\n data = [i.strip() for i in block if i.strip()]\n if not len(data):\n continue\n item = {'proposer':None, 'number':None, 'title':None}\n\n # predkladatel\n proposer_found = False\n m = PROGRAMME_PROPOSER_RE.match(data[-1])\n if m:\n item['proposer'] = m.group(1).strip()\n proposer_found = True\n\n # cislo bodu\n m = PROGRAMME_ITEM_RE.match(data[0])\n item['number'] = m.group(2).strip()\n\n # titulek bodu\n title = [m.group(3)]\n if proposer_found:\n title.extend(data[1:-1])\n else:\n title.extend(data[1:])\n item['title'] = u\" \".join([i.strip() for i in title])\n out.append(item)\n\n return out", "def get_kcca_devices_codes():\n headers = {'x-api-key': CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n api_url = CLARITY_API_BASE_URL + \"devices\"\n results = requests.get(api_url, headers=headers)\n\n device_data = pd.DataFrame(results.json())\n\n device_codes = []\n\n for index, row in device_data.iterrows():\n device_codes.append(row['code'])\n\n return device_codes", "def get_kcca_devices_codes():\n headers = {'x-api-key': CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n api_url = CLARITY_API_BASE_URL + \"devices\"\n results = requests.get(api_url, headers=headers)\n\n device_data = pd.DataFrame(results.json())\n\n device_codes = []\n\n for index, row in device_data.iterrows():\n device_codes.append(row['code'])\n\n return device_codes", "def getArtistsofArtwork(catalog, codes):\n return model.getArtistname(catalog,codes)", "def slist(body):\n return SList(body.split(\"\\n\"))", "def sanitize_sample_descriptions(sample_description_list, sanitize_fn=sanitize_text):\n filtered_sample_desc_list = []\n for text in sample_description_list:\n filtered_sample_desc_list.append(sanitize_fn(text))\n\n return filtered_sample_desc_list", "def convert_int_encoded_cards_to_str_encoded(cards: List[int]) -> List[str]:\n return [card_strings[i] for i in cards]", "def code_to_sequences( self, ucode ):\n\t\t\n\t\tassert isinstance( ucode, unicode ), 'ucode must be unicode string!' 
\n\t\t\n\t\tfor uchar in ucode:\n\t\t\tif not( uchar in self._char39 ):\n\t\t\t\traise Barcode39Error( '%s char is not listed in Barcode39 characters [0..9,A..Z,space,9,-,.,$,/,+,%]' )\n\n\t\tresult = []\n\t\tfor uchar in ucode:\n\t\t\tresult = result + self.char_to_seq(uchar) \n\t\t\t\n\t\treturn result", "def get_fire_centre_station_codes() -> List[int]:\n station_codes = []\n with get_read_session_scope() as session:\n station_query = get_all_stations(session)\n for station in station_query:\n if isinstance(station, dict):\n station_codes.append(int(station['station_code']))\n else:\n station_codes.append(int(station._mapping['station_code']))\n\n return station_codes", "def convert_items(items):\n for idx in range(len(items)):\n item_name, item_sell_in, item_quality = items[idx].name, items[idx].sell_in, items[idx].quality,\n comp_name = item_name.lower() # the name with which we compare by\n\n new_item = items[idx]\n if 'aged brie' in comp_name:\n new_item = AgedItem(item_name, item_sell_in, item_quality)\n elif 'sulfuras' in comp_name:\n new_item = LegendaryItem(item_name, item_sell_in, item_quality)\n elif 'conjured' in comp_name:\n new_item = ConjuredItem(item_name, item_sell_in, item_quality)\n elif 'backstage passes' in comp_name:\n new_item = BackstagePass(item_name, item_sell_in, item_quality)\n items[idx] = new_item\n\n return items", "def get_contracts_from_list(save_path, repo_name, file_list=None, session=None):\n github_raw_api = 'https://raw.githubusercontent.com/{}/master/'.format(repo_name)\n\n contract_list = {}\n if file_list and session:\n # Download each file from the list\n for file in file_list:\n file_data = session.get('{}{}'.format(github_raw_api, file.get('path')))\n with open(os.path.join(save_path, file.get('name')), 'wb') as f:\n f.write(file_data.content)\n\n contract_list[file.get('name')] = file_data.text\n return contract_list\n return None", "def __get_instrument_items(self, i):\n all_instruments = self.__read_instrumentslist()\n return [x for x in all_instruments[i * 8:i * 8 + 8]]", "def CARD_SUITS() -> tuple:\n return \"Diamonds\", \"Hearts\", \"Clubs\", \"Spades\"", "def values(self, items_list):\n return [self.resolve(value) for value in items_list]", "def _get_codes_for_sorting(self) -> list[Categorical]:\n\n def cats(level_codes):\n return np.arange(\n np.array(level_codes).max() + 1 if len(level_codes) else 0,\n dtype=level_codes.dtype,\n )\n\n return [\n Categorical.from_codes(level_codes, cats(level_codes), True, validate=False)\n for level_codes in self.codes\n ]", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def get_species_list(index: int, list_species: List[str]) -> List[str]:\n species = []\n for sp in list_species:\n species.extend([sp] * index)\n return species", "def demo_get_all_books(self):\n results = []\n self.cursor.execute(\"\"\"SELECT ISBN FROM book\"\"\")\n for book in self.cursor.fetchall():\n results.append(book[0])\n return results" ]
[ "0.63454723", "0.5953065", "0.58789575", "0.5782579", "0.5730202", "0.551616", "0.5417973", "0.5402704", "0.5288644", "0.5257909", "0.5255299", "0.5200416", "0.5199842", "0.51931006", "0.5064142", "0.50508547", "0.50235844", "0.49738976", "0.4950196", "0.49323055", "0.49277037", "0.49023208", "0.4864917", "0.4849586", "0.48492593", "0.4833745", "0.4795869", "0.47937497", "0.4766169", "0.4765236", "0.4757155", "0.47528774", "0.47082758", "0.47058168", "0.47049603", "0.47015262", "0.4699893", "0.4696882", "0.4694448", "0.46918333", "0.4675466", "0.46539095", "0.4644362", "0.46212062", "0.45906118", "0.4580103", "0.45662475", "0.45535", "0.45517343", "0.45486912", "0.4542399", "0.45195186", "0.4519003", "0.45032308", "0.4494987", "0.44908217", "0.44908217", "0.44818506", "0.44758835", "0.4475853", "0.44750628", "0.44723925", "0.44676685", "0.44658017", "0.44593865", "0.44486612", "0.44431984", "0.44414634", "0.44401637", "0.44376227", "0.44353828", "0.44343767", "0.443391", "0.44312048", "0.4429254", "0.44291544", "0.44276935", "0.44228107", "0.44211504", "0.44200832", "0.44195914", "0.44167787", "0.44041145", "0.44039348", "0.44039348", "0.43981767", "0.43969157", "0.439505", "0.4394659", "0.43905795", "0.4388841", "0.43795103", "0.43745175", "0.43736356", "0.43690947", "0.43634742", "0.43591705", "0.43545142", "0.43495792", "0.43460542" ]
0.7344879
0
Returns list of items unit price from list of stock codes.
def getUnitPriceList(self, list_stockCode): df = self._df_invoice_original list_unitPrice = list() for stockCode in list_stockCode: unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0] list_unitPrice.append(unitPrice) return list_unitPrice
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode", "def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCode==stockCode].Description.unique()[0]\n list_description.append(description)\n \n return list_description", "def dishlist_prices(n: list) -> list:\r\n return [dish.price for dish in n]", "def get_units(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[3])\n return result", "def add_gst (list_of_prices):\n\n add_gst=[]\n for item in list_of_prices:\n list_with_gst = round(item*1.15,2)\n add_gst+=[list_with_gst]\n return add_gst", "def get_lp(s):\n sl = [] \n for stock in s.symbols: \n #creates a list of latest stock prices\n quote = get(stock,\"LON\")\n #changes string to integer and removes ','\n x = (quote.replace(',',''))\n x = float(x)\n sl.append(x)\n return sl", "def get_prices(self):\n price = self.get_price()\n if price:\n return [price]\n return []", "def getCurrencies():", "def calculate_prices(self, good=None):\n\n stock = self.calculate_init_stock(good)\n buy = self.buying_price()\n\n if stock == 0:\n sell = 0\n buy = buy + (buy * 0.5)\n\n elif stock < 500:\n # mild bug: stock, without selling price\n sell = self.selling_price()\n elif stock >= 500:\n # higher production, lower prices\n sell = self.selling_price() / 2\n buy = buy - (buy * 0.5)\n\n return [buy, sell, stock]", "def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def prices(parsed, appearance_value, removal_value, gate_single_value, gate_double_value):\n\t\tprices = []\n\n\t\tfor item in parsed:\n\t\t\tif item.entityType == \"fence\":\n\t\t\t\tprices.append(QuoteCalculation._fencePrice(item, appearance_value, removal_value))\n\n\t\t\telif item.entityType == \"gate\":\n\t\t\t\tprices.append(QuoteCalculation._gatePrice(item, gate_single_value, gate_double_value, removal_value))\n\n\t\t\t# Not required?\n\t\t\t#elif item.entityType == \"post\":\n\t\t\t\t#prices.append(QuoteCalculation._postPrice(item))\n\n\t\treturn prices", "def return_currency_pairs(self):\n return list(sorted(list(c for c in self.return_24_volume().keys()\n if not c.startswith('total'))))", "def get_coin_price_list(df: pd.DataFrame) -> list:\n return df['rates'].to_list()", "def uCSIsCurrencySymbols(code):\n ret = libxml2mod.xmlUCSIsCurrencySymbols(code)\n return ret", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, 
outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def _get_prix_tarif(self,cout,pricelist):\n cr = self._cr\n product=cout.name\n prix_tarif=0\n date=time.strftime('%Y-%m-%d') # Date du jour\n if pricelist:\n #Convertion du lot_mini de US vers UA\n min_quantity = self.env['product.uom']._compute_qty(cout.name.uom_id.id, cout.name.lot_mini, cout.name.uom_po_id.id)\n #TODO : Pour contourner un bug d'arrondi (le 31/01/2017)\n min_quantity=min_quantity+0.00000000001\n #TODO en utilisant la fonction repr à la place de str, cela ne tronque pas les décimales\n SQL=\"\"\"\n select ppi.price_surcharge\n from product_pricelist_version ppv inner join product_pricelist_item ppi on ppv.id=ppi.price_version_id\n where ppv.pricelist_id=\"\"\"+str(pricelist.id)+ \"\"\" \n and min_quantity<=\"\"\"+repr(min_quantity)+\"\"\"\n and (ppv.date_start <= '\"\"\"+date+\"\"\"' or ppv.date_start is null)\n and (ppv.date_end >= '\"\"\"+date+\"\"\"' or ppv.date_end is null)\n\n and ppi.product_id=\"\"\"+str(product.id)+ \"\"\" \n and (ppi.date_start <= '\"\"\"+date+\"\"\"' or ppi.date_start is null)\n and (ppi.date_end >= '\"\"\"+date+\"\"\"' or ppi.date_end is null)\n order by ppi.sequence\n limit 1\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n for row in result:\n coef=1\n if min_quantity:\n coef=cout.name.lot_mini/min_quantity\n prix_tarif=row[0]/coef\n\n\n\n return prix_tarif", "def currency_codes():\n return list(settings.CURRENCIES)", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def bond_price(fv, c,n,m,r):\n return sum([a*b for a,b in zip(discount_factors(r,n,m),bond_cashflows(fv, c, n, m))])", "def __call__(self):\n currency_data = getUtility(ICurrencyData)\n currency_data_list = currency_data.currency_data_list()\n results = {}\n for i in currency_data_list:\n results.update({i['code']:i['decimal']})\n return results", "def test_lowest_price_many_listings(self):\n listings = steam_market.get_lowest_price(soup=get_soup_from_path(TEST_FILE_MANY_RESULTS))\n self.assertEqual('0,03€', listings)", "def to_quantities(\n values: list,\n uncertainties: list\n) -> typing.List[IndexedQuantity]:\n\n return [\n IndexedQuantity(i, v)\n for i, v in enumerate(mstats.values.join(values, uncertainties))\n ]", "def getListOfUnits(self, *args):\n return _libsbml.UnitDefinition_getListOfUnits(self, *args)", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def get_stock_price(stock):\n pass", "def get_units(self, names):\n # Make sure names is a list\n if isinstance(names, str) or isinstance(names, unicode):\n names = [names]\n \n # Return the list of units\n ans = []\n for name in names:\n if name in self.interp_ds:\n ans.append(self.interp_ds[name].attrs['units'])\n else:\n ans.append('Not Available in Dataset')\n \n return ans", "def get_prices(symbol, sd, ed):\n\n # get symbol list and date range\n syms = [symbol]\n dates = pd.date_range(sd, ed)\n\n # Get prices data, automatically adds SPY\n prices_all = ut.get_data(syms, dates)\n\n # normalize price, price[t] /= price[0]\n prices_all = ind.normalize(prices_all)\n\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all['SPY'] # only SPY, for comparison later\n # if self.verbose: print prices\n\n return prices", "def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]", "def construct_futures_symbols(symbol, start_year=2015, end_year=2017):\n futures = []\n # March, June, September and December delivery codes\n months = 'HMUZ'\n for y in range(start_year, end_year+1):\n for m in months:\n futures.append('%s%s%s' % (symbol, m, y))\n return futures", "def get_prices(self):\n pass", "def get_price_list(self, symbol, interval=None, ref='close'):\n if interval == None:\n interval = self.fetch_interval\n return self.trader.queue.get_symbol_interval_prices(symbol, interval, ref)", "def _construct_all_prices(self):\n d = dict([(s+'-', 0.0) for s in self.symbol_list] +\n 
[(s+'+', 0.0) for s in self.symbol_list])\n d['datetime'] = self.backtest_date\n return [d]", "def _get_units(self, unit_tag):\n\n # a list that contains apartment unit's information\n unit = []\n # use a loop to list all the cells in a row \n for cell in unit_tag.find_all('td'):\n if cell.attrs: # omit the cell with nothing in it \n # look for the apartment #, however, this info is not\n # consistent across the entire webiste\n if cell['data-tid'] == 'pdpfloorplans-unit-displayText':\n unit_num = cell.get_text()\n unit.append(unit_num)\n # scrape the price of the unit\n if cell['data-tid'] == 'pdpfloorplans-unit-price':\n try:\n unit_price = cell.get_text().replace('$', '')\n # try to convert the price to float \n unit.append(float(unit_price))\n except:\n # if there's no price for this unit\n # append the list with a null value \n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-bedbath':\n try:\n # try to extract the tags that include the number\n # of bedrooms and bathrooms \n bedbath_tag = cell.find_all('span')\n bed_tag, bath_tag = bedbath_tag[0], bedbath_tag[1]\n # regular expression pattern for extracting any types\n # of numbers, including integer and floating numbers \n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n bed = re.findall(pattern, bed_tag.get_text())\n bath = re.findall(pattern, bath_tag.get_text())\n bed_unit, bath_unit = 0, 0\n if bed:\n bed_unit = bed[0]\n if bath:\n bath_unit = bath[0]\n unit.append(float(bed_unit))\n unit.append(float(bath_unit))\n except:\n # if the convertion failed, append the list\n # will two null values \n unit.append(np.nan)\n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-sqft':\n # follow the same procedure as above, but this time\n # scrape the square foot of the apartment unit\n try:\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n sqft_unit = re.findall(pattern, cell.get_text())[0]\n unit.append(float(sqft_unit))\n except:\n unit.append(np.nan)\n return unit", "def get_currency_list(self, df):\n return list(df[\"Currency\"].unique())", "def returnCurrencies(self):\n pass", "def test_currency_code(self):\n currencies_codes = ['JPY', 'AUD', 'GBP', 'EUR']\n rates = [0.03, 3.12, 4.98, 4.28]\n helper.currency_loop_helper(get_historical_currency_rate, TestHistoricalRates.dates_rate,\n rates, currencies_codes)", "def get_stock_prices(ticker, start_date, end_date=None):\n if end_date is None:\n end_date = dt.date.today()\n\n shares = Share(ticker)\n df = pd.DataFrame(shares.get_historical(start_date.isoformat(),\n end_date.isoformat()))\n return df.set_index(\"Date\", drop=True) \\\n .drop(\"Symbol\", axis=1) \\\n .astype(float) \\\n .sort_index()", "def extract_listing_price_from_result(soup, prices):\r\n for description in soup.find_all(name='div', class_='descr'):\r\n price = description.find(name='div', class_='price')\r\n if price == None:\r\n prices.append('No Price')\r\n else:\r\n prices.append(price.get_text())\r\n # print(prices)\r\n return prices", "def units(inventory):\n return inventory.reduce(convert.get_units)", "def get_country_codes(prices):\n # your code here\n \n#_________________# 1. Break the string into a list. \n prices = prices.split('$') # breaks the list into a list of elements.\n \n#_________________# 2. Manipulate the individual elements.\n\n #_________________# A. 
Remove integers\n# nation = prices[0], prices[1]\n length = len(prices)\n\n for nation in (prices):\n nation == prices[0:]\n print(nation)\n #_________________# B.\n \n nations = []\n for each_char in (0, prices, 2):\n if each_char in prices[0:2]:\n nation = each_char\n nations = list(nations)\n # lastitem = nations.pop()\n print(nations)", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_currencyinfo(table):\n cuinfo = []\n for item in table:\n if \"Beschäft\" in item:\n continue\n currency = \"\"\n unit = \"1\"\n if \"TEUR\" in item:\n currency = \"EUR\"\n unit = \"1000\"\n elif \"TDM\" in item:\n currency = \"DM\"\n unit = \"1000\"\n elif \"%\" in item:\n unit = \"%\"\n elif len(item.split(\"(\")) > 1:\n currency = item.split(\"(\")[-1].split(\" \")[-1].replace(\")\", \"\").replace(\",\", \"\").strip()\n if len(item.split(\"(\")[-1].split(\" \")) > 1:\n unit = item.split(\"(\")[-1].split(\" \")[-2]\n if \"Mio\" in item:\n unit = \"1000000\"\n if \"Mrd\" in item:\n unit = \"1000000000\"\n else:\n currency = item\n cuinfo.append({'currency': currency, 'unit': unit,'text': item.split(\"(\")[0]})\n return cuinfo", "def test_simtk_list_of_quantities_to_pint():\n list_of_quantities = [val * omm_unit.meter for val in range(10)]\n quantity_list = omm_unit.meter * [val for val in range(10)]\n\n assert list_of_quantities != quantity_list\n assert all(simtk_to_pint(list_of_quantities) == simtk_to_pint(quantity_list))", "def list_tick_dates(self, stock_code):\n\n conn = self.db_engine.connect()\n try:\n get_tick_dates_sql = \"\"\"\n SELECT DISTINCT \"update_date\" FROM \"{0}\".\"{1}\"\n WHERE end_update_time IS NOT NULL AND stock_code='{2}'\n \"\"\".format(Schemas.SCHEMA_META, Tables.TABLE_TICK_UPDATE_LOGS, stock_code)\n\n res = pd.read_sql(get_tick_dates_sql, conn)['update_date'].tolist()\n res.sort()\n return Error.SUCCESS, res\n except Exception:\n self.logger.log_error(traceback.format_exc())\n return Error.ERROR_DB_EXECUTION_FAILED, None\n finally:\n conn.close()", "def get_ether_current_prices():\n req = requests.get('https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=BTC,USD,EUR')\n data = req.json()\n\n print('{0}, {1}, {2}'.format(data['EUR'], data['USD'], data['BTC']))", "def eur_bid_prices(self, eth_binance_symbols):\n\n data = []\n for item in eth_binance_symbols:\n data.extend([{\n \"hour_micro\": item[\"EthEurTime\"],\n \"symbol\": item[\"symbol\"][:-3],\n \"EURbidPrice\": item[\"EURbidPrice\"]\n }])\n DB.eur_prices.insert(data)\n\n return", "def get_stock_prices(ticker_symbol, start_date, finnhub_client):\n end_date = pd.Timestamp(pd.Timestamp.today().date())\n end_unix = get_unix_time(end_date)\n start_unix = get_unix_time(start_date)\n\n # Pause shortly\n time.sleep(1)\n\n # Stock candles\n res = finnhub_client.stock_candles(ticker_symbol, 'D', start_unix, end_unix)\n if res[\"s\"] == \"no_data\":\n return pd.DataFrame()\n # Convert to Pandas Dataframe\n df_finnhub = pd.DataFrame(res)\n timestamp_index = df_finnhub[\"t\"].apply(lambda x: 
pd.Timestamp(pd.to_datetime(x, unit='s', origin='unix').date()))\n df_ticker = pd.DataFrame(df_finnhub[\"o\"].values, index=timestamp_index.values)\n return df_ticker", "def get_vendor_price_lists_from_date_value(self):\n return self.get_text_from_element(self.set_from_date_locator, is_a_input_field=True)", "def list_power_supply_units(self):\n\n doc = self.client.enumerate(uris.CIM_PowerSupply)\n\n psus = doc.find('.//s:Body/wsen:EnumerateResponse/wsman:Items',\n wsman.NS_MAP)\n\n return [self._parse_psus(psu) for psu in psus]", "def tidy_up_prices(prices: typing.List[typing.List]) -> typing.List[typing.Tuple]:\n p2 = [(datetime.fromtimestamp(int(p[0]/1000), timezone.utc), p[1]) for p in prices]\n p2.sort(key=lambda p: p[0])\n\n return p2", "def get_dividends(self, stock_list, start_date=None, end_date=None):\n df_dict = {}\n df_list = []\n file_in_path = [year.replace(\".csv\", \"\") for year in self.get_csv_in_path(self.dividend_eps_path)]\n if not start_date:\n start_date = file_in_path[0]\n if not end_date:\n end_date = file_in_path[-1]\n if start_date > end_date:\n return df_dict\n for year in range(int(start_date), int(end_date)+1):\n target_path = \"{}/{}.csv\".format(self.dividend_eps_path, year)\n df = pd.read_csv(target_path, index_col=\"名稱\")\n self.replace_nan_to_other(df, \"\")\n for stock in stock_list:\n pd_index = df.index.to_list()\n old_list = []\n if stock in pd_index:\n data = df.loc[stock]\n\n # print(\"日期 = {}\".format(data.get(\"除息交易日\")))\n if df_dict.get(stock):\n old_list = df_dict.get(stock)\n\n # check data is available\n dict = {}\n if data.get(\"現金股利\") != \"\":\n dict.update({\"除息交易日\": \"{}{}\".format(year, data.get(\"除息交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除息交易日') else \"\",\n \"現金股利\": data.get(\"現金股利\"),\n })\n if data.get(\"股票股利\") != \"\":\n dict.update({\"除權交易日\": \"{}{}\".format(year, data.get(\"除權交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除權交易日') else \"\",\n \"股票股利\": data.get(\"股票股利\"),\n })\n if dict:\n old_list.append(dict)\n df_dict.update({stock: old_list})\n\n return df_dict", "def get_num_ls_ls(ls_ls_prices):\n return [[get_num_str(price) for price in ls_prices] for ls_prices in ls_ls_prices]", "def lookup_dividends(ticker):\n dividend_df = ticker.dividends\n return(convert_df_to_list(dividend_df))", "def get_list_powers(self):\r\n s = self.query('LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def _lazy_load_units_by_code():\n if UNITS_BY_CODE:\n # already populated\n return\n\n for unit in units.UNITS_BY_NAME.values():\n UNITS_BY_CODE[unit.code] = unit", "def currencies():\n return _CURRENCIES", "def get_volume_at_price(ticks, limit_price, volume, is_back):\n ret_ticks = list()\n\n for tick in ticks:\n o, v = _game_avail_volume(tick, limit_price, volume, is_back)\n ret_ticks.append({'o': o, 'v': v, 't': tick[MarketTick.timestamp.db_field]})\n\n return ret_ticks", "def get_items(self):\n\n items = []\n\n params = self.request.query_params\n\n if 'items[]' in params:\n items = params.getlist('items[]', [])\n elif 'item' in params:\n items = [params.get('item', None)]\n\n if type(items) not in [list, tuple]:\n items = [items]\n\n valid_ids = []\n\n for item in items:\n try:\n valid_ids.append(int(item))\n except (ValueError):\n pass\n\n # List of StockItems which match provided values\n valid_items = 
StockItem.objects.filter(pk__in=valid_ids)\n\n return valid_items", "def getQuantityList(self, infoFilePath):\n quList = []\n with open(infoFilePath, 'r')as infoFile:\n line = infoFile.readline()\n while line != '':\n if 'File.At.' in line:\n name = line.split(' ', 2)[2].rstrip()\n type = infoFile.readline().split(' ', 2)[2].rstrip()\n quList.append(Quantity(name, type))\n line = infoFile.readline()\n return quList", "def list_data_units(self):\n return self.data_units.items()", "def clean_units(units):\n return [clean_unit(unit) for unit in units]", "def target_prices(self) -> List[float]:\n return self._target_prices", "def convert_html_to_list(html: str) -> List[dict]:\n soup = BeautifulSoup(html, 'html.parser')\n if not (tbodies := soup.find_all('tbody')): # Find the Table\n raise ValueError('No tables found in the HTML passed!')\n\n index_column_map = {\n 1: 'date',\n 2: 'open',\n 3: 'high',\n 4: 'low',\n 5: 'close',\n 6: 'adj_close',\n 7: 'volume'\n }\n\n stock_data = []\n current_record = {}\n index = 1\n for td in tbodies[0].find_all('td'):\n\n if index not in index_column_map: # surpassed the max value in the dict\n stock_data.append(current_record)\n index = 1 # Reset index\n current_record = {} # Reset current record dict\n\n value = re.search(r'>(.*)<', str(td.contents[0])).group(1)\n # Convert to float if possible\n value = float(val_no_comma) if (val_no_comma := value.replace(',', '')).replace('.', '').isalnum() else value\n current_record[index_column_map[index]] = value\n index += 1\n\n return stock_data", "def _do_the_math(a_list):\n b_list = []\n for i in a_list:\n i = i.replace('pi', \"np.pi\")\n # ...\n i = str(eval(i)) # pylint: disable-msg=eval-used\n b_list.append(i)\n return b_list", "def client_units(self) -> List[FlyingUnit]:\n return [u for u in self.units if u.is_human()]", "def test_run():\n for symbol in ['AAPL', 'IBM']:\n print(\"Mean Volume\")\n print(symbol, get_mean_volume(symbol))", "def build_dividend_lists(portfolio_dict):\n # ETF dividend list\n dow_dividends = lookup_dividends(yf.Ticker(\"DIA\")) \n sp500_dividends = lookup_dividends(yf.Ticker(\"SPY\")) \n nasdaq_dividends = lookup_dividends(yf.Ticker(\"QQQ\")) \n totalmarket_dividends = lookup_dividends(yf.Ticker(\"VTI\")) \n \n # Portfolio dividends\n portfolio_dividend_dict = {}\n for key in portfolio_dict:\n portfolio_dividend_dict[key] = lookup_dividends(yf.Ticker(key))\n \n return (dow_dividends, sp500_dividends, nasdaq_dividends, totalmarket_dividends, portfolio_dividend_dict)", "def populateListFormulaUnitsData(self):\n return _libsbml.Model_populateListFormulaUnitsData(self)", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = []\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def tokenLookup(instrument_df,symbol_list):\r\n token_list = []\r\n for symbol in symbol_list:\r\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\r\n return token_list", "def get(self, *args):\n return _libsbml.ListOfUnits_get(self, *args)", "def close_lst(self, f):\n print(\"Closing price running...\")\n close_price = []\n dates = []\n for i,j in enumerate(f['Close']):\n close_price.append(f['Close'][i])\n dates.append(f.ix[i].name.date())\n print(\"Closing price done!\")\n return dates, close_price", "def remove_number_symbols(data_lists):\n billion = 'B'\n million = 'M'\n thousand = 'K'\n percent = '%'\n dot = '\\.'\n comma = ','\n B = 
re.compile(billion)\n M = re.compile(million)\n K = re.compile(thousand)\n Perc = re.compile(percent)\n Dot = re.compile(dot)\n Comm = re.compile(comma)\n\n fltpoint_dict = {}\n for symbol, datalist in data_lists.iteritems():\n new_data_list = []\n if len(datalist) > 1:\n for statistic in datalist:\n if percent in statistic:\n statistic = Perc.sub('', statistic)\n statistic = Comm.sub('', statistic)\n new_data_list.append(float(statistic))\n elif comma in statistic or 'May' in statistic or 'Mar' in statistic:\n statistic = Comm.sub('', statistic)\n try:\n statistic = float(statistic)\n except:\n pass\n new_data_list.append(statistic)\n elif billion in statistic or million in statistic or thousand in statistic:\n statistic = B.sub('0000000', statistic)\n statistic = M.sub('00000', statistic)\n statistic = K.sub('0', statistic)\n statistic = Dot.sub('', statistic)\n new_data_list.append(float(statistic))\n else:\n try:\n statistic = float(statistic)\n except:\n pass\n new_data_list.append(statistic)\n fltpoint_dict[symbol] = new_data_list\n else:\n fltpoint_dict[symbol] = ['N/A']\n\n return fltpoint_dict", "def get_basket_items_discount(self, offer_info, actual_volume, product_prices):\n prod_code = offer_info.base_prod_code\n base_prod_vol = actual_volume.get(prod_code.lower())\n\n discount_basket = []\n\n if base_prod_vol >= offer_info.min_vol:\n offer_on_prod = offer_info.offer_on\n if actual_volume.get(offer_on_prod.lower()):\n print(f\"Base product volume is greater than minimum required volume & product on offer is also available \"\n f\"in cart..\")\n if offer_info.is_limited:\n print(f\"Limited offer..\")\n if prod_code == offer_on_prod:\n # total_allowed_items_on_offer = Limit Volume of base product * (Offer Product Max Volume/Minimum volume of base product)\n total_allowed_items_on_offer = offer_info.limit_vol * (offer_info.offer_prod_volume/offer_info.min_vol)\n max_limit = 1\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n while max_limit <= total_allowed_items_on_offer:\n discounted_price = (base_prod_actual_price *(offer_info.discount_perc/100))*-1\n discount_basket.append((offer_info.offer_code, discounted_price))\n max_limit += 1\n else:\n total_allowed_items_on_offer = offer_info.limit_vol * (offer_info.offer_prod_volume / offer_info.min_vol)\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n max_limit = 1\n while max_limit <= total_allowed_items_on_offer:\n offer_onprod_actual_price = product_prices.get(offer_on_prod.lower()).get('price')\n discounted_price = (offer_onprod_actual_price *(offer_info.discount_perc/100))*-1\n for j in range(0, actual_volume.get(offer_on_prod.lower())):\n discount_basket.append((offer_on_prod, offer_onprod_actual_price))\n discount_basket.append((offer_info.offer_code, discounted_price))\n max_limit += 1\n else:\n print(f\"Unlimited offer..\")\n if prod_code == offer_on_prod:\n if base_prod_vol > offer_info.min_vol:\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n if i%2 != 0:\n discounted_price = (base_prod_actual_price *(offer_info.discount_perc/100))*-1\n discount_basket.append((offer_info.offer_code, discounted_price))\n else:\n for i in range(0, 
base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n else:\n for i in range(0, base_prod_vol):\n base_prod_actual_price = product_prices.get(prod_code.lower()).get('price')\n discount_basket.append((prod_code, base_prod_actual_price))\n\n offer_onprod_actual_price = product_prices.get(offer_on_prod.lower()).get('price')\n discounted_price = (offer_onprod_actual_price * (offer_info.discount_perc / 100))*-1\n\n for j in range(0, actual_volume.get(offer_on_prod.lower())):\n discount_basket.append((offer_on_prod, offer_onprod_actual_price))\n discount_basket.append((offer_info.offer_code, discounted_price))\n\n\n return discount_basket", "def get_list_powers(self):\r\n _debug('simq03b_api.get_list_powers')\r\n \r\n s = self.query('SOUR:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def _get_symbols(exchange_code: str, token: str) -> List[mtypes.Symbol]:\n _LOG.info(\"Getting symbols list for exchange: '%s'\", exchange_code)\n response = get_client().service.SymbolList(\n Token=token, Exchange=exchange_code\n )\n\n if response.SYMBOLS is None:\n _LOG.error(\"No symbols found for exchange: '%s'\", exchange_code)\n return []\n\n symbols = [\n mtypes.Symbol.from_dict(d=obj)\n for obj in zeep.helpers.serialize_object(response.SYMBOLS[\"SYMBOL\"])\n ]\n\n _LOG.info(\"Got %s symbols for exchange '%s'\", len(symbols), exchange_code)\n return symbols", "def GenerateUnitList(self):\n # Verify platform specific terms. Skip whole term if platform does not\n # match.\n if self.term.platform:\n if 'silverpeak' not in self.term.platform:\n return []\n if self.term.platform_exclude:\n if 'silverpeak' in self.term.platform_exclude:\n return []\n\n ret_str = []\n if 'established' in self.term.option:\n dst_ports = ['any']\n else:\n dst_ports = self._NormalizePortRanges(\n self._FixLongList(self.term.destination_port, 10)\n )\n source_all_ipv6, source_address = self._FixIPv6Address(\n self.term.source_address\n )\n if source_all_ipv6:\n return ret_str\n destination_all_ipv6, destination_address = self._FixIPv6Address(\n self.term.destination_address\n )\n if destination_all_ipv6:\n return ret_str\n src_ips = self._NormalizeNetblocks(self._FixLongList(source_address, 50))\n src_ports = self._NormalizePortRanges(\n self._FixLongList(self.term.source_port, 10)\n )\n dst_ips = self._NormalizeNetblocks(\n self._FixLongList(destination_address, 50)\n )\n for src_ip in src_ips:\n for src_port in src_ports:\n for dst_ip in dst_ips:\n for dst_port in dst_ports:\n ret_str.append((src_ip, src_port, dst_ip, dst_port))\n return ret_str", "def _get_stations_by_codes_local(station_codes: List[int]) -> List[WeatherStation]:\n LOGGER.info('Using pre-generated json to retrieve station by code')\n with open(weather_stations_file_path) as file_pointer:\n stations = json.load(file_pointer)\n results = []\n for station in stations['weather_stations']:\n if int(station['code']) in station_codes:\n results.append(WeatherStation(**station))\n return results", "def get_price_range_map(data_map):\n res_map = defaultdict(lambda: deepcopy(static_constants.UNIT_PRICE_DEFAULT))\n for key, list_of_price in data_map.items():\n list_of_price.sort()\n lower_price = np.percentile(list_of_price, 40)\n higher_price = np.percentile(list_of_price, 70)\n median_price 
= np.percentile(list_of_price, 50)\n res_map[key] = {'lower_price': lower_price, 'median_price': median_price, 'higher_price': higher_price}\n return res_map", "def prices(tickers):\n try:\n start = dt.datetime.today()\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n except:\n start = dt.datetime.today()\n start = start - Day(3)\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n return df_data", "def test_get_stock_price_summary4(self):\n\n actual = a1.stock_price_summary([0.02, 0.14, 0.10])\n expected = (0.26,0)\n self.assertEqual(actual, expected)", "def get_prices(self, coin, quote):\n\n return self.public_client.ticker(coin, quote)", "def calculate_prices(self, merged_data):\n calculated_prices = []\n for record in merged_data:\n prices_dict = dict()\n supplier_price_id = record.get('supplier_detail').get('identifier') # get the supplier price id\n session_id = record.get('supplier_transaction').get('session_id') # get the transaction session\n supplier_trans_fee_price = self.compute_fee_price(\n record) # Get the fee price for each transaction if needed\n supplier_trans_time_price = self.compute_time_price(\n record) # Get the time price for each transaction if needed\n supplier_trans_kwh_price = self.compute_kwh_price(record)\n total_price = supplier_trans_fee_price + supplier_trans_time_price + supplier_trans_kwh_price\n prices_dict.update({'fee_price': supplier_trans_fee_price,\n 'time_price': supplier_trans_time_price,\n 'kwh_price': supplier_trans_kwh_price,\n 'total_price': total_price,\n 'session_id': session_id,\n 'supplier_price_id': supplier_price_id})\n calculated_prices.append(prices_dict)\n\n return calculated_prices", "def currency_code_mappings():\n return [(a, CURRENCIES[a].name) for a in settings.CURRENCIES]", "def get_prices(self, grab, subject):\n prices = []\n try:\n extras = grab.doc.rex_text(\n '<h3 class\\=\"h6 copy-sp-m\">.*?%s.*?</h3>(.+?)</ul>' % subject,\n flags=re.S\n )\n except DataNotFound:\n logging.debug(\n \"Price %s is not found on %s\"\n % (subject, grab.doc.url)\n )\n return None\n\n sel = XpathSelector(fromstring(extras))\n prices = []\n for li in sel.select('//li[@class=\"list__item u-cf\"]'):\n obligatory = OrderedDict()\n obligatory['name'] = li.select('node()').text()\n money = li.select('node()/strong').text()\n obligatory['value'] = money[1:].replace(',', '')\n\n # Find perweek or perday\n if li.select(\n 'span[@class=\"boatview__extras-amount\"' +\n ' and contains(text(),\"per week\")]'\n ).exists():\n obligatory['perweek'] = True\n elif li.select(\n 'span[@class=\"boatview__extras-amount\"' +\n ' and contains(text(),\"per day\")]'\n ).exists():\n obligatory['perday'] = True\n obligatory['currency'] = money[0]\n prices.append(obligatory)\n\n if len(prices) < 1:\n logging.debug(\n \"Price %s 
contains less than one element on: %s\"\n % (subject, grab.doc.url)\n )\n return None\n\n return prices", "def pop_stock_list(self, page):\n soup = bs(page, 'html5lib')\n table = soup('table', {'class':\n ['sortable',\n 'wikitable',\n 'jquery-tablesorter']})[0]\n\n stock_list = OrderedDict()\n\n # Grab the headers\n headers = [self.filter_headers(th.text) for th in table('tr')[0]('th')]\n\n for idx,tr in enumerate(table('tr')):\n # If we specified a maximum number of stocks.\n if self.count and idx > self.count:\n break\n\n if tr.td:\n symbol = tr.td.text\n\n stock_list[symbol] = {headers[i]: td.text.strip()\n for i, td in enumerate(tr('td'))}\n\n return stock_list", "def get_prices(uuid, card_format, price_source, price_list, card_type_order, price_data_json):\n if price_source not in price_data_json[uuid][card_format]:\n pass\n #print(f'Price source value of {price_source} is not available for {card_format} and {uuid}')\n else:\n source = price_data_json[uuid][card_format][price_source]\n if price_list not in source:\n pass\n #print(f'Price list value of {price_list} is not available for {price_source} and {uuid}')\n else:\n retail = source[price_list]\n for type in card_type_order:\n if type in retail:\n return retail[type]", "def fetch_series(tickers: List[str]) -> List[dict]:\n with requests.Session() as session:\n c = suds.client.Client(\n 'https://www3.bcb.gov.br/sgspub/JSP/sgsgeral/FachadaWSSGS.wsdl',\n transport=suds_requests.RequestsTransport(session))\n \n def _fetch(tck):\n try:\n resp = c.service.getUltimoValorVO(tck)\n if resp is not None:\n return _process_info(resp)\n except:\n tcks_off.append(tck)\n\n with executor() as e:\n ls = list(e.map(_fetch, tickers))\n return ls", "def get_products(self):\n return [item.code for item in self._products]", "def get_historical_prices(codes, start_date=None, end_date=None,\n universes=None, conids=None,\n exclude_universes=None, exclude_conids=None,\n times=None, cont_fut=None, fields=None,\n timezone=None, infer_timezone=None,\n master_fields=None):\n from quantrocket import get_prices\n\n return get_prices(codes,\n start_date=start_date, end_date=end_date,\n universes=universes, conids=conids,\n exclude_universes=exclude_universes,\n exclude_conids=exclude_conids,\n times=times, cont_fut=cont_fut,\n fields=fields, timezone=timezone,\n infer_timezone=infer_timezone,\n master_fields=master_fields)", "def tokenLookup(instrument_df,symbol_list):\n token_list = []\n for symbol in symbol_list:\n token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))\n return token_list", "def inp_item_price(self) -> List[str]:\n \n return [str(input(\"Enter desired price for item: \"))]", "def get_related_forex_currencies(self, currency = None):\n new_symbol_names_list = []\n for symbol_name in self.symbol_names_list:\n if (symbol_name[:3] == currency ) | (symbol_name[3:] == currency ):\n new_symbol_names_list.append(symbol_name)\n symbol_names_list = new_symbol_names_list\n return symbol_names_list", "def get_unit_details(driver):\n unit_details = [\n a\n for a in driver.find_elements_by_xpath(\"//span[contains(@class, 'tooltip')]\")\n if \"$\" in a.get_attribute(\"title\")\n ]\n data = []\n for unit in unit_details:\n unit_dict = {}\n unit_dict[\"Unit\"] = unit.get_attribute(\"data-selector\")\n title = unit.get_attribute(\"title\").replace(\"\\xa0\", \" \")\n unit_dict[\"Price\"] = re.compile(\"(\\\\$.+?)<\").findall(title)[0]\n unit_dict[\"Size\"] = re.compile(\">(\\\\d.+Sqm)\").findall(title)[0]\n 
data.append(unit_dict)\n return data", "def get_company_and_price(shares: list[Share]) -> list[Share]:\n\n for share in shares:\n\n share_info = lookup(share.symbol)\n if share_info is not None:\n share.company_name = share_info[\"name\"]\n share.price = share_info[\"price\"]\n share.total = share.price * share.qty\n else:\n share.company_name = CMP_NOT_FOUND\n\n return shares", "def names_and_prices():\n # Just an example\n cg = CoinGeckoAPI()\n data = cg.get_price(ids='bitcoin,ethereum', vs_currencies='usd,eur')\n # Flatten\n names = list()\n prices = list()\n for coin, fx in data.items():\n for currency, price in fx.items():\n name = coin+'_'+currency\n names.append(name)\n prices.append(price)\n\n return names, prices", "def getActiveCurrencies():", "def currencies(self):\r\n return currencies.Currencies(self)" ]
[ "0.6023552", "0.5827989", "0.5810261", "0.57817996", "0.57182974", "0.5632579", "0.55539757", "0.5493873", "0.5484011", "0.54804665", "0.54708654", "0.5443625", "0.5422661", "0.5405019", "0.5299616", "0.5295281", "0.527327", "0.5272383", "0.52600485", "0.52459085", "0.52217275", "0.52160186", "0.52093273", "0.5187559", "0.5186118", "0.517191", "0.5165962", "0.5137883", "0.5137879", "0.5132765", "0.5132293", "0.5122199", "0.5121893", "0.5121529", "0.50674784", "0.50622284", "0.5054525", "0.5026209", "0.50231344", "0.50163037", "0.50071716", "0.49957815", "0.49957815", "0.498961", "0.4988208", "0.4985643", "0.49731177", "0.49573845", "0.49455446", "0.49349755", "0.49281108", "0.49233148", "0.49232808", "0.49102044", "0.48948523", "0.487695", "0.48752725", "0.48702466", "0.48636404", "0.4861385", "0.48597705", "0.48582366", "0.4850134", "0.48358995", "0.4828478", "0.48277515", "0.4809204", "0.48084524", "0.47996157", "0.47982958", "0.47971094", "0.47971094", "0.47941652", "0.479046", "0.47847673", "0.4774322", "0.47732782", "0.47725254", "0.47724", "0.47712392", "0.4770337", "0.47681248", "0.47634217", "0.47621682", "0.47541994", "0.4741577", "0.4740607", "0.47395027", "0.47350934", "0.4732756", "0.4726246", "0.472617", "0.47241145", "0.47212848", "0.47210196", "0.47203222", "0.47160894", "0.4714856", "0.47139293", "0.4712997" ]
0.8246834
0
Returns a list of item descriptions from a list of stock codes.
def getDescriptionList(self, list_stockCode=None):
    df = self._df_invoice_original
    list_description = list()

    if list_stockCode is None:
        list_description = list(df.Description.unique())
    else:
        for stockCode in list_stockCode:
            description = df[df.StockCode==stockCode].Description.unique()[0]
            list_description.append(description)
    return list_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode", "def create_not_included_list(codes):\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += '\\\\end{itemize}\\n'\n return string", "def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities", "def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice", "def _getListing(self):\n\n # lets assure consistent litsting order\n items = self._items.items()\n items.sort()\n return [ \"%s%s%s: %s\" % (_def_sep, str(x[1]), _def_sep, x[1].__doc__)\n for x in items ]", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def getAllDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.extend(worldItems[item][DESCWORDS])\r\n return list(set(descWords))", "def get_items_for_catalog(catalog_id):\n pass", "def description(self):\n item_counts = [f'{i.quantity}x {i.item.name}' for i in self.items]\n return ','.join(item_counts)", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def list_items(self):\n click.echo(\"ID --|-- Item Title\")\n for index, item in enumerate(self.items):\n click.echo(\" {} --|-- {}\".format(index, item.title))", "def serve_recos(ids, ref_catalog):\r\n desc_list = []\r\n for desc_id in ids:\r\n desc_list.append(ref_catalog[ref_catalog['id'] == desc_id].iloc[0]['description'])\r\n return desc_list", "def 
get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def list():\n\n return cache.codeTableList()", "def getArtistsofArtwork(catalog, codes):\n return model.getArtistname(catalog,codes)", "def list(self):\n return 'Decks available: \\n{}'.format(\"\\n\".join([\n 'Deck {}: {} ({} cards)'.format(deck['id'], deck['title'], len(deck['cards']))\n for key, deck in self.decks.items()\n ]))", "def consult_books(self, bar_code: str):\n try:\n book_data = []\n self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,))\n for i in self.db.cursor.fetchall():\n book_data.append(i)\n except Exception as error:\n print(error)\n else:\n print(f\"ID BOOK: {book_data[0][0]}\\n\"\n f\"TITLE: {book_data[0][1]}\\n\"\n f\"AUTHOR: {book_data[0][2]}\\n\"\n f\"PRICE: R$:{book_data[0][3]}\\n\"\n f\"BAR CODE: {book_data[0][4]}\\n\"\n f\"STOCK: {book_data[0][5]}\")", "def hs_code_process(si):\n hs_code = re.sub(r'\\W+', '', si.get('hs_code', ''))\n descrip = re.sub(r'\\W+', '', si.get('description_of_goods', ''))\n bl_type = re.sub(r'\\W+', '', si.get('bl_type', ''))\n hs_codes = []\n if hs_code != '' and hs_code in descrip:\n hs_raw = si.pop('hs_code')\n for raw_line in hs_raw.split('\\n'):\n line = re.sub(r'\\W+', '', raw_line).upper()\n if 'HSCODE' in line:\n remain = line.replace('HSCODE', '').replace('\\n', '')\n remain = re.sub(r'[A-Z]+', '', remain)\n if remain.isdigit() and len(remain) > 4:\n hs_codes.append(remain)\n else:\n # CODE in line below\n hs_line_no = hs_raw.split('\\n').index(raw_line)\n for hs_line in hs_raw.split('\\n')[hs_line_no + 1:]:\n if len(re.findall(r'[a-zA-Z]+', hs_line)) < 1:\n for hs_code in re.findall(r'\\d+', hs_line):\n hs_codes.append(hs_code)\n else:\n break\n\n bl_type = si.get('bl_type', '')\n\n elif hs_code != '' and hs_code in bl_type:\n hs_raw = si.pop('hs_code')\n for raw_info in hs_raw.split('/'):\n info = re.sub(r'\\W+', '', raw_info).upper()\n if 'HSCODE' in info:\n hs_code = info.replace('HSCODE', '').replace('\\n', '')\n hs_code = re.sub(r'[A-Z]+', '', hs_code)\n hs_codes.append(hs_code)\n break\n bl_type = hs_raw.split('/')[0]\n\n else:\n hs_code = re.sub(r'[^\\d]+', '', hs_code)\n hs_codes.append(hs_code)\n bl_type = si.get('bl_type', '')\n\n return hs_codes, bl_type", "def getNamesFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n if isinstance(item[1], str):\n yield item[0]\n else:\n l = []\n for j in getNamesFromDescr(item[1]):\n l.append(j)\n r = (item[0], l)\n yield r\n item = i.next()\n except StopIteration:\n pass", "def codes(self):\n return [card.code for card in self.cards]", "def get_snippet_info_list(self):\n snippets = []\n for snippet in self.snippets:\n snippets.append([snippet[\"title\"], snippet[\"description\"]])\n return snippets", "def product_db() -> List[Text]:\n\n return [\n \"credit\",\n \"forex\",\n \"debit\",\n \"atm\"\n ]", "def run(self):\n logging.debug('List Available Recipes')\n if self.short:\n print(' '.join(pakit.recipe.RDB.names(desc=False)))\n return\n\n available = ['Program Description']\n available.extend(pakit.recipe.RDB.names(desc=True))\n\n msg = 'Available Recipes:'\n msg += PREFIX + PREFIX.join(available)\n print(msg)\n return msg", "def list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n 
package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))", "def codelists():\n return CodelistSet()", "def extract_promocodes(self):\n promocode_description = self.text\n\n sentences: list = self._split_by_sentences(promocode_description)\n\n sentence_with_promocode = promocode_description # no needed\n\n promocodes = ()\n\n for sentence in sentences:\n if any(keyword in sentence.lower()\n for keyword in (\"промокод\", \"купон\", \"промо-код\", )):\n\n sentence_with_promocode = sentence\n\n promocodes: list = \\\n self.get_promocodes(sentence_with_promocode,\n parser_constants.instagram_patterns)\n if promocodes:\n break\n # TODO:\n # make probabilities and do not break\n # continue iter by senteces and search4 promo in every\n # after that (we know that here is 1 promo)\n # we can choose the most suitable coupon\n\n for p in promocodes:\n if p and len(p) >= 3:\n promocode = p\n if self.is_valid_promocode_morph_check(promocode):\n break\n else:\n return []\n\n if any(forbidden_promocode in promocode.lower()\n for forbidden_promocode in\n parser_constants.forbidden_promocodes):\n\n return []\n\n expiration_date = self.parse_date(promocode_description)\n\n for key in parser_constants.replacement_table.keys():\n promocode_description = \\\n promocode_description.replace(\n key, parser_constants.replacement_table[key]\n )\n\n return [data_classes.Promocode(\n coupon=promocode,\n promoCodeDescription=promocode_description,\n estimated_date=expiration_date,\n source=self.source\n )]", "def get_list_html(self, items):\n html = \"\"\"\n <html>\n\t\t\t<head>\n\t\t\t\t<title>OpenFDA Cool App</title>\n\t\t\t</head>\n\t\t\t<body>\n <ol>\n \"\"\"\n\n for item in items:\n html += \"<li>\" + item + \"</li>\\n\"\n\n html += \"\"\"\n </ol>\n\t\t\t</body>\n </html>\n \"\"\"\n\n return html", "def get_pcode_list(self) -> List[str]:\n return self.pcodes", "def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list", "def list_to_desc_array(data_list):\n\n data_array = _populate_array(data_list, driver.ble_gattc_desc_array)\n return data_array", "def inventory(self) -> [str]:\r\n inventory_to_use = []\r\n items = [\"Apple\", \"Sword\", \"Shield\", \"Dagger\"]\r\n\r\n for item_in_items in range(2):\r\n if item_in_items <= 2:\r\n index = randint(0, len(items)) - 1\r\n inventory_to_use.append(items[index])\r\n del items[index]\r\n return inventory_to_use", "def get_descr_full(self):\n desc_text = []\n stack = [(self._desc, 0)]\n while stack:\n dl, di = stack.pop()\n while di < len(dl):\n if dl[di] == 0:\n di += 1\n elif dl[di] > 0 and dl[di] < 100000:\n desc_text.append(str(self._tables.tab_b[dl[di]]))\n di += 1\n elif dl[di] >= 100000 and dl[di] < 200000:\n lm = dl[di] // 1000 - 100\n ln = dl[di] % 1000\n desc_text.append(\"%06d : LOOP, %d desc., %d times\" % (dl[di], lm , ln))\n di += 1\n elif dl[di] >= 200000 and dl[di] < 300000:\n en = self._tables.tab_c.get(dl[di])\n am = dl[di] // 1000 - 200\n an = dl[di] % 1000\n if en is None:\n en = (str(am), \"\")\n if dl[di] < 222000:\n desc_text.append(\"%06d : OPERATOR %s: %d\" % (dl[di], en[0], an))\n else:\n desc_text.append(\"%06d : OPERATOR '%s'\" % (dl[di], en[0]))\n di += 1\n elif dl[di] >= 300000 and dl[di] < 400000:\n 
stack.append((dl, di + 1))\n da = dl[di]\n dl = self._tables.tab_d[dl[di]]\n di = 0\n desc_text.append(\"%06d : SEQUENCE, %d desc.\" % (da, len(dl)))\n return desc_text", "def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = \"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])", "def getOldCodeList(self):\n if self.modification == 'none':\n old_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines() \\\n if self._getOldCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.old_code_length < self.new_code_length):\n filling = [(None, self.color)] * (self.new_code_length - \\\n self.old_code_length)\n old_code.extend(filling)\n else: # deletion or addition\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines()]\n return old_code", "def get_items(data, requisites, formatted):\n returndata = \"\"\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n if formatted:\n prefix = '\\t'\n suffix = '\\n'\n else:\n prefix = ''\n suffix = ', '\n\n for course in data['menu']['meal']['course']:\n item_data = []\n datatype = type(course['menuitem'])\n\n if datatype is list:\n item_data += course['menuitem']\n else:\n item_data.append(course['menuitem'])\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:\n returndata += (prefix + (item['name']).rstrip(', ') + suffix)\n\n return returndata", "def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]", "def lookup (barcode, ID_TYPES=['ISBN', 'UPC','EAN']):\n\n matches = [] # list of {'desc', 'sku', 'type', 'vnd'}\n\n for idtype in ID_TYPES:\n try:\n result = api.item_lookup(barcode, SearchIndex='All', IdType=idtype)\n for item in result.Items.Item:\n if not _is_duplicate(item.ASIN, matches):\n matches.append({'desc': unicode(item.ItemAttributes.Title),\n 'sku': unicode(item.ASIN),\n 'type': idtype,\n 'vnd': 'AMZN:'+AMZLOCALE}) # vendor id\n\n except (errors.InvalidAccount, errors.InvalidClientTokenId, errors.MissingClientTokenId):\n print >>sys.stderr, \"Amazon Product API lookup: bad account credentials\"\n\n except errors.TooManyRequests, toomanyerr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", toomanyerr\n\n except errors.InternalError, awserr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", awserr\n\n except errors.InvalidParameterValue:\n # this simply means the barcode\n # does not exist for the given type,\n # so no need to do anything explicit\n pass\n\n return matches", "def to_iob(text: str, items: List[Instance]) -> List[str]:\n coding = [\"O\"] * len(text)\n for (s, e), label in items:\n b = f\"B-{label}\"\n i = f\"I-{label}\"\n coding[s] = b\n for x in range(s + 1, e):\n coding[x] = i\n\n return coding", "def get_details_of_code(self, code):\n row = {}\n try:\n with Transaction().start(DBNAME, 1):\n i = self.Product.search([('code', '=', code),\n ('description', '=', 'Stock'), ('type', '=', 'goods')])\n if i:\n i = i[-1]\n row['item'] = i.template.name\n row['category'] = i.template.category.name\n row['units'] = i.template.default_uom.name\n row['rate'] = 
i.template.list_price.to_eng()\n suppliers = i.template.product_suppliers\n if suppliers:\n row['supplier'] = suppliers[0].party.name\n return row\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return row", "def sanitize_sample_descriptions(sample_description_list, sanitize_fn=sanitize_text):\n filtered_sample_desc_list = []\n for text in sample_description_list:\n filtered_sample_desc_list.append(sanitize_fn(text))\n\n return filtered_sample_desc_list", "def read_stock_codes_from_db():\n\n print('connecting to database...')\n Stocks = get_db()['Stocks']\n print('reading...')\n\n stocks = Stocks.find()\n return stocks", "def get_description(self, code):\n try:\n return self.message[str(code)]\n except KeyError:\n return \"Unknown (\" + str(code) + \")\"", "def codebook_json_data_factory() -> List[Dict[str, Any]]:\n codebook_data = [\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 0, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_A\"\n },\n {\n Features.CODEWORD: [\n {Indices.ROUND.value: 0, Indices.CH.value: 2, Features.CODE_VALUE: 1},\n {Indices.ROUND.value: 1, Indices.CH.value: 1, Features.CODE_VALUE: 1}\n ],\n Features.TARGET: \"GENE_B\"\n },\n ]\n return codebook_data", "def summarize_food_data(unprocessed_food_list: List[str]) -> List[Dict[str, str]]:\n summary: List[Dict[str, str]] = []\n item_count_data: Dict[str, int] = {}\n\n for item in unprocessed_food_list:\n if item not in item_count_data:\n item_count_data[item] = 1\n else:\n item_count_data[item] += 1\n \n for product in item_count_data:\n item_information: Dict[str, str] = {}\n item_information[\"name\"] = product\n item_information[\"quantity\"] = str(item_count_data[product])\n item_information[\"units\"] = \"-\"\n summary.append(item_information)\n \n return summary", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def getListFromSupplier(self, ddata, supplier):\n # browse through all the components\n head = self.header.getHeaders()\n items = map(lambda cmpn:\n list(map(lambda it: cmpn[it], head)), ddata[supplier])\n for it in items:\n yield it", "def suppliers(ticker: str, other_args: List[str]):\n parser = argparse.ArgumentParser(\n prog=\"supplier\",\n add_help=False,\n description=\"List of suppliers from ticker provided. [Source: CSIMarket]\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n url_supply_chain = f\"https://csimarket.com/stocks/competitionNO3.php?supply&code={ticker.upper()}\"\n text_supplier_chain = BeautifulSoup(requests.get(url_supply_chain).text, \"lxml\")\n\n l_supplier = list()\n for supplier in text_supplier_chain.findAll(\n \"td\", {\"class\": \"plavat svjetlirub dae al\"}\n ):\n l_supplier.append(supplier.text)\n\n if l_supplier:\n print(\"List of Suppliers: \" + \", \".join(l_supplier) + \"\\n\")\n else:\n print(\"No suppliers found.\\n\")\n\n except Exception as e:\n print(e, \"\\n\")", "def items(self) -> typing.ItemsView[str, Category]:\n return self._primary_code_map.items()", "def getDataInterfaces(context, export_only=False):\n from bika.lims.exportimport import instruments\n exims = []\n for exim_id in instruments.__all__:\n exim = instruments.getExim(exim_id)\n if export_only and not hasattr(exim, 'Export'):\n pass\n else:\n exims.append((exim_id, exim.title))\n exims.sort(lambda x, y: cmp(x[1].lower(), y[1].lower()))\n exims.insert(0, ('', t(_('None'))))\n return DisplayList(exims)", "def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]", "def dishlist_prices(n: list) -> list:\r\n return [dish.price for dish in n]", "def list_to_text(ingridients_list):\n to_return = \"List\\n\"\n for (ingridient, quantity) in ingridients_list:\n to_return = f\"{to_return}{ingridient.name} {quantity}\\n\"\n return to_return", "def __read_instrumentslist(self):\n available_instruments = []\n with open(\"instrumentslist.txt\", \"r\") as file:\n for line in file:\n splited = line.split(\" \")\n name = \"\"\n for j in splited[1:]:\n name += j\n available_instruments.append(name)\n return available_instruments", "def _collect_quantizers_descriptions(self) -> List[QuantizerDescription]:\n # `True` for weight quantizer, `False` otherwise.\n quantizers = chain(\n map(lambda x: (True, x), self._weight_quantizers.values()),\n map(lambda x: (False, x), self._non_weight_quantizers.values()),\n )\n\n quantizers_descriptions = []\n for is_weight_quantizer, q in quantizers:\n is_symmetric = isinstance(q, SymmetricQuantizer)\n\n quantizers_descriptions.append(\n QuantizerDescription(\n q.num_bits, q.per_channel, q.signed, is_symmetric, is_weight_quantizer, q.is_enabled_quantization()\n )\n )\n\n return quantizers_descriptions", "def drug_names_on_drug_list(drug_list):\n return [dl[\"Drug (brand name)\"] for dl in drug_list]", "def load_description():\n with open('description.txt') as description:\n return [line.strip() for line in 
description]", "def get_products(self):\n return [item.code for item in self._products]", "def getAllFirstDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.append(worldItems[item][DESCWORDS][0])\r\n return list(set(descWords))", "def get_book_titles(self, lib_db):\n titles = []\n conn = sqlite3.connect(lib_db)\n c = conn.cursor()\n for row in c.execute(\"SELECT ZTITLE FROM ZBKLIBRARYASSET WHERE ZTITLE <> '' AND ZTITLE <> 'none'\"):\n titles.append(row[0])\n conn.close()\n return titles", "def get_packages(module, repo_mgr, list_type, package):\n rc_code, out, err = module.run_command(\"/usr/bin/%s -q list %s %s\"\n % (repo_mgr, list_type, package), environ_update=ENV_LOCALE)\n if rc_code is 0:\n return out.splitlines()\n else:\n if rc_code == 1 and str(err) == 'Error: No matching Packages to list\\n':\n return out.splitlines()\n else:\n module.fail_json(msg=\"Unable to collect \" + repo_mgr + \" list \" + list_type + \" : \" + str(err) + \" - \" + str(out))", "def print_items():\n for items in inventory:\n print(f\"- {items.upper()}\")", "def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")", "def build_dividend_lists(portfolio_dict):\n # ETF dividend list\n dow_dividends = lookup_dividends(yf.Ticker(\"DIA\")) \n sp500_dividends = lookup_dividends(yf.Ticker(\"SPY\")) \n nasdaq_dividends = lookup_dividends(yf.Ticker(\"QQQ\")) \n totalmarket_dividends = lookup_dividends(yf.Ticker(\"VTI\")) \n \n # Portfolio dividends\n portfolio_dividend_dict = {}\n for key in portfolio_dict:\n portfolio_dividend_dict[key] = lookup_dividends(yf.Ticker(key))\n \n return (dow_dividends, sp500_dividends, nasdaq_dividends, totalmarket_dividends, portfolio_dividend_dict)", "def get_descr_short(self):\n desc_text = []\n stack = [(self._desc, 0)]\n while stack:\n dl, di = stack.pop()\n while di < len(dl):\n if dl[di] == 0:\n di += 1\n elif dl[di] > 0 and dl[di] < 100000:\n desc_text.append(\"%06d\" % dl[di])\n elif dl[di] >= 100000 and dl[di] < 200000:\n desc_text.append(\"%06d LOOP\" % dl[di])\n elif dl[di] >= 200000 and dl[di] < 300000:\n desc_text.append(\"%06d OPER\" % dl[di])\n elif dl[di] >= 300000 and dl[di] < 400000:\n desc_text.append(\"%06d SEQ\" % dl[di])\n di += 1\n return desc_text", "def getNewCodeList(self):\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines() \\\n if self._getNewCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.new_code_length < self.old_code_length):\n filling = [(None, self.color)] * (self.old_code_length - \\\n self.new_code_length)\n new_code.extend(filling)\n else: # deletion or addition\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()]\n return new_code", "def list_item(self, text):\n #return [[MdStyleInstructionList(), *text]]\n return [[MdStyleInstructionListItem()] + text]", "def getFormatsFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n item1 = item[1]\n if isinstance(item1, str):\n yield normalize_format(item1)\n else:\n l = []\n for j in getFormatsFromDescr(item1):\n l.append(j)\n yield l\n item = i.next()\n except StopIteration:\n pass", "def get_todo_list():\n\n # assume that a \"h264\" encoded file is complete\n return models.LibraryItem.objects.filter(h264=False)", "def 
get_description(self):\n return COMPONENT_LIST[self.index][1]", "def _create_pull_requests_descriptions(self, pull_requests: list) -> list:\n descriptions = []\n for pull_request in pull_requests:\n description = deepcopy(self.blocks['description'])\n reviewers = map(\n lambda name: self._get_user_mention(name),\n pull_request['reviewers'],\n )\n description['text']['text'] = ' '.join(reviewers)\n description['accessory']['url'] = pull_request['url']\n descriptions.append(description)\n return descriptions", "def GetAllDifferentDescriptionOfCost():\n\n logs.logger.debug(\n \"Start to get back all different description of \"\n \"Cost objects from database.\")\n try:\n ListOfAllDifferentDescriptionOfCost = []\n searchedCostsItems = GetAllDescriptionOfCost()\n for item in searchedCostsItems:\n if item not in ListOfAllDifferentDescriptionOfCost:\n ListOfAllDifferentDescriptionOfCost.append(item)\n logs.logger.info(\n \"Start to get back all different description of \"\n \"Cost objects from database.\")\n return ListOfAllDifferentDescriptionOfCost\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def pull_list():\n URL = 'https://salsa.debian.org/security-tracker-team/security-tracker/raw/master/data/CVE/list'\n file = urllib.request.urlopen(URL).readlines()\n generic = [line.strip().decode() for line in file]\n return generic", "def list_tick_dates(self, stock_code):\n\n conn = self.db_engine.connect()\n try:\n get_tick_dates_sql = \"\"\"\n SELECT DISTINCT \"update_date\" FROM \"{0}\".\"{1}\"\n WHERE end_update_time IS NOT NULL AND stock_code='{2}'\n \"\"\".format(Schemas.SCHEMA_META, Tables.TABLE_TICK_UPDATE_LOGS, stock_code)\n\n res = pd.read_sql(get_tick_dates_sql, conn)['update_date'].tolist()\n res.sort()\n return Error.SUCCESS, res\n except Exception:\n self.logger.log_error(traceback.format_exc())\n return Error.ERROR_DB_EXECUTION_FAILED, None\n finally:\n conn.close()", "def _get_package_items(self):\r\n mask = \"mask[description,capacity,prices.id,categories[name,id]]\"\r\n package = self.client['Product_Package']\r\n return package.getItems(id=46, mask=mask)", "def __init__(self):\n # note: We could have implemented the list as a dictionary, with\n # the barcode as the key, however if the barcode for the item\n # changes we might have problems.\n self.stocklist = [] # a list of stock items", "def obtain_stock_names():\n\n url = 'https://en.wikipedia.org/wiki/List_of_S%26P_400_companies'\n stock_names = []\n response = requests.get(url, timeout=5)\n content = BeautifulSoup(response.content, \"html.parser\")\n\n # We get stock_names from the web page\n for stock in content.findAll('a', attrs={\"class\": \"external text\"}):\n if(len(stock.text)<=5):\n stock_names.append(stock.text)\n\n # We persist the Stock Names\n save_dir = Path(__file__).parent.parent\n filename = (save_dir / \"../data/stock_names.joblib\").resolve()\n PersistenceAPI.persist_stock_data(stock_names, filename)\n\n return stock_names", "def read_stock_list():\n print(\"Reading list of stocks.\")\n stocks = {}\n with open(STOCKS_FILE) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n stocks[row['Symbol']] = (row['Name'], row['Sector'])\n return stocks", "def list(self, body, ordered=True):\n return [[MdStyleInstructionListStart(ordered)]] + body + [[MdStyleInstructionListEnd()]]", "def build_item_list(client, word=''):\n\titem_group = client.search_metadata('collection_name:cooee')\n\titems = item_group.get_all()\n\titem_list_name = word + '_list'\n\titem_urls = []\n\tfor item in 
items:\n\t\tprimary_text = item.get_primary_text()\n\t\tif word in primary_text:\n\t\t\tprint item.url()\n\t\t\titem_urls.append(item.url())\n\t\t\t#client.add_to_item_list_by_name([item.url()], item_list_name)\n\n\tfor url in item_urls:\n\t\tclient.add_to_item_list_by_name(item_urls, item_list_name)", "def obtain_parse_snp500():\n\n # Stores the current time, for the created_at record\n now = datetime.datetime.utcnow()\n\n # Use finsymbols to download stock symbols, industry and S&P 500 data\n sp500=finsymbols.get_sp500_symbols()\n # Obtain the symbol information for each row in the S&P500 constituent table\n symbols = []\n for index, symbol in enumerate(sp500):\n sd = {'ticker': sp500[index]['symbol'],\n 'name': sp500[index]['company'],\n 'sector': sp500[index]['industry']}\n # Create a tuple (for the DB format) and append to the grand list\n symbols.append( (sd['ticker'], 'stock', sd['name'], \n sd['sector'], 'USD', now, now) )\n \n return symbols", "def program_list():\n items = []\n\n soup = abcradionational.get_soup(URL + \"/podcasts/program\")\n \n program_heading = abcradionational.get_podcast_heading(soup)\n\n for program in program_heading:\n items.append({\n 'label': program['title'],\n 'path': plugin.url_for('program_item', url=program['url']),\n })\n\n return items", "def show_item_list():\n # 3 items per line\n line = []\n linecounter = 0\n item_string = \"\"\n counter = 0\n text_spacer = 20\n clear_messages(0)\n\n for i in range(0, len(ITEMS)):\n space = text_spacer - len(ITEMS[i])\n item_string = item_string + ITEMS[i] + (' ' * space)\n counter += 1\n if counter == 3:\n line.append(item_string)\n linecounter += 1\n item_string = \"\"\n counter = 0\n if counter < 3:\n line.append(item_string)\n\n for i in range(0, linecounter + 1):\n printmessage(line[i], i + 1, MAGENTA, 0)\n clear_messages(3)", "def get_weather_description(weather):\n for num_moods in range(0, 10):\n description = weather['list'][num_moods]['weather'][0]['description']\n weather_description_data.append(description)\n return weather_description_data", "def get_deck_as_str_list(self):\n\n ls = []\n for card in self.deck:\n ls.append(card.get_card())\n return ls", "def alfred_items_for_value(value):\n index = 0\n results = []\n\n config_list = [\n ('t2s.json', u'繁體到簡體', 'SimplifiedChinese.png'),\n ('s2t.json', u'簡體到繁體', 'TraditionalChinese.png'),\n ('s2tw.json', u'簡體到臺灣正體', 'TW_taiwan.png'),\n ('tw2s.json', u'臺灣正體到簡體', 'CN_china.png'),\n ('s2hk.json', u'簡體到香港繁體', 'HK_hongKong.png'),\n ('hk2s.json', u'香港繁體(香港小學學習字詞表標準)到簡體', 'CN_china.png'),\n ('tw2sp.json', u'繁體(臺灣正體標準)到簡體並轉換爲中國大陸常用詞彙', 'CN_china.png'),\n ('s2twp.json', u'簡體到繁體(臺灣正體標準)並轉換爲臺灣常用詞彙', 'TW_taiwan.png'),\n ]\n for config_file, description, icon in config_list:\n converter = opencc.OpenCC(\n config=config_file, opencc_path='/usr/local/bin/opencc')\n item_value = converter.convert(value)\n results.append(alfred.Item(\n title=item_value,\n subtitle=description,\n attributes={\n 'uid': alfred.uid(index),\n 'arg': item_value,\n },\n icon=icon,\n ))\n index += 1\n\n return results", "def get_raw(self):\n \n out_items = []\n for order in self.order_lst:\n out_items += [i.get_metadata() for i in order.get_items()]\n \n return out_items", "def show_list(self, desc, lst, writeln):\n if not lst:\n return\n val = ', '.join([list_escape(v) for v in lst])\n writeln(\"%s: %s\" % (desc, val))", "def lcode(self):\n###############################################################################\n lcode = []\n for M in list(self.estimates.values()):\n if (M.code not in 
lcode):lcode.append(M.code)\n return(lcode)", "def read_publisher_qualified_isocode(self):\n self.CATEGORIES = kpi_from_db_config.CATEGORIES\n self.SELECTED_ISOCODE = kpi_from_db_config.SELECTED_ISOCODE\n self.ID_PUBLISHER_QUALIFIED_ISOCODE = kpi_from_db_config.ID_PUBLISHER_QUALIFIED_ISOCODE\n\n size_m = len(self.CATEGORIES)\n size_n = len(self.SELECTED_ISOCODE)\n list_id = self.ID_PUBLISHER_QUALIFIED_ISOCODE\n\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT %s\n ''', [list_id, size_m*size_n])\n\n list_result = [[0]*size_n for _ in range(size_m)]\n\n count_result = size_m*size_n - 1\n for doc in self.cursor:\n list_result[count_result%size_m][count_result/size_m] = doc[0]\n count_result -= 1\n\n for i in range(size_m):\n list_result[i][0:0] = [sum([list_result[i][j] for j in range(size_n)])]\n\n list_result[0:0] = [[0]*(size_n + 1)]\n for i in range(size_n + 1):\n list_result[0][i] = sum([list_result[j][i] for j in range(size_m + 1)])\n\n categories = ['--All--'] + self.CATEGORIES\n\n return list_result, categories", "def snippetList(requeset, format = None):", "def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names", "def CARD_SUITS() -> tuple:\n return \"Diamonds\", \"Hearts\", \"Clubs\", \"Spades\"", "def get_list_of_str2(self):\n pass", "def getIngredients():\n ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients", "def listRestock(self):\n # TODO return a list of items that need restocking\n # hint: Need to loop through the stocklist\n # No. 3\n flag = 0\n mystr = \"\"\n for item in self.stocklist:\n if StockItem.needRestock(item):\n mystr = mystr + StockItem.getName(item) + '\\n'\n flag = flag + 1 # item that needs restocking found\n\n # No. 4\n if flag == 0:\n return \"All items stocked\"\n else:\n return mystr", "def scrape_descriptions_sync():\n # прочитать Symbols, for symbol in tqdm(symbols)\n # исользовать urllib get запросы на yahoo и полученное записывать в файл с помощью\n # добавить tqdm(symbols)\n\n myheader = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'\n }\n\n symbols = read_symbols()\n YAHOO_HTMLS.mkdir(parents=True, exist_ok=True)\n\n\n for symbol in tqdm(symbols):\n #Example myurl = \"https://finance.yahoo.com/quote/AAPL/profile?p=AAPL\"\n myurl = f'https://finance.yahoo.com/quote/{symbol}/profile?p={symbol}'\n\n try:\n req = request.Request(myurl, headers=myheader)\n response = request.urlopen(req)\n text = response.read()\n response.close()\n\n except Exception:\n print(\"Error occuried during web request!!\")\n print(sys.exc_info()[1])\n\n f = open(YAHOO_HTMLS / f'{symbol}.html', 'wb')\n f.write(text)\n f.close()", "def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames", "def find_item_codes(transaction):\n t = transaction\n item_codes = []\n if t['transaction_type'] in ('SALE', 'REFUND'):\n # Search using line item IDs and order_id\n for oli in (t['order_line_items'] or []):\n li_id = oli['line_item_id']\n item_codes.append(\n get_item_code_for_order(t['order_id'], order_line_item_id=li_id)\n )\n else:\n # Search for ITEM reference\n for ref in (transaction['references'] or []):\n if ref['reference_type'] == 'ITEM_ID':\n item_codes.append(\n get_item_code_for_item_id(ref['reference_id'])\n )\n\n return item_codes" ]
[ "0.65685076", "0.566973", "0.5336407", "0.5295758", "0.51715976", "0.51711005", "0.5148782", "0.5139008", "0.508993", "0.5061929", "0.5032067", "0.49966037", "0.49956048", "0.4957243", "0.4957215", "0.4956653", "0.49447757", "0.49301252", "0.4907753", "0.49050713", "0.4902797", "0.48980734", "0.4886963", "0.4886797", "0.48835567", "0.48771304", "0.48710713", "0.48617464", "0.4855102", "0.4849284", "0.48486084", "0.4825037", "0.4807281", "0.47881213", "0.4759703", "0.4755037", "0.4743091", "0.47387505", "0.4730555", "0.47223693", "0.47157344", "0.47130832", "0.4687379", "0.4683173", "0.4675325", "0.46687117", "0.46561867", "0.46542323", "0.4647789", "0.46424294", "0.463977", "0.46371582", "0.46366236", "0.4635237", "0.46331847", "0.463248", "0.46308693", "0.4621067", "0.46051142", "0.46022415", "0.4601089", "0.4596357", "0.4585938", "0.45784062", "0.45743236", "0.45722234", "0.4572153", "0.45650458", "0.4564572", "0.45640412", "0.45627335", "0.45621595", "0.4560962", "0.45424607", "0.45402586", "0.4529362", "0.45192727", "0.45182914", "0.45170325", "0.45073357", "0.45057997", "0.45057723", "0.45028102", "0.4501431", "0.4500666", "0.44983116", "0.44970977", "0.44960308", "0.44957414", "0.44900882", "0.44895336", "0.44875553", "0.44870132", "0.4485799", "0.44831684", "0.44823903", "0.4478319", "0.44778916", "0.4471342", "0.44671074" ]
0.7398652
0
Creates a new dataframe with invoice lines issued from the given parameters. Once done, the new dataframe is aggregated with the original one.
def create_customer_df_invoice_line(self, customerID, list_stockCode\
, list_quantity, invoiceDate):
    dict_invoice = dict()

    dict_invoice['Quantity'] = list_quantity
    dict_invoice['StockCode'] = list_stockCode

    #------------------------------------------------------------------------
    # Build invoiceDate from local current time
    #------------------------------------------------------------------------
    if invoiceDate is None:
        time_struct = time.localtime()
        invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\
        +'-'+str(time_struct.tm_mday)
        invoiceDate +=' '
        invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\
        +':'+str(time_struct.tm_sec)
        invoiceDate = pd.Timestamp(invoiceDate)
    else:
        pass

    #------------------------------------------------------------------------
    # Lists initialization
    #------------------------------------------------------------------------
    list_customerID = list()
    list_invoiceNo = list()
    list_invoiceDate = list()
    list_invoice_line_index = list()

    #------------------------------------------------------------------------
    # Increase Invoice number
    #------------------------------------------------------------------------
    invoiceNo = max(self._df_invoice_original.InvoiceNo)
    invoiceNo += 1

    #------------------------------------------------------------------------
    # Get latest invoice line index value
    #------------------------------------------------------------------------
    invoice_line_index = max(self._df_invoice_original.index)

    #------------------------------------------------------------------------
    # Build lists for CustomerID, InvoiceNo, InvoiceDate
    # A list of incremented indexes is built for new rows.
    #------------------------------------------------------------------------
    for quantity in list_quantity:
        list_customerID.append(customerID)
        list_invoiceNo.append(invoiceNo)
        list_invoiceDate.append(invoiceDate)
        invoice_line_index += 1
        list_invoice_line_index.append(invoice_line_index)

    dict_invoice['CustomerID'] = list_customerID
    dict_invoice['InvoiceNo'] = list_invoiceNo
    dict_invoice['InvoiceDate'] = list_invoiceDate

    #------------------------------------------------------------------------
    # Get description list from list of stock codes.
    #------------------------------------------------------------------------
    list_description = self.getDescriptionList(list_stockCode)

    dict_invoice['Description'] = list_description

    #------------------------------------------------------------------------
    # Get unit price list from list of stock codes.
    #------------------------------------------------------------------------
    list_unitPrice = self.getUnitPriceList(list_stockCode)

    dict_invoice['UnitPrice'] = list_unitPrice

    #------------------------------------------------------------------------
    # Dataframe with new invoices lines is created.
    #------------------------------------------------------------------------
    df_invoice_line \
    = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\
    , index=list_invoice_line_index)

    return df_invoice_line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n 
else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n 
data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def make_invoices(self):\n for invoice in self.policy.invoices:\n db.session.delete(invoice)\n db.session.commit()\n\n billing_schedules = {'Annual': None, 'Semi-Annual': 3, 'Quarterly': 4, 'Monthly': 12}\n\n invoices = []\n first_invoice = Invoice(self.policy.id,\n self.policy.effective_date, # bill_date\n self.policy.effective_date + relativedelta(months=1), # due\n self.policy.effective_date + relativedelta(months=1, days=14), # cancel\n self.policy.annual_premium)\n invoices.append(first_invoice)\n\n if self.policy.billing_schedule == \"Annual\":\n pass\n elif self.policy.billing_schedule == \"Two-Pay\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*6\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Quarterly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*3\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Monthly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n else:\n print \"You have chosen a bad billing schedule.\"\n\n logger.info(str(len(invoices)) + \" invoices generated for policy %s\" % self.policy.id)\n\n for invoice in invoices:\n db.session.add(invoice)\n db.session.commit()", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : 
needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 
'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def make_claim_df(claim_list, columns = ['Sl','Name of Bank','Name of Branch','A/C Number (15 digit)','A/C Title','Amount of Remittance in BDT','Date of A/C Credit','Remittance Received through BEFTN/RTGS','Name of Remittance Collecting/BEFTN Processing Bank','Date of Claim']):\n sl=[]\n nrbc_bank = []\n branch = []\n ac_no = []\n ac_title = []\n amount=[]\n date_account_credit=[]\n channel = []\n other_bank=[]\n claim_date=[]\n i=1\n for claim in claim_list:\n sl.append(i)\n i=i+1\n nrbc_bank.append(\"NRBC Bank Ltd.\")\n branch.append(claim.branch.name.upper())\n ac_no.append(claim.account_no)\n ac_title.append(claim.account_title)\n amount.append(claim.remittance_amount)\n date_account_credit.append(claim.date_account_credit)\n channel.append(claim.get_channel_display())\n 
other_bank.append(claim.collecting_bank.name)\n claim_date.append(claim.date_claim.date())\n dc = {\n 'SL':sl,\n 'Name of Bank':nrbc_bank,\n 'Name of Branch': branch,\n 'A/C Number': ac_no,\n 'A/C Title': ac_title,\n 'Amount of Remittance in BDT': amount,\n 'Date of A/C Credit': date_account_credit,\n 'Remittance Received Through BEFTN/RTGS': channel,\n 'Name of Remittance Processing Bank': other_bank,\n 'Date of Claim': claim_date\n }\n df = pd.DataFrame(dc)\n return df.sort_values(by=['Name of Remittance Processing Bank',])", "def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = 
dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })", "def _get_lines(self, cr, uid, ids, context=None):\n List=[]\n if ids:\n line = self.pool.get('payment.enrich.lines').browse(cr, uid, ids[0], context=context)\n \n record = line.enrich_id\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n record.write(res)\n return List", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, 
fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def prepare_report(user, from_date, to_date,\n show_which=\"worked\", # \"worked\", \"invoiced\", or \"paid\"\n client_ids=[], project_ids=[]):\n\n if show_which == \"worked\":\n sessions = Session.objects.filter(\n project__client__user=user,\n date__gte=from_date,\n date__lte=to_date\n )\n elif show_which == \"invoiced\":\n sessions = Session.objects.filter(\n project__client__user=user,\n invoice__invoice_date__gte=from_date,\n invoice__invoice_date__lte=to_date\n )\n elif show_which == \"paid\":\n sessions = Session.objects.filter(\n project__client__user=user,\n invoice__paid_date__gte=from_date,\n invoice__paid_date__lte=to_date\n )\n else:\n raise ValueError(\"Invalid value for the 'show_which' argument \"\n \"supplied\")\n\n if client_ids != []:\n sessions = sessions.filter(\n project__client__in=client_ids\n )\n if project_ids != []:\n sessions = sessions.filter(\n project__in=project_ids\n )\n\n # Starting Python 3.6, the dict maintains order as 
inserted\n # When running this on a different computer with older Python,\n # the sessions_per_date was all jumbled-up.\n # https://stackoverflow.com/questions/1867861/dictionaries-how-to-keep-keys-values-in-same-order-as-declared\n date_range = pd.date_range(from_date, to_date).date\n sessions_per_date = {today: sessions.filter(date=today)\n for today in date_range}\n\n total_earned = sum([sesh.get_money_earned() for sesh in sessions])\n\n context = {\n 'sessions': sessions, # obsolete if sessions_per_date will work\n 'from': from_date,\n 'to': to_date,\n 'date_range': date_range,\n 'sessions_per_date': sessions_per_date,\n 'total_earned': total_earned,\n }\n\n if client_ids != []:\n context['clients'] = Client.objects.filter(pk__in=client_ids)\n\n if project_ids != []:\n context['projects'] = Project.objects.filter(pk__in=project_ids)\n\n return context", "def generate_eob(\n self, date_of_service, date_of_eob, insured, invoice_id, cpt_code, charge_amount\n ):\n if insured == \"insured\":\n # first copayments\n copay_amount = np.random.choice(\n self.distributions[\"copay_amounts\"],\n 1,\n p=self.distributions[\"copay_distribution\"],\n )[0]\n if copay_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_service],\n \"copay_amount\": [copay_amount],\n \"adjustment_amount\": [0],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = charge_amount - copay_amount\n else:\n remaining_charge = charge_amount\n # next eob discounts\n eob_discount_percent = np.random.choice(\n self.distributions[\"eob_discount_percentages\"],\n 1,\n p=self.distributions[\"eob_discount_distribution\"],\n )[0]\n if eob_discount_percent > 0:\n insurance_adjustment = remaining_charge * eob_discount_percent / 100\n remaining_charge = remaining_charge - insurance_adjustment\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [insurance_adjustment],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n # next handle eob payments where relevant\n eob_payment_percentage = np.random.choice(\n self.distributions[\"eob_payment_percentages\"],\n 1,\n p=self.distributions[\"eob_payment_distribution\"],\n )[0]\n eob_payment_amount = remaining_charge * (eob_payment_percentage / 100.0)\n if eob_payment_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [0],\n \"paid_amount\": [eob_payment_amount],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = remaining_charge - eob_payment_amount\n else:\n remaining_charge = charge_amount\n return remaining_charge", "def action_invoice_create(self, grouped=False, final=False):\n if self.invoice_option == 'before_delivery':\n inv_obj = self.env['account.invoice']\n for order in self:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n for inv_line in order.order_line:\n inv_line.invoice_line_create(invoice.id, inv_line.product_uom_qty)\n\n else:\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n 
references = {}\n invoices_origin = {}\n invoices_name = {}\n\n # Keep track of the sequences of the lines\n # To keep lines under their section\n inv_line_sequence = 0\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n\n # We only want to create sections that have at least one invoiceable line\n pending_section = None\n\n # Create lines in batch to avoid performance problems\n line_vals_list = []\n # sequence is the natural order of order_lines\n for line in order.order_line:\n if line.display_type == 'line_section':\n pending_section = line\n continue\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.client_order_ref and order.client_order_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.client_order_ref)\n\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final):\n if pending_section:\n section_invoice = pending_section.invoice_line_create_vals(\n invoices[group_key].id,\n pending_section.qty_to_invoice\n )\n inv_line_sequence += 1\n section_invoice[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(section_invoice)\n pending_section = None\n\n inv_line_sequence += 1\n inv_line = line.invoice_line_create_vals(\n invoices[group_key].id, line.qty_to_invoice\n )\n inv_line[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(inv_line)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n self.env['account.invoice.line'].create(line_vals_list)\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n sale_orders = references[invoices[group_key]]\n if len(sale_orders) == 1:\n invoices[group_key].reference = sale_orders.reference\n\n if not invoices:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n\n for invoice in invoices.values():\n invoice.compute_taxes()\n if not invoice.invoice_line_ids:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n # Idem for partner\n so_payment_term_id = invoice.payment_term_id.id\n fp_invoice = invoice.fiscal_position_id\n invoice._onchange_partner_id()\n invoice.fiscal_position_id = fp_invoice\n # To keep the payment terms set on the SO\n invoice.payment_term_id = so_payment_term_id\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def _get_query(self, type, date_from=False, date_to=False, users=None, products=None):\n # TODO: Revisar def _create_invoice(self, order, so_line, amount):...\n # so.user_id AS id_salesman\n # AND so.user_id IN (%s)\n # AND pp.id IN (%s)\n # GROUP BY salesman\n\n if type == 'most_sold':\n sql = \"\"\"\n SELECT min(sol.id) AS id, \n so.user_id AS salesman, \n sol.product_id AS product,\n AVG(sol.price_reduce_taxexcl) AS price, \n pp.product_tmpl_id AS product_template,\n so.company_id AS company,\n SUM(sol.product_uom_qty) AS qty,\n SUM(sol.price_subtotal) AS subtotal\n FROM sale_order_line sol\n LEFT JOIN sale_order so ON so.id = sol.order_id\n LEFT JOIN product_product pp ON pp.id = sol.product_id\n LEFT JOIN product_template pt ON pt.id = pp.product_tmpl_id\n WHERE so.state NOT IN ('draft', 'sent', 'cancel')\n AND so.date_order BETWEEN '%s' AND '%s'\n AND so.user_id IN (%s)\n AND pp.id IN (%s)\n GROUP BY salesman, sol.product_id, pp.product_tmpl_id, so.company_id\n ORDER BY qty DESC;\n \"\"\" % (date_from, date_to, ', '.join(str(u) for u in users), ', '.join(str(p) for p in products))\n else:\n sql = \"\"\" \n \"\"\"\n self.env.cr.execute(sql)\n return self.env.cr.dictfetchall()", "def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n 
elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field 
helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def merge_invoice(self, cr, uid, invoices, context=None):\n order_ids = []\n pick_ids = []\n if len(invoices) <= 1:\n return False\n parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id'])\n for inv in invoices:\n if parent.partner_id != inv.partner_id:\n raise osv.except_osv(_(\"Partners don't match!\"), _(\"Can not merge invoice(s) on different partners or states !.\"))\n\n if inv.state != 'draft':\n raise osv.except_osv(_(\"Invalid action !\"), _(\"You can merge only invoices in draft state.\"))\n\n # Merge invoices that are in draft state\n inv_line_obj = self.pool.get('account.invoice.line')\n name = parent.name\n comment = parent.comment\n origin = parent.origin\n for inv in invoices:\n if inv.id == parent.id:\n continue\n\n # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head.\n if inv.name:\n # Find if the same name already exist, if yes, skip to add.\n name_list = name.replace(' ', '').split(',')\n if inv.name not in name_list:\n name += ', %s' % inv.name\n if inv.comment:\n comment = comment and comment + ', %s' % inv.comment or inv.comment\n if inv.origin:\n origin += ', %s' % inv.origin\n line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)])\n for inv_lin in inv_line_obj.browse(cr, uid, line_ids):\n mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id),\n ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same.\n ])\n if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity\n inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)})\n inv_line_obj.unlink(cr, uid, inv_lin.id)\n elif inv.type == parent.type:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id})\n else:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity})\n\n if inv.sale_order_ids:\n order_ids += [order.id for order in inv.sale_order_ids]\n if inv.picking_ids:\n pick_ids += [picking.id for picking in inv.picking_ids]\n\n self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment})\n\n #Remove By DRB\n #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n\n self.unlink(cr, uid, [inv.id])\n #Distinct List\n order_ids = list(set(order_ids))\n pick_ids = list(set(pick_ids))\n\n self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]})\n self.button_reset_taxes(cr, uid, [parent.id])\n return parent.id", "def invoices(self):\r\n return Invoices(self)", "def generate_report(df, start_date, end_date):\n # Remove any transactions that had to do with collecting or returning security\n 
security_df = df[(df[CATEGORY] == 'Security') | (df[CATEGORY] == 'Security-Income')]\n df = df[(df[CATEGORY] != 'Security')]\n\n # Exclude the data for everything except our quarter\n period_data = df.loc[start_date:end_date] # Note: NOT using extended quarter range\n rental_income = period_data[period_data[CATEGORY] == 'Rent']\n utilities = period_data[(period_data[CATEGORY] == 'Utilities')]\n other_income = period_data[(period_data['Transaction Type'] == 'credit') & (period_data[CATEGORY] != 'Rent')]\n expenses = period_data[(period_data['Transaction Type'] == 'debit')]\n unpaid_util_overages = float(0)\n\n # print(rental_income)\n # print(other_income)\n # print(expenses)\n \n html_config.initialize()\n print(html_config.HTML_OPEN)\n\n print('<H1>Income and Expense Report for %s-%s:' % (start_date, end_date), '</H1><p>')\n\n # List all unit specific rents and expenses for the quarter\n for UNIT in sorted(rental_income['Unit'].unique()):\n # Show rental income info\n temp_df = rental_income[rental_income['Unit'] == UNIT]\n print('<br><H2>Total rent for Unit ', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</H2>')\n print(temp_df[['Description', 'Amount']].to_html())\n \n if not SKIP_UTIL_ANALYSIS:\n # Show utilities payments and calculate any overage due\n temp_df = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'debit')]\n print('<br><H2>Utilities Expenses for Unit', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n overage = temp_df.assign(Overage=lambda x: x.Amount - limit_df.loc[UNIT].Amount)\n # Disable warning when setting negative overage values to zero\n pd.set_option('mode.chained_assignment', None)\n overage.Overage[overage.Overage < 0] = 0\n pd.set_option('mode.chained_assignment', 'warn')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if utilties costs exceeded allotted amount\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n unpaid_util_overages += overage['Overage'].sum()\n # Show any untilities that were collected \n overage_collected = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'credit')]\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n \n\n \n # Generate unit specific Utility usage reports\n if GEN_TENANT_UTIL_REPORTS and OUTPUT_DIRECTORY:\n TENANT_FILE = '%s/122-Spring-St-%s-%s-Unit-%s-utils.html' % (OUTPUT_DIRECTORY, start_date, end_date, UNIT)\n TENANT_REPORTS.append(TENANT_FILE)\n sys.stdout = open(TENANT_FILE, 'w')\n print(html_config.HTML_OPEN)\n\n print('<H1>Unit', UNIT, '</H1>')\n print('<br><H2>Utilities Expenses for: %s-%s' % (start_date, end_date))\n print('<br>Utilites included in rent: ${:,.2f}'.format(limit_df.loc[UNIT].Amount))\n print('</H2>')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if any utilties overage may be due\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': 
${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n\n print(html_config.HTML_CLOSE)\n\n # Restore stdout to the main report file\n sys.stdout = open(REPORT_FILE, 'a')\n \n # Show other unit specific transactions\n if SKIP_UTIL_ANALYSIS:\n unit_exp = expenses[(expenses['Unit'] == UNIT)]\n unit_income = other_income[other_income['Unit'] == UNIT]\n else:\n unit_exp = expenses[(expenses['Unit'] == UNIT) & (expenses[CATEGORY] != 'Utilities')]\n unit_income = other_income[(other_income['Unit'] == UNIT) & (other_income[CATEGORY] != 'Utilities')]\n \n if not unit_exp.empty:\n print('<br><H2>Other Unit specific expenses for: ', UNIT, ': ${:,.2f}'.format(unit_exp['Amount'].sum()), '</h2>')\n print(unit_exp[['Description', 'Amount', 'Unit', CATEGORY]].to_html())\n print('<p>')\n \n # Show any other unit specific credit\n other_income = other_income[other_income['Unit'] == UNIT]\n if not other_income.empty:\n print('<br><H2>Expense offsets for Unit ', UNIT, ': ${:,.2f}'.format(other_income['Amount'].sum()), '</H2>')\n print(other_income[['Description', 'Amount', CATEGORY]].to_html())\n \n # Add a line between units\n print('<hr>')\n \n # List the shared income and expenses for the quarter\n temp_df = other_income[other_income['Unit'].isnull()]\n if not temp_df.empty:\n print ('<br><H2>Non unit specific income: ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n gen_expenses = expenses[expenses['Unit'].isnull()]\n if not gen_expenses.empty:\n print ('<br><H1>Non unit specific expenses</h1>')\n # Get the list of expense categories and generate summary for each\n for category in sorted(gen_expenses[CATEGORY].unique()):\n temp_df = gen_expenses[(gen_expenses[CATEGORY] == category)]\n print ('<br><H2>'+ category +': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n \n # If there were any security transactions in the period give a security report\n if not security_df.loc[start_date:end_date].empty:\n temp_df = security_df.loc[start_date:end_date] \n print('<hr><H2>Security related transactions:</H2>')\n print(temp_df[['Description', 'Amount', 'Transaction Type', 'Unit']].to_html())\n for UNIT in sorted(rental_income['Unit'].unique()):\n unit_df = security_df[security_df['Unit'] == UNIT]\n collected = unit_df[(unit_df['Transaction Type'] == 'credit')]['Amount'].sum()\n returned = unit_df[(unit_df['Transaction Type'] == 'debit')]['Amount'].sum()\n print('<center><H4>Current Liability on Unit '+str(UNIT)+': ${:,.2f}'.format(collected-returned), '</H4></center>')\n \n # # Summarize the periods income and expenses -- old way to be discarded...\n # print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()), '</H3>')\n # print('<H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n # print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n # Summarize the periods income and expenses\n print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()))\n print('<br><H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - 
other_income['Amount'].sum()), '</H3>')\n print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n# print('</H3>')\n \n print(html_config.HTML_CLOSE)\n sys.stdout.flush()", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': self.product_id.product_tmpl_id._get_product_accounts()['stock_input'].id,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.taxes_id.ids)],\n 'account_analytic_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n }\n return res", "def invoice(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/invoices/invoices/{params['invoice_id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n invoice = result[\"response\"][\"result\"][\"invoice\"]\n invoice_obj = FreshbooksInvoice(\n account_id=invoice['accountid'],\n customerid=invoice['customerid'], \n invoice_id=invoice['invoiceid'],\n currency_code=invoice['currency_code'],\n language=invoice['language'],\n terms=invoice['terms'],\n discount_value=invoice['discount_value'],\n discount_amount=invoice['discount_total']['amount'],\n invoice_number=invoice['invoice_number'],\n po_number=invoice['po_number'],\n amount=invoice['amount']['amount'],\n 
code=invoice['amount']['code'],\n create_date=invoice['create_date']\n )\n return invoice_obj.__dict__", "def clean_qpcr_data(df) -> pd.DataFrame:\n ASSAYDATE_COL = \"Assay Date\"\n INSTRUMENT_COL = \"Instrument\"\n POOL_COL = \"Pool\"\n\n # Names and types of all our columns. We also add the columns INSTRUMENT_COL and POOL_COL\n original_columns = [ASSAYDATE_COL, DATE_COL, \"Sample ID\", \"GENE\", \"Ct [1]\", \"Ct [2]\", \"Ct [3]\", \"Ct Avg\", \"Ct Stdev\", \"Copies [1]\", \"Copies [2]\", \"Copies [3]\", \"Copies AVG\", \"Copies Stdev\", \"PMMoV Ct [1]\", \"PMMoV Ct [2]\", \"PMMoV Ct [3]\", \"PMMoV Avg\", \"PMMoV Stdev\", \"Empty tube weight (g)\", \"Full tube weight (g)\", \"Pellet weight (g)\", \"Extracted Mass (in 100 uL) (g)\", \"Copies per Extracted Mass (copies/g) [1]\", \"Copies per Extracted Mass (copies/g) [2]\", \"Copies per Extracted Mass (copies/g) [3]\", \"Copies per Extracted Mass (copies/g) Avg\", \"Copies per Extracted Mass Stdev\", \"2^Ct\", \"2^Ct normalized to a value\", \"(2^Ct normalized to a value per Extracted Mass)\", \"PMMoV Copies [1]\", \"PMMoV Copies [2]\", \"PMMoV Copies [3]\", \"PMMoV Copies Avg\", \"PMMoV Copies Stdev\", \"Copies per Copies of PMMoV Avg\", \"Copies per Copies of PMMoV * 10^3 Avg\", \"Copies per Copies normalized to a value\", \"Copies per Copies of PMMoV Stdev\", \"Copies per Copies of PMMoV * 10^3 Stdev\", \"Copies per Copies normalized to a value Stdev\", \"Date [2]\", \"Gene\", \"APPROVED: Copies per Copies of PMMoV [1]\", \"APPROVED: Copies per Copies of PMMoV [2]\", \"APPROVED: Copies per Copies of PMMoV [3]\", \"APPROVED: Copies per Copies of PMMoV Avg\", \"APPROVED: Copies per Copies of PMMoV Stdev\", \"APPROVED: Copies per Extracted Mass (copies/g) [1]\", \"APPROVED: Copies per Extracted Mass (copies/g) [2]\", \"APPROVED: Copies per Extracted Mass (copies/g) [3]\", \"APPROVED: Copies per Extracted Mass (copies/g) Avg\", \"APPROVED: Copies/L [1]\", \"APPROVED: Copies/L [2]\", \"APPROVED: Copies/L [3]\", \"APPROVED: Copies/L Avg\", \"MESP UPLOAD: PMMoV Copies per Extracted Mass (copies/g)\", \"MESP UPLOAD: PMMoV Copies/L\", \"INHIBITION CTRL: Date\", \"INHIBITION CTRL: Sample Name\", \"INHIBITION CTRL: Pepper 1/10 [1]\", \"INHIBITION CTRL: Pepper 1/10 [2]\", \"INHIBITION CTRL: Pepper 1/10 [3]\", \"INHIBITION CTRL: Pepper 1/10 Avg\", \"INHIBITION CTRL: Pepper 1/40 [1]\", \"INHIBITION CTRL: Pepper 1/40 [2]\", \"INHIBITION CTRL: Pepper 1/40 [3]\", \"INHIBITION CTRL: Pepper 1/40 Avg\", \"INHIBITION CTRL: Pepper No Dilution [1]\", \"INHIBITION CTRL: Pepper No Dilution [2]\", \"INHIBITION CTRL: Pepper No Dilution [3]\", \"INHIBITION CTRL: Pepper No Dilution Avg\", \"Pepper 1/10 ΔCt\", \"Pepper 1/40 ΔCt\"]\n original_types = [\"date\", \"date\", \"text\", \"text\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"date\", \"text\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"date\", \"text\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", \"number\", 
\"number\", \"number\", \"number\", \"number\", \"number\"]\n new_columns = original_columns.copy()\n new_columns[2:2] = [INSTRUMENT_COL, POOL_COL]\n new_types = original_types.copy()\n new_types[2:2] = [\"text\", \"text\"]\n\n # Remove unwanted columns at end of dataframe, and add new columns in the correct order\n df = df[df.columns[:len(original_columns)]].copy()\n df.columns = original_columns\n df[list(set(new_columns) - set(original_columns))] = None\n df = df[new_columns]\n\n # Go through all rows and clean them, assign appropriate values for current instrument, pool, assay date, etc\n cur_instrument, cur_pool = None, None\n cur_assaydate = None\n has_inhibition_controls = False\n has_copies_per_liter = False\n for i,row in df.iterrows():\n cur_instrument, cur_pool = get_inst_and_pool(row, cur_instrument, cur_pool)\n if row[ASSAYDATE_COL]:\n row_date = parse_date(row[ASSAYDATE_COL])\n cur_assaydate = row_date or cur_assaydate\n if row_date is not None:\n has_inhibition_controls = False\n has_copies_per_liter = False\n\n if np.any([\"Pepper 1/10\" in c for c in row if isinstance(c, str)]):\n has_inhibition_controls = True\n if np.any([\"Copies / VS\" in c or \"Copies / L\" in c for c in row if isinstance(c, str)]):\n has_copies_per_liter = True\n df.loc[i, ASSAYDATE_COL] = cur_assaydate\n df.loc[i, INSTRUMENT_COL] = cur_instrument\n df.loc[i, POOL_COL] = cur_pool\n if not has_inhibition_controls:\n df.loc[i, [c for c in df.columns if c.startswith(\"INHIBITION CTRL: \")]] = \"\"\n if not has_copies_per_liter:\n index = [idx for idx, c in enumerate(df.columns) if c.startswith(\"APPROVED: Copies/L\")][0]\n df.loc[i, df.columns[index:]] = \"\"\n\n # Remove invalid rows (where no valid \"Date\" is specified)\n df[DATE_COL] = pd.to_datetime(df[DATE_COL], errors=\"coerce\")\n df = df.loc[~df[DATE_COL].isna()]\n\n # 6 rows have a year of 1900 (from \"Septembe 27th\" assay date), change these to 2020.\n # 12 rows have a year of 2014 (from \"Septembe 17th\" assay date), change these to 2020.\n f = (df[DATE_COL].dt.year == 1900) | (df[DATE_COL].dt.year == 2014)\n df.loc[f, DATE_COL] = pd.to_datetime(df.loc[f, DATE_COL].dt.strftime(\"2020-%m-%d\"), format=\"%Y-%m-%d\")\n\n # Rows from assay date \"Tuesday, December 22, 2020\" and \"Wednesday, December 23, 2020\" have incorrect\n # year in \"Date\" column (2021 instead of 2020)\n df[ASSAYDATE_COL] = pd.to_datetime(df[ASSAYDATE_COL], errors=\"coerce\")\n f = (df[ASSAYDATE_COL] == pd.to_datetime(\"December 22, 2020\")) | (df[ASSAYDATE_COL] == pd.to_datetime(\"December 23, 2020\"))\n df.loc[f, DATE_COL] = pd.to_datetime(df.loc[f, DATE_COL].dt.strftime(\"2020-%m-%d\"), format=\"%Y-%m-%d\")\n\n # Add row at start that specifies the column types (date, text, number)\n df.loc[-1] = new_types\n df.index = df.index + 1\n df = df.sort_index()\n df.index = list(range(len(df.index)))\n\n return df", "def create_invoice(self):\n sales_tax = 0.06\n item_sum = 0\n inv = f'Invoice#: {self.invoice_id}\\n'\n for key, value in self.items_with_price.items():\n item_sum += value\n inv += f'{key}.....${value:.2f}\\n'\n\n tax = item_sum * sales_tax\n inv += f'Tax.....${tax:.2f}\\n'\n inv += f'Total.....${tax + item_sum:.2f}'\n # print(inv)\n # returning for unit testing purposes\n return inv", "def get_rows(contract_address: str, file_name: str, receipts_filename: str) -> pd.DataFrame():\n receipts_df = pd.read_csv(receipts_filename)\n receipts_df = receipts_df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n df = pd.read_csv(file_name)\n df = 
df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n\n df = df.loc[receipts_df['status'] == 1] \n df = df.loc[df[\"to_address\"] == contract_address.lower()]\n df = df.reset_index()\n df = df.drop(columns='index')\n return df", "def cash_flow_response_to_df(\n portfolio_cash_flows_response: lusid.ResourceListOfPortfolioCashFlow,\n sum_by_date: bool = True\n) -> pd.DataFrame:\n\n def select_cols(\n df: pd.DataFrame,\n filter_col: str,\n filter_value: str,\n cols_to_keep: list,\n ) -> pd.DataFrame:\n return df[df[filter_col] == filter_value][cols_to_keep]\n\n # Extract cash payment data from cash flow response\n cash_flows_dict = portfolio_cash_flows_response.to_dict()\n cash_flow_data = pd.json_normalize(cash_flows_dict[\"values\"])\n\n # Split pays and receives and handle -ve signage for pay outflows\n pay_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Pay\",[\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n pay_data[\"amount\"] = pay_data[\"amount\"].apply(lambda x: -1 * x)\n pay_data.rename(columns={\"amount\": \"payAmount\"}, inplace=True)\n rec_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Receive\",\n [\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n rec_data.rename(columns={\"amount\": \"receiveAmount\"}, inplace=True)\n\n # Merge on payment date and ignore join dupes\n merged_df = pay_data.merge(rec_data, on=[\"payment_date\", \"source_transaction_id\"])\n merged_df.drop_duplicates(subset=[\"payment_date\", \"source_transaction_id\"], keep=\"first\", inplace=True,\n ignore_index=True)\n\n # Add net flows and reduce index to dates\n merged_df['netAmount'] = merged_df['payAmount'] + merged_df['receiveAmount']\n merged_df[\"payment_date\"] = merged_df[\"payment_date\"].apply(lambda x: x.date())\n merged_df.set_index(keys=\"payment_date\", inplace=True)\n\n # Aggregate sub-holdings\n if sum_by_date:\n merged_df = merged_df.groupby(merged_df.index).sum()\n\n return merged_df", "def investment_line(self):\n inv, marks = self._get_marks()\n fig = plt.figure(figsize=(4, 2), dpi=200)\n fig.patch.set_facecolor('#ececec')\n ax = fig.add_subplot(111)\n investmentValues = inv['Invested']\n #investmentValues = pd.Series([0], index=[investmentValues.index[0]-timedelta(1)]).append(investmentValues)\n ax.plot(investmentValues, lw=1.2, color=\"blue\", label='Invested', marker=\"o\", markersize=3, markerfacecolor=\"grey\")\n ax.set_xlabel('Time')\n ax.set_ylabel('Investments (€)')\n ax.set_title('Investment Amount (€) - Daily')\n ax.xaxis.set_major_locator(dates.MonthLocator())\n ax.xaxis.set_major_formatter(dates.DateFormatter('%b-%Y'))\n for x, y, mark in zip(marks.index, marks['Invested'], marks['Marks']):\n a = ax.get_ylim()\n if x == marks.index[0]:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 80), y + (a[1]-a[0])/35), fontsize=5)\n else:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 50), y - (a[1]-a[0])/35), fontsize=5)\n ax.grid(True)\n fig.autofmt_xdate()\n ax.legend()\n return fig, ax", "def invoice_line_create(self, invoice_id, qty):\n invoice_lines = self.env['account.invoice.line']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'purchase_line_id': line.id})\n invoice_lines 
|= self.env['account.invoice.line'].create(vals)\n return invoice_lines", "def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(self.farmer.emissions_exante))\n baseline.loc[\"Total\"] = baseline.sum()\n baseline.loc[\"Total_plant\"] = baseline.iloc[0]\n baseline.loc[\"Total_transport\"] = baseline.iloc[1]\n baseline.loc[\"Total_field\"] = baseline.iloc[2]\n return baseline", "def from_invoice_and_line_item(cls, invoice: InvoiceModel, line_item: LineItemModel, line_number: int,\n distribution: str):\n # Note the invoice_date should be the payment_date in the future.\n return cls(total=line_item.total, invoice_number=invoice.id,\n line_number=line_number,\n is_reversal=invoice.invoice_status_code in\n [InvoiceStatus.REFUNDED.value, InvoiceStatus.REFUND_REQUESTED.value],\n distribution=distribution)", "def submit_invoices(self, **kwargs) -> ApiResponse:\n \n return self._request(kwargs.pop('path'), data=kwargs)", "def create_equity_curve_dataframe(self):\n # returns the cumulative product for percent change over every timestamp in the index\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n curve['returns'] = curve['total'].pct_change()\n #curve['equity_curve'] = (1.0+curve['returns']).cumprod()\n curve['equity_curve'] = curve['returns']\n curve['equity_curve'] += 1\n curve['equity_curve'] = curve['equity_curve'].cumprod()\n self.equity_curve = curve\n print(curve)", "def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_id:\n raise UserError(_('Please define an accounting purchase journal for this company.'))\n invoice_vals = {\n 'name': self.partner_ref or '',\n 'origin': self.name,\n 'type': 'in_invoice',\n 'account_id': self.partner_id.property_account_payable_id.id,\n 'partner_id': self.partner_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.currency_id.id,\n 'comment': self.notes,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'purchase_id': self.id,\n 'date_invoice':pytz.utc.localize(datetime.datetime.now()).astimezone(local).strftime('%Y-%m-%d'),\n }\n return invoice_vals", "def Generating_stock_daily_return_table():\r\n #Getting Names list\r\n Profitfile='pap//CombProfit.csv'\r\n path='D://Doktorat Marek//dane//'\r\n ProfitsFilePath=path+Profitfile\r\n quarterly_profit=pd.read_csv(ProfitsFilePath,index_col=0,header=0,parse_dates=True)\r\n Names_list=quarterly_profit.columns.tolist()\r\n \r\n Stock_returns=pd.DataFrame(index=pd.date_range('19980101','20180918',freq='D'),columns=Names_list)\r\n for name in Names_list:\r\n Stock_returns[name]=1+stock_returns(name)['Return']/100\r\n Stock_returns[name].fillna(1,inplace=True)\r\n \r\n WIG=pd.read_excel('D://Doktorat Marek//dane//notowania//Infostrefa//PL9999999995.xls')\r\n WIG['Date']=pd.to_datetime(WIG['Data'])\r\n WIG.set_index('Date',inplace=True)\r\n Stock_returns['WIG'] = 1+WIG['Zmiana']/100\r\n Stock_returns['WIG'].fillna(1,inplace=True)\r\n Stock_returns['Average']=Stock_returns.mean(1)\r\n \r\n 
FileReturns='D://Doktorat Marek//dane//Notowania//Stock_returns.csv'\r\n Stock_returns.to_csv(FileReturns,encoding='UTF-8')\r\n return 0", "def create_invoice(sender, invoice, issuer_details, **kwargs):\n if not invoice.items:\n return\n\n price = sum([item.price for item in invoice.items.all()])\n\n if not price:\n return\n\n paypal_invoice = models.Invoice(\n customer=invoice.customer,\n year=invoice.year,\n month=invoice.month,\n invoice_date=invoice.invoice_date,\n end_date=invoice.due_date,\n tax_percent=invoice.tax_percent,\n issuer_details=issuer_details,\n )\n\n paypal_invoice.payment_details = {\n 'name': invoice.customer.name,\n 'address': invoice.customer.address,\n 'country': invoice.customer.country,\n 'country_name': invoice.customer.get_country_display(),\n 'email': invoice.customer.email,\n 'postal': invoice.customer.postal,\n 'phone_number': invoice.customer.phone_number,\n 'bank_name': invoice.customer.bank_name,\n 'bank_account': invoice.customer.bank_account,\n }\n\n paypal_invoice.save()\n\n for item in invoice.items.all():\n models.InvoiceItem.objects.create(\n invoice=paypal_invoice,\n price=item.price,\n tax=item.tax,\n quantity=item.quantity,\n unit_price=item.unit_price,\n unit_of_measure=helpers.convert_unit_of_measure(item.unit),\n name=item.name,\n start=item.start,\n end=item.end,\n )", "def invoices_report_filter(request):\n qry = DBSession.query(Invoice)\n supplierlist = []\n for supplier in qry:\n if supplier.SupplierID:\n entry = {'Name': supplier.Order.Supplier.Name,\n 'ID': supplier.SupplierID}\n if entry not in supplierlist:\n supplierlist.append(entry)\n\n projectlist = []\n for project in qry:\n if project.ProjectID:\n entry = {'Name': project.Order.Project.Name,\n 'ID': project.ProjectID}\n if entry not in projectlist:\n projectlist.append(entry)\n\n paymentdatelist = []\n for paymentdate in qry:\n if paymentdate.PaymentDate:\n entry = paymentdate.PaymentDate.strftime(\"%d %B %Y\")\n if entry not in paymentdatelist:\n paymentdatelist.append(entry)\n\n return {'projects': sorted(projectlist, key=lambda k: k['Name'].upper()),\n 'suppliers': sorted(supplierlist, key=lambda k: k['Name'].upper()),\n 'paymentdates': sorted(paymentdatelist),\n 'paymentdates_exist': paymentdatelist != [],\n 'statuses': ['Draft', 'Due', 'Paid']}", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def sub_tax_sales_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables['dwc_bok_t_canco_hotel'])\n # df_circuit = manager.get_dataframe(tables['dwc_bok_t_canco_hotel_circuit'])\n # df_other = manager.get_dataframe(tables['dwc_bok_t_canco_other'])\n # df_transfer = manager.get_dataframe(tables['dwc_bok_t_canco_transfer'])\n # df_endow = manager.get_dataframe(tables['dwc_bok_t_canco_endowments'])\n # df_extra = manager.get_dataframe(tables['dwc_bok_t_canco_extra'])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\")\n\n df_hotel = sub_tax_sales_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_sales_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_sales_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_sales_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_sales_transfer_pricing_aux(manager, 
df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_sales_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canal = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canal = df_impuesto_canal.groupBy(\"seq_rec\", \"seq_reserva\") \\\n .agg({'impuesto_canal': 'sum'}).withColumnRenamed(\"SUM(impuesto_canal)\", \"Tax_Sales_Transfer_pricing\")\n\n df_fields = df_fields.join(df_impuesto_canal, [df_fields.operative_incoming == df_impuesto_canal.seq_rec,\n df_fields.booking_id == df_impuesto_canal.seq_reserva],\n 'left_outer').drop(df_impuesto_canal.seq_rec).drop(df_impuesto_canal.seq_reserva)\n\n df_fields = df_fields.na.fill({\"Tax_Sales_Transfer_pricing\": 0})\n\n df_fields = df_fields.withColumn(\"Tax_Sales_Transfer_pricing\",\n udf_round_ccy(df_fields.Tax_Sales_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canal\n\n return df_fields", "def income_report_gen(start, end):\n payments = get_income(start, end)\n row_title = [\"Name\", \"Boat\", \"Rent Day\", \"Pay Day\", \"Amount\"]\n data = []\n for payment in payments:\n temp = []\n for title, value in payment.items():\n temp.append(str(value))\n data.append(temp)\n row_format = \"{:>15}\" * (len(row_title)+1)\n print(row_format.format(\"\", *row_title))\n total_income = 0\n for i in range(len(data)):\n print(row_format.format(i+1, *data[i]))\n total_income += int(data[i][4])\n print(row_format.format(\"SUM\", *([\"--------------\"] * 4), str(total_income)))", "def get_equal_dataframe() -> pandas.DataFrame:\n data = {\n 'InvoiceDate': ['2009-01', '2012-01'],\n 'Count': [4, 4],\n 'Total': [8198.79, 5323.15]\n }\n return pandas.DataFrame(data, columns=['InvoiceDate', 'Count', 'Total'])", "def order_report():", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context)\n\n invoice_vals.update({\n 'partner_shipping_id': order.partner_shipping_id.id,\n })\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = 
po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,", "def renter_accounting_report_gen(sid, start, end):\n results = renter_accounting(sid, start, end)\n print(\"Name: \" + results[0])\n sum_value = 0\n 
row_title = [\"Date\", \"Boat\", \"Rent\", \"Payment\", \"Sum\"]\n row_format = \"{:>15}\" * len(row_title)\n print(row_format.format(*row_title))\n for result in results[1]:\n temp = list(result.keys()) + [value for key, value in list(result.values())[0].items()]\n if temp[2]:\n sum_value += temp[3]\n temp[2] = \"\"\n else:\n sum_value -= temp[3]\n temp[2] = temp[3]\n temp[3] = \"\"\n temp.append(sum_value)\n print(row_format.format(*[str(x) for x in temp]))", "def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict", "def quick_report(qbo_session, filter_attributes={}, headers=True):\n\n #basically, you can filter on any attribute massage.ledgerize() kicks out\n filterable_attributes = {\n \"TxnDate\":0, \"qbbo_type\":1, \"entity_id\":2, \n \"line_number\":3, \"document_type\":4,\n \"domain\":5, \"user_number\":6,\n \"CreateTime\":7, \"LastUpdatedTime\":8, \"SyncToken\":9, \"Adjustment\":10,\n \"account\":11, \"amount\":13, \"description\":14, \"name\":15,\n \"linked_transactions\":16\n }\n \n fa = copy.deepcopy(filter_attributes)\n\n for a in filter_attributes:\n \n if not a in filterable_attributes:\n raise Exception(\"QuickReport() doesn't know how to filter on\"+\n \" %s. Please use one of:\\n%s\" % \n (a, filterable_attributes))\n \n #yes, we're being permissive\n if isinstance(filter_attributes[a],(int,float,long,str)):\n \n fa[a]=[filter_attributes[a]]\n\n elif isinstance(filter_attributes[a],(list,tuple)):\n \n fa[a]=filter_attributes[a]\n\n else:\n \n raise Exception(\"filter_attributes items must be lists,\" + \\\n \"tuples, or stand-alone values\")\n\n filtered_lines = [qbo_session.ledgerize(\"_\", headers=True)]\n\n for ledger_line in qbo_session.ledger_lines():\n\n #now let's apply the filter, white-list style\n\n for a in fa:\n\n white_list = fa[a]\n\n #sometimes a Line will just HAVE the attribute\n #e.g. a JournalEntry line will always have an account\n #othertimes, we'll have to look it up with a cross reference\n #e.g. 
an Invoice line will NOT have an account, it'll have\n #an item, so we need to look up the account in the item\n\n #so we're breaking that functionality out into it's own function\n\n i = filterable_attributes[a]\n\n if ledger_line[i] in white_list:\n\n filtered_lines.append(ledger_line)\n\n return filtered_lines", "def create_equity_curve_dataframe(self):\n # returns the cumulative product for percent change over every timestamp in the index\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n curve['returns'] = curve['total'].pct_change()\n #curve['equity_curve'] = (1.0+curve['returns']).cumprod()\n curve['equity_curve'] = curve['returns']\n curve['equity_curve'] += 1\n curve['equity_curve'] = curve['equity_curve'].cumprod()\n self.equity_curve = curve", "def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n 
dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return pnl_lines", "def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True", "def invoices(self):\r\n return inv.Invoices(self)", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n 
.groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def create_order(df_stock, df_signal, moneyness=('OTM', 'ITM'),\n cycle=0, strike=0, expire=(False, True)):\n symbol = df_stock.ix[df_stock.index.values[0]]['symbol']\n\n tb_closes = {\n stock.date.strftime('%Y-%m-%d'): np.float(stock.close) for stock in\n Stock.objects.filter(Q(symbol=symbol) & Q(source='thinkback'))\n }\n\n holding = df_signal['holding'].apply(\n lambda x: int(x / np.timedelta64(1, 'D'))\n ).astype(np.int).min()\n\n data = list()\n dates0, options0 = get_options_by_cycle_strike(\n symbol=symbol,\n name='CALL',\n dates0=df_signal['date0'],\n dte=holding,\n moneyness=moneyness,\n cycle=cycle,\n strike=strike\n )\n\n for date0, (index, signal) in zip(dates0, df_signal.iterrows()):\n date1 = signal['date1']\n\n if date0:\n option0 = options0.get(date=date0)\n\n option1 = None\n if option0 and option0.bid > 0:\n date1, option1 = get_option_by_contract_date(option0.contract, date1)\n\n if option0 and option1:\n stock0 = tb_closes[option0.date.strftime('%Y-%m-%d')]\n close0 = stock0 - np.float(option0.bid)\n\n ask1 = 0\n if int(expire):\n ask1 = np.float(\n tb_closes[option1.date.strftime('%Y-%m-%d')]\n - np.float(option0.contract.strike)\n )\n ask1 = ask1 if ask1 > 0 else 0.0\n\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(ask1)\n else:\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(option1.ask)\n\n data.append({\n 'date0': 
option0.date,\n 'date1': date1,\n 'signal0': 'BUY',\n 'signal1': 'SELL',\n 'stock0': stock0,\n 'stock1': stock1,\n 'option0': option0.bid,\n 'option1': ask1 if expire else option1.ask,\n 'close0': np.round(close0, 2), # buy using ask\n 'close1': np.round(close1, 2), # sell using bid\n 'option_code': option0.contract.option_code,\n 'strike': np.float(option0.contract.strike),\n 'dte0': np.int(option0.dte),\n 'dte1': np.int(option1.dte),\n 'intrinsic0': np.float(option0.intrinsic),\n 'intrinsic1': np.float(option1.intrinsic)\n })\n\n df = DataFrame()\n if len(data):\n df = DataFrame(data, columns=[\n 'date0', 'date1', 'signal0', 'signal1',\n 'stock0', 'stock1', 'option0', 'option1', 'close0', 'close1',\n 'option_code', 'strike', 'dte0', 'dte1',\n 'intrinsic0', 'intrinsic1'\n ])\n\n df['holding'] = df['date1'] - df['date0']\n df['pct_chg'] = np.round((df['close1'] - df['close0']) / df['close0'], 2)\n\n f = lambda x: np.round(x['pct_chg'] * -1 if x['signal0'] == 'SELL' else x['pct_chg'], 2)\n df['pct_chg'] = df.apply(f, axis=1)\n\n df['sqm0'] = 100\n df['sqm1'] = -100\n df['oqm0'] = -1\n df['oqm1'] = 1\n\n return df", "def df(client_ids, start, end):\n obj = search(client_ids, start, end)\n df = DataFrame.from_dict(obj).T\n\n if df.empty:\n return df\n\n df.index.name = 'client_id'\n df = df.rename(columns={ 0: 'inactive', 1: 'active' })\n df['total'] = df.sum(axis=1)\n df = df.fillna(0).astype('int64')\n\n return df", "def _prepare_invoice(self):\n self.ensure_one()\n # journal_id = self.env['account.invoice'].with_context(force_company=self.env.user.company_id.id).default_get(['journal_id'])['journal_id']\n journal_id = self.company_id.journal_id.id\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id\n }\n return invoice_vals", "def sub_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\",\n \"creation_date\", \"booking_currency\")\n\n df_hotel = sub_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_transfer_pricing_aux(manager, df_endowt, 
seq_recs, seq_reservas, df_aux)\n df_extra = sub_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", udf_round_ccy(df_fields.Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def _create_payments(self, invoice):\n self.ensure_one()\n if self.schedule_id and self.schedule_id.occurences > 0:\n # TODO: make more intelligent price cut\n amount = invoice.amount_total\n amount_per_occurence = amount / self.schedule_id.occurences\n for day in self.schedule_id.day_ids:\n payment = self.env['account.payment'].new({\n 'payment_type': 'inbound',\n 'partner_type': 'customer',\n 'partner_id': self.member_id.partner_id.id,\n 'amount': amount_per_occurence,\n 'payment_date': day.day,\n 'journal_id': self.journal_id.id,\n })\n payment._onchange_journal()\n payment_values = dict(payment._cache)\n payment = self.env['account.payment'].create(payment_values)\n payment.invoice_ids = [(4, invoice.id, False)]", "def _getdata(self, data):\n lines = []\n start_date = str(data['form']['start_date'])\n end_date = str(data['form']['end_date'])\n department_ids = data['form']['department_ids']\n\n vehicles_ids = self.pool.get('fleet.vehicle').search(self.cr, self.uid,\\\n [('department_id', 'in', department_ids)], context=self.context)\n\n fuel_qty_line_obj = self.pool.get('fuel.qty.line')\n\n sdate = datetime.strptime(start_date, \"%Y-%m-%d\")\n syear = sdate.year\n smonth = sdate.month\n edate = datetime.strptime(end_date, \"%Y-%m-%d\")\n eyear = edate.year\n emonth = edate.month\n\n fuel_qty_line_ids = fuel_qty_line_obj.search(self.cr, self.uid,\\\n [('vehicles_id', 'in', vehicles_ids)], context=self.context)\n\n\n\n counter = 1\n for qty_line in fuel_qty_line_obj.browse(self.cr, self.uid, \\\n fuel_qty_line_ids, context=self.context):\n current_m = int(qty_line.month)\n current_y = int(qty_line.year)\n start = current_m >= smonth and current_y >= syear\n end = current_m <= emonth and current_y <= 
eyear\n if start and end:\n line = {'type':str(counter)+\" : \"+\\\n qty_line.vehicles_id.type.name}\n line['vehicle_no'] = qty_line.vehicles_id.vin_sn\n line['spent'] = qty_line.spent_qty\n line['counter_no'] = str(qty_line.vehicles_id.odometer)+\" \"+\\\n qty_line.vehicles_id.odometer_unit\n line['date'] = qty_line.month+\"/\"+qty_line.year\n lines.append(line)\n counter += 1\n return lines", "def invoice_onsettled(invoice):\n\n db = current.db\n s3db = current.s3db\n\n # Look up claim, invoice number, program and billing\n btable = s3db.fin_voucher_billing\n ctable = s3db.fin_voucher_claim\n itable = s3db.fin_voucher_invoice\n ptable = s3db.fin_voucher_program\n join = [ptable.on(ptable.id == ctable.program_id),\n btable.on(btable.id == ctable.billing_id),\n itable.on(itable.id == ctable.invoice_id),\n ]\n query = (ctable.invoice_id == invoice.id) & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.id,\n ctable.program_id,\n ctable.billing_id,\n ctable.pe_id,\n btable.date,\n itable.invoice_no,\n ptable.name,\n ptable.organisation_id,\n join = join,\n limitby = (0, 1),\n ).first()\n if not row:\n return\n program = row.fin_voucher_program\n billing = row.fin_voucher_billing\n claim = row.fin_voucher_claim\n invoice_no = row.fin_voucher_invoice.invoice_no\n\n error = None\n\n # Look up the provider organisation\n pe_id = claim.pe_id\n otable = s3db.org_organisation\n provider = db(otable.pe_id == pe_id).select(otable.id,\n otable.name,\n limitby = (0, 1),\n ).first()\n\n from .helpers import get_role_emails\n provider_accountants = get_role_emails(\"PROVIDER_ACCOUNTANT\", pe_id)\n if not provider_accountants:\n error = \"No provider accountant found\"\n\n if not error:\n # Lookup the template variables\n base_url = current.deployment_settings.get_base_public_url()\n appname = current.request.application\n data = {\"program\": program.name,\n \"date\": btable.date.represent(billing.date),\n \"invoice\": invoice_no,\n \"organisation\": provider.name,\n \"url\": \"%s/%s/fin/voucher_claim/%s\" % (base_url, appname, claim.id),\n }\n\n # Send the email notification\n from .notifications import CMSNotifications\n error = CMSNotifications.send(provider_accountants,\n \"InvoiceSettled\",\n data,\n module = \"fin\",\n resource = \"voucher_invoice\",\n )\n if error:\n msg = \"%s could not be notified about invoice settlement: %s\"\n current.log.error(msg % (provider.name, error))\n else:\n msg = \"%s notified about invoice settlement\"\n current.log.debug(msg % provider.name)", "def generate_agreement_orders(self, start_date, end_date):\n self.ensure_one()\n if not self.active:\n return\n lines_to_order = {}\n exp_date = fields.Date.from_string(self.next_expiration_date)\n if exp_date < end_date and self.prolong != 'unlimited':\n end_date = exp_date\n for line in self.agreement_line:\n # Check if there is any agreement line to order\n if not line.active_chk:\n continue\n # Check future orders for this line until end_date\n next_order_date = self._get_next_order_date(line, start_date)\n while next_order_date <= end_date:\n # Add to a list to order all lines together\n if not lines_to_order.get(next_order_date):\n lines_to_order[next_order_date] = self.env[\n 'sale.recurring_orders.agreement.line']\n lines_to_order[next_order_date] |= line\n next_order_date = self._get_next_order_date(\n line, next_order_date)\n # Order all pending lines\n dates = lines_to_order.keys()\n dates.sort()\n for date in dates:\n # Check if an order exists for that date\n order = self.order_line.filtered(\n 
lambda x: (\n fields.Date.to_string(\n fields.Datetime.from_string(x.date_order)) ==\n fields.Date.to_string(date)))\n if not order:\n # create it if not exists\n self.create_order(\n fields.Date.to_string(date), lines_to_order[date])", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def duplicate_invoice(invoice):\n from invoicer.models import Invoice\n from invoicer.models import LineItem\n\n # copy main attributes\n new_invoice = Invoice(\n company=invoice.company,\n invoice_date=datetime.now(),\n client=invoice.client,\n location=invoice.location,\n tax_rate=invoice.tax_rate,\n left_address=invoice.left_address,\n right_address=invoice.right_address,\n terms=invoice.terms,\n footer=invoice.footer\n )\n new_invoice.save()\n\n # now line items\n for line_item in invoice.line_items.all():\n new_invoice.line_items.add(LineItem(\n name=line_item.name,\n description=line_item.description,\n price=line_item.price,\n taxable=line_item.taxable,\n item=line_item.item,\n quantity=line_item.quantity\n ))\n\n return new_invoice", "def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None):\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None)\n \n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def get_historic_rates(cls, client, product, start_date, end_date,\n granularity=1):\n startDate = dt.datetime.strptime(start_date, \"%Y-%m-%d\")\n startDateTimestamp = startDate.timestamp()\n endDate = dt.datetime.strptime(end_date, \"%Y-%m-%d\")\n endDateTimestamp = endDate.timestamp()\n\n # List of time divisions for retrieving data.\n timeRange = range(int(startDateTimestamp), int(endDateTimestamp),\n 200 * granularity)\n timeRange = list(timeRange) + [endDateTimestamp]\n\n # New DataFrame.\n columns = ['time', 'low', 'high', 'open', 'close', 'volume']\n data = pd.DataFrame(columns=columns)\n\n # Populating dataframe.\n for i in tqdm(range(len(timeRange) - 1)):\n try:\n data = cls.append_data(client, data, product, columns,\n timeRange[i], timeRange[i + 1])\n except ValueError:\n time.sleep(3)\n data = cls.append_data(data, columns, product,\n timeRange[i], timeRange[i + 1])\n\n # Reindexing dataframe.\n data['time'] = data.time.apply(dt.datetime.fromtimestamp)\n data.set_index('time', inplace=True)\n\n # Using data points where the price has changed.\n data = data.where(data.close != data.close.shift()).dropna().sort_index()\n\n return data", "def execute(filters=None):\n\tcolumns, data = [\n\t\t\"Accident:Link/Accident:150\",\n\t\t\"Victim:Data:200\",\n\t\t\"Nature of Accident:Data:200\",\n\t\t\"Fire Extinguisher Used:Link/Fire Extinguisher:150\",\n\t\t\"Fire Extinguisher Location: Data:150\",\n\t\t\"Reported By:Data:100\",\n\t\t\"Reported Date:Data:100\"\n\t], []\n\n\tconditions = \"tabAcc.fire_related = 1\" # Only Fire related accidents\n\n\tif filters.get('modified_from') and 
filters.get('modified_to'):\n\t\tconditions += ' and tabAcc.prepared_date BETWEEN \\'{0}\\' and \\'{1}\\''.format(filters.get('modified_from'),filters.get('modified_to'))\n\tif filters.get('fire_extinguisher'):\n\t\tconditions += ' and tabAcc.fire_extinguisher_used = \\'{0}\\''.format(filters.get('fire_extinguisher'))\n\tif filters.get('nature_of_accident'):\n\t\tconditions += ' and tabAcc.type_of_accident = \\'{0}\\''.format(filters.get('nature_of_accident'))\n\tif filters.get('reported_by'):\n\t\tconditions += ' and tabAcc.prepared_by = \\'{0}\\''.format(filters.get('reported_by'))\n\n\tsql = '''select tabAcc.name accident,tabAcc.employee_name victim, tabAcc.type_of_accident nature_of_accident , tabAcc.fire_extinguisher_used fire_extinguisher_used,\n\ttabFire.location fire_extinguisher_used, tabAcc.prepared_by reported_by,tabAcc.prepared_date reported_date from `tabFire Extinguisher` tabFire LEFT JOIN `tabAccident` tabAcc\n\tON (tabFire.name = tabAcc.fire_extinguisher_used) WHERE {0}'''\n\n\tfrappe.errprint(sql.format(conditions))\n\tdata = frappe.db.sql(sql.format(conditions))\n\treturn columns,data", "def generate_orders(self, cr, uid, ids, context=None):\n voucher_pool = self.pool.get('account.voucher')\n payment_term_obj = self.pool.get('account.payment.term')\n account_budget_confirmation_obj = self.pool.get('account.budget.confirmation')\n period_obj = self.pool.get('account.period')\n if context is None:\n context = {}\n for order in self.browse(cr, uid, ids, context=context):\n #################################to remind\n total_fixed = total_percent = 0\n for line in order.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (order.amount or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Can not create the payments !\\n\\\n The related payment term is probably miss configured as it gives a computed amount greater than the total permanent payment amount. 
\\\n The latest line of your payment term must be of type 'balance' to avoid rounding issues.\"))\n # create one move line for the total and possibly adjust the other lines amount\n totlines1 = []\n for o in order.line_ids:\n totlines1 += payment_term_obj.compute(cr, uid, order.payment_term.id, o.amount, order.date or False, context=context)\n \n d = {}\n for k, v in totlines1:\n d.setdefault(k, [k]).append(v)\n totlines = map(tuple, d.values())\n\n for t in totlines :\n #to substract date from the interval number \n order_date = t[0]\n entered_date = datetime.datetime.strptime(order_date, '%Y-%m-%d')\n entered_date = entered_date.date()\n account_id = (order.partner_id.property_account_payable and order.partner_id.property_account_payable.id) or \\\n (order.journal_id.default_credit_account_id and order.journal_id.default_credit_account_id.id)\n period_id = period_obj.find(cr, uid, t[0], context=context)[0]\n\n list_confirm = [conf.id for conf in o.confirmation_ids]\n confirmations = account_budget_confirmation_obj.search(cr, uid, [('id','in', list_confirm),('period_id','=', period_id)], context=context) #('date','=',t[0]),\n\n for confirm in confirmations:\n confirm_id = confirm\n\n voucher_lines = [(0, 0, {'name':ol.name, 'account_id':ol.account_id.id, 'type':'dr',\n 'amount':t[count + 1], 'account_analytic_id':ol.account_analytic_id.id, 'budget_confirm_id': confirm_id })\n for count, ol in enumerate(order.line_ids)]\n res = voucher_pool.onchange_price(cr, uid, 0, voucher_lines, [], partner_id=order.partner_id.id, context=context).get(\"value\", {})\n voucher_dict = {\n 'partner_id' : order.partner_id.id,\n 'account_id': account_id,\n 'company_id' : order.company_id.id,\n 'journal_id' : order.journal_id.id,\n 'period_id': order.period_id.id,\n 'type':'purchase',\n 'date' : t[0],\n 'reference': order.name,\n 'payment_permanent_voucher_id': order.id,\n 'line_ids':voucher_lines,\n 'amount':res.get(\"amount\", 0.0)\n }\n voucher_pool.create(cr, uid, voucher_dict, context=context)\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def process_ticket_data():\r\n c = conn.cursor()\r\n c.execute('Select Count(*) from raw_ticket_data')\r\n totalleft = c.fetchone()[0]\r\n print('{} total rows required'.format(totalleft))\r\n np.random.seed(1)\r\n df_total = pd.read_sql_query('Select Ticketnumber, TickIssueDate, TickIssueTime, ViolationDesc, '\r\n ' VehMake, TickRPPlate, TickStreetNo, TickMeter, Agency, TickBadgeIssued, '\r\n 'TickStreetName , TotalPaid, TotalAmtDue from raw_ticket_data ', conn)\r\n columnlist = df_total.columns.tolist()\r\n df_total.sort_values(by = 'TickIssueDate', inplace = True)\r\n n = 500000\r\n totalsize = df_total.shape[0]\r\n indexes = [i for i in range(0,totalsize, n)]\r\n columnlist = df_total.columns.tolist()\r\n columnlist.append('address')\r\n tqdm.pandas()\r\n j = 1\r\n for i in indexes:\r\n df = df_total[i:i+n]\r\n print('Iteration {} started at {}. 
{} records left'.format(j, dt.datetime.now().strftime(\"%H:%M\"), totalsize))\r\n df['TickStreetNo'] = df['TickStreetNo'].apply(return_num)\r\n df['ViolationDesc'] = df['ViolationDesc'].apply(lambda x: x.replace('METER DTN','MTR OUT DT'))\r\n df['TickStreetName'] = df['TickStreetName'].apply(replace_street)\r\n df['TickStreetName'] = df['TickStreetName'].apply(return_street)\r\n df['TotalPaid'] = df['TotalPaid'].apply(return_cost)\r\n df['TotalAmtDue'] = df['TotalAmtDue'].apply(lambda x: re.sub('[^1-9]', '', str(x)))\r\n df['TickRPPlate'] = df['TickRPPlate'].apply(lambda x: 'None' if len(re.findall('[\\w+]', str(x))) == 0 else str(x).replace('[^\\w+]', ''))\r\n df['Tdelt'] = df['TickIssueTime'].apply(return_time_delta)\r\n\r\n\r\n df_1 = df.merge(single_address, left_on = ['TickStreetNo', 'TickStreetName'], right_on = ['number', 'streetname'])\r\n df_2 = df.merge(double_address, left_on = ['TickStreetNo', 'TickStreetName'], right_on = ['number', 'streetname'])\r\n\r\n df_2 = df_2.merge(df_1, how = 'left', left_on = ['TickIssueDate', 'TickBadgeIssued', 'nhood'], right_on = ['TickIssueDate', 'TickBadgeIssued', 'nhood'])\r\n df_3 = df_2[pd.isnull(df_2['Tdelt_y'])]\r\n df_2.dropna(subset = ['Tdelt_y'], inplace = True)\r\n df_2['timedelta'] = df_2.apply(lambda x: np.abs(x['Tdelt_y'] - x['Tdelt_x']), axis = 1)\r\n df_2.sort_values(by = 'timedelta', inplace = True)\r\n\r\n df_2.columns = [col.replace('_x', '') for col in df_2.columns]\r\n df_3.columns = [col.replace('_x', '') for col in df_3.columns]\r\n df_2.drop_duplicates(subset = 'TicketNumber', inplace = True)\r\n print(\"Searching for unmatchable addresses\")\r\n df_3['address'] = df_3.progress_apply(return_address, axis = 1)\r\n\r\n df = df_1.append(df_2)\r\n df = df.append(df_3)\r\n df['TickIssueDate'] = df.apply(Time, axis = 1)\r\n df = df[columnlist]\r\n\r\n if i == 0:\r\n df.to_sql('ticket_data', if_exists = 'replace',con = conn)\r\n else:\r\n df.to_sql('ticket_data', if_exists = 'append',con = conn)\r\n\r\n totalsize -= n\r\n j+=1\r\n\r\n del c\r\n\r\n return", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n 
move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def _prepare_analytic_line(self, cr, uid, obj_line, context=None):\n return {'name': obj_line.name,\n 'date': obj_line.date,\n 'account_id': obj_line.analytic_account_id.id,\n 'unit_amount': obj_line.quantity,\n 'product_id': obj_line.product_id and obj_line.product_id.id or False,\n 'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,\n 'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),\n 'general_account_id': obj_line.account_id.id,\n 'journal_id': obj_line.journal_id.analytic_journal_id.id,\n 'ref': obj_line.ref,\n 'move_id': obj_line.id,\n 'user_id': uid,\n }", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n\n res.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return res", "def billing_history(cls, user=None):\n invoices = Invoice.query.filter(Invoice.user_id == user.id) \\\n .order_by(Invoice.created_on.desc()).limit(12)\n\n return invoices", "def _initialize_df(self, df):\n df['values'] = (self.tc.instrument_returns['cumulative'] *\n self.tc.starting_cash).mul(self.target_weights, axis=1).values * (1 - self.tc.commission)\n df['allocations'] = self.df['values'].div(df['values'].sum(axis=1), axis=0)\n df['returns'] = (df['values'].sum(axis=1)).pct_change(1).fillna(0)", "def generate_pr_table(start_ref, end_ref):\r\n header = \"|| Merged By || Author || Title || PR || JIRA || Verified? 
||\"\r\n pr_link = \"[#{num}|https://github.com/edx/edx-platform/pull/{num}]\"\r\n user_link = \"[@{user}|https://github.com/{user}]\"\r\n rows = [header]\r\n prbe = prs_by_email(start_ref, end_ref)\r\n for email, pull_requests in prbe.items():\r\n for i, pull_request in enumerate(pull_requests):\r\n try:\r\n pr_info = get_pr_info(pull_request)\r\n title = pr_info[\"title\"] or \"\"\r\n body = pr_info[\"body\"] or \"\"\r\n author = pr_info[\"user\"][\"login\"]\r\n except requests.exceptions.RequestException as e:\r\n message = (\r\n \"Warning: could not fetch data for #{num}: \"\r\n \"{message}\".format(num=pull_request, message=e.message)\r\n )\r\n print(colorize(\"red\", message), file=sys.stderr)\r\n title = \"?\"\r\n body = \"?\"\r\n author = \"\"\r\n rows.append(\"| {merged_by} | {author} | {title} | {pull_request} | {jira} | {verified} |\".format(\r\n merged_by=email if i == 0 else \"\",\r\n author=user_link.format(user=author) if author else \"\",\r\n title=title.replace(\"|\", \"\\|\"),\r\n pull_request=pr_link.format(num=pull_request),\r\n jira=\", \".join(parse_ticket_references(body)),\r\n verified=\"\",\r\n ))\r\n return \"\\n\".join(rows)", "def _get_report_data(self, request, queryset):\n first_item = queryset[0]\n data = {\n 'id': str(slugify(first_item.invoice_no)),\n 'property_of_id': (\n first_item.property_of.id\n if first_item.property_of else None\n ),\n 'model': queryset.model._meta.model_name,\n 'base_info': {\n 'invoice_no': first_item.invoice_no,\n 'invoice_date': first_item.invoice_date,\n 'provider': first_item.provider,\n 'datetime': datetime.datetime.now().strftime(\n self._invoice_report_datetime_format\n ),\n },\n 'items': list(map(self._parse_item, queryset)),\n 'sum_price': str(\n queryset.aggregate(\n Sum(self._price_field)\n ).get('{}__sum'.format(self._price_field))\n )\n }\n logger.info('Invoice report data: {}'.format(data))\n return data", "def add_invoice() -> str:\r\n invoice_details = []\r\n #Catching values user has entered in UI\r\n invoice_number = request.args.get(\"invoice_number\")\r\n invoice_details.append(invoice_number)\r\n customer = request.args.get(\"customer\")\r\n invoice_details.append(customer)\r\n date_required = request.args.get(\"date_required\")\r\n invoice_details.append(date_required)\r\n recipe = request.args.get(\"recipe\")\r\n invoice_details.append(recipe)\r\n gyle_number = request.args.get(\"gyle_number\")\r\n invoice_details.append(gyle_number)\r\n quantity_ordered = request.args.get(\"quantity_ordered\")\r\n invoice_details.append(quantity_ordered)\r\n #Passing list to function which writes list to CSV file\r\n data_add(invoice_details)\r\n invoice_message = \"INVOICE ADDED\"\r\n return render_template(\"singular_message.html\",\r\n user_display=invoice_message)", "def create_or_find_b2b_invoices_and_process_ept(self, row, sale_order, invoice_date, tax):\n\n vat_number = row.get('Buyer Tax Registration', False)\n invoice_number = row.get('VAT Invoice Number', False)\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n if not invoice.partner_id.vat:\n invoice.partner_id.vat = vat_number\n\n payments_lines = []\n if invoice.invoice_payments_widget != 'false':\n payments_dict = 
json.loads(invoice.invoice_payments_widget)\n payments_content = payments_dict.get('content', [])\n for line in payments_content:\n payments_lines.append(line.get('payment_id', False))\n\n invoice_line = invoice.mapped('invoice_line_ids').filtered(\\\n lambda line: line.tax_ids != tax)\n if invoice_line:\n invoice.button_draft()\n invoice.write({'ref': invoice_number, 'date': invoice_date})\n\n if len(invoice_line) > 1:\n for line in invoice_line:\n line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n else:\n invoice_line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n\n invoice.with_context({'check_move_validity': False})._recompute_tax_lines( \\\n recompute_tax_base_amount=True)\n invoice.action_post()\n for line in payments_lines:\n invoice.js_assign_outstanding_line(line)\n\n return True", "def get_invoice(self):\n\n # Check if unclosed invoice for the client exists\n old_inv = connection.Kinko.find_one({'cl': self.cl, 'tid': None,\n 'typ': TYPE_MAP[self.tab_type]})\n\n inv_num = None\n # If it does, update its values and update packages\n if old_inv:\n old_inv.dt = datetime.datetime.today()\n old_inv.range.lt = self.q_dict[\"cs.sd\"].get(\"$lt\", None)\n old_inv.save()\n\n inv_num = old_inv.num\n\n else:\n #kinko dict to be updated in Kinko Collection.\n kdict = {\n \"amt\": 0.0,\n \"cl\": unicode(self.cl),\n \"dt\": datetime.datetime.today(),\n \"typ\": TYPE_MAP[self.tab_type],\n \"range\": {\"lt\": self.q_dict[\"cs.sd\"].get(\"$lt\", None),\n \"gt\": self.q_dict[\"cs.sd\"].get(\"$gte\", None),\n }\n }\n\n k = Kinko(kdict)\n\n k_count = 1\n\n #the get num method of Kinko model generates the unique no for new kinko\n k[\"num\"] = self.get_knum(1)\n while connection.Kinko.collection.find({\"num\": k.num}).count() > 0:\n k[\"num\"] = self.get_knum(k_count+1)\n k_count += k_count\n\n connection.Kinko(k).save()\n\n inv_num = k['num']\n\n if inv_num:\n #after creating a new document in Kinko all packages are updated.\n connection.Package.collection.update(self.q_dict, {'$set': {'inv.num': inv_num}}, safe=True, multi=True)\n \n #Aggrigation of remitted amount for requested client\n non_invoiced = kinko_map_reduce(inv_num, TYPE_MAP[self.tab_type])\n\n if len(non_invoiced) == 0:\n return False\n else:\n inv = connection.Kinko.find_one({'num': inv_num})\n if inv:\n inv.amt = non_invoiced[0]['value']['amt']\n inv.save()\n return inv\n else:\n return False\n else:\n return False", "async def daily(self, ctx):\r\n # TODO: Asssess whether this can be cleaned up. 
\r\n # As it stands, very similar to inv()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, s.purchase_price, close.close, s.quantity*close.close - s.quantity*s.purchase_price ]) \r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n inv_df['sign'] = np.where(inv_df['Current Value']>=0, '+', '-')\r\n inv_df['%'] = abs(((inv_df['Close'] - inv_df['Purchase Price']) / inv_df['Purchase Price']) * 100)\r\n inv_df['%'] = inv_df['%'].round(1)\r\n inv_df = inv_df.sort_values(['Symbol'])\r\n inv_df = inv_df[['sign', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value']]\r\n aggregated = tabulate(inv_df.values.tolist(), headers=['Δ', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n await ctx.send(f'```diff\\n{aggregated}```')", "def records(self, variables=None, **kwargs):\n # Get necessary data for the variables\n all_df = self._data.records_all().set_index(self.DATE)\n variables = self._convert_variables(variables, all_df.columns.tolist())\n df = all_df.loc[:, variables]\n # Figure\n if self._data.complemented:\n title = f\"{self.area}: Cases over time\\nwith {self._data.complemented}\"\n else:\n title = f\"{self.area}: Cases over time\"\n self.line_plot(df=df, title=title, y_integer=True, **kwargs)\n return df.reset_index()", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 
'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def GenerateVersion(self):\n pol_obj = self.env['account.analytic.account']\n hist_obj = self.env['account.analytic.account']\n inv_obj = self.env['account.invoice']\n values = self.GeneratePolicy()\n # logger.info('\\n === values = %s' % values)\n policies = values.get('policy', [])\n res = []\n i = 0\n for policy in pol_obj.browse(policies):\n i += 1\n logger.info('version %s -> %s (%s / %s)' % (policy.name, policy.id, i, len(policies)))\n # search invoice\n inv_ids = inv_obj.search([('pol_numpol', '=', policy.name), ('id','in', values.get('invoice'))], order='prm_datedeb')\n # logger.info('=== inv_ids = %s' % inv_ids.mapped('prm_datedeb'))\n inv_len = len(inv_ids)\n c = 0\n for inv_id in inv_ids:\n c += 1\n # logger.info('inv_len = %s ?= %s c' % (inv_len,c))\n hist_buf = {\n 'type': 'contract',\n 'is_insurance': True,\n 'partner_id': policy.partner_id.id,\n 'property_account_position': policy.property_account_position.id,\n 'insured_id': policy.insured_id.id,\n 'manager_id': policy.manager_id.id,\n 'branch_id': policy.branch_id.id,\n 'ins_product_id': policy.ins_product_id.id,\n 'fraction_id': policy.fraction_id.id or False,\n #'name': policy.name + '_' + str(c).zfill(4) + 'AA',\n 'name': policy.name + '_' + str(policy.next_sequence).zfill(4) + 'AA',\n 'parent_id': policy.id,\n 'date_start': inv_id.prm_datedeb,\n 'date': inv_id.prm_datefin,\n 'agency_id': inv_id.journal_id.agency_id.id or False,\n 'invoice_id': inv_id.id,\n 'stage_id': self.env.ref('insurance_management.avenant').id\n }\n if c == inv_len:\n hist_buf['is_last_situation'] = True\n hist_ids = hist_obj.search([('name','=', hist_buf.get('name'))])\n if not hist_ids:\n # logger.info('===> create history')\n res.append(hist_obj.create(hist_buf).id)\n next_sequence = policy.next_sequence + 1\n policy.write({'next_sequence': next_sequence})\n else:\n hist_ids.update(hist_buf)\n res += hist_ids.ids\n return res", "def income_model_user_defined_returns(num_of_years=30, trials=100, method='normal'):\n\n print(\"Running method income_model_user_defined_returns()\")\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", 
sheet_name='income_assets_returns_estimates',\n index_col=[0])\n clean_names = list(read_returns_est.index)\n clean_names = [s.split(' ')[0] for s in clean_names]\n read_returns_est.loc[:, 'names'] = clean_names\n read_returns_est.set_index('names', drop=True, inplace=True)\n read_returns_est = read_returns_est[:-1]\n read_returns_est.rename(index={'SBMMTB3': 'Cash', read_returns_est.index[-1]: 'FIA'}, inplace=True)\n\n # ---------------Returns DataFrame based on the user input------------------------------------\n ann_ret = np.full((num_of_years + 1, len(read_returns_est)), read_returns_est.loc[:, 'Annualized Returns'])\n read_normal = pd.DataFrame(ann_ret, index=np.arange(num_of_years + 1), columns=read_returns_est.index)\n # read_normal.rename(columns={read_normal.columns[-1]: 'FIA'}, inplace=True)\n user_est_fia_return = float(read_income_inputs.loc['fia_forecast', 'inputs'])\n read_normal.loc[:, 'FIA'] = user_est_fia_return\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n\n # ann_ret = np.full((num_of_years+1, len(read_returns_est)), read_returns_est.loc[:, 'Annualized Returns'])\n # read_normal = pd.DataFrame(ann_ret, index=np.arange(num_of_years+1), columns=read_returns_est.index)\n # # read_normal.rename(columns={read_normal.columns[-1]: 'FIA'}, inplace=True)\n # user_est_fia_return = float(read_income_inputs.loc['fia_forecast', 'inputs'])\n # read_normal.loc[:, 'FIA'] = user_est_fia_return\n\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = 
float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # --------------------BASE MODEL---------------------------------------------\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = 
list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in range(len(r_cols)):\n ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n # ---------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n\n # ---------income breakdown for FIA portfolio----------------------------------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 
0.90])\n\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----drop year 0--------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ----------------quantile analysis for base terminal value--------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ----------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ----------------\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n col_names = ['50th', 'age', 'comment']\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary_custom.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_income_quantiles')\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[:, 'ending_contract_value'] = income_df.loc[:, 'contract_value']\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n terminal_val = pd.read_csv(src + 'terminal_values.csv', index_col=[0])\n ending_val = pd.read_csv(src + 'ending_values.csv', index_col=[0])\n ending_val_ror = pd.read_csv(src + 'ending_values_ror.csv', index_col=[0])\n\n terminal_val.to_excel(writer, sheet_name='terminal_values')\n ending_val.to_excel(writer, sheet_name='port_ending_values')\n ending_val_ror.to_excel(writer, sheet_name='port_annual_growth')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value 
> 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed for {}\".format(method))", "def prepare_invoice(self):\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define sales journal for this company: \"%s\" (id:%d).') % (self.company_id.name, self.company_id.id))\n invoice_vals = {\n 'order_id': self.id,\n 'name': self.order_no,\n 'origin': self.order_no,\n 'type': 'out_invoice',\n 'reference': self.patient_id.name + ':' + self.name,\n 'account_id': self.patient_id.partner_id.property_account_receivable_id.id,\n 'partner_id': self.patient_id.partner_id.id,\n 'journal_id': journal_id,\n 'comment': self.note,\n 'doctor_id': self.doctor_id.id,\n 'payment_term': False,\n 'user_id': False,\n }\n return invoice_vals", "def prepare_data(self):\r\n annual_df = self.annual_df\r\n coef_df = self.coef_df\r\n quarter_df = self.quarter_df\r\n # historical_df = self.historical_df\r\n Event_Buffer = self.Event_Buffer\r\n\r\n Tot_Prod = coef_df[\"Product\"].nunique()\r\n # Tot_Week = coef_df[\"wk\"].nunique()\r\n Tot_Week = 52\r\n\r\n EDLP_Events = list(annual_df[\"RP_Events\"])\r\n Min_EDLP_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in EDLP_Events\r\n ]\r\n Max_EDLP_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in EDLP_Events\r\n ]\r\n\r\n TPR_Events = list(annual_df[\"TPR_Events\"])\r\n Min_TPR_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in TPR_Events\r\n ]\r\n Max_TPR_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in TPR_Events\r\n ]\r\n\r\n Target_EDLP_Spend = [i for i in annual_df[\"PPG_RP_Spend\"]]\r\n Target_TPR_Spend = [i for i in annual_df[\"PPG_TPR_Spend\"]]\r\n Target_Trade_Spend = [i for i in annual_df[\"PPG_Total_Spend\"]]\r\n\r\n Mapping = {}\r\n Prod_Ind = coef_df[\"Product\"][0:Tot_Prod]\r\n for i, j in zip(Prod_Ind.index, Prod_Ind.values):\r\n Mapping[j] = i\r\n Mapping_reverse = {i: j for j, i in Mapping.items()}\r\n\r\n constants = [i for i in coef_df[\"constant\"]]\r\n\r\n Cat_Coef = coef_df[\"Catalogue\"][0:Tot_Prod]\r\n\r\n Disp_Coef = coef_df[\"Display\"][0:Tot_Prod]\r\n\r\n Base_Price_stg1 = [i for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg1 = []\r\n for pr in range(Tot_Prod):\r\n Intercepts_stg1.append(\r\n np.mean([constants[j * Tot_Prod + pr] for j in range(0, Tot_Week)])\r\n )\r\n\r\n Base_Price_stg2 = [[i] * Tot_Week for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg2 = [\r\n constants[j : j + Tot_Prod] for j in 
range(0, len(constants), Tot_Prod)\r\n ] # noqa\r\n\r\n EDLP_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Regular\") == 1]]\r\n )\r\n TPR_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Promoted\") == 1]]\r\n )\r\n\r\n # ################################ Available EDLP Interactions pairs ##############################\r\n\r\n EDLP = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Regular\") > 1\r\n ]\r\n EDLP_Interactions = []\r\n for i in EDLP:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n EDLP_Interactions.append(temp)\r\n\r\n # ###################################### Available TPR Interactions pairs #########################\r\n\r\n TPR = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Promoted\") > 1\r\n ]\r\n TPR_Interactions = []\r\n for i in TPR:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n TPR_Interactions.append(temp)\r\n\r\n # ###################################### EDLP_Interaction_Coef_Values ############################\r\n\r\n EDLP_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Regular\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n EDLP_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ###################################### TPR_Interaction_Coef_Values #############################\r\n\r\n TPR_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Promoted\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n TPR_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ##################################### Loading Pantry Loading Coefficients #######################\r\n\r\n Pantry_1 = list(coef_df[\"Pantry_Loading_1\"])\r\n Pantry_1 = [\r\n Pantry_1[j : j + Tot_Prod] for j in range(0, len(Pantry_1), Tot_Prod)\r\n ]\r\n Pantry_2 = list(coef_df[\"Pantry_Loading_2\"])\r\n Pantry_2 = [\r\n Pantry_2[j : j + Tot_Prod] for j in range(0, len(Pantry_2), Tot_Prod)\r\n ]\r\n\r\n # TE_Coeff = np.array(Promo_df[[\"TE_Promo\",\"TE_NoPromo\"]])\r\n self.Tot_Prod = Tot_Prod\r\n self.Tot_Week = Tot_Week\r\n self.EDLP_Events = EDLP_Events\r\n self.Min_EDLP_Events = Min_EDLP_Events\r\n self.Max_EDLP_Events = Max_EDLP_Events\r\n self.TPR_Events = TPR_Events\r\n self.Min_TPR_Events = Min_TPR_Events\r\n self.Max_TPR_Events = Max_TPR_Events\r\n\r\n self.Target_EDLP_Spend = Target_EDLP_Spend\r\n self.Target_TPR_Spend = Target_TPR_Spend\r\n self.Target_Trade_Spend = Target_Trade_Spend\r\n self.Mapping = Mapping\r\n self.Mapping_reverse = Mapping_reverse\r\n self.constants = constants\r\n self.EDLP_Coef = EDLP_Coef\r\n self.TPR_Coef = TPR_Coef\r\n\r\n self.EDLP_Interactions = EDLP_Interactions\r\n self.TPR_Interactions = TPR_Interactions\r\n self.EDLP_Int_Coef_Values = EDLP_Int_Coef_Values\r\n self.TPR_Int_Coef_Values = TPR_Int_Coef_Values\r\n self.Pantry_1 = Pantry_1\r\n self.Pantry_2 = Pantry_2\r\n\r\n self.Base_Price_stg1 = Base_Price_stg1\r\n self.Intercepts_stg1 = Intercepts_stg1\r\n self.Base_Price_stg2 = Base_Price_stg2\r\n self.Intercepts_stg2 = Intercepts_stg2\r\n\r\n self.Cat_Coef = Cat_Coef\r\n self.Disp_Coef = Disp_Coef", "def __init__(self, numQueues, rate, start_hour, end_hour, appt_low, appt_high):\n\n self.rate = rate\n self.numQueues = numQueues\n self.start = datetime.datetime.combine(datetime.date.today(), datetime.time(start_hour,0,0))\n self.end = 
datetime.datetime.combine(datetime.date.today(), datetime.time(end_hour,0,0))\n self.appt_low = appt_low\n self.appt_high = appt_high\n minutes_for_new_items = (end_hour-start_hour)*60 #new patients seen between 9AM and 4PM\n time_between_items = rate #exponential dist. time parameter\n self.expected_count = int(np.ceil(stats.poisson.ppf(.9999, minutes_for_new_items/time_between_items)))\n self.ques = [datetime.datetime.combine(datetime.datetime.today(), datetime.time(start_hour,0,0)) for i in range(0, self.numQueues)]\n cols = ['simulation', 'num_items', 'wait_count', 'avg_wait_time', 'close_time']\n self.results = pd.DataFrame(columns = cols)\n return", "def run(params, conn, outputfile):\n date_begin = parse(params['date_begin'] + ' 00:00:00 +0700')\n date_end = parse(params['date_end'] + ' 23:59:59 +0700')\n domain_id = params['domain_id']\n authority_ids = params['authority_ids']\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19', domain_id)\n main_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19-followup', domain_id)\n follow_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n line_list = join(main_data, follow_data)\n tabular(line_list)\n\n if len(line_list) == 0:\n return False\n\n df = pandas.DataFrame(line_list)\n df['date'] = df['date'].dt.tz_convert(tz)\n df['date'] = df['date'].dt.strftime('%d/%m/%Y %H:%M')\n writer = pandas.ExcelWriter(outputfile)\n df.to_excel(writer, 'covid-19', columns=['report_id', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'date', 'latitude', 'longitude',\n '01', '02', '03', '04', '05', '06',\n '07', '08', '09', '10', '11', '12', '13', '14'], index=False)\n ldf = pandas.DataFrame(flat(main_data))\n ldf['date'] = ldf['date'].dt.tz_convert(tz)\n ldf.sort_values(by=['date'], inplace=True)\n ldf['date'] = ldf['date'].dt.strftime('%d/%m/%Y %H:%M')\n\n def is_followup(row):\n return row['report_id'] != row['group_id']\n\n ldf['followup'] = ldf.apply(is_followup, axis=1)\n ldf.to_excel(writer,\n 'all',\n columns=['report_id', 'group_id', 'followup', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'total_times', 'activity_other',\n 'date', 'latitude', 'longitude'],\n index=False)\n writer.save()\n return True", "def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id", "def get_rows(self):\n rows = []\n common_fields = self.get_common_data()\n\n p_identities = self.data['participantIdentities']\n p_data = self.data['participants']\n winning_team = self.get_winning_team()\n items_purchased = self.get_items_purchased()\n team_champions = self.get_team_champions()\n teams = set(team_champions.keys())\n gold_per_player = self.get_gold_per_player()\n xp_per_player = self.get_xp_per_player()\n gold_diff = self.get_gold_diff()\n xp_diff = 
self.get_xp_diff()\n dmg_taken_diff = self.get_dmg_taken_diff()\n\n for p in p_identities:\n p_id = int(p['participantId'])\n p_idx = p_id - 1\n team_id = p_data[p_idx]['teamId']\n opposing_team_id = (teams - {team_id}).pop()\n player_purchases = items_purchased[p_idx]\n purchase_list = [item_pair[0] for item_pair in player_purchases]\n items_10min = {\n item_pair[0] for item_pair in player_purchases\n if item_pair[1] < 1000*60*10\n }\n cur_row = {\n 'summonerId': p['player']['summonerId'],\n\n 'allyChampions': team_champions[team_id],\n 'championId': p_data[p_idx]['championId'],\n 'gold5': gold_per_player[p_idx].get(5, None),\n 'gold10': gold_per_player[p_idx].get(10, None),\n 'gold15': gold_per_player[p_idx].get(15, None),\n 'gold20': gold_per_player[p_idx].get(20, None),\n 'xp5': xp_per_player[p_idx].get(5, None),\n 'xp10': xp_per_player[p_idx].get(10, None),\n 'xp15': xp_per_player[p_idx].get(15, None),\n 'xp20': xp_per_player[p_idx].get(20, None),\n 'items10': list(items_10min),\n 'opponentChampions': team_champions[opposing_team_id],\n 'purchases': purchase_list,\n 'lane': p_data[p_idx]['timeline']['lane'],\n 'role': p_data[p_idx]['timeline']['role'],\n 'xpDiff10': xp_diff[p_idx],\n 'damageTakenDiff10': dmg_taken_diff[p_idx],\n 'gdPerMin10': gold_diff[p_idx],\n 'winner': (team_id == winning_team),\n }\n\n cur_row.update(common_fields)\n rows.append(cur_row)\n\n return rows", "def show_outstanding_invoices(self, show_outstanding_invoices):\n\n self._show_outstanding_invoices = show_outstanding_invoices", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set" ]
[ "0.58933586", "0.58273345", "0.5787141", "0.5730418", "0.57022715", "0.56592727", "0.5587117", "0.5582993", "0.5557562", "0.55526274", "0.55505604", "0.55257595", "0.5520605", "0.54978883", "0.5488627", "0.5473216", "0.5465187", "0.54585433", "0.54323196", "0.53706306", "0.5359521", "0.5348314", "0.53445005", "0.5337536", "0.5308676", "0.52830005", "0.52545595", "0.5254459", "0.5245022", "0.5233254", "0.52227414", "0.52169895", "0.5212123", "0.52061456", "0.51900005", "0.5183848", "0.51801354", "0.51608694", "0.5129148", "0.5118703", "0.51106626", "0.51043624", "0.51041967", "0.5069794", "0.5069502", "0.5053597", "0.50529736", "0.5030675", "0.50295323", "0.5025132", "0.50219774", "0.5016678", "0.50110257", "0.50099194", "0.50089276", "0.5001617", "0.49978667", "0.49924296", "0.49906254", "0.4988517", "0.49851596", "0.4981629", "0.49691814", "0.49354625", "0.4933138", "0.49310076", "0.49308822", "0.49284595", "0.49177936", "0.49116883", "0.49013725", "0.48992148", "0.4895135", "0.48943272", "0.4893423", "0.48932913", "0.48863798", "0.4880579", "0.48781058", "0.48766106", "0.48757657", "0.4875047", "0.48737803", "0.48716915", "0.48674923", "0.48625204", "0.4858591", "0.4856049", "0.48459306", "0.48428443", "0.48425218", "0.481653", "0.48075104", "0.48051313", "0.48012373", "0.4795892", "0.47882003", "0.47876215", "0.47791627", "0.47791484" ]
0.61920017
0
Returns a dataframe with all invoice lines for the customerID given as a parameter.
def get_customer_history_df_invoice_line(self, customerID): df_invoice_line \ = self._df_invoice_original[self._df_invoice_original.CustomerID \ == customerID] return df_invoice_line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def invoice(customer_id):\n encoder = request.url_rule.endpoint\n template = \"{{ encoder }}#{{ customer_id|%s }}\" % encoder\n return render_template_string(template, **locals())", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 
'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID", "def get_rows(contract_address: str, file_name: str, receipts_filename: str) -> pd.DataFrame():\n receipts_df = pd.read_csv(receipts_filename)\n receipts_df = receipts_df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n df = pd.read_csv(file_name)\n df = df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n\n df = df.loc[receipts_df['status'] == 1] \n df = df.loc[df[\"to_address\"] == contract_address.lower()]\n df = df.reset_index()\n df = df.drop(columns='index')\n return df", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, 
columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def return_customer_orders(customer_id):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_order, id_customer, id_product, quantity, total_price,\n payment_status, send_status, order_date, location\n FROM Orders\n Where id_customer=?\n \"\"\",\n (customer_id,))\n return cursor.fetchall()", "def invoice(self, id):\r\n return Invoice(self, id)", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)", "def get_all_sales_ids_for_customer_ids():\n\n # your code", "def invoices(self):\r\n return Invoices(self)", "def invoices(self):\r\n return inv.Invoices(self)", "def invoice(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/invoices/invoices/{params['invoice_id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n invoice = result[\"response\"][\"result\"][\"invoice\"]\n invoice_obj = FreshbooksInvoice(\n account_id=invoice['accountid'],\n customerid=invoice['customerid'], \n invoice_id=invoice['invoiceid'],\n currency_code=invoice['currency_code'],\n language=invoice['language'],\n terms=invoice['terms'],\n discount_value=invoice['discount_value'],\n discount_amount=invoice['discount_total']['amount'],\n invoice_number=invoice['invoice_number'],\n po_number=invoice['po_number'],\n amount=invoice['amount']['amount'],\n code=invoice['amount']['code'],\n create_date=invoice['create_date']\n )\n return invoice_obj.__dict__", "def invoices(self, account_id):\n from pureport_client.commands.accounts.invoices import Command\n return Command(self.client, account_id)", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n 
if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def invoice(self, invoice_number):\r\n return inv.Invoice(self, invoice_number)", "def get_all_customer_ids():\n\n # your code", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def get_customer_orders(customerId):\n data = user_obj.get_customer_orders(customerId)\n return data", "def ListInvoices(self, **kwargs):\n return self._stub.ListInvoices(ln.ListInvoiceRequest(**kwargs))", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def get_all_customer_ids_from_table(table):\n\n # your code", "def _get_invoices_for_payment(cls, account_id: int) -> List[InvoiceModel]:\n valid_statuses = (InvoiceStatus.APPROVED.value, InvoiceStatus.REFUND_REQUESTED.value)\n invoice_ref_subquery = db.session.query(InvoiceReferenceModel.invoice_id). 
\\\n filter(InvoiceReferenceModel.status_code.in_((InvoiceReferenceStatus.ACTIVE.value,)))\n\n invoices: List[InvoiceModel] = db.session.query(InvoiceModel) \\\n .filter(InvoiceModel.invoice_status_code.in_(valid_statuses)) \\\n .filter(InvoiceModel.payment_method_code == PaymentMethod.EJV.value) \\\n .filter(InvoiceModel.payment_account_id == account_id) \\\n .filter(InvoiceModel.id.notin_(invoice_ref_subquery)) \\\n .all()\n return invoices", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def get_listCustomer_out_sample(self, customerCount=10):\n \n if customerCount is None :\n listCustomer= list(self._df_invoice_line_out_sample.CustomerID.unique())\n 
else:\n if customerCount <= 0 :\n listCustomer \\\n = list(self._df_invoice_line_out_sample.CustomerID.unique())\n else:\n listCustomer \\\n = list(self._df_invoice_line_out_sample.CustomerID.unique()[:customerCount])\n return listCustomer", "def _get_lines(self, cr, uid, ids, context=None):\n List=[]\n if ids:\n line = self.pool.get('payment.enrich.lines').browse(cr, uid, ids[0], context=context)\n \n record = line.enrich_id\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n record.write(res)\n return List", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"Closure to add single customer details\"\"\"\n with open(rental_items, 'r', newline='') as rentals:\n reader = csv.reader(rentals)\n add_invoice_items = partial(add_furniture, invoice_file, customer_name)\n for row in reader:\n add_invoice_items(item_code=row[0],\n item_description=row[1],\n item_monthly_price=row[2])\n return customer_rental", "def invoice_lines(self, invoice_lines):\n if self.local_vars_configuration.client_side_validation and invoice_lines is None: # noqa: E501\n raise ValueError(\"Invalid value for `invoice_lines`, must not be `None`\") # noqa: E501\n\n self._invoice_lines = invoice_lines", "def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID", "def billing_history(cls, user=None):\n invoices = Invoice.query.filter(Invoice.user_id == user.id) \\\n .order_by(Invoice.created_on.desc()).limit(12)\n\n return invoices", "def customerReport(self):\n self._setFormat()\n for cust in self.getCustomerAccountData():\n self.output.write(self.form_line(cust))", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n 
df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def get_invoices(self, limit=50, closed=False, get_all=False):\n mask = \"mask[invoiceTotalAmount, itemCount]\"\n _filter = {\n 'invoices': {\n 'createDate': {\n 'operation': 'orderBy',\n 'options': [{\n 'name': 'sort',\n 'value': ['DESC']\n }]\n },\n 'statusCode': {'operation': 'OPEN'},\n }\n }\n if closed:\n del _filter['invoices']['statusCode']\n\n return self.client.call('Account', 'getInvoices', mask=mask, filter=_filter, iter=get_all, limit=limit)", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def df(client_ids, start, end):\n obj = search(client_ids, start, end)\n df = DataFrame.from_dict(obj).T\n\n if df.empty:\n return df\n\n df.index.name = 'client_id'\n df = df.rename(columns={ 0: 'inactive', 1: 'active' })\n df['total'] = df.sum(axis=1)\n df = df.fillna(0).astype('int64')\n\n return df", "def open_invoices(self):\n return {\n 'domain': \"[('id', 'in', \" + str(self.invoice_ids.ids) + \" )]\",\n 'name': 'Invoices',\n 'view_mode': 'tree,form',\n 'res_model': 'account.move',\n 'type': 'ir.actions.act_window',\n }", "def 
get_invoiced_lot_values(self):\n self.ensure_one()\n\n if self.state == 'draft':\n return []\n\n sale_orders = self.mapped('invoice_line_ids.sale_line_ids.order_id')\n stock_move_lines = sale_orders.mapped('picking_ids.move_lines.move_line_ids')\n\n # Get the other customer invoices and refunds.\n ordered_invoice_ids = sale_orders.mapped('invoice_ids') \\\n .filtered(lambda i: i.state not in ['draft', 'cancel']) \\\n .sorted(lambda i: (i.invoice_date, i.id))\n\n # Get the position of self in other customer invoices and refunds.\n self_index = None\n i = 0\n for invoice in ordered_invoice_ids:\n if invoice.id == self.id:\n self_index = i\n break\n i += 1\n\n # Get the previous invoice if any.\n previous_invoices = ordered_invoice_ids[:self_index]\n last_invoice = previous_invoices[-1] if len(previous_invoices) else None\n\n # Get the incoming and outgoing sml between self.invoice_date and the previous invoice (if any).\n write_dates = [wd for wd in self.invoice_line_ids.mapped('write_date') if wd]\n self_datetime = max(write_dates) if write_dates else None\n last_write_dates = last_invoice and [wd for wd in last_invoice.invoice_line_ids.mapped('write_date') if wd]\n last_invoice_datetime = max(last_write_dates) if last_write_dates else None\n\n def _filter_incoming_sml(ml):\n if ml.state == 'done' and ml.location_id.usage == 'customer' and ml.lot_id:\n if last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n def _filter_outgoing_sml(ml):\n if ml.state == 'done' and ml.location_dest_id.usage == 'customer' and ml.lot_id:\n if last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n incoming_sml = stock_move_lines.filtered(_filter_incoming_sml)\n outgoing_sml = stock_move_lines.filtered(_filter_outgoing_sml)\n\n # Prepare and return lot_values\n qties_per_lot = defaultdict(lambda: 0)\n if self.type == 'out_refund':\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n else:\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n lot_values = []\n for lot_id, qty in qties_per_lot.items():\n if float_is_zero(qty, precision_rounding=lot_id.product_id.uom_id.rounding):\n continue\n lot_values.append({\n 'product_name': lot_id.product_id.display_name,\n 'product_color': lot_id.x_studio_color.x_name,\n 'quantity': qty,\n 'uom_name': lot_id.product_uom_id.name,\n 'lot_name': lot_id.name\n })\n #AQUI ORDENO TODOS LOS LOTES QUE ME QUEDAN EN lot_values POR EL COLOR\n lot_values.sort(key=lambda r: r['product_color'], reverse=False)\n return lot_values", "def invoiceitem(self, id):\r\n return InvoiceItem(self, id)", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for 
purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def invoice_line_create(self, invoice_id, qty):\n invoice_lines = self.env['account.invoice.line']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'purchase_line_id': line.id})\n invoice_lines |= self.env['account.invoice.line'].create(vals)\n return invoice_lines", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], 
limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def filter_by_customer(table, customer):\n transactions = []\n for record in table:\n if record[CUSTOMER_ID] == customer:\n transactions.append(record)\n return transactions", "def customer(self, customer_id=None):\r\n return customers.Customer(self, customer_id)", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"\n Loop through rental_items file and append each row to curried invoice_file with same\n customer_name\n \"\"\"\n customer = partial(add_furniture, invoice_file=invoice_file, customer_name=customer_name)\n with open(rental_items, \"r\") as rental_csv:\n for row in csv.reader(rental_csv):\n customer(item_code=row[0], item_description=row[1], item_monthly_price=row[2])\n return customer_rental", "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , 
join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n #----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return", "def bc_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.bc_records(run_idxs))", "def get(self):\n user = get_authenticated_user()\n if not user.stripe_id:\n raise NotFound()\n\n return get_invoices(user.stripe_id)", "def get_customer_segments(self, date):\n date = current_date_to_day().isoformat() if date is None else date\n self.products = pd.merge(self.products,\n self.cs.fetch(start_date=convert_dt_to_day_str(date))[['client', 'segments']],\n on='client', how='left')", "def getInvoice(self):\n return self.base.get(\"invoice\", [])", "def single_customer(customer_name, invoice_file):\n def single_customer_rentals(rental_items):\n add_item = partial(add_furniture, customer_name=customer_name,\n invoice_file=invoice_file)\n with open(rental_items, \"r\") as file:\n for row in csv.reader(file):\n add_item(item_code=row[0], item_description=row[1],\n item_monthly_price=row[2])\n return single_customer_rentals", "def show_all_customers():\n return cr.show_all_customers()", "def extract_table_customer(last_id, db_engine):\n\n if last_id == None:\n last_id = -1\n\n query = \"SELECT * FROM customer WHERE customer_id > {} LIMIT 100000\".format(\n last_id)\n return pd.read_sql(query, db_engine)", "def invoiceitems(self):\r\n return InvoiceItems(self)", "def test_single_customer(_full_invoice):\n\n test_invoice = \"../data/test-invoice.csv\"\n items_to_insert = \"../data/items.csv\"\n csv_contents = []\n\n function = l.single_customer(\"Kyouma Hououin\", test_invoice)\n function(items_to_insert)\n\n with open(test_invoice, \"r\") as csv_file:\n contents = reader(csv_file, delimiter=',')\n for line in contents:\n if line != []:\n csv_contents += [line]\n\n csv_contents += contents\n\n assert _full_invoice == csv_contents", "def draw_header(canvas, invoice):\n\n canvas.setLineWidth(2)\n canvas.line(2 * cm, -4 * cm, 19 * cm, -4 * cm)\n \"\"\" Draws the business address \"\"\"\n business_details = settings.BUSINESS_DETAIL\n business_data = []\n for line in business_details:\n business_data.append([line])\n\n table = Table(business_data, colWidths=[17 * cm], rowHeights=[15, 17, 11, 11, 11, 11, 11])\n table.setStyle([\n ('FONT', (0, 0), (-1, -1), 'Helvetica-Oblique'),\n ('FONTSIZE', (0, 0), (0, 0), 14),\n ('FONTSIZE', (0, 1), (0, -1), 6),\n ('TEXTCOLOR', (0, 0), (-1, -1), (0.2, 0.2, 0.2)),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('BACKGROUND', (0, 0), (-1, -1), (0.95, 0.95,0.95)),\n ])\n tw, th, = table.wrapOn(canvas, 2 * cm, 19 * cm)\n table.drawOn(canvas, 2 * cm, -4 * cm)", "def get_invoices(\n page_number=None,\n limit=10,\n filter_by=None,\n filter_value=None,\n order_by=None\n):\n where_filter = \"\"\n \n valid_fields = {\n 'month': 'ReferenceMonth',\n 'year': 'ReferenceYear',\n 'document': 'Document'\n }\n\n ob_fields = []\n for ob in order_by:\n field = valid_fields.get(ob)\n if field:\n 
ob_fields.append(field)\n\n order_by = ob_fields\n\n if not order_by:\n order_by = [\"CreatedAt\"]\n\n order_by = [ob + \" ASC\" for ob in order_by]\n order_by = \", \".join(order_by)\n\n if page_number:\n pagination_filter = f\"\"\"\n id NOT IN (\n SELECT\n id\n FROM\n invoice\n ORDER BY\n {order_by} LIMIT {limit*(page_number-1)} )\n \"\"\"\n where_filter += \"AND \" + pagination_filter\n order_by += f\" LIMIT {limit}\"\n \n if filter_by and filter_value:\n if filter_by == 'document':\n filter_value = f'\"{filter_value}\"'\n\n where_filter += f\"AND {filter_by} = {filter_value}\"\n \n query = f\"\"\"\n SELECT\n id,\n ReferenceMonth AS month,\n ReferenceYear AS year,\n Document AS document,\n Description AS description,\n Amount AS amount\n FROM\n invoice\n WHERE\n IsActive = 1\n {where_filter}\n ORDER BY\n {order_by}\n ;\n \"\"\"\n\n try:\n conn = sqlite3.connect(DATABASE)\n conn.row_factory = dict_factory\n cursor = conn.cursor()\n cursor.execute(query)\n result = cursor.fetchall()\n return result, True\n except:\n return [], False", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def print_customers(self):\n output = ''\n for i in range(len(self.customers)):\n output += f'Customer no. 
{self.customers[i].id} is in {self.customers[i].state[0]} section\\n'\n #print(output)\n with open('oneday.csv','a') as outfile:\n for i in range(len(self.customers)):\n outfile.write(f'{self.get_time()};{self.customers[i].id};{self.customers[i].state[0]}\\n')", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return", "def get_customers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Company' THEN cus.customer_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. 
Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tcus.website as 'Internet',\n\t\t\tcus.tax_id as 'Steuernummer'\n\n\t\tFROM `tabCustomer` cus\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = cus.name\n\t\t\tand par.parenttype = 'Customer'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = cus.name\n\t\t\tand dyn_adr.link_doctype = 'Customer'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)", "def fetch_customer_info_identities(self, client_id):\n\n try:\n return self._make_private_api_request(\n method=PyttributionIo.GET_REQUEST,\n endpoint='customers',\n subject_id=client_id,\n show_identities='true'\n ).get('customer')\n except RequestException as e:\n logger.error('Pyttribution.io: Retrieval of customer identities failed with HTTP status {exception}'.format(\n exception=e))", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def get_invoiceitems(self):\n from invoicing.models import InvoiceItem\n\n invoiceitem_list = []\n # First we get all the product invoiceitems\n for product in self.products: # TODO: SOLVE BUNDLED PRODUCTS!\n item = InvoiceItem()\n # Get the copies for this product, when used on with_copies\n item.copies = product[1]\n # Add the amount of frequency if necessary\n frequency_extra = (\n _(\" {} months\".format(self.frequency)) if self.frequency > 1 else \"\"\n )\n item.description = product[0].name + frequency_extra\n item.price = product[0].price * self.frequency\n item.amount = item.price * item.copies\n item.product = product[0]\n item.subscription = self\n # TODO: Service from, service to\n invoiceitem_list.append(item)\n\n # Next, we append all discount invoiceitems\n for discount in self.get_discounts():\n discount_item = InvoiceItem()\n # Add the amount of frequency if necessary\n frequency_extra = (\n _(\" {} months\".format(self.frequency)) if self.frequency > 1 else \"\"\n )\n discount_item.description = discount[\"description\"] + frequency_extra\n discount_item.amount = discount[\"amount\"] * self.frequency\n discount_item.type_dr = discount[\"type_dr\"]\n discount_item.type = discount[\"type\"]\n discount_item.subscription = self\n invoiceitem_list.append(discount_item)\n\n return invoiceitem_list", "def show_table(table, has_customer_id=True):\n titles = [\"ID\", \"Title\", \"Price\", \"Date\"]\n if has_customer_id:\n titles.append(\"Customer ID\")\n output_table = [[row[ID], row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY]))), row[CUSTOMER_ID]] for row in table]\n else:\n output_table = [[row[ID], row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY])))] for row in table]\n\n ui.clear_scr()\n 
ui.print_table(output_table, titles, TITLE)", "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = 
recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def top_ten_customers(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['customer_id']).agg({'total_payment': 'sum'})\n data_set = data_set.nlargest(10, 'total_payment')\n return data_set", "def get_customer_data(self, customer_id):\n customer_url = urllib.parse.urljoin(self.base_url,\n \"/api/v1/pgecustomers/{}\".format(customer_id))\n response = requests.get(customer_url, headers=self.headers)\n\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(response)", "def display_customers_list():\n selected_path = 
request.url_rule\n selected_customer_list = get_selected_path(selected_path)\n page_number = 0\n if 'page' in request.args:\n page_number = int(request.args.get('page'))\n customers, pagination = generate_pagination(page_number, selected_customer_list)\n total_number_of_records = get_number_of_records(selected_customer_list)\n return render_template(\"clients.html\", domain_name= DOMAIN_NAME, selected_customer_list=selected_customer_list, customers=customers, pagination=pagination, url_path=selected_path, total_number_of_records=total_number_of_records, phone_error=None)", "def _generate_invoice_report(self, request, queryset):\n logger.info('Generating invoice report for model {}'.format(\n queryset.model\n ))\n data = self._get_report_data(request, queryset)\n content = self._get_pdf_content(data)\n file_name = '{}-{}.pdf'.format(\n self._invoice_report_name, data['id'],\n )\n return generate_pdf_response(content, file_name)", "def get_mall_data(): \n filename = 'mall_customers.csv'\n \n if os.path.isfile(filename):\n return pd.read_csv(filename, index_col=0)\n else: \n df = pd.read_sql(\"\"\"select * from customers\"\"\", get_connection('mall_customers'))\n df.to_csv(filename)\n return df", "def get_customer_viewmodel(self, customer_id):\n credit_card_list = []\n customers = self._customer_repo.get_customer_list()\n credit_cards = self._customer_repo.get_credit_card_list()\n for customer in customers:\n if customer.get_customer_id() == customer_id:\n customer_first_name = customer.get_first_name()\n customer_last_name = customer.get_last_name()\n country = customer.get_country()\n for credit_card in credit_cards:\n if credit_card.get_customer_id() == customer_id:\n credit_card_list.append(credit_card.get_card_number())\n customer_to_view = CustomerViewModel(customer_id, customer_first_name,\n customer_last_name, country, credit_card_list)\n return customer_to_view", "def get_billing_items(self, identifier):\n\n mask = \"\"\"mask[\n id, description, hostName, domainName, oneTimeAfterTaxAmount, recurringAfterTaxAmount, createDate,\n categoryCode,\n category[name],\n location[name],\n children[id, category[name], description, oneTimeAfterTaxAmount, recurringAfterTaxAmount]\n ]\"\"\"\n return self.client.call(\n 'Billing_Invoice',\n 'getInvoiceTopLevelItems',\n id=identifier,\n mask=mask,\n iter=True,\n limit=100\n )", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def from_invoice_and_line_item(cls, invoice: InvoiceModel, line_item: LineItemModel, line_number: int,\n distribution: str):\n # Note the invoice_date should be the payment_date in the future.\n return cls(total=line_item.total, invoice_number=invoice.id,\n line_number=line_number,\n is_reversal=invoice.invoice_status_code in\n [InvoiceStatus.REFUNDED.value, InvoiceStatus.REFUND_REQUESTED.value],\n distribution=distribution)", "def load_customers(dir):\n customSchema = StructType([ \\\n StructField(\"customerId2\", IntegerType(), True), \\\n StructField(\"churnlabel\", IntegerType(), True), \\\n StructField(\"gender\", StringType(), True), \\\n StructField(\"shippingCountry\", StringType(), True), \\\n StructField(\"dateCreated\", StringType(), True), \\\n StructField(\"yearOfBirth\", IntegerType(), True), \\\n StructField(\"premier\", IntegerType(), True)])\n\n df = sqlContext.read.format('com.databricks.spark.csv') \\\n 
.options(header='false', delimiter='\\t', nullValue='\\\\N') \\\n .load(get_dir_customers(dir) + '/*', schema=customSchema)\n\n return df", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context)\n\n invoice_vals.update({\n 'partner_shipping_id': order.partner_shipping_id.id,\n })\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def landing(request):\n\n data = Invoice.objects.all().order_by(\"-number\")[:10]\n\n return render(request,\n \"invoice/invoice_landing.html\",\n {\n \"invoices\": data\n })", "def get_flat_file_data(kind: str, server: str='PROD', ID: str='42') -> DataFrame:\r\n k = {\r\n 'c': 'customer_data_{0}_{1}_.csv',\r\n 'b': 'vendor_data_{0}_{1}_.csv'\r\n }\r\n f = k[kind].format(server, ID)\r\n df = pd.read_csv(f'{BASE_DIR}/{f}', encoding='UTF-8')\r\n df = prepare_input_df(df)\r\n return df", "def stripe_invoices(self, stripe_invoices):\n\n self._stripe_invoices = stripe_invoices", "def upcoming(cls, customer_id):\n invoice = PaymentInvoice.upcoming(customer_id)\n\n return Invoice.parse_from_api(invoice)", "def return_orders():\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_order, id_customer, id_product, quantity, total_price,\n payment_status, send_status, order_date, location\n FROM Orders\n \"\"\")\n records = cursor.fetchall()\n return records", "def run_contig_records_dataframe(self, run_idxs, run_record_key):\n records = self.run_contig_records(run_idxs, run_record_key)\n return pd.DataFrame(records)", "def search_customer(cust_id):\n query = (Customer\n .select(Customer.customer_id, Customer.name, Customer.lastname,\n Customer.email, Customer.phone_number,\n Customer.home_address, Customer.status,\n Customer.credit_limit)\n .where(Customer.customer_id == cust_id))\n result = {}\n for person in query:\n result[\"customer_id\"] = person.customer_id\n result[\"name\"] = person.name\n result[\"lastname\"] = person.lastname\n result[\"email\"] = person.email\n result[\"phone_number\"] = person.phone_number\n result[\"home_address\"] = person.home_address\n result[\"status\"] = person.status\n result[\"credit_limit\"] = person.credit_limit\n return result", "def view_budget_lines(self, cr, uid, ids, context=None):\n ctx = context.copy()\n ctx['default_line_id'] = ids[0]\n ctx['allow_create'] = True\n # Return view with budget lines\n return {\n 'name': _('Budget lines'),\n 'domain': \"[('line_id', 'in', %s)]\" % ids,\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'account.balance.reporting.template.line.budget',\n 'context': ctx,\n 'type': 'ir.actions.act_window',\n }", "def get_all_customer_ids_from_table(table):\n return {row[CUSTOMER_ID] for row in table}", "def get_latest_invoice(self) -> CreditorInvoice:\n\n LOGGER.info(\"Getting latest invoice from EON Romania\")\n\n session = http.create_session()\n\n response = session.get(self._login_page_url)\n if response.status_code != 200:\n raise self.Error(\"Login page is not functioning\")\n\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n csrf_token_elem = soup.find(\"input\", {\"name\": \"_csrf_token\"})\n if not csrf_token_elem:\n raise self.Error(\"Could not extract CSRF token\")\n\n login_data = {\n \"_username\": self._email,\n \"_password\": self._password,\n \"_csrf_token\": 
csrf_token_elem.get(\"value\"),\n }\n\n if session.post(self._login_url, login_data).status_code != 200:\n raise self.AuthError()\n\n response = session.get(self._invoices_url)\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n\n if not soup.select_one(self._selectors.sidebar):\n raise self.AuthError()\n\n invoice_date_elem = soup.select_one(self._selectors.invoice_date)\n if not invoice_date_elem:\n raise self.Error(\"Failed to get invoice date\")\n\n invoice_due_date_elem = soup.select_one(self._selectors.invoice_due_date)\n if not invoice_due_date_elem:\n raise self.Error(\"Failed to get invoice due date\")\n\n invoice_payment_status_elem = soup.select_one(\n self._selectors.invoice_payment_status\n )\n if not invoice_payment_status_elem:\n raise self.Error(\"Failed to get invoice payment status\")\n\n invoice_amount_elem = soup.select_one(self._selectors.invoice_amount)\n if not invoice_amount_elem:\n raise self.Error(\"Failed to get invoice amount\")\n\n invoice_date = invoice_date_elem.contents[-1]\n invoice_due_date = invoice_due_date_elem.contents[-1]\n invoice_payment_status = invoice_payment_status_elem.contents[-1]\n invoice_amount = invoice_amount_elem.contents[-1]\n\n invoice = CreditorInvoice(\n float(invoice_amount.replace(\",\", \".\")),\n Currency.RON,\n datetime.strptime(invoice_date, \"%d.%m.%Y\"),\n datetime.strptime(invoice_due_date, \"%d.%m.%Y\"),\n PaymentStatus.PAID_CONFIRMED\n if invoice_payment_status == \"0.00\"\n else PaymentStatus.UNPAID,\n )\n\n LOGGER.info(\"Found latest EON Romania invoice\", invoice=invoice)\n return invoice", "def invoice(self, reference_no=None, with_vat=True):\n\n return self.invoice_class(apiobj=self, reference_no=reference_no)", "def customer(self, id):\r\n return Customer(self, id)", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': self.product_id.product_tmpl_id._get_product_accounts()['stock_input'].id,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.taxes_id.ids)],\n 'account_analytic_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n }\n return res", "def transactions_df():\n return pd.DataFrame(\n {\n \"user_id\": [1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"item_id\": [11, 22, 22, 11, 22, 33, 33, 33, 44],\n \"amount\": [10, 20, 30, 40, 50, 60, 70, 80, 90],\n }\n )", "def build_invoice(payment_object):\n # Fill html template with the domain orders and user profile info\n html_template = get_template('billing/billing_invoice.html')\n rendered_html = html_template.render({\n 'payment': payment_object,\n 'user_profile': payment_object.owner.profile,\n })\n # Create pdf file from a html file\n pdfkit.from_string(rendered_html, '/tmp/out.pdf')\n with open(\"/tmp/out.pdf\", \"rb\") as pdf_file:\n pdf_raw = pdf_file.read()\n os.remove(\"/tmp/out.pdf\")\n return {\n 'body': pdf_raw,\n 'filename': 'invoice_{}.pdf'.format(payment_object.transaction_id),\n }", "def get_ARNA_flight_log_as_df():\n flight_nums = [\n # 216,\n 217, 218, 219, 220, 221, 222, 223, 224, 225\n ]\n flight_IDs = ['C{}'.format(i) for i in flight_nums]\n dfs = []\n for flight_ID in flight_IDs:\n dfs += [get_summary4flight(flight_ID=flight_ID)]\n # Combine and return as a single dataframe sorted by time\n df = pd.concat(dfs)\n df = df.sort_index()\n return df", "def get_customer_count(self):\n 
return self._df_invoice_original.CustomerID.unique().shape[0]", "def predict_segment(self, df_invoice_line=None):\n if df_invoice_line is not None:\n self.data_transform(df_invoice_line) \n self.df_customers_features_build() \n else:\n pass\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n return y_pred[0]" ]
[ "0.6706535", "0.637564", "0.6069902", "0.59014344", "0.58117783", "0.5807765", "0.57650644", "0.56994027", "0.5648026", "0.5595924", "0.5531552", "0.55192786", "0.54935724", "0.54334897", "0.5429923", "0.5344283", "0.53376174", "0.5330216", "0.532311", "0.5300056", "0.5274764", "0.52722394", "0.5269863", "0.5219844", "0.521598", "0.52118987", "0.5197928", "0.5197842", "0.51852095", "0.5170273", "0.516928", "0.5154288", "0.5143536", "0.5134455", "0.5126698", "0.51252824", "0.51150334", "0.503439", "0.50236344", "0.5015868", "0.4992553", "0.4989618", "0.49772316", "0.49752775", "0.49654034", "0.49631137", "0.49570814", "0.49528533", "0.49473453", "0.49449715", "0.49375638", "0.49341756", "0.49199423", "0.49108192", "0.48983532", "0.4893141", "0.48883817", "0.48743278", "0.4873027", "0.48677635", "0.48666134", "0.48421705", "0.48345634", "0.48243216", "0.48005515", "0.47952944", "0.47876883", "0.47873965", "0.4780031", "0.47785765", "0.47675258", "0.47567818", "0.47465554", "0.47415647", "0.47356957", "0.47343823", "0.4732808", "0.47262654", "0.47257543", "0.47123232", "0.4710476", "0.4701827", "0.46935883", "0.46840206", "0.46823874", "0.46802357", "0.46786046", "0.4677307", "0.4667701", "0.46603394", "0.46563148", "0.4650109", "0.464094", "0.46402997", "0.46371892", "0.46364066", "0.46287006", "0.462498", "0.45975658", "0.45913565" ]
0.76564085
0
Returns a list of customers that have been excluded from the data sample used to build the model. By default, 10 customer identifiers are returned. If customerCount is None or <= 0, the list of all customers excluded from the data sample is returned.
def get_listCustomer_out_sample(self, customerCount=10):
    if customerCount is None:
        listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())
    else:
        if customerCount <= 0:
            listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())
        else:
            listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique()[:customerCount])
    return listCustomer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_customers_count():\n data = user_obj.get_all_customers(\"1\")\n return data", "def pdelements_num_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(4), ctypes.c_int32(0))", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)", "def get_all_customers():\n data = user_obj.get_all_customers()\n return data", "def get_all_customer_ids():\n\n # your code", "def pdelements_total_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(5), ctypes.c_int32(0))", "def show_all_customers():\n return cr.show_all_customers()", "def get_all_customers_not_purchasing_next_quarter(self) -> list:\n\n #Check if predictions can be made\n if self._load_model:\n y_labels = self._predict_labels()\n return y_labels[y_labels == 0].index.tolist()\n\n else:\n raise NoTrainedModelError('There is no trained model to make predictions with, please call initialize_purchase_predictor() first or set load_existing_model to True.')", "def get_customers(self):\n self.navigate_to_page()\n customer_list=[]\n while True:\n page_customer = [{\n 'name': self.get_name(customer), \n 'parent':self.get_parent(customer),\n 'active':self.get_active(customer),\n 'link':self.get_details_link(customer)\n } for customer in self.get_page_customers()]\n customer_list = page_customer + customer_list\n if not CustomerGroupsPage.have_next_page(self):\n break\n self.navigate_to_page()\n return customer_list", "def get_customer_list(self):\n return self._customer_repo.get_customer_list()", "def get(self):\n return get_all_customers()", "def get_all_sales_ids_for_customer_ids():\n\n # your code", "def get_page_customers(self):\n return self.driver.find_elements(*CustomerGroupsPage.CUSTOMER_GROUP)", "def users_excludeds(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"users_excludeds\")", "def get_excluded_observations(self):\n\n return copy.deepcopy(self._excluded_observations)", "def list_active_customers():\n return Customer.select().where(Customer.is_active).count()", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def get_unselected_benefits(cls, excluded_benefits):\n benefits = cls.query.filter(cls.id.notin_(excluded_benefits))\n return [benefit.serialize() for benefit in benefits]", "def excluded(cls):\n return []", "def list_active_customers():\n count_query = (Customer\n .select(Customer, fn.COUNT(Customer.name)\n .alias('cust_count'))\n .where(Customer.status == 'active'))\n for count in count_query:\n return count.cust_count", "def generateCustomers(self):\r\n\r\n # Counters\r\n shoppers = 0\r\n models = 0\r\n oldl = 0\r\n oldf = 0\r\n doctor = 0\r\n nudist = 0\r\n hippie = 0\r\n nerd = 0\r\n\r\n for i in range(self.num_of_customers):\r\n\r\n # With these weights, our store has plenty of youngs and olds, but few mids\r\n # Most grocery shoppers come in the evening\r\n # Young people have equal distribution between morning and evening\r\n # etc\r\n age1 = random.randint(18, 28)\r\n age2 = random.randint(28, 50)\r\n age3 = random.randint(50, 85)\r\n weighted_ages = [(age1, 10), (age2, 2), (age3, 15)]\r\n randomAge = [val for val, cnt in weighted_ages for a in range(cnt)]\r\n\r\n hour1 = random.randint(8, 13)\r\n hour2 = random.randint(13, 18)\r\n hour3 = random.randint(18, 22)\r\n weighted_hours = [(hour1, 10), (hour2, 3), (hour3, 20)]\r\n randomHour = [val for val, cnt in weighted_hours for b in range(cnt)]\r\n\r\n age = 
random.choice(randomAge)\r\n hour = random.choice(randomHour)\r\n gender = random.choice(['M', 'M', 'M', 'M', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F'])\r\n\r\n # Base chances, 100 total\r\n gs, sm, hp, ol, nrd, of, sd, nud = 20, 5, 5, 5, 5, 5, 10, 10\r\n\r\n customerID = random.randint(0, self.num_of_customers*2)\r\n while customerID in self.all_customers:\r\n customerID = random.randint(0, self.num_of_customers*2)\r\n\r\n # Weights\r\n if 18 < age < 22:\r\n if gender == 'M':\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, hp = 2, 2, 35, 20\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd, hp, gs = 2, 2, 15, 30, 5\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs = 2, 2, 50\r\n\r\n elif gender == 'F':\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd = 5, 35, 15\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp = 5, 30, 30\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs, = 5, 25, 50\r\n\r\n elif gender == 'M' and 22 < age < 29:\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, hp = 5, 5, 35, 25\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd, hp = 5, 5, 35, 40\r\n elif 18 <= hour <= 22:\r\n ol, sm, nrd, hp, gs = 5, 5, 20, 20, 50\r\n\r\n elif gender == 'M' and 29 < age < 50:\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, gs = 5, 5, 40, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd = 5, 5, 30\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs = 5, 5, 70\r\n\r\n elif gender == 'M' and age > 50:\r\n if 8 <= hour <= 12:\r\n ol, sm, gs, of, hp = 5, 5, 30, 60, 20\r\n elif 13 <= hour <= 17:\r\n ol, sm, gs, of, hp = 5, 5, 15, 70, 20\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs, of, hp = 5, 5, 50, 25, 20\r\n\r\n elif gender == 'F' and 22 < age < 35:\r\n if 8 <= hour <= 12:\r\n ol, sm, hp, gs = 5, 30, 30, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp, gs = 5, 30, 30, 15\r\n elif 18 <= hour <= 22:\r\n ol, sm, hp, gs = 5, 15, 25, 60\r\n\r\n elif gender == 'F' and 35 < age < 55:\r\n if 8 <= hour <= 12:\r\n ol, sm, hp, gs = 5, 5, 5, 40\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp, gs = 25, 5, 5, 25\r\n elif 18 <= hour <= 22:\r\n ol, sm, hp, gs = 30, 5, 5, 40\r\n\r\n elif gender == 'F' and age > 55:\r\n if 8 <= hour <= 12:\r\n ol, sm, of, gs = 20, 5, 15, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, of, gs = 60, 5, 30, 15\r\n elif 18 <= hour <= 22:\r\n ol, sm, of, gs = 40, 5, 20, 40\r\n\r\n weighted_choices = [('Grocery Shopper', gs), ('Supermodel', sm), ('Hippie', hp), ('Old Lady', ol), ('Nerd', nrd), ('Self Doctor', sd), ('Nudist', nud), ('Old Fart', of)]\r\n randomType = [val for val, cnt in weighted_choices for n in range(cnt)]\r\n\r\n customer = random.choice(randomType)\r\n\r\n if customer == 'Grocery Shopper':\r\n shoppers += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 18\r\n medicalChance = 3\r\n electronicsChance = 1\r\n outdoorsChance = 1\r\n clothingChance = 1\r\n beautyChance = 2\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Supermodel':\r\n models += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 0\r\n medicalChance = 5\r\n electronicsChance = 0\r\n outdoorsChance = 0\r\n clothingChance = 10\r\n beautyChance = 13\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Hippie':\r\n hippie += 1\r\n num_of_purchases = 
random.randint(0, 20)\r\n foodChance = 6\r\n medicalChance = 2\r\n electronicsChance = 1\r\n outdoorsChance = 14\r\n clothingChance = 7\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Old Lady':\r\n oldl += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 6\r\n medicalChance = 8\r\n electronicsChance = 0\r\n outdoorsChance = 0\r\n clothingChance = 3\r\n beautyChance = 10\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Nerd':\r\n nerd += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 4\r\n medicalChance = 3\r\n electronicsChance = 14\r\n outdoorsChance = 0\r\n clothingChance = 2\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Self Doctor':\r\n doctor += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 5\r\n medicalChance = 32\r\n electronicsChance = 4\r\n outdoorsChance = 1\r\n clothingChance = 2\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Nudist':\r\n nudist += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 10\r\n medicalChance = 5\r\n electronicsChance = 0\r\n outdoorsChance = 14\r\n clothingChance = 0\r\n beautyChance = 0\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Old Fart':\r\n oldf += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 10\r\n medicalChance = 18\r\n electronicsChance = 5\r\n outdoorsChance = 3\r\n clothingChance = 3\r\n beautyChance = 0\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n itemsBought = (\", \".join(repr(e) for e in self.customer_purchases))\r\n self.c.execute(\"INSERT INTO Customer (CustomerID, Hour, Age, Gender, Items) VALUES (?, ?, ?, ?, ?)\", (customerID, hour, age, gender, itemsBought))\r\n self.conn.commit()\r\n\r\n if self.print_counters:\r\n print(\"\\nShoppers:\", shoppers)\r\n print(\"Models:\", models)\r\n print(\"Old Ladies:\", oldl)\r\n print(\"Old Farts:\", oldf)\r\n print(\"Self doctors:\", doctor)\r\n print(\"Nerds:\", nerd)\r\n print(\"Hippies:\", hippie)\r\n print(\"Nudists:\", nudist)\r\n\r\n if self.print_customers:\r\n print(\"\\nRaw Customer Data: \")\r\n print(self.all_customers)", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n 
return active_customers", "def getCustomer(self):\n return self.base.get(\"customer\", [])", "def remove_existing_customers(self):\n\n for i in range(len(self.customers)):\n if self.customers[i].is_active() == False:\n self.customers[i]= 'out'\n self.customers = [item for item in self.customers if item!='out' ]", "def display_customers_list():\n selected_path = request.url_rule\n selected_customer_list = get_selected_path(selected_path)\n page_number = 0\n if 'page' in request.args:\n page_number = int(request.args.get('page'))\n customers, pagination = generate_pagination(page_number, selected_customer_list)\n total_number_of_records = get_number_of_records(selected_customer_list)\n return render_template(\"clients.html\", domain_name= DOMAIN_NAME, selected_customer_list=selected_customer_list, customers=customers, pagination=pagination, url_path=selected_path, total_number_of_records=total_number_of_records, phone_error=None)", "def list_active_customers():\n init_database()\n return Customer.select().where(Customer.active_status).count()", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n logger.info(\n f\"Successfully counted active customers {active_customer_count}\"\n )\n return active_customer_count\n except Exception as unknown_error:\n logger.error(f\"Error. Failed to count customers. {unknown_error}\")\n print(\n f'Error. Not able to count number of active customers.'\n ' {unknown_error}'\n )", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def customer_agents(self):\n return self.get(\"customer_agents\")", "def getCustomersInfo(self):\n rval = None\n tries = 0\n while not rval and tries < 5:\n if tries > 0:\n time.sleep(30)\n rval = self._getService(self.ns_customer,\n self.soapCustomer % (self.ns_customer, \"\",\n \"100\", \"Advertiser\"),\n \"GetCustomersInfo\", '', '',\n self.cwsdl, self.chost)\n tries += 1\n return rval", "def list_active_customers():\n with cm.DATABASE.transaction():\n # .select() has a .where() method to specify criteria for searching\n active_customers = cm.Customer.select().where(\n cm.Customer.status == \"Active\").count()\n LOGGER.info(\"Active customers: %s\", active_customers)\n return active_customers", "def get_all_nodes_without_provider(self):\n\n no_provider_nodes = []\n # create list of all nodes without provider and more than tier1_threshold customers\n for node in self.nodes():\n tier1 = True\n\n # check that node is not a customer of any node\n if not self.has_providers(node):\n no_provider_nodes.append(node)\n\n return no_provider_nodes", "def list_active_customers():\n customer_active = Customer.select().where(Customer.status == 'Active')\n print('{} Active Customers'.format(len(customer_active)))\n return len(customer_active)", "def list_active_customer():\n active_customer = Customer.select().where(Customer.is_active).count()\n LOGGER.info('Number of active customers retrieved.')\n return active_customer", "def excluded_from_scan(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))", "def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. 
Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def filter_sent_to_customer(self, queryset, name, value):\n if str2bool(value):\n return queryset.exclude(customer=None)\n else:\n return queryset.filter(customer=None)", "def generatePurchases(self, num_of_purchases, food, medical, electronics, outdoors, clothing, beauty, customer):\r\n\r\n # Empty purchases\r\n self.customer_purchases = []\r\n\r\n # Customer is *likely* to buy from some categories, but anything can happen\r\n weighted_categories = [('Food', food), ('Medical', medical), ('Electronics', electronics), ('Outdoors', outdoors), ('Clothing', clothing), ('Beauty', beauty)]\r\n randomCategory = [val for val, cnt in weighted_categories for i in range(cnt)]\r\n\r\n # Buy items\r\n for i in range(num_of_purchases):\r\n choice = random.choice(randomCategory)\r\n if choice == 'Food':\r\n tempIndex = random.randint(self.num_of_items, self.num_of_items*2-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Medical':\r\n tempIndex = random.randint(self.num_of_items*2, self.num_of_items*3-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Electronics':\r\n tempIndex = random.randint(3*self.num_of_items, 4*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Outdoors':\r\n tempIndex = random.randint(4*self.num_of_items, 5*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Clothing':\r\n tempIndex = random.randint(5*self.num_of_items, 6*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Beauty':\r\n tempIndex = random.randint(6*self.num_of_items, 7*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n\r\n if self.print_customers:\r\n print(self.customer_purchases)", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def customers(self):\r\n return Customers(self)", "def list_customers():\n customers = db_helper.get_all_customers()\n return jsonify({\"customers\": customers})", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n return 
active_customer_count\n except Exception as unknown_error:\n print(f'Error. Not able to count number of active customers. {unknown_error}')", "def get_absent_client_names(self, clients):\n return list(set(self.get_all_client_names()) - set(clients))", "def is_customer_out_sample(self, customerID):\n listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())\n is_flag = customerID in listCustomer\n return is_flag", "def users_excludeds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"users_excludeds\")", "def users_excludeds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"users_excludeds\")", "def search_all_customers(customer_ids):\n return (search_customer(cid) for cid in customer_ids)", "def get_seated_customers(restaurant: Restaurant):\n customers = Reservation.query.filter(Reservation.restaurant == restaurant).filter(Reservation.status == ReservationState.SEATED).all()\n \n return reduce(lambda acc,rsv: acc + rsv.seats, customers, 0)", "def customers(self):\r\n return customers.Customers(self)", "def get_contacts(self, count=-1, excluded_guid=None):\n current_len = len(self._contacts)\n if current_len == 0 or count == 0:\n return []\n\n if count < 0:\n count = current_len\n else:\n count = min(count, current_len)\n\n if excluded_guid is None:\n # Get the last `count` contacts.\n contact_list = self._contacts[-count:]\n else:\n contact_list = []\n for contact in reversed(self._contacts):\n if contact.guid == excluded_guid:\n continue\n contact_list.append(contact)\n if len(contact_list) >= count:\n break\n return contact_list", "def get_customers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Company' THEN cus.customer_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. 
Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tcus.website as 'Internet',\n\t\t\tcus.tax_id as 'Steuernummer'\n\n\t\tFROM `tabCustomer` cus\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = cus.name\n\t\t\tand par.parenttype = 'Customer'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = cus.name\n\t\t\tand dyn_adr.link_doctype = 'Customer'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)", "def get_all_customers(connection):\n connection.command_path = \"customers\"\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.get(url=url, headers=extra_headers, verify=verify_ssl)\n if res.status_code > 210:\n return\n body = res.content\n return customers.parse_all_customers(body)", "def get_targeted_campaign_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n campaign.id,\n campaign_extension_setting.extension_feed_items\n FROM campaign_extension_setting\n WHERE\n campaign_extension_setting.extension_type = 'PROMOTION'\n AND campaign.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n campaign_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.campaign_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching campaign with ID: '{row.campaign.id}'\")\n campaign_ids.append(row.campaign.id)\n\n return campaign_ids", "def list_active_customers():\n with database.transaction():\n query = (Customer\n .select(fn.COUNT(Customer.status).alias('count'))\n .where(Customer.status == 'Active'))\n LOGGER.info(query)\n\n customer_count = [item.count for item in query]\n LOGGER.info('Number of active customers: %s', customer_count[0])\n\n return customer_count[0]", "def filter_by_customer(table, customer):\n transactions = []\n for record in table:\n if record[CUSTOMER_ID] == customer:\n transactions.append(record)\n return transactions", "def customers_presence(self):\n return self._customers_presence", "def get_customers(cls, api, id='', **params):\n return api.get_customers(id, **params)", "def getCustomerAccount(self):\n self.logger.debug(\"\")\n for cust in self.getCustomerAccountData():\n accounts = len(cust['accounts'])\n self.logger.debug(\"%d accounts in %s\", accounts, cust['CustomerId'])\n ii = 1\n for acct in cust['accounts']:\n self.logger.debug(\"yield %s, %s\", cust['CustomerId'], acct['Id'])\n yield cust['CustomerId'], acct['Id'], ii, accounts\n ii += 1", "def get_customers(cls, agent_id):\n customer_numbers = [str(customer.customer_number)\n for customer in cls.query.filter_by(agent_id=agent_id).all()]\n customer_data = {'individual':[], 'organization':[]}\n for customer_number in 
customer_numbers:\n customer_type = helper.get_customer_type(customer_number)\n if customer_type == 'IN':\n customer_details = IndividualCustomer.query.filter_by(\n customer_number=customer_number).first()\n customer_data['individual'].append(customer_details.serialize())\n else:\n customer_details = OrganizationCustomer.query.filter_by(\n customer_number=customer_number).first()\n customer_data['organization'].append(customer_details.serialize())\n\n return customer_data", "def _exclude_indices(self):\n idx = self._next_idx\n exclude = np.arange(idx - 1, idx + self.obs_len) % self._maxsize\n return exclude", "def fetch_customer_info_identities(self, client_id):\n\n try:\n return self._make_private_api_request(\n method=PyttributionIo.GET_REQUEST,\n endpoint='customers',\n subject_id=client_id,\n show_identities='true'\n ).get('customer')\n except RequestException as e:\n logger.error('Pyttribution.io: Retrieval of customer identities failed with HTTP status {exception}'.format(\n exception=e))", "def test_list_active_users(_list_active_customers):\n for customer in _list_active_customers:\n bo.add_customer(\n customer[0],\n customer[1],\n customer[2],\n customer[3],\n customer[4],\n customer[5],\n customer[6],\n customer[7]\n )\n\n assert bo.list_active_customers() == 4\n\n for customer in _list_active_customers:\n bo.delete_customer(customer[0])\n \n assert bo.list_active_customers() == 0", "def get_vendor_bills(self, count: int = 10) -> list:\n return list(\n itertools.islice(self.client.vendor_bills.get_all_generator(), count)\n )", "def bus_total_customers(self) -> int:\n return self.dss_obj.BUSI(4, 0)", "def download_latest_customers(self):\n file_id, data = self._gdrive.get_last_customer_data_file()\n self._datastore.force_data_storage('customer', data)", "def excludes(self):\r\n\r\n return self._excludes", "def get_customer_columns():\n return cr.get_columns()", "def get_customer_statistics(self, n):\n trusted = []\n useful = []\n\n trust_dict = {}\n self.cursor.execute(\"\"\"select otherLoginID, COUNT(loginID) as score_trusted\n FROM trusts GROUP BY otherLoginID, trustStatus HAVING trustStatus='TRUSTED'\"\"\")\n for cust in self.cursor.fetchall():\n trust_dict[cust[0]] = cust[1]\n self.cursor.execute(\"\"\"SELECT otherLoginID, COUNT(loginID) as score_trusted FROM trusts\n GROUP BY otherLoginID, trustStatus HAVING trustStatus='UNTRUSTED'\"\"\")\n for cust in self.cursor.fetchall():\n if cust[0] in trust_dict:\n trust_dict[cust[0]] = trust_dict[cust[0]] - cust[1]\n else:\n trust_dict[cust[0]] = -cust[1]\n m = 0\n n_temp = n\n while n_temp > m and len(trust_dict):\n loginID = max(trust_dict.items(), key=operator.itemgetter(1))[0]\n self.cursor.execute(\"\"\"SELECT firstName, lastName FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n name = self.cursor.fetchone()\n trusted.append([loginID, name[0], name[1], trust_dict[loginID]])\n del trust_dict[loginID]\n n_temp = n_temp - 1\n\n self.cursor.execute(\"\"\"SELECT C.loginID, firstName, lastName, AVG(avg_usefulness) as total_avg\n FROM comment C, customercredentials CR WHERE C.loginID = CR.loginID GROUP BY C.loginID\n ORDER BY total_avg DESC LIMIT %s\"\"\", (n,))\n for cust in self.cursor.fetchall():\n useful.append(cust)\n return trusted, useful", "def get_customer_orders_count(customerId):\n data = user_obj.get_customer_orders(customerId,\"1\")\n return data", "def load_known_customer_data(customers_filename: str) -> Tuple[List[Customer], Dict[Tuple[str, str], float]]:\n\n if not customers_filename or not 
os.path.exists(customers_filename):\n # empty list of customers, empty dict of distances\n return [], {}\n\n else:\n all_cust_data = load_json(customers_filename)\n custs = [Customer(**customer) for customer in all_cust_data['customers']]\n dists = dist_dict_from_json(all_cust_data['distances'])\n return custs, dists", "def test_get_customer_list(self):\n customer = self._create_customers(\"Alex\")\n customer.create()\n customer = self._create_customers(\"Sally\")\n customer.create()\n customer = self._create_customers(\"John\")\n customer.create()\n resp = self.app.get(\"/customers\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 3)", "def test_search_no_customer(self):\n set_up_db()\n self.assertEqual({}, search_customer(1))", "def movable_intraroute_customers(route, customers):\n mcust = []\n for c in range(route.ncustomers):\n if len(factible_route_positions(route.customers[c+1],\n route,customers)) > 1:\n mcust.append(c)\n return mcust", "def test_list_active_customers(self):\n set_up_db()\n add_customer(*self.test_customer)\n add_customer(customer_id=2, name=\"Clark\", last_name=\"Kent\", home_address=None,\n phone_number=\"228-626-7899\", email=\"[email protected]\",\n status=True, credit_limit=200.00)\n add_customer(customer_id=3, name=\"Diana\", last_name=\"Prince\", home_address=None,\n phone_number=\"587-8423\", email=\"[email protected]\",\n status=False, credit_limit=100.00)\n self.assertEqual(2, list_active_customers())", "def get_users_with_missing_data() -> Set[str]:\n users_data = {user[\"_source\"][\"VENDOR_UUID\"] for user in Handlers.elastic_handler.get_all_today_data(\n _type=\"status\",\n date_start=dt.date.today() + dt.timedelta(days=1),\n date_end=dt.date.today() + dt.timedelta(days=7),\n )}\n\n all_tokens = Handlers.token_handler.get_all_today_data(_type=\"token\")\n to_dict = {dict_[\"_source\"][\"VENDOR_UUID\"]: dict_[\"_source\"][\"TOKEN\"] for dict_ in all_tokens}\n\n return set(dict(filter(lambda item_tup: item_tup[0] not in users_data, to_dict.items())).values())", "def get(self, customer=None, count=None, offset=None):\r\n params = base.get_params(None, locals())\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, customer=None, count=None, offset=None):\r\n params = base.get_params(None, locals())\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, customer=None, count=None, offset=None):\r\n params = base.get_params(None, locals())\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, customer=None, count=None, offset=None):\r\n params = base.get_params(None, locals())\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get_dataset_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_dataset_names')", "def test_no_counterfactuals_found(self):\n threshold = 4.0\n self._config['Regression threshold'] = str(threshold)\n self._example = {'x_1': 1.0, 'x_2': 1.0}\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertEmpty(output)", "def get_all_user_data(request):\n \n try:\n customers = Customer.objects.all()\n serializer = CustomerSerializer(customers, many=True)\n \n return Response(serializer.data)\n except Exception as e:\n return 
Response({\"Error\":str(e)})", "def fetch_customer_info_full(self, client_id):\n\n try:\n return self._make_private_api_request(\n method=PyttributionIo.GET_REQUEST,\n endpoint='customers',\n subject_id=client_id,\n show_all='true'\n ).get('customer')\n except RequestException as e:\n logger.error('Pyttribution.io: Retrieval of full customer info failed with HTTP status {exception}'.format(\n exception=e))", "def filter_out_non_helpful_records(data_records):\n for r in data_records:\n if r.get_cruciality() is None:\n continue\n # We only want records that got exactly 3 votes\n if len(r.cruciality) != 3:\n continue\n else:\n yield r", "def get_dependent_accounts(client):\n\n PAGE_SIZE = 500\n managed_customer_service = client.GetService(\n 'ManagedCustomerService', version=settings.API_VERSION)\n\n offset = 0\n selector = {\n 'fields': ['CustomerId', 'Name'],\n 'predicates': {\n 'field': 'CanManageClients',\n 'operator': 'EQUALS',\n 'values': 'False'\n },\n 'paging': {\n 'startIndex': str(offset),\n 'numberResults': str(PAGE_SIZE)\n }\n }\n more_pages = True\n accounts = {}\n while more_pages:\n page = managed_customer_service.get(selector)\n\n if 'entries' in page and page['entries']:\n for account in page['entries']:\n accounts[account['customerId']] = str(account['name']) \\\n if hasattr(account, 'name') else 'None'\n\n offset += PAGE_SIZE\n selector['paging']['startIndex'] = str(offset)\n more_pages = offset < int(page['totalNumEntries'])\n\n return accounts", "def get_random_index(self, excludes: Set[int]) -> int:\n index = random.randint(0, self.space.categorical_size() - 1)\n while index in excludes:\n index = random.randint(0, self.space.categorical_size() - 1)\n return index", "def crew_needing_reports(self):\n reports = self.ccreport_set.all().values_list('crew_chief', flat=True)\n return self.ccinstances.exclude(crew_chief__in=reports)", "def get_customers_by_name(name: str = '') -> List[Customer]:\n customers = []\n for c in get_market().customers.values():\n if not name or c.name == name:\n customers.append(c)\n return customers", "def not_applicable_device_count(self):\n if \"notApplicableDeviceCount\" in self._prop_dict:\n return self._prop_dict[\"notApplicableDeviceCount\"]\n else:\n return None", "def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def remove_existing_customers(self):\n # remove the customers which are not active (.is_active )\n self.to_move = False\n #for cust in self.customers:\n # print(cust.state)\n self.customers = [cust for cust in self.customers if cust.state != 'checkout']\n #if cust.to_move():\n # self.to_move = True", "def GetListDoctors(self):\n\t\treturn self.ClientsMap.values()", "def get_users(self, customer_id='my_customer'):\n try:\n paged_results = self.repository.users.list(customer=customer_id,\n viewType='admin_view')\n flattened_results = api_helpers.flatten_list_results(\n paged_results, 'users')\n LOGGER.debug('Getting all the users for customer_id = %s,'\n ' flattened_results = %s',\n customer_id, flattened_results)\n return flattened_results\n except RefreshError as 
e:\n # Authentication failed, log before raise.\n LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)\n raise e\n except (errors.HttpError, HttpLib2Error) as e:\n raise api_errors.ApiExecutionError('users', e)", "def get_all_except(id_list, order_by_field=DATA_SORTING_FIELDS):\n return Data.get_all_except(order_by_field, id_list)", "def _random_customer(cust_dtls) -> tuple:\n return choices(cust_dtls)[0]", "def fetch_data(excludeNonPollutants):\n if excludeNonPollutants:\n return AirMeasurement().select().where(~AirMeasurement.uploaded,\n AirMeasurement.type != 'Temperature',\n AirMeasurement.type != 'Altitude',\n AirMeasurement.type != 'Pressure',\n AirMeasurement.type != 'Humidity',\n AirMeasurement.type != 'PM1.0').limit(500)\n\n return AirMeasurement().select().where(~(AirMeasurement.uploaded)).limit(500)", "def getExcludedAtoms(self):\n excludedAtomsIdList = self.getFlagData('EXCLUDED_ATOMS_LIST')\n numberExcludedAtoms = self.getFlagData('NUMBER_EXCLUDED_ATOMS')\n atoms = self.atoms\n interval = 0\n excludedAtomsList = []\n for number in numberExcludedAtoms:\n temp = excludedAtomsIdList[interval:interval + number]\n if temp == [0]:\n excludedAtomsList.append([])\n else:\n excludedAtomsList.append([atoms[a-1] for a in temp])\n interval += number\n self.excludedAtoms = excludedAtomsList\n self.printDebug(\"getExcludedAtoms\")" ]
[ "0.5755343", "0.5630526", "0.5468172", "0.5455088", "0.5447088", "0.53655386", "0.5348006", "0.5255881", "0.52160037", "0.5184504", "0.5118855", "0.51016146", "0.50954854", "0.5094486", "0.50568765", "0.50267607", "0.49917355", "0.49625525", "0.49529928", "0.49519303", "0.49320677", "0.49264386", "0.49157014", "0.4894947", "0.48897344", "0.4889139", "0.48599762", "0.48556495", "0.48475817", "0.48341656", "0.4827593", "0.4824127", "0.48215884", "0.48111558", "0.48075563", "0.4803708", "0.4793977", "0.4738182", "0.4727644", "0.47134954", "0.47122827", "0.47013596", "0.469767", "0.4694221", "0.46777287", "0.46562466", "0.46538055", "0.46538055", "0.46522975", "0.46454203", "0.464386", "0.46349066", "0.46322682", "0.4624423", "0.46088406", "0.4608093", "0.45889562", "0.45762601", "0.4570697", "0.45690352", "0.45425043", "0.451929", "0.45174477", "0.4508532", "0.45010126", "0.44999465", "0.44910756", "0.44861954", "0.4478877", "0.44775662", "0.44412106", "0.44368094", "0.44321892", "0.441941", "0.43984798", "0.4394206", "0.439287", "0.43892136", "0.43892136", "0.43892136", "0.43892136", "0.4383296", "0.43615562", "0.43568084", "0.43567845", "0.43558758", "0.43447962", "0.43443578", "0.4342788", "0.43326914", "0.43266642", "0.43235832", "0.43188372", "0.4304886", "0.43013406", "0.42984247", "0.42974642", "0.42893043", "0.4289288", "0.42832902" ]
0.7302814
0
Returns True if a customer identifier does not belong to the dataframe used to build the classifier model.
def is_customer_out_sample(self, customerID):
    listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())
    is_flag = customerID in listCustomer
    return is_flag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_customer(self) -> bool:\n return self.customer_id is not None", "def has_customer(self):\n return self.customer is not None", "def is_customer(self):\n return self.user_type == 'C'", "def is_label_dataframe(label, df):\n\n setdiff = set(label) - set(df.columns.tolist())\n\n if len(setdiff) == 0:\n return True\n else:\n return False", "def is_customer(self):\n return self.rol == ProfileRoles.CUSTOMER", "def check_data(dataframe):\n if dataframe.iloc[0, 0] == 'No data available':\n return False\n else:\n return True", "def is_velas_df(df):\n empty_df = get_empty_df()\n \n if sorted(list(empty_df.columns)) != sorted(list(df.columns)):\n return False\n if empty_df.index.name != df.index.name:\n return False\n return True", "def is_dataset(self):\n return self._dataset is not None", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def test_case_user_not_yet_customer(self):\n pass", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def is_appropriate_data_instance(self, df) -> bool:\n return isinstance(df, pl.DataFrame) or isinstance(df, pl.LazyFrame)", "def customer_wants_condiments(self):\n return True", "def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)", "def is_specific(self) -> bool:\n return False", "def is_customer_id_exist(customer_id) -> bool:\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT exists(SELECT 1 FROM Customers WHERE id_customer=?)\", (customer_id,))\n return cursor.fetchone()[0] == 1", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def has_cid(self):\n return hasattr(self, 'cid')", "def _isIndexedDataframe(self, dataframe):\n return len(dataframe.index.names) > 1 or not dataframe.index.names[0] is None", "def __is_nan(self):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"is_nan\",\n operand1=self,\n operand2=None\n )", "def is_new_data(df: pd.DataFrame) -> bool:\n import io\n\n with io.StringIO() as s:\n save_as_data_table(df, s)\n new_data = s.getvalue()\n\n existing_data = read_data_table(as_text=True)\n\n return new_data != existing_data", "def check_sparkdf_not_nulls(sparkdf,columns):\n\n\tfor column in columns:\n\n\t\tempties = sparkdf.select(col(column)).where(col(column).isNull())\n\t\tif len(empties.head(1)) > 0:\n\t\t\tprint(\"Checking DataFrame. I found null values in column\", column)\n\t\t\treturn False\n\t\telse:\n\t\t\tprint(\"Checking DataFrame. 
No null values found in column\", column)\n\n\treturn True", "def is_false(self):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"not\",\n operand1=self,\n operand2=None\n )", "def is_non_reducing(self):\n return bool(set(self.kind) & set(\"ABC\"))", "def single_records(df,\n key_cols=['report_date', 'plant_id_eia', 'generator_id']):\n len_1 = len(df)\n len_2 = len(df.drop_duplicates(subset=key_cols))\n return bool(len_1 == len_2)", "def to_drop(self):\n return self.id is None", "def __bool__(self):\n return self.fam.c_nonzero(self)", "def has_customers(self, asn):\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n\n # node is a provider of neighbor\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == asn:\n return True\n return False", "def bool(self) -> bool:\n if isinstance(self, ps.DataFrame):\n df = self\n elif isinstance(self, ps.Series):\n df = self.to_dataframe()\n return df.head(2)._to_internal_pandas().bool()", "def __is_inf(self):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"is_inf\",\n operand1=self,\n operand2=None\n )", "def is_valid(self, dataset):\n pass", "def noyable(self):\n return False", "def is_relevant(self):\n return self.metadata.is_relevant", "def have_cdc() -> bool:", "def train_column_is_all_null(self, column):\n return self.df_column_is_all_null(self.train, column)", "def test_import_customer_without_data(self):\n\n customer = self.import_customer.create_customer_object(\"cust002010\", {})\n self.assertIsInstance(customer, Customer)", "def test_not_contains_user(self):\n print('(' + self.test_not_contains_user.__name__+')',\n self.test_not_contains_user.__doc__)\n # non existing doctor, it could be patient as well\n self.assertFalse(self.connection.contains_user(\n NON_EXIST_DOCTOR_USERNAME))", "def test_single_missing_column():\n imp = MissingnessClassifier()\n imp.fit_predict(dfs.df_mis_classifier)\n imp.fit_predict_proba(dfs.df_mis_classifier)", "def _has_data(cls):\n return User.objects.count() > 0", "def is_isolated_cds(gene, gene_info, idx):\n\n if len(gene_info.vertex_succ_list[idx]) > 0:\n return False\n\n return np.sum(gene.splicegraph.edges[:, idx]) == 0", "def is_absent(self, tokenized_record):\n\n return bool(set(tokenized_record).intersection(self.absent_markers))", "def has_stockrecords(self):\n try:\n a=self.stockrecords.pk\n return True\n except:\n return False", "def isUnConditional(self) -> bool:\n ...", "def customers_presence(self):\n return self._customers_presence", "def _exists(self) -> bool:\n client = MlflowClient()\n all_metrics = client._tracking_client.store.get_all_metrics(\n run_uuid=self.run_id\n )\n return any(self._is_dataset_metric(x) for x in all_metrics)", "def has_no_uses(self) -> bool:\n\n return len(self.users_) == 0", "def verify_pandas(self):\n self.check_dataset_duplicate_ids(self.vertices)\n # self.check_dataset_children_ids()\n self.check_dataset_litter_ids()\n self.check_dataset_dates()", "def ref_known_flag(self):\n if CredentialApplication.objects.filter(\n reference_email__iexact=self.reference_email,\n reference_contact_datetime__isnull=False).exclude(\n reference_email=''):\n return True\n elif LegacyCredential.objects.filter(\n reference_email__iexact=self.reference_email).exclude(\n reference_email=''):\n return True\n else:\n return False", "def cols_valid(self,\n df: pd.DataFrame,\n req_cols: set) -> bool:\n missing_cols = req_cols.difference(df.columns)\n\n if len(missing_cols) 
> 0:\n logging.error(f\"{missing_cols} columns required but missing\")\n return False\n\n return True", "def __contains__(self, item):\n return item in self.default_dataset", "def _check_notnull(self):\n candnull = self.df_test_resampled[self.candidate_col_name].isnull().all()\n refnull = self.df_test_resampled[self.reference_col_name].isnull().all()\n if candnull or refnull:\n return 1, 'No data for selected time frame'\n else:\n return 0, 'No error occurred'", "def validate(\n self, feature_set: FeatureSet, dataframe: DataFrame, spark_client: SparkClient\n ) -> Any:", "def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')", "def does_usage_charges_grid_have_no_records(self):\n return self.is_element_present(self.usage_charges_grid_no_record_found_message_locator)", "def is_identity(self):\n\n if self.rows != self.columns:\n return False\n\n for i in range(self.rows):\n row = self.row(i + 1)\n for j in range(self.columns):\n if i == j and row[j] != 1:\n return False\n\n if i != j and row[j] != 0:\n return False\n\n return True;", "def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True", "def is_indicator():\n return True", "def is_id_only(self):\n for key, value in self.items():\n if key not in {'names', 'labels', 'roles'} and value:\n return False\n if self.names or self.labels:\n return True\n return False", "def has_carbon(self):\n return len(self.c_indices) > 0", "def isEmptyLandmarkset(self):\n return self.subsetpointcloud is None", "def _is_categorical(df, field):\n return df[field].dtype.name == 'category'", "def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)", "def is_ode_noad_link(self):\n if self.project_name in IDENTIFIERS:\n return True\n else:\n return False", "def is_yummy(self):\n return False", "def is_customers_room(room, customer_id):\n room_last_10 = room.title[-10:]\n customer_id_last10 = customer_id[-10:]\n if room_last_10 == customer_id_last10:\n return True\n logging.log(logging.INFO, \"NO MATCH for %s %s \" % (room_last_10, customer_id_last10))\n return False", "def is_emptiable(self) -> bool:\n raise NotImplementedError()", "def __ne__(self, other):\n if not isinstance(other, BusinessInvoiceAnalysisRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __nonzero__(self):\n return True", "def check_if_sectors_are_naics(df_load, crosswalk_list, column_headers):\n\n # create a df of non-sectors to export\n non_sectors_df = []\n # create a df of just the non-sectors column\n non_sectors_list = []\n # loop through the df headers and determine if value\n # is not in crosswalk list\n for c in column_headers:\n # create df where sectors do not exist in master crosswalk\n non_sectors = df_load[~df_load[c].isin(crosswalk_list)]\n # drop rows where c is empty\n non_sectors = non_sectors[non_sectors[c] != '']\n # subset to just the sector column\n if len(non_sectors) != 0:\n sectors = non_sectors[[c]].rename(columns={c: 'NonSectors'})\n non_sectors_df.append(non_sectors)\n non_sectors_list.append(sectors)\n\n if len(non_sectors_df) != 0:\n # concat the df and the df of sectors\n ns_list = pd.concat(non_sectors_list, sort=False, ignore_index=True)\n # print the NonSectors\n non_sectors = ns_list['NonSectors'].drop_duplicates().tolist()\n vLog.debug('There are sectors that are not NAICS 2012 Codes')\n vLog.debug(non_sectors)\n else:\n vLog.debug('All sectors are NAICS 2012 Codes')\n\n return 
non_sectors", "def isInternal(self):\n if self.data.depend_er_job == self.data.depend_on_job:\n return True\n return False", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def _isNewTxn(self, identifier, reply, txnId) -> bool:\n return (identifier not in self.processedRequests or\n reply.reqId not in self.processedRequests[identifier]) and \\\n txnId is not None", "def __ne__(self, other):\n if not isinstance(other, CreditSupportAnnex):\n return True\n\n return self.to_dict() != other.to_dict()", "def exists(cls, customer_id):\n customer_id = int(customer_id)\n cust = DB_CUSTOMER_TABLE.get(doc_id=customer_id)\n if not cust:\n raise ValueError(f\"unknown customer '{customer_id}'\")\n return customer_id", "def _check_primary_key(df: \"pd.DataFrame\", primary_key_name: str):\n if primary_key_name in df.columns and primary_key_name == df.index.name:\n raise primary_key.Ambiguous(\n f\"Index {primary_key_name} has the same name as column {primary_key_name}\"\n )\n elif primary_key_name not in df.columns and primary_key_name != df.index.name:\n raise primary_key.NotFound(\n f\"Primary key: {primary_key_name} is not DataFrame index name: {df.index.name} or in\"\n f\" DataFrame column names: {df.columns}\"\n )", "def is_distributed_model(model):\n try:\n get_tenant_field(model)\n return True\n except ValueError:\n return False", "def is_dnf(self):\n return False", "def is_data(i):\n keys = ['_id', '_time']\n return all(i != k for k in keys)", "def check_for_null_values(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.isna().sum())", "def invariant(self):\n\t\treturn ((self.tenant != \"\") and (self.loginUser != \"\"))", "def validateModelCol(self):\n \n ret = False\n \n dc = self.__args['datacolumn'].upper() \n if \"MODEL\" in dc or dc == 'ALL':\n ret = True\n\n return ret", "def invariant(self):\n\t\treturn ((self.name != \"\") and (self.locationId != \"\"))", "def checkfornan(chosen_df):\n if not chosen_df.isnull().values.any():\n raise ValueError('NaN in DataFrame')", "def tie_exists(self):\n return len(self.marks) == 9", "def __eq__(self, other):\n if not isinstance(other, Customer):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CustomersCustomerDetails):\n return False\n\n return self.__dict__ == other.__dict__", "def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True", "def chcek_exist(df, Point):\n\n exist = (df.OperatingPoint == Point)\n exist = exist.sum()\n if exist == 0:\n return False\n else:\n return True", "def unmatching_driver_id(df):\n\ttemp = df[df['driver_id_bkg'].notnull()]\n\torder_ids = temp[temp['driver_id_bkg'] != temp['driver_id_pnt']]['order_id'].values\n\treturn df[~df['order_id'].isin(order_ids)]", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def delete_customer(customer_id):\n del_query = Customer.get(Customer.customer_id == customer_id)\n return bool(del_query.delete_instance())", "def _check_features_df(df, features):\n # check columns\n if not set(features).issubset(df.columns):\n raise ValueError(\"The dataframe does not seem to have the right \"\n \"features. 
{0} instead of {1}\"\n .format(df.columns, features))\n\n return", "def is_internal(self):\n return bool(self.is_reducing() and self.is_non_reducing())", "def is_indeed(self) -> bool:\n return self.mukluk > 5", "def IgnorePersistedDecision(self) -> bool:", "def is_inequality(self): \n return False", "def __bool__(self):\n return self.taxonomy.exists", "def is_person_identifier_used(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT personid FROM person WHERE personid =?\", (person_id,))\n person_identifier = \"\"\n is_used = True\n for row in c:\n person_identifier = row[\"personid\"]\n conn.close()\n if len(person_identifier) == 0:\n is_used = False\n if len(person_identifier) > 0:\n is_used = True\n return is_used\n except:\n return False", "def is_trainable(self):\n return False", "def isDeleted(self):\n return self.air is None" ]
[ "0.661681", "0.6462838", "0.59257627", "0.5741564", "0.5628499", "0.55574435", "0.5537199", "0.5509296", "0.5507439", "0.5461328", "0.5405652", "0.53825015", "0.5370098", "0.5365358", "0.5328702", "0.53143424", "0.529039", "0.52698815", "0.52663475", "0.52634853", "0.52370775", "0.52231854", "0.5220413", "0.51524615", "0.5137677", "0.51331055", "0.5102486", "0.5101493", "0.5096956", "0.5080815", "0.50726473", "0.50654197", "0.50583327", "0.50484157", "0.5031694", "0.50285304", "0.50268906", "0.5021727", "0.50192124", "0.49972594", "0.49842697", "0.49818462", "0.49800837", "0.4974319", "0.49574834", "0.49545977", "0.49542335", "0.49349925", "0.4929173", "0.49284396", "0.49217662", "0.4920269", "0.49192423", "0.49140215", "0.49076545", "0.4907331", "0.49031", "0.48979303", "0.4897661", "0.48917526", "0.4886968", "0.48839128", "0.48584569", "0.48571783", "0.4854011", "0.48536837", "0.48520273", "0.4849794", "0.4846591", "0.4843272", "0.48382357", "0.48360077", "0.48343554", "0.48331633", "0.48315686", "0.48314548", "0.4828786", "0.48276383", "0.4825132", "0.48237842", "0.48231232", "0.48219508", "0.4821141", "0.48177683", "0.48146838", "0.48142496", "0.4813486", "0.48127973", "0.48126787", "0.481166", "0.4810733", "0.48083964", "0.48059034", "0.48043257", "0.4796356", "0.47944203", "0.47888148", "0.47876745", "0.47837427", "0.47829413" ]
0.63775516
2
Returns the number of invoices from the original dataset.
def get_invoice_count(self):
    return self._df_invoice_original.InvoiceNo.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def invoices(self):\r\n return inv.Invoices(self)", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def expired_invoices_count(self):\n return self.get_expired_invoices().count()", "def getNumOfInvoice(self,id,start,finish):\n self.calls += 1\n invoice = self.getResponse(self.buildParams(id,start,finish))\n if not self.isNumeric(invoice):\n middle = self.diveDates(start,finish)\n plusMiddle = middle + timedelta(days = 1)\n middle = self.removeHours(middle)\n plusMiddle = self.removeHours(plusMiddle)\n invoice = self.getNumOfInvoice(id,start,middle)+\\\n self.getNumOfInvoice(id,plusMiddle,finish)\n return invoice", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def countAndGetCallInvoice(self,id,start,finish):\n self.calls = 0\n return self.getNumOfInvoice(id,start,finish)", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def test_total_invoices(self):\n sale = SaleFactory(total_value=1000)\n InvoiceFactory(sale=sale, total_value=50)\n InvoiceFactory(sale=sale, total_value=500)\n self.assertEqual(sale.total_invoices, 550)", "def invoices(self):\r\n return Invoices(self)", "def test_total_invoices_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_invoices, 0)", "def _compute_count(self):\n for orders in self:\n orders.count = self.env['account.move'].search_count(\n [('invoice_origin', '=', self.name)])", "def getNoOfRows(self):\n return _patchExtractor.patchExtractor_getNoOfRows(self)", "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def getNumRows(self) -> int:\n ...", "def getInvoice(self):\n return self.base.get(\"invoice\", [])", "def get_entity_contracts_count():\n url = 'http://www.base.gov.pt/base2/rest/contratos?adjudicatariaid=%d' \\\n '&sort(-id)' % entity.base_id\n\n response = requests.get(url, headers={'Range': 'items=0-24'})\n\n results_range = response.headers['content-range']\n _, count = results_range.split('/')\n\n return int(count)", "def row_count(data):\n return int(arcpy.GetCount_management(data).getOutput(0))", "def get_num_rows(self, data, omit_metric=False):\n if omit_metric:\n num_rows = int((len(data.keys())-1)/4)\n else:\n num_rows = int(len(data.keys())/4)\n if len(data.keys())%4 != 0:\n num_rows += 1\n return num_rows", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def get_invoices(self, limit=50, closed=False, get_all=False):\n mask = \"mask[invoiceTotalAmount, itemCount]\"\n _filter = {\n 'invoices': {\n 'createDate': {\n 'operation': 'orderBy',\n 'options': [{\n 'name': 'sort',\n 'value': ['DESC']\n }]\n },\n 'statusCode': {'operation': 'OPEN'},\n }\n }\n if closed:\n del _filter['invoices']['statusCode']\n\n return self.client.call('Account', 'getInvoices', mask=mask, filter=_filter, iter=get_all, limit=limit)", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n 
return total_items", "def invoices(self):\n if self.__invoices_manager is None:\n self.__invoices_manager = TaxRetunsManager(\"/invoices\", self._client)\n return self.__invoices_manager", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def count(self):\n return self.data_container.count", "def get_data_count(self, collection):\n # Use 'data_count' attribute when available. It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.data_count\n if hasattr(collection, \"data_count\")\n else collection.data.count()\n )", "def data_count(self):\n return(len(self.data))", "def get_num_countries():\n num_countries = np.zeros(shape=(len(annual_files), 1))\n \n for year in annual_files:\n df = get_runners_data(year)\n country_count = df['countryCode'].value_counts()\n num_countries[annual_files.index(\n year)] = len(country_count.index)\n return num_countries", "def count_indications(self) -> int:\n return self._count_model(Indication)", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def __len__(self):\n return int(self.total)", "def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()", "def get_num_records(self):\n return self.__num_records", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def get_medicines_count(visit):\r\n return visit.medicines.all().count()", "def summarize_invoice(line_items):\n sku_summary = AtlasClient.summarize_invoice_items_by_sku(line_items)\n total = 0\n for item in sku_summary:\n total += sku_summary[item]['totalPriceCents']\n\n return total", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_total_instruments(self):\n\n total = 0\n for exchange in self.exchanges:\n total += len(exchange.symbols)\n return total", "def gettingNumberVaccines(tx, query, personId):\n return tx.run(query, personId=personId).data()", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def get_data_ninstances(self):\n return self.data_ninstances", "def dataCount(self, collectionName):\n count = collectionName.find().count()\n return count", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def get_number_of_persons(self):\n self.__load_persons_from_file_into_memory()\n return super().get_number_of_persons()", "def getNumRows(self):\n return self.__rows", "def count(self):\r\n return self.data_array.size", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def employees_count(self, obj):\n return obj.employees_count()", "def get_contracts_count():\n url = 'http://www.base.gov.pt/base2/rest/contratos'\n\n response = requests.get(url, headers={'Range': 'items=0-24'})\n\n # should be \"items 0-%d/%d\", we want the second %d that represents the\n # total\n results_range = response.headers['content-range']\n _, count = results_range.split('/')\n\n return int(count)", "def __len__(self):\r\n if self.is_superset:\r\n 
length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)", "def getRowCount(self) -> int:\n ...", "def getNumberOfReviews(self):\n try:\n count = 0\n with open(self.metadata_path, \"r\", newline='') as metadata:\n mdata = csv.reader(metadata, delimiter=' ', quotechar='|')\n for review_data in mdata:\n count += 1\n return count\n except Exception:\n print(\"Cant load metadata file\")\n traceback.print_exc()", "def get_count(self, denom: CashDenomination) -> int:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n return self.__contents[denom]", "def fit(stat, invoices):\n total_entries = sum(invoice.total_entries for invoice in invoices.values())\n total_number = sum(invoice.total_number for invoice in invoices.values())\n\n return stat[\"загальна кількість записів\"] == total_entries and stat[\"сума кількостей\"] == total_number", "def getTotalIndividualCount(self):\r\n return self._n", "def get_invoice(self):\n\n # Check if unclosed invoice for the client exists\n old_inv = connection.Kinko.find_one({'cl': self.cl, 'tid': None,\n 'typ': TYPE_MAP[self.tab_type]})\n\n inv_num = None\n # If it does, update its values and update packages\n if old_inv:\n old_inv.dt = datetime.datetime.today()\n old_inv.range.lt = self.q_dict[\"cs.sd\"].get(\"$lt\", None)\n old_inv.save()\n\n inv_num = old_inv.num\n\n else:\n #kinko dict to be updated in Kinko Collection.\n kdict = {\n \"amt\": 0.0,\n \"cl\": unicode(self.cl),\n \"dt\": datetime.datetime.today(),\n \"typ\": TYPE_MAP[self.tab_type],\n \"range\": {\"lt\": self.q_dict[\"cs.sd\"].get(\"$lt\", None),\n \"gt\": self.q_dict[\"cs.sd\"].get(\"$gte\", None),\n }\n }\n\n k = Kinko(kdict)\n\n k_count = 1\n\n #the get num method of Kinko model generates the unique no for new kinko\n k[\"num\"] = self.get_knum(1)\n while connection.Kinko.collection.find({\"num\": k.num}).count() > 0:\n k[\"num\"] = self.get_knum(k_count+1)\n k_count += k_count\n\n connection.Kinko(k).save()\n\n inv_num = k['num']\n\n if inv_num:\n #after creating a new document in Kinko all packages are updated.\n connection.Package.collection.update(self.q_dict, {'$set': {'inv.num': inv_num}}, safe=True, multi=True)\n \n #Aggrigation of remitted amount for requested client\n non_invoiced = kinko_map_reduce(inv_num, TYPE_MAP[self.tab_type])\n\n if len(non_invoiced) == 0:\n return False\n else:\n inv = connection.Kinko.find_one({'num': inv_num})\n if inv:\n inv.amt = non_invoiced[0]['value']['amt']\n inv.save()\n return inv\n else:\n return False\n else:\n return False", "def getNumData(self):\n return len(self.data)", "def _compute_total_invoices_amount(self):\n payment_currency = self.currency_id or self.register_id.journal_id.currency_id or self.register_id.journal_id.company_id.currency_id or self.env.user.company_id.currency_id\n if self.invoice_id.company_currency_id != payment_currency:\n total = self.invoice_id.company_currency_id.with_context(date=self.register_id.payment_date).compute(self.invoice_id.residual_company_signed, payment_currency)\n else:\n total = self.invoice_id.residual_company_signed\n return abs(total)", "def __len__(self):\n return sum(item['cantidad'] for item in self.carro.values())", "def count(self):\n\n raise NotImplementedError", "def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())", "def get_order_detail_count(orderid): \n data = order_obj.get_order_detail(orderid,\"1\")\n return data", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n 
row_count = shp[0]\r\n return row_count", "def ListInvoices(self, **kwargs):\n return self._stub.ListInvoices(ln.ListInvoiceRequest(**kwargs))", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def pdelements_total_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(5), ctypes.c_int32(0))", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def cpp_getInRowCount(self, patchNo, rowsInPatch):\n return _patchExtractor.patchExtractor_cpp_getInRowCount(self, patchNo, rowsInPatch)", "def get_table_nb_lines(self, table):\n sql = \"SELECT COUNT(*) FROM \" + table + \";\"\n cur = self._connection.cursor()\n cur.execute(sql)\n res = cur.fetchall()\n cur.close()\n return res[0][0]", "def totallines(self):\n return self._totallines", "def customer_acccounting(customer_orders):", "def get_all_orders_count(): \n data = order_obj.get_all_orders(\"1\")\n return data", "def document_count(self):\n raise NotImplementedError", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def __len__(self):\n\n return int(self._rows)", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def size(self) -> int:\n return sum(ob.size for ob in self.objects.ravel())", "def get_inventory_count(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def ingested_record_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"ingested_record_count\")", "def numPostings(years):\n\tcount = []\n\tfor year in years:\n\t\tfilename = \"SmartEnergy\" +str(year) +\".xlsx\"\n\t\tDB = pd.read_excel(filename, sheet_name = 'Filters')\n\t\tcount.append(DB.iloc[10][1])\n\treturn count", "def office_count(self):\n return self.candidate_set.values(\"office_id\").distinct().count()", "def GetNumberOfElements(self, assoc):\n result = 0\n for dataset in self:\n result += dataset.GetNumberOfElements(assoc)\n return int(result)", "def number_of_employees(self) -> object:\n return self._number_of_employees", "def __len__(self): \r\n length = len(self.data) - 2* self.skip_window\r\n #print ('length', length)\r\n return length\r\n #raise NotImplementedError('Implement the __len__ method of the dataset')\r", "def len(self, table):\n return self.get_table_nb_lines(table)", "def test_data_source_soaps_id_dynamic_datas_count_get(self):\n pass", "def datacounts(self):\n return self._properties[\"datacounts\"]", "def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count", "def _get_as_dict_count(self):\n counter = Counter()\n for product in self.products:\n counter[product.id] += 1\n return counter", "def test_data_source_soaps_count_get(self):\n pass", "def getNrEntries(self):\n return len(self.data)", "def __len__(self):\n return sum(item[\"quantity\"] for item in self.carro.values())", "def price_count(self):\n return self.price_set.count()" ]
[ "0.7744239", "0.73818606", "0.63354856", "0.6231307", "0.62079424", "0.6154787", "0.6135494", "0.6101816", "0.606694", "0.6054052", "0.60302943", "0.5938585", "0.5850497", "0.58089167", "0.57880515", "0.56934583", "0.568253", "0.56357646", "0.55606055", "0.55459076", "0.54968005", "0.5485106", "0.5460834", "0.54239196", "0.5421888", "0.54191077", "0.5399579", "0.5378061", "0.5360827", "0.53525543", "0.5338459", "0.5330316", "0.5321197", "0.5318589", "0.5308966", "0.530786", "0.5287273", "0.5272226", "0.5268587", "0.52672607", "0.52672607", "0.52672607", "0.52672607", "0.5266757", "0.5259575", "0.5243879", "0.5241024", "0.52335805", "0.52264345", "0.5222952", "0.5210757", "0.52049506", "0.5203487", "0.5190994", "0.51863927", "0.518001", "0.5174888", "0.5173043", "0.5160422", "0.5160228", "0.5149745", "0.51456", "0.51399404", "0.51374507", "0.51307994", "0.51275206", "0.5119661", "0.5118743", "0.5116079", "0.5110573", "0.51034147", "0.51033324", "0.5101613", "0.5095857", "0.50953174", "0.50916654", "0.50865495", "0.50830024", "0.50828624", "0.50781906", "0.5077424", "0.50771844", "0.5074266", "0.5065841", "0.5055209", "0.5051975", "0.50486887", "0.5045918", "0.50433725", "0.50429696", "0.50414944", "0.50311434", "0.50276893", "0.5025031", "0.5021916", "0.5020568", "0.5020076", "0.50197095", "0.5019685", "0.50187796" ]
0.7667302
1
Returns number of customers from original dataset.
def get_customer_count(self): return self._df_invoice_original.CustomerID.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_of_sales_per_customer_ids():\n\n # your code", "def get_all_customers_count():\n data = user_obj.get_all_customers(\"1\")\n return data", "def customer_acccounting(customer_orders):", "def pdelements_num_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(4), ctypes.c_int32(0))", "def pdelements_total_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(5), ctypes.c_int32(0))", "def get_customer_orders_count(customerId):\n data = user_obj.get_customer_orders(customerId,\"1\")\n return data", "def bus_total_customers(self) -> int:\n return self.dss_obj.BUSI(4, 0)", "def get_total_trans(all_customers_data, trans_column):\n return all_customers_data.select(trans_column).distinct().count()", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n logger.info(\n f\"Successfully counted active customers {active_customer_count}\"\n )\n return active_customer_count\n except Exception as unknown_error:\n logger.error(f\"Error. Failed to count customers. {unknown_error}\")\n print(\n f'Error. Not able to count number of active customers.'\n ' {unknown_error}'\n )", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n return active_customer_count\n except Exception as unknown_error:\n print(f'Error. Not able to count number of active customers. {unknown_error}')", "def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]", "def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. 
Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active", "def bus_interruptions_total_customers(self) -> float:\n return self.dss_obj.BUSF(9, 0)", "def bottom_twenty_customers(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['customer_id']).agg({'total_payment': 'sum'})\n data_set = data_set.nsmallest(20, 'total_payment')\n return data_set", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def top_ten_customers(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['customer_id']).agg({'total_payment': 'sum'})\n data_set = data_set.nlargest(10, 'total_payment')\n return data_set", "def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n return active_customers", "def all_client_number():\n\n url = CMX_URL + '/api/location/v2/clients/count'\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n response = requests.get(url, headers=header, auth=CMX_AUTH, verify=False)\n response_json = response.json()\n clients_number = response_json['count']\n return clients_number", "def count_target_class_data(data, target_class):\n count = 0\n for row in data:\n if row[0] == target_class:\n count += 1\n\n return count", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def get_num_countries():\n num_countries = np.zeros(shape=(len(annual_files), 1))\n \n for year in annual_files:\n df = get_runners_data(year)\n country_count = df['countryCode'].value_counts()\n num_countries[annual_files.index(\n year)] = len(country_count.index)\n return num_countries", "def get_data_count(self, collection):\n # Use 'data_count' attribute when available. 
It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.data_count\n if hasattr(collection, \"data_count\")\n else collection.data.count()\n )", "def list_active_customers():\n with database.transaction():\n query = (Customer\n .select(fn.COUNT(Customer.status).alias('count'))\n .where(Customer.status == 'Active'))\n LOGGER.info(query)\n\n customer_count = [item.count for item in query]\n LOGGER.info('Number of active customers: %s', customer_count[0])\n\n return customer_count[0]", "def list_active_customers():\n count_query = (Customer\n .select(Customer, fn.COUNT(Customer.name)\n .alias('cust_count'))\n .where(Customer.status == 'active'))\n for count in count_query:\n return count.cust_count", "def carn_count(self):\n return len(self.carnivores)", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)", "def count(self):\n return self.data_container.count", "def row_count(data):\n return int(arcpy.GetCount_management(data).getOutput(0))", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def createCustomerID(self):\n\n customerID = self._df_invoice_original.CustomerID.max()\n customerID += 1\n return int(customerID)", "def count_codon(self, codon):\n return sum([1 for c in self if c == codon])", "def list_active_customers():\n customer_active = Customer.select().where(Customer.status == 'Active')\n print('{} Active Customers'.format(len(customer_active)))\n return len(customer_active)", "def dataCountBy(self, collectionName, catagory, data):\n count = collectionName.find({catagory: data}).count()\n return count", "def data_count(self):\n return(len(self.data))", "def getDataCount(self, filter: t.Mapping[t.Text, t.Any] = {}\n ) -> DatasetCount:\n aggregate_data = self.getAggregateData(\n pipeline={\"count\": {\"$sum\": 1}},\n filter=filter,\n )\n count = first(aggregate_data.data)[\"count\"]\n return DatasetCount(count=count)", "def count_codon_all(self):\n return Counter(list(self))", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def getNumberOfReviews(self):\n try:\n count = 0\n with open(self.metadata_path, \"r\", newline='') as metadata:\n mdata = csv.reader(metadata, delimiter=' ', quotechar='|')\n for review_data in mdata:\n count += 1\n return count\n except Exception:\n print(\"Cant load metadata file\")\n traceback.print_exc()", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def get_count(username):\n return get_contributor(username)[\"count\"]", "def get_all_customers():\n data = user_obj.get_all_customers()\n return data", "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = 
np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def dataCount(self, collectionName):\n count = collectionName.find().count()\n return count", "def top_five_customers(data_frame):\n data_set = data_frame.groupby(['customer_id']).agg({'quantity_rented': 'sum'})\n data_set = data_set.nlargest(5, 'quantity_rented')\n return data_set", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def getCustomersInfo(self):\n rval = None\n tries = 0\n while not rval and tries < 5:\n if tries > 0:\n time.sleep(30)\n rval = self._getService(self.ns_customer,\n self.soapCustomer % (self.ns_customer, \"\",\n \"100\", \"Advertiser\"),\n \"GetCustomersInfo\", '', '',\n self.cwsdl, self.chost)\n tries += 1\n return rval", "def reducer(self, customer, visit_counts):\n total =0\n total = sum(i for i in visit_counts)\n yield customer,total", "def get_all_customer_ids():\n\n # your code", "def test_data_when_import_customer_with_data(self):\n\n customer = self.customers[0]\n self.assertEqual(\"Jimena\", customer.get_first_name())\n self.assertEqual(\"Sanabria\", customer.get_last_name())\n self.assertEqual(\"21-08-1980\", customer.get_date_of_birth())\n self.assertEqual([\"Nueva Granada #1837\"], customer.get_addresses())\n self.assertEqual([4244270,70759942], customer.get_phones())\n self.assertEqual(\"[email protected]\", customer.get_email())\n self.assertEqual(\"Gold\", customer.get_membership())\n self.assertEqual(\"Active\", customer.get_status())", "def number_of_crew(self):\n return self._number_of_crew", "def count(self):\r\n return self.data_array.size", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def get_num_countries(self):\n return len(self.countries)", "def import_customers(input_data):\n error_count = 0\n insert_count = 0\n LOGGER.info('Starting Customer import')\n for onecust in input_data:\n try:\n Customer(onecust['user_id'], onecust['first_name'], onecust['last_name'],\n onecust['address'], onecust['phone_number'], onecust['email'])\\\n .save(full_clean=True, force_insert=True)\n insert_count += 1\n except ValidationError as valerror:\n LOGGER.exception(\"Error importing data from csv: %s \", valerror.message)\n error_count += 1\n except (OperationError, DuplicateKeyError) as operror:\n LOGGER.exception(\"Error importing data from csv: %s \", operror)\n error_count += 1\n\n return insert_count, error_count", "def getNumRows(self) -> int:\n ...", "def count():", "def test_get_customer_list(self):\n customer = self._create_customers(\"Alex\")\n customer.create()\n customer = self._create_customers(\"Sally\")\n customer.create()\n customer = self._create_customers(\"John\")\n customer.create()\n resp = self.app.get(\"/customers\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 3)", "def _compute_count(self):\n for orders in self:\n orders.count = self.env['account.move'].search_count(\n [('invoice_origin', '=', 
self.name)])", "def test_addsNewCustomersFromAPIs(self):\n DataManagerUnitTest.dm.reload()\n result = DataManagerUnitTest.dm.onlineStoreDatabase.getCustomers()\n customers = set([key['user']['email'] for key in DataManagerUnitTest.dm.getAllOrders()])\n self.assertEqual(len(customers), len(result))", "def count_records(batches: List[Batch]) -> int:\n return sum(b.current_size for b in batches)", "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def number_of_trips(filename): \r\n \r\n with open(filename, 'r') as f_in:\r\n # set up csv reader object\r\n trip_reader = csv.DictReader(f_in)\r\n \r\n # initialize count variables\r\n n_subscribers = 0\r\n n_customers = 0\r\n \r\n # tally up ride types\r\n for row in trip_reader:\r\n if row['user_type'] == 'Subscriber':\r\n n_subscribers += 1\r\n else:\r\n n_customers += 1\r\n \r\n # compute total number of rides\r\n n_total = n_subscribers + n_customers\r\n \r\n # return tallies as a tuple\r\n return(n_subscribers, n_customers, n_total)", "def N(self):\n return len(self.cavity_grid.cavities) + 1", "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "def get_seated_customers(restaurant: Restaurant):\n customers = Reservation.query.filter(Reservation.restaurant == restaurant).filter(Reservation.status == ReservationState.SEATED).all()\n \n return reduce(lambda acc,rsv: acc + rsv.seats, customers, 0)", "def num_carns(self):\n return self._num_carns", "def customers(self):\r\n return customers.Customers(self)", "def count_comentions_csv(csv_reader):\n header = next(csv_reader)\n index_of = {col: index for index, col in enumerate(header)}\n comention_counter = Counter()\n for line in csv_reader:\n body = line[index_of['body']]\n for isbn1, isbn2 in get_comentions(body):\n comention_counter[(isbn1, isbn2)] += 1\n return comention_counter", "def customers(self):\r\n return Customers(self)", "def total_demand(self) -> float:\n return self.inputs.num_customers * self.operations.m3_per_customer()", "def _get_cus_info(self):\n label_enc = LabelEncoder()\n customer_info = self._inv.drop_duplicates(['customer_code'], keep='last')\n customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',\n 'sales_cen_name', 'sales_region_name', 'province',\n 'city', 'district', 'customer_type', 'is_usable', 'channel_level']]\n customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])\n customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])\n customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])\n customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])\n customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])\n customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])\n customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])\n customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])\n customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])\n customer_info_encoded = customer_info.drop(\n columns=['customer_name', 'sales_cen_code', 'sales_cen_name',\n 'sales_region_name', 'province', 'city', 'district']\n ).set_index('customer_code')\n customer_info.set_index('customer_code', inplace=True)\n customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))\n return customer_info, 
customer_info_encoded", "def getNoOfRows(self):\n return _patchExtractor.patchExtractor_getNoOfRows(self)", "def citation_count(self):\n return self._json['coredata'].get('citation-count', '0')", "def get_num_datasets(self, data):\n dsets = set()\n for items in data:\n dsetid = items[3]\n dsets.add(dsetid)\n return len(dsets)", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def test_get_customers(self):\n get_customers_url = reverse(\"customer_list\")\n response = self.client.get(get_customers_url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # get data from db\n customers = Customer.objects.all()\n serializer = CustomerSerializer(customers, many=True)\n self.assertEqual(response.data, serializer.data)\n\n self.assertEqual(len(response.data), 4)", "def get_entity_contracts_count():\n url = 'http://www.base.gov.pt/base2/rest/contratos?adjudicatariaid=%d' \\\n '&sort(-id)' % entity.base_id\n\n response = requests.get(url, headers={'Range': 'items=0-24'})\n\n results_range = response.headers['content-range']\n _, count = results_range.split('/')\n\n return int(count)", "def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:\n customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4\n return customer_id", "def get_listCustomer_out_sample(self, customerCount=10):\n \n if customerCount is None :\n listCustomer= list(self._df_invoice_line_out_sample.CustomerID.unique())\n else:\n if customerCount <= 0 :\n listCustomer \\\n = list(self._df_invoice_line_out_sample.CustomerID.unique())\n else:\n listCustomer \\\n = list(self._df_invoice_line_out_sample.CustomerID.unique()[:customerCount])\n return listCustomer", "def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()", "def count_children(self, element_name, customers):\n count = 0\n childlist = [customer for customer in customers if customer.get('parent') == element_name]\n count = len(childlist)\n # Call the function recursively for each child in the list (Customer group childs and descendents)\n for child in childlist:\n count = count + CustomerGroupsPage.count_children(self, child.get('name'), customers)\n return count", "def getCustomer(self):\n return self.base.get(\"customer\", [])", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def list_active_customer():\n active_customer = Customer.select().where(Customer.is_active).count()\n LOGGER.info('Number of active customers retrieved.')\n return active_customer", "def get_num_records(self):\n return self.__num_records", "def get_sum_of_sales_per_customer_from_table(table):\n summed_sales_per_customer = {}\n for customer in {line[CUSTOMER_ID] for line in 
table}:\n sum_of_sales = common.szum_list([line[PRICE] for line in table if line[CUSTOMER_ID] == customer])\n summed_sales_per_customer[customer] = sum_of_sales\n return summed_sales_per_customer", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def get_country_count():\n numbers=country_populations.split('\\n')\n count_numbers= len(numbers)-1\n return count_numbers", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def getTotalIndividualCount(self):\r\n return self._n", "def get_total_dim(client_template):\n return sum(np.prod(x.shape) for x in client_template)" ]
[ "0.7528541", "0.741451", "0.7031075", "0.6915427", "0.68836576", "0.6863681", "0.6667784", "0.616603", "0.61287034", "0.6101881", "0.59332806", "0.5910718", "0.5855194", "0.58472735", "0.58410436", "0.5774803", "0.57345396", "0.57238173", "0.572342", "0.5717885", "0.56658965", "0.56646013", "0.56542027", "0.56331044", "0.55945414", "0.5580822", "0.5579558", "0.55575264", "0.55567914", "0.5548149", "0.5531757", "0.5524814", "0.5483556", "0.5476428", "0.5473485", "0.5468041", "0.5449823", "0.5448093", "0.54428476", "0.5432522", "0.5418882", "0.54042727", "0.53786004", "0.5363866", "0.53576905", "0.5340247", "0.53341746", "0.5333146", "0.53305423", "0.5326809", "0.5326809", "0.5326809", "0.5326809", "0.5320343", "0.53171074", "0.53091294", "0.5303633", "0.5302074", "0.5300916", "0.52999884", "0.52960175", "0.52945805", "0.52919424", "0.5277712", "0.5276107", "0.5271022", "0.5263932", "0.5252489", "0.52481586", "0.5246491", "0.52381486", "0.5234534", "0.5220975", "0.52105397", "0.5204587", "0.51881856", "0.51857144", "0.5181162", "0.51715326", "0.51567143", "0.5147119", "0.5145199", "0.5139283", "0.5136319", "0.51305956", "0.51303196", "0.5127429", "0.51229084", "0.51198864", "0.51190037", "0.5118167", "0.5115953", "0.51130235", "0.51099783", "0.51086617", "0.5103266", "0.51019204", "0.50885737", "0.50816697", "0.5076146" ]
0.8029719
0
Returns number of invoice lines (number of rows) from original dataset.
def get_invl_count(self): return self._df_invoice_original.index.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]", "def getNumRows(self) -> int:\n ...", "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def getNoOfRows(self):\n return _patchExtractor.patchExtractor_getNoOfRows(self)", "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def row_count(data):\n return int(arcpy.GetCount_management(data).getOutput(0))", "def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines", "def getNumRows(self):\n return self.__rows", "def get_table_nb_lines(self, table):\n sql = \"SELECT COUNT(*) FROM \" + table + \";\"\n cur = self._connection.cursor()\n cur.execute(sql)\n res = cur.fetchall()\n cur.close()\n return res[0][0]", "def get_line_count(blob):\n return len(blob.split('\\n'))", "def getRowCount(self) -> int:\n ...", "def n_lines(self):\n try: \n return self._n_lines\n except AttributeError:\n self._n_lines = len(self.lines())\n return self._n_lines", "def len(self, table):\n return self.get_table_nb_lines(table)", "def get_num_rows(self, data, omit_metric=False):\n if omit_metric:\n num_rows = int((len(data.keys())-1)/4)\n else:\n num_rows = int(len(data.keys())/4)\n if len(data.keys())%4 != 0:\n num_rows += 1\n return num_rows", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())", "def getNbRows(self):\n return self.data.shape[1]", "def num_lines(self, snapshot: Bug, filepath: str) -> int:\n return len(self._line_offsets(snapshot, filepath))", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def cpp_getInRowCount(self, patchNo, rowsInPatch):\n return _patchExtractor.patchExtractor_cpp_getInRowCount(self, patchNo, rowsInPatch)", "def get_row_number(self):\n return int(len(self.data_items)/12)", "def get_rows(self) -> int:\r\n return 1 + self.display.get_rows() + 1", "def NumberOfRows(self):\n return _table.DSTable_NumberOfRows(self)", "def data_len(self):\n Nrows_data = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] != self.header_char) and (l != \"\\n\"):\n Nrows_data += 1\n return Nrows_data", "def _get_line_no_(obj, line):\n \n iNo = 0\n for item in obj.order_line:\n iNo += 1\n if (item.id == line.id):\n break\n \n return iNo", "def countLines(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_lines = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_lines += 1\r\n\r\n return counter_lines", "def linecounter(x):\n return linecount(x) + longlines(x)", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def linecount(x):\n return sum(1 for char in x if char == \"\\n\")", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as 
f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count", "def get_nrows(self):\n return self.nrows", "def get_nrows(self):\n return self.nrows", "def _get_num_lines_from_csv(self, filename):\n\n _file = open(get_full_path(filename))\n _reader = csv.reader(_file)\n\n return len(list(_reader))", "def num_rows(self) -> str:\n return pulumi.get(self, \"num_rows\")", "def n_rows(self) -> int:\n\n return len(self.plaincolumns[0].values)", "def count_lines(file_uri):\n\n with open(file_uri) as file_obj:\n for i, line in enumerate(file_obj):\n pass\n num_lines = i + 1\n return num_lines", "def get_linecount(self):\n self._update_linetab(len(self.input))\n lcount = len(self.__linepos)\n return lcount - (self.input.endswith('\\n'))", "def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1", "def nrows(self):\n return len(self.__data)", "def number_of_rows(self):\n if self.number_of_columns():\n return self._number_of_rows\n else:\n return 0", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def _chunklines(self):\r\n text = self.textwnd.toPlainText()\r\n lines_in_chunk = len(text.split(\"\\n\"))\r\n logger.debug(\"Lines in chunk: {}\".format(lines_in_chunk))\r\n return lines_in_chunk", "def _get_line_number(vcf):\n with open(vcf) as vcf_input_file:\n i = -1\n for line in vcf_input_file:\n i += 1\n return i", "def rows_count(self):\n return len(self.value)", "def __len__(self):\n\n return int(self._rows)", "def no_of_lines():\n number_of_lines = len(open(FILE_NAME).readlines())\n return number_of_lines", "def calculate_line_number(text):\n return len([line for line in text.split(\"\\n\") if line.strip() != \"\"])", "def n(self):\n return nrow(self._array)", "def row_count(self) -> int:\n return len(self.rows)", "def row_count(self) -> int:\n return len(self.rows)", "def _calc_nrows(self, len_data, target_pop):\n return target_pop - len_data", "def read_num_lines(data_socket):\r\n size_bytes = b''\r\n for i in range(0, 4):\r\n size_bytes += next_byte(data_socket)\r\n return int.from_bytes(size_bytes, 'big')", "def num_rows(self):\n return (len(self.rows))", "def number_of_lines(filename=\"\"):\n with open(filename, encoding='UTF8') as a_file:\n\n lineNum = 0\n\n for eachLine in a_file:\n lineNum += 1\n return lineNum", "def GetNumRows(self):\n return _hypre.HypreParMatrix_GetNumRows(self)", "def row_count(self):\n return self.well_count // self.col_count", "def get_nrows(self):\n return self._nrows", "def num_rows(self):\n return len(self.rows)", "def get_num_records(self):\n return self.__num_records", "def rowCount(self, parent):\r\n return len(self.arraydata)", "def get_number_rows(si_settings, ship_height, alien_height):\r\n available_space_y = (si_settings.screen_height - alien_height - ship_height)/2\r\n number_rows = int(available_space_y / (2 * alien_height))\r\n return number_rows", "def num_lines(file_name):\n with open(file_name) as file:\n for i, line in enumerate(file):\n pass\n return i + 1", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def data_count(self):\n return(len(self.data))", "def num_rows(self):\n return len(self[0])", "def _calc_nrows(self, len_data, growth_rate):\n return int(round(len_data * growth_rate))", "def __len__(self):\n return self._nrows", "def get_line_nr(view, point):\n return view.rowcol(point)[0] + 1", "def lineNumber(self):\n if self.__lineNumber is None:\n self.__lineNumber = 
self.__source.count(\"\\n\", 0, self.__offset) + 1\n\n return self.__lineNumber", "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def row_counter(self) -> int:\n return self.writer.row_counter", "def num_cells_for_rows(self, rows):\r\n return (rows * rows + rows) // 2", "def totallines(self):\n return self._totallines", "def count_lines(filename):\r\n with open(filename, 'rb') as f:\r\n return sum(1 for line in f)", "def nrows(self):\n return self.__nrows", "def rowCount(self, index):\n return len(self._data)", "def get_num_rows(self):\n return self._num_rows", "def row_count(self):\n return self.__row_count", "def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)", "def nrows(self):\n if self.ncolumns() == 0:\n return 0\n nrows = self.table_column(0).nrows()\n for i in range(1, self.ncolumns()):\n nrows = min(self.table_column(i).nrows(), nrows)\n return nrows", "def rowcount(self):\n return self._count", "def number_of_lines(filename=\"\"):\n\n number_lines = 0\n with open(filename) as file_opened:\n for line in file_opened:\n number_lines += 1\n return number_lines", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def count_lines(filename):\n with open(filename, 'rb') as f:\n return sum(1 for line in f)", "def get_rows_num(file_name):\n # By default add the first datetime\n row_num = 1\n with open(file_name, \"r\") as file:\n for line in file:\n if line == \"\\n\":\n break\n else:\n row_num += 1\n\n return row_num", "def count_lines(stream):\n return len(stream.readlines())", "def rowcount(self):\n\t\treturn self._count", "def num_rows(self):\n if self._is_vertex_frame():\n return self.__graph__.summary()['num_vertices']\n elif self._is_edge_frame():\n return self.__graph__.summary()['num_edges']", "def total_rows(self):\n self._fetch_if_needed()\n # reduce case, count number of lines\n if self._total_rows is None:\n return self.count()\n return self._total_rows", "def checkEachLineCount(mat):\n n = sum(mat[0])\n \n assert all(sum(line) == n for line in mat[1:]), \"Line count != %d (n value).\" % n\n return n", "def number_of_lines(filename=\"\"):\n with open(filename, encoding='utf-8') as myFile:\n return sum([1 for line in myFile])", "def get_roi_line_len(self):\n return len(self.line_list)", "def getNumOfInvoice(self,id,start,finish):\n self.calls += 1\n invoice = self.getResponse(self.buildParams(id,start,finish))\n if not self.isNumeric(invoice):\n middle = self.diveDates(start,finish)\n plusMiddle = middle + timedelta(days = 1)\n middle = self.removeHours(middle)\n plusMiddle = self.removeHours(plusMiddle)\n invoice = self.getNumOfInvoice(id,start,middle)+\\\n self.getNumOfInvoice(id,plusMiddle,finish)\n return invoice", "def checkEachLineCount(mat):\n n = sum(mat[0])\n\n assert all(sum(line) == n for line in mat[1:]), \"Line count != %d (n value).\" % n\n return n", "def num_lineages(self, t):\n return self._ll_tree.get_num_lineages(t)", "def leafNumberOfRows(self):\r\n\r\n shape = self.data_source.shape\r\n if shape == None:\r\n # Node is not a Leaf or there was problems getting the shape\r\n nrows = 0\r\n elif shape == ():\r\n # Node is a rank 0 array (e.g. 
numpy.array(5))\r\n nrows = 1\r\n elif isinstance(self.data_source, tables.EArray):\r\n # Warning: the number of rows of an EArray, ea, can be different\r\n # from the number of rows of the numpy array ea.read()\r\n nrows = self.data_source.shape[0]\r\n else:\r\n nrows = self.data_source.nrows\r\n\r\n return numpy.array(nrows, dtype=numpy.int64)", "def row_count(self):\n return self._row_count", "def nrows(filename):\n with fopen(filename, 'rb') as fdsc:\n bufgen = takewhile(lambda x: x, (fdsc.read(1024*1024) for _ in repeat(None)))\n return sum([buf.count(b'\\n') for buf in bufgen])", "def rows(self):\n return self.prov[PROV_ROWS]", "def getNumData(self):\n return len(self.data)" ]
[ "0.7554764", "0.69679993", "0.69573605", "0.6792377", "0.6672587", "0.6513209", "0.649585", "0.6461261", "0.63839567", "0.63666666", "0.62937206", "0.62934756", "0.62633395", "0.6255323", "0.62531394", "0.6229491", "0.62243587", "0.62149376", "0.6199522", "0.61816734", "0.6143403", "0.6119764", "0.6068898", "0.6065976", "0.60635984", "0.6019333", "0.60095745", "0.6004078", "0.598484", "0.59634006", "0.5942197", "0.5942197", "0.59370714", "0.59357464", "0.59351766", "0.59282154", "0.5921665", "0.5913103", "0.59015846", "0.5879758", "0.5877579", "0.58711284", "0.586797", "0.5866093", "0.58623797", "0.58475757", "0.5846488", "0.58416605", "0.5833643", "0.5833643", "0.5832923", "0.5809249", "0.5798335", "0.5795151", "0.57939434", "0.57912433", "0.57664037", "0.57591206", "0.5758961", "0.5754973", "0.57529634", "0.5747646", "0.5746591", "0.573754", "0.57304007", "0.57299453", "0.5726847", "0.5724813", "0.57241255", "0.5713855", "0.5695984", "0.5692786", "0.56895894", "0.567945", "0.56770504", "0.56734014", "0.5668757", "0.5665486", "0.5662684", "0.56584096", "0.5647418", "0.5640751", "0.56395733", "0.5638604", "0.5634486", "0.5631732", "0.5626726", "0.56230414", "0.5622943", "0.56229043", "0.5619804", "0.56185436", "0.56090724", "0.5607252", "0.5607041", "0.55957663", "0.5593497", "0.55890036", "0.55882055", "0.55865383" ]
0.7293856
1
Returns a json structure built from given parameters. {
def json_all_builder(self, customer_count, invoice_count, invl_count ): json_result = '{\n' json_result += '\t "_results":[\n' json_result += '\t\t{ "customer_count": "' + str(customer_count) json_result += ', "invoice_count": "' + str(invoice_count) json_result += ', "invl_count": "' + str(invl_count) json_result += '}\n' json_result += '\n\t]\n}' return json_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_json_string(self, **kwargs):\n ...", "def format_data(self, params):\n return json.dumps(params)", "def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)", "def json_friendly(self):", "def json(self):\n return self.kwargs", "def json(self):\n class ExtendedJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):\n encoded_object = obj.isoformat()\n else:\n encoded_object = json.JSONEncoder.default(self, obj)\n return encoded_object\n\n obj = {\n 'operation': self.operation,\n 'version': self.version,\n 'language': self.language,\n 'identifiers': self.identifiers,\n 'store_execute': self.store_execute,\n 'status': self.status,\n 'lineage': self.lineage,\n 'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),\n 'outputs': self.outputs,\n 'raw': self.raw\n }\n\n return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)", "def to_json(self) -> dict:\n return {\n 'script': base64.b64encode(self.script).decode('utf-8'),\n 'parameters': list(map(lambda index: {'name': self.parameter_names[index],\n 'type': self.parameter_list[index].PascalCase()\n },\n range(len(self.parameter_list)))),\n 'deployed': self.deployed\n }", "def format(self, *args):\n\t\tweb.header('Content-Type', 'application/json; charset=utf-8')\n\t\treturn json.dumps(self.content)", "def create_json(sensor):\n json_object = {'building': sensor.get('building'),\n 'name': sensor.get('name'),\n 'tags': sensor.get('tags'),\n 'metadata': sensor.get('metadata'),\n 'source_identifier': sensor.get('source_identifier'),\n 'source_name': sensor.get('source_name')\n }\n return json_object", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}", "def get_json(self):\n json_item = {\"id: \": self.id,\n \"question: \": self.question,\n \"documents: \": self.documents,\n \"document_ids: \": self.document_ids,\n \"gold answers: \": self.gold}\n return json_item", "def GetJSON(self):\n return json.dumps(self.GetDict())", "def get_json(self):\n return {\n \"power\": self.get_power(), \n \"timestamp\": self.get_timestamp(), \n \"shortage\": self.get_shortage()\n }", "def json_api():\n if 'category' in request.args:\n sqlsession = SQLSESSION()\n category = sqlsession.query(Category)\\\n .filter_by(name=request.args['category']).first()\n items = sqlsession.query(Item).filter_by(category_id=category.id)\\\n .all()\n return json.dumps({'category_id': category.id,\n 'category_name': category.name,\n 'items': [item.serialize() for item in items]})\n elif 'item' in request.args:\n sqlsession = SQLSESSION()\n items = sqlsession.query(Item).filter_by(name=request.args['item'])\\\n .all()\n return json.dumps([item.serialize() for item in items])\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n items = 
sqlsession.query(Item).all()\n return json.dumps(\n {'categories': [cat.serialize() for cat in categories],\n 'items': [item.serialize() for item in items]})", "def bridge_create_json():\n return {\n \"base_stations\": {\n \"id\": 98765,\n \"name\": \"New Bridge\",\n \"mode\": \"home\",\n \"hardware_id\": \"0x1234567890abcdef\",\n \"hardware_revision\": 4,\n \"firmware_version\": {\n \"wifi\": \"0.121.0\",\n \"wifi_app\": \"3.3.0\",\n \"silabs\": \"1.0.1\",\n },\n \"missing_at\": None,\n \"created_at\": \"2019-04-30T01:43:50.497Z\",\n \"updated_at\": \"2019-04-30T01:44:43.749Z\",\n \"system_id\": 12345,\n \"firmware\": {\"wifi\": \"0.121.0\", \"wifi_app\": \"3.3.0\", \"silabs\": \"1.0.1\"},\n \"links\": {\"system\": 12345},\n }\n }", "def json(self):\n return {\n \"qualified_name\": self.qualified_name,\n \"description\": self.description,\n \"data\": self.data,\n }", "def _json_spec(request):\n return make_param_spec(JSON_PARAMETER_SKELETON, request.param)", "def getJSON(self):\n text = super().getJSON() + f', \"exchange\": \"{self.__exchange}\"'\n text += f', \"market pair\": \"{self.__market_pairs}\"'\n text += f', \"interval\": \"{self.__interval}\"}}'\n return text", "def as_json(self):", "def getJSONfromUI(self):\n newParams = JSONObject()\n try:\n if not self.name.getText().isEmpty():\n newParams.put(\"name\", self.name.getText())\n newParams.put(\"strategy\", self.strategy.getSelectedItem().__str__())\n newParams.put(\"metagameStrategy\", self.metagameStrategy.getSelectedItem().__str__())\n newParams.put(\"stateMachine\", self.stateMachine.getSelectedItem().__str__())\n newParams.put(\"cacheStateMachine\", self.cacheStateMachine.isSelected())\n newParams.put(\"maxPlys\", self.maxPlys.getModel().getValue())\n newParams.put(\"heuristicFocus\", self.heuristicFocus.getModel().getValue())\n newParams.put(\"heuristicMobility\", self.heuristicMobility.getModel().getValue())\n newParams.put(\"heuristicOpponentFocus\", self.heuristicOpponentFocus.getModel().getValue())\n newParams.put(\"heuristicOpponentMobility\", self.heuristicOpponentMobility.getModel().getValue())\n newParams.put(\"mcDecayRate\", self.mcDecayRate.getModel().getValue())\n except JSONException as je:\n je.printStackTrace()\n return newParams", "def json(self):\n return {\n 'id': self.id,\n 'name': self.name\n }", "def data_json(request):\n json_data = []\n for resource in Resource.objects.all():\n record = {} \n record['title'] = resource.name\n record['description'] = resource.description\n record['keyword'] = resource.csw_keywords.split(',')\n record['modified'] = resource.last_updated\n record['publisher'] = resource.organization\n record['contactPoint'] = resource.metadata_contact\n record['mbox'] = resource.contact_email\n record['identifier'] = resource.csw_identifier\n if resource.is_published:\n record['accessLevel'] = 'public'\n else:\n record['accessLevel'] = 'non-public'\n\n json_data.append(record)\n\n return HttpResponse(json.dumps(json_data), 'application/json')", "def json(self) -> dict:\n return {\n 'id': self.id,\n 'requestType': self.request_type.name,\n 'isProcessed': self.is_processed,\n 'serviceName': self.service_name.name,\n 'isAdmin': self.is_admin,\n 'creationDate': LegislationDatetime.as_legislation_timezone(self.creation_date).isoformat()\n }", "def construct_json(self):\n\n if 'message' not in self.data:\n self.data['message'] = self.message\n\n if self.status == 200:\n self.data['status'] = 'OK'\n else:\n self.data['status'] = 'Not OK'\n\n return json.dumps(self.data)", "def json(self):\n 
raise NotImplementedError(\"JSON not specified!\")", "def generate(self, sorted=False):\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result", "def get_params(self):\n return {}", "def create_gen_json(self, out_file):\n\n params = self.create_package_dict()\n with open(out_file, 'w') as fp:\n json.dump(params, fp)", "def _build_data_string(self, params):\n return \"{}={}\".format(self.KLAVIYO_DATA_VARIABLE, quote(json.dumps(params)))", "def generate_hyperoptimize_json(parameters: Dict[str, List[Any]], path_json: str):\n if not path_json.endswith(\".json\"):\n path_json += \".json\"\n with open(path_json, \"w\") as file:\n json.dump(parameters, file, indent=2)\n\n print(\"\\n======= JSON string of hyperparameters =======\\n\")\n json_formatted_str = json.dumps(parameters, indent=2)\n print(json_formatted_str)\n print(\"\\n======= End JSON string =======\\n\")\n print(f\"Saved hyperoptimize json file at: {path_json}\")", "def dict(self):\n\t\treturn self.json", "def serialize(self) -> dict:\n return {\n \"parameters\": self.parameters,\n \"results\": self.results,\n }", "def json_market_builder(self, customerID, marketID) :\n json_result = '{\\n'\n json_result += '\\t \"_results\":[\\n'\n json_result += '\\t\\t{ \"customerID\": \"' + str(customerID)\n json_result += ', \"marketID\": \"' + str(marketID)\n json_result += '}\\n'\n json_result += '\\n\\t]\\n}'\n return json_result", "def toJson(p):\n data = {p.name: {'Memory': p.memory, 'Camera': p.camera, 'Battery': p.battery, 'Ram': p.ram, 'Price': p.price,\n 'Image url': p.image}}\n return data", "def jsonizable_object(self):\n obj = {\n 'title': self.title,\n 'url': self.url,\n 'abstract': self.abstract\n }\n if self.metadata:\n obj['metadata'] = self.metadata\n return obj", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json", "def to_init_json(self) -> JSON:\n pass", "def params_helper(self,**kwargs):\n\n dic = {'output' : 'json, xml, kml',\n 'maxresults' : 'limit on max number of results returned ; Default is limited to 100',\n 'countrycode' : 'GB, US etc ISO Country Code ==> Only 2 caracters !',\n 'latitude' : 'latitude reference for distance calculation',\n 'distance' : 'return results based on specified distance from specified latitude/longitude',\n 'distanceunit' : 'Miles or km',\n 'operatorid' : 'exact match on a given EVSE operator id (comma separated list)',\n 'connectiontypeid' : ' exact match on a given connection type id (comma separated list)',\n 'countryid' : 'exact match on a given country id (comma separated list)',\n 'levelid' : 'exact match on a given charging level (1-3) id (comma separated list)',\n 'minpowerkw' : 'minimum output power in kW (this information is not known for many locations)',\n 'usagetypeid' : 'exact match on a given usage type id (comma separated list) ',\n 'statustypeid' : ' exact match on a given status type id (comma separated list)',\n 'dataproviderid ' : 'exact match on a given data provider id id (comma separated list). Use opendata=true for only OCM provided (\"Open\") data.',\n 'modifiedsince' : 'POIs modified since the given date (UTC) e.g. 
2016-09-15T09:30',\n 'opendata' : ' true or false. Set to true to include only Open Data licensed content, false to return only non-open licensed data. By default all available data is returned.',\n 'includecomments' : ' true or false. Set to true to also include user comments and media items (photos) per charging location. Default = false.',\n 'verbose ' : ' true or false. Set to false to get a smaller result set with null items removed. Default = true.',\n 'compact ' : 'true or false. Set to true to remove reference data objects from output (just returns IDs for common reference data such as DataProvider etc). Default = false.',\n 'camelcase' : 'true or false. Set to true to get a property names in camelCase format. Default = false',\n 'callback' : 'specify the name of the JSONP callback (if required), JSON response type only.'\n }\n\n if len(kwargs)==0 :\n\n for key in dic.keys() :\n print(key)\n\n else :\n \n for k in kwargs: \n print(dic.get(k))", "def data(self, **kw):\n return dict(params=kw)", "def data(self, **kw):\n return dict(params=kw)", "def json(self):\n return {\n 'id': self.id,\n 'id_bank_data': self.id_bank_data,\n 'national_id_document': self.national_id_document,\n 'country': self.country,\n 'name': self.name,\n 'surname': self.surname,\n 'mail': self.mail,\n 'google_token': self.google_token,\n 'role': self.role\n }", "def json(self):\n\n json = {}\n json['type'] = self.type\n json['value'] = self.value\n json['status'] = self.status\n json['tags'] = list(set(self.tags))\n json['relationships'] = list(set(self.relationships))\n json['whitelisted'] = self.whitelisted\n json['path'] = self.path\n\n return json", "def get_json(self):\n return {'name': self.name, \n 'path': self.path, \n 'enabled': self.enabled}", "def gen_python_api(json_data, model_name,model_version,endpoint=\"http://127.0.0.1:8400\"):\n\n code_template = \"\"\"#!/usr/bin/env python\n\n import requests\n\n def main():\n #endpoint = \"http://127.0.0.1:8000\"\n endpoint = {{endpoint}}\n param={\"model_name\": \"{{ model_name }}\", \"model_version\": \"{{ model_version }}\"}\n json_data = {{json_data}}\n result = requests.post(endpoint, param=param,json=json_data)\n print(result.text)\n\n if __name__ == \"__main__\":\n main()\n \"\"\"\n\n generated_tensor_data_string = json.dumps(json_data)\n template = Template(code_template)\n generate_code = template.render(\n model_name=model_name, model_version=model_version,json_data=generated_tensor_data_string,endpoint=endpoint)\n logging.debug(\"Generate the code in Python:\\n{}\".format(generate_code))\n return generate_code", "def __json__(self) -> dict[Any, Any]:\n return self.dict(\n include={\n **{k: ... 
for k in self.dict().keys() if k != \"input\"},\n \"input\": {\n \"dataset\": {\"id\"},\n \"asset\": {\"id\"},\n },\n },\n exclude={\n \"steps\": {\"__all__\": {\"id\"}},\n },\n )", "def __json_encode__(self) -> Dict[str, Any]:\n return {\"figure\": self.figure, \"name\": self.name, \"metadata\": self.metadata}", "def build(self, data: dict):", "def generate_json_query(self):\n if not self.properties:\n print(\"ERROR: no properties given to generate JSON query.\")\n raise ValueError\n\n if self.data_type == DataType.ENTRY:\n q_str = \"entry_ids\"\n elif \"entit\" in self.data_type.value:\n if \"instance\" in self.data_type.value:\n q_str = \"instance_ids\"\n else:\n q_str = \"entity_ids\"\n elif self.data_type == DataType.ASSEMBLY:\n q_str = \"assembly_ids\"\n elif self.data_type == DataType.CHEMICAL_COMPONENT:\n q_str = \"comp_ids\"\n\n data_str = f\"{self.data_type.value}({q_str}: [\" + \",\".join(f\"\\\"{w}\\\"\" for w in self.id) + \"])\"\n\n props_string = \"\"\n for key, val in self.properties.items():\n if len(val) == 0:\n props_string += f\"{key},\"\n else:\n props_string += f\"{key} {{\" + \",\".join(val) + \"}\"\n\n self.json_query = {'query': \"{\" + data_str + \"{\" + props_string + \"}}\"}", "def json(self) -> Dict[str, Union[List, Dict, str, int, float]]:", "def to_api_repr(self):\n value = self.value\n converter = _SCALAR_VALUE_TO_JSON_PARAM.get(self.type_)\n if converter is not None:\n value = converter(value)\n resource = {\n \"parameterType\": {\"type\": self.type_},\n \"parameterValue\": {\"value\": value},\n }\n if self.name is not None:\n resource[\"name\"] = self.name\n return resource", "def json(self) -> Dict[str, Any]:\n return {\n \"product_id\": self.product_id,\n \"detection_index\": self.detection_index,\n \"product_name\": self.class_name,\n \"confidence\": self.conf,\n \"bounding_box\": [int(coord)\n for coord in self.scale_coordinates.round()],\n \"top_k_product_names\": self.top_k_names,\n \"top_k_confidences\": self.top_k_confidences,\n \"top_k_product_ids\": self.top_k_product_ids,\n \"top_k_detection_indices\": self.top_k_indices\n }", "def render_dictionary(self): \n asset_json = {\n 'name': self.name,\n 'product_name': self.product_name,\n 'product_vendor': self.product_vendor,\n 'configuration': self.configuration,\n 'description': self.description,\n 'primary_users': self.primary_users,\n 'primary_voting': self.primary_voting,\n 'secondary_users': self.secondary_users,\n 'secondary_voting': self.secondary_voting,\n 'tags': self.tags,\n 'type': self.asset_type,\n 'action_whitelist': self.action_whitelist\n }\n\n if self.ingest_container_label:\n asset_json['ingest'] = {\n 'container_label': self.ingest_container_label,\n 'interval_mins': self.ingest_interval_mins,\n 'poll': self.ingest_poll,\n 'start_time_epoch_utc': self.ingest_start_time\n }\n\n return asset_json", "def parameters(self):\n return {}", "def gen_output(json_dct, *args):\n keys_to_add = ('job_title', 'location', 'date', 'company', 'num_stars')\n for arg, key in zip(args, keys_to_add): \n if arg: \n json_dct[key] = arg\n\n return json_dct", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def AssembleProjectParametersJson(KratosWindowManager,root_path):\n\n ##Create the dict.\n projectparameters_dict = {}\n\n projectparameters_dict[\"problem_data\"] = pp.problem_data_dict\n if KratosWindowManager.is2D:\n 
projectparameters_dict[\"problem_data\"][\"domain_size\"]=2\n \n for key in KratosWindowManager.SSsave:\n pp.solver_settings[key] = KratosWindowManager.SSsave[key] \n\n projectparameters_dict[\"solver_settings\"] = pp.solver_settings\n \n projectparameters_dict[\"solver_settings\"][\"model_import_settings\"][\"input_filename\"]=root_path[\"mdpa_name\"]\n\n\n\n\n\n projectparameters_dict[\"output_configuration\"] = pp.output_configuration_value\n\n projectparameters_dict[\"list_other_processes\"] = []\n projectparameters_dict[\"contact_process_list\"] = []\n\n\n projectparameters_dict[\"loads_process_list\"]=[]\n projectparameters_dict[\"constraints_process_list\"]=[]\n for boundarycondition in KratosWindowManager.boundaryConditionEditor:\n if boundarycondition.load_process_list:\n projectparameters_dict[\"solver_settings\"][\"processes_sub_model_part_list\"][1]=boundarycondition.name\n projectparameters_dict[\"loads_process_list\"].append(boundarycondition.load_process_list)\n \n if boundarycondition.constrain_process_list:\n projectparameters_dict[\"solver_settings\"][\"processes_sub_model_part_list\"][0]=boundarycondition.name\n projectparameters_dict[\"constraints_process_list\"].append(boundarycondition.constrain_process_list)\n if boundarycondition.entityType=='Element':## if element, it is the domain and get its name\n projectparameters_dict[\"solver_settings\"][\"problem_domain_sub_model_part_list\"][0]=boundarycondition.name\n if(DEBUG): \n print(projectparameters_dict)\n return pp.WriteProjectToJson(projectparameters_dict)", "def create() -> TJsonResponse:\n if request.headers['Content-Type'] == 'application/json':\n url = request.json.get('url')\n else:\n url = request.form.get('url')\n if not url:\n return jsonify(error='bad request'), 400\n result = scrape.scrape_meta_for_url(url)\n inserted_id, tags = result.get()\n url_hash = encode(inserted_id)\n response_body: Dict[str, Any] = jsonify(hash=url_hash, short_url=f'https://fanlens.io/@{url_hash}', tags=tags)\n return response_body", "def get_json(box=None, \n out = 'dict', \n language = 'en'):\n \n table_url = box.children[3].value\n variables = get_variables(full_url = table_url)\n nvars = len(box.children[2].children)\n var_list = list(range(nvars))\n query_element = {}\n \n # create a dict of strings, one for each variable that specifies \n # the json-stat that selects the variables/values\n \n for x in var_list:\n value_list = str(list(box.children[2].children[x].value))\n query_element[x] = '{{\"code\": \"{code}\", \"selection\": {{\"filter\": \"item\", \"values\": {values} }}}}'.format(\n code = variables[x]['code'], \n values = value_list)\n query_element[x] = query_element[x].replace(\"\\'\", '\"')\n \n all_elements = str(list(query_element.values()))\n all_elements = all_elements.replace(\"\\'\", \"\")\n\n query = '{{\"query\": {all_elements} , \"response\": {{\"format\": \"json-stat\" }}}}'.format(all_elements = all_elements)\n \n if out == 'dict':\n query = ast.literal_eval(query)\n \n \n # todo: build it as a dictionary to start with (and not a string that is made into a dict as now)\n # todo: add error message if required variables are not selected\n # todo: avoid repeat downloading of same information \n # eg. 
get_variables is sometimes used three times before a table is downloaded\n \n return query", "def json(self, **kwargs):\n return json.loads(self.content, **kwargs)", "def json(data):\n if isinstance(data, dict):\n data = ujson.encode(data)\n uid = str(uuid.uuid4())\n display(HTML('<div id=\"{0}\" style=\"height: 600px; width:100%;\"></div>'.format(uid)))\n display(Javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (uid, data)))", "def _build_payload(self, query):\n\n def _normalize_payload(payload):\n if isinstance(payload, ClientObject) or isinstance(payload, ClientValue):\n return payload.to_json(self._default_json_format)\n elif isinstance(payload, dict):\n return {k: _normalize_payload(v) for k, v in payload.items() if v is not None}\n elif isinstance(payload, list):\n return [_normalize_payload(item) for item in payload]\n return payload\n\n json = _normalize_payload(query.parameters_type)\n if isinstance(query, ServiceOperationQuery) and query.parameters_name is not None:\n json = {query.parameters_name: json}\n return json", "def __json_init__(cls, **kwargs):\n return cls(**kwargs)", "def to_json(self):\n params = {\n 'id': self.id,\n 'team_1_id': self.team_1_id,\n 'team_2_id': self.team_2_id,\n 'table': self.table,\n 'score_1': self.score_1,\n 'score_2': self.score_2,\n 'start_date': self.start_date.isoformat() if self.start_date else None,\n 'end_date': self.end_date.isoformat() if self.end_date else None,\n 'version': self.version,\n 'created_on': self.created_on.isoformat() if self.created_on else None\n }\n\n return params", "def create_custom():\n # Extract initialisation parameters\n alpha = request.args.get('alpha')\n alpha = float(alpha)\n generations = request.args.get('generations')\n generations = int(generations)\n beta = request.args.get('beta')\n beta = float(beta)\n pec = request.args.get('pec')\n pec = float(pec)\n q = request.args.get('q')\n q = float(q)\n\n # Extract the custom coordinates and create a list of nodes\n coords = request.args.get('custom_coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n\n # Initialise instance\n i = Instance(nodes, alpha, beta, pec, q)\n\n return jsonify(nodes=i.nodes, alpha=i.alpha, beta=i.beta, decay=i.decay,\n min_pheromone=i.min_pheromone, q=i.q,\n local_deposit=i.local_deposit, distances=i.distances,\n pheromones=i.pheromones, ants=i.ants, shortest_path=i.shortest_path,\n min_distance=i.min_distance, message=\"Instance Initialised\")", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(), *args, **kwargs)", "def json(self):\n return {\n '_id' : self._id,\n 'name' : self.name,\n 'description' : self.description,\n }", "def create_json(self, request, qs):\n\n j = Work.objects.get_dict(qs)\n\n response = JsonResponse(j, json_dumps_params={'indent': 4})\n name = '{}{}'.format(\n settings.PUBLISHER_CODE, datetime.now().toordinal())\n cd = 'attachment; filename=\"{}.json\"'.format(name)\n response['Content-Disposition'] = cd\n return response", "def format(cls):\n return \"json\"", "def serialize(self):\n return{\n 'name':self.name,\n 'id' :self.id,\n }", "def to_json(self):\n return {'name': self.__class__.__name__, 'args': self.args}", "def to_json(self) -> Dict:\n return {\"function\": self.function.__name__, \"kwargs\": self.kwargs_to_json()}", "def json(self) -> typing.Dict[str, typing.Any]:\n if self.getboolean(\"JSON\", \"compact\", fallback=False):\n kwargs = 
{\"separators\": (\",\", \":\")}\n else:\n kwargs = {\"indent\": 4}\n kwargs[\"ensure_ascii\"] = self.getboolean(\"JSON\", \"ensure ascii\", fallback=True)\n kwargs[\"sort_keys\"] = self.getboolean(\"JSON\", \"sort keys\", fallback=False)\n return kwargs", "def _to_json(self):\n const_list = [\n {'key': key, 'value': value}\n for key, value in self.constants.items()\n ]\n constr_list = [\n {'key': key, 'value': value}\n for key, value in self.constraints.items()\n ]\n\n json_task = {\n 'name': self._name,\n 'profile': self._profile,\n 'pooluuid': self._pooluuid,\n 'constants': const_list,\n 'constraints': constr_list\n }\n\n if self._shortname is not None:\n json_task['shortname'] = self._shortname\n\n alldisk = all(isinstance(x, Disk) for x in self._resource_objects)\n allbucket = all(isinstance(x, Bucket) for x in self._resource_objects)\n\n if alldisk or allbucket:\n self._resource_objects_ids = [x.uuid for x in self._resource_objects]\n else:\n raise ValueError(\"Can't mix Buckets and Disks as resources\")\n if allbucket:\n self._resource_type = Bucket\n json_task['resourceBuckets'] = self._resource_objects_ids\n if alldisk:\n self._resource_type = Disk\n json_task['resourceDisks'] = self._resource_objects_ids\n\n if self._result_object is not None:\n self._result_type = type(self._result_object)\n if isinstance(self._result_object, Bucket):\n json_task['resultBucket'] = self._result_object.uuid\n elif isinstance(self._result_object, Disk):\n json_task['resultDisk'] = self._result_object.uuid\n\n if self._advanced_range is not None:\n json_task['advancedRanges'] = self._advanced_range\n else:\n json_task['instanceCount'] = self._instancecount\n\n json_task[\"tags\"] = self._tags\n\n if self._snapshot_whitelist is not None:\n json_task['snapshotWhitelist'] = self._snapshot_whitelist\n if self._snapshot_blacklist is not None:\n json_task['snapshotBlacklist'] = self._snapshot_blacklist\n if self._results_whitelist is not None:\n json_task['resultsWhitelist'] = self._results_whitelist\n if self._results_blacklist is not None:\n json_task['resultsBlacklist'] = self._results_blacklist\n return json_task", "def __json_encode__(self):\r\n return self.config()", "def encode_params(params, **kwargs):\n cleaned = clean_params(params, **kwargs)\n return json.dumps(cleaned)", "def getJsonObject(self):\n jsonObject = {\n 'id': self.id,\n 'name': self.name,\n 'cards': [self.cards[card].getJsonObject() for card in self.cards] if self.cards else [\"\"]\n }\n return jsonObject", "def to_json(self, *args, **kwargs):\n data = self.to_dict()\n\n return json_util.dumps(data)", "def as_json(self, args=None):\n seg_data = _dict()\n seg_data[u'id'] = int(self.id)\n seg_data[u'parentID'] = int(self.parent_id)\n if self.biological_annotation is not None:\n seg_data[u'biologicalAnnotation'] = self.biological_annotation.as_json(args=args)\n if self.complexes_and_macromolecules:\n complexes = list()\n for _complex in self.complexes_and_macromolecules.complexes:\n complexes.append(_complex)\n macromolecules = list()\n for macromolecule in self.complexes_and_macromolecules.macromolecules:\n macromolecules.append(macromolecule)\n seg_data[u'complexesAndMacromolecules'] = {\n u'complexes': complexes,\n u'macromolecules': macromolecules,\n }\n seg_data.update(self.colour.as_json(args=args))\n # seg_data[u'colour'] = tuple(map(float, self.colour.value))\n if self.meshes:\n seg_data[u'meshList'] = len(self.meshes)\n if self.shapes:\n seg_data[u'shapePrimitiveList'] = len(self.shapes)\n return seg_data", "def json(self, 
value):\n\n self.operation = value['operation']\n self.version = value['version']\n self.language = value['language']\n self.identifiers = value['identifiers']\n self.store_execute = value['store_execute']\n self.status = value['status']\n self.lineage = value['lineage']\n self.outputs = value['outputs']\n self.raw = value['raw']\n self.inputs = {}\n\n for identifier in value['inputs']:\n inpt = None\n inpt_defs = value['inputs'][identifier]\n\n for inpt_def in inpt_defs:\n\n if inpt_def['type'] == 'complex':\n inpt = ComplexInput(\n identifier=inpt_def['identifier'],\n title=inpt_def.get('title'),\n abstract=inpt_def.get('abstract'),\n workdir=inpt_def.get('workdir'),\n data_format=Format(\n schema=inpt_def['data_format'].get('schema'),\n extension=inpt_def['data_format'].get('extension'),\n mime_type=inpt_def['data_format']['mime_type'],\n encoding=inpt_def['data_format'].get('encoding')\n ),\n supported_formats=[\n Format(\n schema=infrmt.get('schema'),\n extension=infrmt.get('extension'),\n mime_type=infrmt['mime_type'],\n encoding=infrmt.get('encoding')\n ) for infrmt in inpt_def['supported_formats']\n ],\n mode=MODE.NONE\n )\n inpt.file = inpt_def['file']\n elif inpt_def['type'] == 'literal':\n\n allowed_values = []\n for allowed_value in inpt_def['allowed_values']:\n if allowed_value['type'] == 'anyvalue':\n allowed_values.append(AnyValue())\n elif allowed_value['type'] == 'novalue':\n allowed_values.append(NoValue())\n elif allowed_value['type'] == 'valuesreference':\n allowed_values.append(ValuesReference())\n elif allowed_value['type'] == 'allowedvalue':\n allowed_values.append(AllowedValue(\n allowed_type=allowed_value['allowed_type'],\n value=allowed_value['value'],\n minval=allowed_value['minval'],\n maxval=allowed_value['maxval'],\n spacing=allowed_value['spacing'],\n range_closure=allowed_value['range_closure']\n ))\n\n inpt = LiteralInput(\n identifier=inpt_def['identifier'],\n title=inpt_def.get('title'),\n abstract=inpt_def.get('abstract'),\n data_type=inpt_def.get('data_type'),\n workdir=inpt_def.get('workdir'),\n allowed_values=AnyValue,\n uoms=inpt_def.get('uoms'),\n mode=inpt_def.get('mode')\n )\n inpt.uom = inpt_def.get('uom')\n inpt.data = inpt_def.get('data')\n\n elif inpt_def['type'] == 'bbox':\n inpt = BBoxInput(\n identifier=inpt_def['identifier'],\n title=inpt_def['title'],\n abstract=inpt_def['abstract'],\n crss=inpt_def['crs'],\n dimensions=inpt_def['dimensions'],\n workdir=inpt_def['workdir'],\n mode=inpt_def['mode']\n )\n inpt.ll = inpt_def['bbox'][0]\n inpt.ur = inpt_def['bbox'][1]\n\n if identifier in self.inputs:\n self.inputs[identifier].append(inpt)\n else:\n self.inputs[identifier] = [inpt]", "def __init__( self, parameters={} ):\n self.params = {}", "def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. 
'\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params", "def json_data(self):\n self.check_proof()\n return {\n \"vars\": [{'name': v.name, 'T': str(v.T)} for v in self.vars],\n \"proof\": sum([printer.export_proof_item(self.thy, item, unicode=True, highlight=True)\n for item in self.prf.items], []),\n \"report\": self.rpt.json_data(),\n \"method_sig\": self.get_method_sig()\n }", "def json_meta_obj(location, from_date, to_date, item):\n return '{\"meta\":{\"location\":\"' + location \\\n + '\",\"from\":\"' + from_date \\\n + '\",\"to\":\"' + to_date \\\n + '\",\"item\":\"' + item \\\n + '\"},\\n'", "def readjson():\n uuid = request.query.get('uuid','')\n if(uuid == \"\"):\n result = { \"code\":\"fail\", \"message\":\"empty uuid\"}\n return result\n else:\n zenodo = ZenodoRequest(uuid)\n return {'data':zenodo.saveInDatabase()}", "def __json__(self, request=None):\n # start = self.start.isoformat() if self.start else None\n # end = self.end.isoformat() if self.end else None\n return dict(\n timeref_type=\"daterange\",\n interval=self.interval,\n start=self.start.isoformat(),\n end=self.end.isoformat(),\n )", "def get_json(self, cid, **kwargs):\n\t\treturn self.cat(cid, decoder='json', **kwargs)", "def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"", "def json(self):\n d = OrderedDict()\n if self.id is not None:\n d[\"id\"] = str(self.id)\n d[\"status\"] = self.http_status\n d[\"title\"] = self.title\n if self.about:\n d[\"links\"] = OrderedDict()\n d[\"links\"][\"about\"] = self.about\n if self.code:\n d[\"code\"] = self.code\n if self.detail:\n d[\"detail\"] = self.detail\n if self.source_pointer or self.source_parameter:\n d[\"source\"] = OrderedDict()\n if self.source_pointer:\n d[\"source\"][\"pointer\"] = self.source_pointer\n if self.source_parameter:\n d[\"source\"][\"parameter\"] = self.source_parameter\n if self.meta:\n d[\"meta\"] = meta\n return d", "def make():\n data = {}\n data.update({'earth' : {'description': 'Planet with 20% O2 with 75% of surface covered by H2O. 
Humans inhabitants enjoy both of these aspects.',\n 'order' : 1,\n 'type': 'planet',\n }})\n \n return data", "def toJSON(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'dependencies': self.dependencies\n }", "def request_body(self):\n return json.dumps({\n \"schemaId\": self.schemaId,\n \"associationIds\": self.associationIds,\n \"name\": self.name,\n \"value\": self.value\n })", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(primitive=True), *args, **kwargs)", "def generate(self):\n return {\n 'json_class': 'Pact::ArrayLike',\n 'contents': from_term(self.matcher),\n 'min': self.minimum}", "def minimal_json43():\n return {\n 'identifiers': [{\n 'identifierType': 'DOI',\n 'identifier': '10.1234/foo.bar',\n }],\n 'creators': [\n {'name': 'Nielsen, Lars Holm'},\n ],\n 'titles': [\n {'title': 'Minimal Test Case'}\n ],\n 'publisher': 'Invenio Software',\n 'publicationYear': '2016',\n 'types': {\n 'resourceType': '',\n 'resourceTypeGeneral': 'Software'\n },\n 'schemaVersion': 'http://datacite.org/schema/kernel-4'\n }", "def json(self):\n ar_min_date, ar_max_date = self.get_ar_dates(\n (self.last_ar_year if self.last_ar_year else self.founding_date.year) + 1\n )\n d = {\n 'foundingDate': self.founding_date.isoformat(),\n 'identifier': self.identifier,\n 'lastModified': self.last_modified.isoformat(),\n 'lastAnnualReport': datetime.date(self.last_ar_date).isoformat() if self.last_ar_date else '',\n 'nextAnnualReport': LegislationDatetime.as_legislation_timezone_from_date(\n self.next_anniversary\n ).astimezone(timezone.utc).isoformat(),\n 'lastAnnualGeneralMeetingDate': datetime.date(self.last_agm_date).isoformat() if self.last_agm_date else '',\n 'lastLedgerTimestamp': self.last_ledger_timestamp.isoformat(),\n 'legalName': self.legal_name,\n 'legalType': self.legal_type,\n 'hasRestrictions': self.restriction_ind,\n 'goodStanding': self.good_standing,\n 'arMinDate': ar_min_date.isoformat(),\n 'arMaxDate': ar_max_date.isoformat()\n }\n # if self.last_remote_ledger_timestamp:\n # # this is not a typo, we want the external facing view object ledger timestamp to be the remote one\n # d['last_ledger_timestamp'] = self.last_remote_ledger_timestamp.isoformat()\n # else:\n # d['last_ledger_timestamp'] = None\n\n if self.dissolution_date:\n d['dissolutionDate'] = datetime.date(self.dissolution_date).isoformat()\n if self.fiscal_year_end_date:\n d['fiscalYearEndDate'] = datetime.date(self.fiscal_year_end_date).isoformat()\n if self.tax_id:\n d['taxId'] = self.tax_id\n\n return d", "def jasonizar(self, todosPermiso, listaPermisoEnRol, enRol):\n p='' \n pre=\"{\\\"totalpages\\\": \\\"\"+str(self.totalPages) + \"\\\",\\\"currpage\\\" : \\\"\" + str(self.currPage) + \"\\\",\\\"totalrecords\\\" : \\\"\" \n pre= pre + str(self.totalRecords) + \" \\\",\\\"invdata\\\" : [\" \n SIoNO='no'\n if(enRol=='Todos'):\n for permiso in todosPermiso:\n if permiso in listaPermisoEnRol:\n SIoNO='Si'\n else:\n SIoNO='No'\n \n p=p+\"{\\\"nombre\\\": \\\"\"+permiso.nombre+\"\\\",\\\"descripcion\\\": \\\"\"+permiso.descripcion+\"\\\", \\\"enRol\\\": \\\"\"+SIoNO+\"\\\",\\\"idPermiso\\\": \\\"\"+str(permiso.id)+\"\\\"},\"\n elif enRol=='Si':\n SIoNO='Si'\n for permiso in listaPermisoEnRol:\n p=p+\"{\\\"nombre\\\": \\\"\"+permiso.nombre+\"\\\",\\\"descripcion\\\": \\\"\"+permiso.descripcion+\"\\\", \\\"enRol\\\": \\\"\"+SIoNO+\"\\\",\\\"idPermiso\\\": \\\"\"+str(permiso.id)+\"\\\"},\"\n elif enRol=='No':\n SIoNO='No'\n for permiso in todosPermiso:\n if permiso not in listaPermisoEnRol:\n 
p=p+\"{\\\"nombre\\\": \\\"\"+permiso.nombre+\"\\\",\\\"descripcion\\\": \\\"\"+permiso.descripcion+\"\\\", \\\"enRol\\\": \\\"\"+SIoNO+\"\\\",\\\"idPermiso\\\": \\\"\"+str(permiso.id)+\"\\\"},\"\n \n \n p=p[0:len(p)-1] \n p=p+\"]}\" \n p=pre+p\n return p", "def to_api_repr(self):\n s_types = {}\n values = {}\n for name, value in self.struct_values.items():\n type_ = self.struct_types[name]\n if type_ in (\"STRUCT\", \"ARRAY\"):\n repr_ = value.to_api_repr()\n s_types[name] = {\"name\": name, \"type\": repr_[\"parameterType\"]}\n values[name] = repr_[\"parameterValue\"]\n else:\n s_types[name] = {\"name\": name, \"type\": {\"type\": type_}}\n converter = _SCALAR_VALUE_TO_JSON_PARAM.get(type_)\n if converter is not None:\n value = converter(value)\n values[name] = {\"value\": value}\n\n resource = {\n \"parameterType\": {\n \"type\": \"STRUCT\",\n \"structTypes\": [s_types[key] for key in self.struct_types],\n },\n \"parameterValue\": {\"structValues\": values},\n }\n if self.name is not None:\n resource[\"name\"] = self.name\n return resource", "def test_06_CreateJson(self):\n l_buttons = self.m_api.read_all_buttons_xml(self.m_pyhouse_obj, self.m_xml.button_sect, self.m_version)\n # print('ButtonsS: {0:}'.format(l_buttons))\n # print('Button 0: {0:}'.format(vars(l_buttons[0])))\n l_json = json_tools.encode_json(l_buttons)\n # print('JSON: {0:}'.format(l_json))" ]
[ "0.6857647", "0.6645711", "0.6554232", "0.65235794", "0.6426044", "0.6354805", "0.6256294", "0.62538326", "0.62132627", "0.62050444", "0.6196949", "0.6165802", "0.6154298", "0.6132933", "0.6096993", "0.6083696", "0.6067892", "0.6067397", "0.606541", "0.60606605", "0.6041364", "0.6040328", "0.60323185", "0.5976703", "0.5960331", "0.59419984", "0.5930116", "0.59277177", "0.58699954", "0.5869324", "0.5866438", "0.5860992", "0.5845905", "0.5828563", "0.58105284", "0.58041316", "0.5790436", "0.57802415", "0.57717127", "0.5769771", "0.5760462", "0.5760462", "0.57572633", "0.57350427", "0.5732893", "0.5714532", "0.57103425", "0.56945586", "0.5677327", "0.5674568", "0.5667098", "0.56668895", "0.5663047", "0.56608295", "0.56458735", "0.5643502", "0.5637894", "0.5630685", "0.5625672", "0.56246126", "0.56188744", "0.561275", "0.561225", "0.560185", "0.5597176", "0.55953723", "0.5592145", "0.5590307", "0.558896", "0.55815107", "0.5568854", "0.55682606", "0.5562299", "0.55616444", "0.55608773", "0.55454445", "0.5530052", "0.5520436", "0.5519693", "0.5498833", "0.5491575", "0.5481645", "0.5478158", "0.5477973", "0.5472057", "0.54654306", "0.546486", "0.5463099", "0.5456311", "0.5442346", "0.54358476", "0.5434536", "0.5433218", "0.542951", "0.54277164", "0.54217744", "0.5419493", "0.5418262", "0.54163504", "0.5415879" ]
0.5603465
63
Returns JSON structure issued from dataframe content given as parameter.
def json_df_builder(self, df, marketID, RFM=None): #------------------------------------------------------------------------- # Extract from dataframe content to be returned #------------------------------------------------------------------------- str_customerID = str(df.CustomerID.unique()[0]) invoice_count = len(df.InvoiceNo.unique()) item_count = df.Quantity.sum() invl_count = df.shape[0] ser_incomes = df.UnitPrice * df.Quantity incomes = ser_incomes.sum() str_incomes = "{0:1.2F}".format(incomes) mean_unit_price = incomes/item_count str_mean_unit_price = "{0:1.2F}".format(mean_unit_price) serInvoiceDate = df.InvoiceDate str_old_date = serInvoiceDate.map(str).min() str_new_date = serInvoiceDate.map(str).max() #------------------------------------------------------------------------- # Build JSON structure form content #------------------------------------------------------------------------- json_result = '{\n' json_result += '\t "_results":[\n' json_result += "{\n" json_result += "\t\t"+" \"customerID\":"+str_customerID+"\n" json_result += "\t\t"+",\"marketID\":"+str(marketID)+"\n" json_result += "\t\t"+",\"invoice_count\":"+str(invoice_count)+"\n" json_result += "\t\t"+",\"item_count\":"+str(item_count)+"\n" json_result += "\t\t"+",\"invl_count\":"+str(invl_count)+"\n" json_result += "\t\t"+",\"mean_unit_price\":"+str_mean_unit_price+"\n" json_result += "\t\t"+",\"incomes\":"+str_incomes+"\n" json_result += "\t\t"+",\"old_date\":"+str_old_date+"\n" json_result += "\t\t"+",\"new_date\":"+str_new_date+"\n" if RFM is not None: json_result += "\t\t"+",\"RFM\":"+RFM+"\n" else: pass json_result += "}\n" json_result += '\n\t]\n}' return json_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_json(self):\n\t\treturn self._dataframe.reset_index().to_json(orient=\"records\")", "def Mydata():\n\n stmt = db.session.query(Appsdata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n \n return jsonify(df.to_dict())", "def get_data(dataframe,index=None):\n dflen = len(dataframe)\n if index==None or index <0 or index >= dflen:\n index = randint(0,dflen)\n return dataframe.iloc[index].to_json()", "def json_temp_ges(df):\n\n json_str = (\n df.groupby(\n [\n \"sensor_id\",\n \"measure_name\",\n \"run_id\",\n \"ventilation_rate\",\n \"num_dehumidifiers\",\n \"lighting_shift\",\n \"scenario_type\",\n ],\n as_index=True,\n )\n .apply(\n lambda x: x[\n [\n \"prediction_value\",\n \"prediction_index\",\n \"run_id\",\n \"time\",\n \"timestamp\",\n ]\n ].to_dict(orient=\"records\")\n )\n .reset_index()\n .rename(columns={0: \"Values\"})\n .to_json(orient=\"records\")\n )\n return json_str", "def receive_json_ids(dataframe, jsondata, just_headers = False):\n\n dict_data = ast.literal_eval(jsondata)\n jsondict = {1: 'Kingdom', 2: 'Phylum', 3: 'Class', 4: 'Order', 5: 'Family', 6: 'Genus', 7: 'Species', 8 : 'Strain'}\n # this checks how long the jsondata is and from this it selects the correct Letter out of the jsondict #\n suffix = jsondict[len(dict_data)]\n\n # This selects the data which has the same name as the recieved jsondata\n fw_subset = dataframe[(dataframe[\"fw_\"+ suffix] == dict_data[-1])] \n rv_subset = dataframe[(dataframe[\"rv_\"+suffix] == dict_data[-1])]\n\n # This is only used so that the columns can be easily renamed in something more generic so the append will merge the correct columns\n columns_rename = pd.DataFrame(columns=[\"bitscore\", \"identity\", \"length\"])\n\n # Get the specified data\n fw_sideDf = fw_subset[[\"fw_bit\", \"fw_id\", \"fw_coverage_length\"]]\n rv_sideDf = rv_subset[[\"rv_bit\", \"rv_id\", \"rv_coverage_length\"]]\n\n # Get headers\n fw_headers = fw_subset.index.values.tolist()\n rv_headers = rv_subset.index.values.tolist()\n\n if just_headers:\n return fw_headers, rv_headers\n \n # Rename the columns\n fw_sideDf.columns = columns_rename.columns\n rv_sideDf.columns = columns_rename.columns\n # Combine the two dataframes in one since they have the same column names it will merge completly\n sideDf = fw_sideDf.append(rv_sideDf)\n # Count and group the different entries also convert them into a json\n count_id = sideDf.round(0).groupby(['identity']).size().to_json(orient='table')\n count_bit = sideDf.round(0).groupby(['bitscore']).size().to_json(orient='table')\n count_length = sideDf.round(0).groupby(['length']).size().to_json(orient='table')\n fw_seqs = fw_subset[\"fw_seq\"].tolist()\n rv_seqs = rv_subset[\"rv_seq\"].tolist()\n\n # Get taxonomy id's\n tax_ids = set([*fw_subset.fw_accession.tolist(), *rv_subset.rv_accession.tolist()])\n tax_len = len(tax_ids)\n if tax_len == 0:\n tax_id = \"None\"\n elif tax_len == 1:\n tax_id = list(tax_ids)[0]\n else:\n tax_id = \"More\"\n\n response = {\n \"count_id\":count_id,\n \"count_bit\": count_bit,\n \"count_length\": count_length,\n \"node_name\": dict_data[-1],\n \"tax_id\": str(tax_id),\n \"fw_headers\": fw_headers,\n \"rv_headers\": rv_headers,\n \"fw_seqs\": fw_seqs,\n \"rv_seqs\": rv_seqs\n }\n return jsonify(response)", "def _data_frame(content):\n response = loads(content)\n key = [x for x in response.keys() if x in c.response_data][0]\n frame = DataFrame(response[key])\n final_frame = _convert(frame)\n return final_frame", "def write_jason(df):\n\n\t# set Country as index of 
dataframe\n\tdf = df.set_index('Country')\n\n\t# write datafram to jason file \n\tdf = df.to_json('eda.json', orient='index')", "async def full_report():\n return DF.to_dict(orient=\"records\")", "def convert_to_json(dataframe):\n dataframe = dataframe.set_index('YYYYMMDD').to_json('schiphol_windstoten.json', orient = 'index')", "def get_data(self)->pd.DataFrame:\n pass", "def _dict(content):\n response = _data_frame(content).to_dict(orient='records')\n return response", "def __parse_json(df):\n\t\tcol_names = ['genres', 'production_companies', 'production_countries', 'cast', 'crew', 'spoken_languages',\n\t\t\t\t\t 'Keywords']\n\t\tvalue_names = ['name', 'name', 'iso_3166_1', 'name', 'name', 'name', 'name']\n\t\tfor col_name, value_name in zip(col_names, value_names):\n\t\t\t# df[col_name] = df[col_name].fillna(\"{}\")\n\t\t\tdf[col_name] = df[col_name].apply(literal_eval_error_handling)\n\t\t\tdf[col_name] = df[col_name].apply(lambda x: [i[value_name] for i in x])\n\t\treturn df", "def toDataFrame(self):\r\n if self.output_format in ('json', 'jsonExtended'):\r\n return json.dumps(self.result)\r\n \r\n elif self.output_format in ('tab2', 'extendedTab2'):\r\n return StringIO('\\t'.join(self.headers) + self.result)", "def pandas_to_njson(df):\n \n #df['secao'] = df['secao'].astype(int)\n \n records = df.to_dict(orient='records')\n json_list = [json.dumps(add_process_date(record), ensure_ascii=False) for record in records]\n njson = '\\n'.join(json_list)\n \n return njson", "def names():\n\n df = pd.read_sql_query(f\"SELECT * FROM olympics_raw\", con = engine)\n print(df.head())\n \n\n # return jsonify(all_olympians)\n return jsonify(df.to_dict(orient='records'))", "def display_raw_data(df):\n raw_data_lenght=df.shape[0]\n #loop through from 0 to number of rows in steps of 5\n for i in range(0,raw_data_lenght,5):\n response=input('\\n Do you want examin a perticular user data? 
Type \\'yes \\'or \\'no \\'\\n')\n if response.lower()!='yes':\n break\n \n data=df.iloc[i: i+5].to_json(orient='records',lines=True).split('\\n')\n for row in data:\n passed=json.loads(row)\n j_row=json.dumps(passed,indent=3)\n print(j_row)", "def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def json2dataframe(data):\n # Load and parse the available streamflow data\n siteId = data['value']['timeSeries'][0]['sourceInfo']['siteCode'][0]['value']\n df = pd.DataFrame(data['value']['timeSeries'][0]['values'][0]['value'])\n df = df.set_index(df['dateTime'], drop=True)\n df['value'] = df['value'].astype('float32')\n df.index = pd.to_datetime(df.index)\n last_available_date = df.index[-1].strftime(\"%Y-%m-%d\")\n return df, siteId, last_available_date", "def converttojson(edge_df):\n\tedge_df_str = edge_df.copy()\n\tfor idx, col in enumerate(edge_df.columns):\n\t\tfirst_row_element = edge_df.iloc[0, idx]\n\t\tif isinstance(first_row_element, list) or isinstance(first_row_element, dict):\n\t\t\tedge_df_str[col] = edge_df[col].apply(json.dumps)\n\t\t\tprint('Field \"{}\" of class {} converted to json string'.format(col, type(first_row_element)))\n\t\t#else:\n\t\t#\tprint(col,type(edge_df[col][0]))\n\treturn edge_df_str", "def json(self) -> CellJson:\n\n return {\"id\": self.id, \"content\": self.content, \"data\": self.data}", "def insert_df_json(conn, table_name: str, df: pd.DataFrame):\n insert_json(conn=conn, table_name=table_name, data=df.reset_index().to_json(orient='records', lines=True))", "def createDataframe(httpData):\n jsonData = json.loads(httpData)\n return pd.json_normalize(jsonData['data'])", "def generate_df(js_dict, naming, value=\"value\"):\n\n values = []\n dimensions, dim_names = get_dimensions(js_dict, naming)\n values = get_values(js_dict, value=value)\n output = pd.DataFrame([category + [values[i]]\n for i, category in\n enumerate(get_df_row(dimensions, naming))])\n output.columns = dim_names + [value]\n output.index = range(0, len(values))\n return output", "def input_fn(request_body, request_content_type):\n if request_content_type == \"application/json\":\n json_load = json.loads(request_body)\n data=get_dataframe_from_dict(json_load)\n csv_data=data.to_csv(index=False,header=None)\n data=csv_data.replace(\"\\n\",\"\")\n s = StringIO(data)\n data = pd.read_csv(s, header=None)\n\n return data\n else:\n # Handle other content-types here or raise an Exception\n # if the content type is not supported.\n pass", "def json(path):\n try:\n # TODO: Check a better way to handle this Spark.instance.spark. Very verbose.\n df = Spark.instance.spark.read.json(path)\n except IOError as error:\n logging.error(error)\n raise\n return df", "def dataframe(self):\n\t\treturn self._dataframe", "def df():\n fs.df()", "def to_df(self) -> pd.DataFrame:\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]", "def save_json(df):\n dict = {}\n for row in df.iterrows():\n dict[row[1]['Country']] = {'Region' : row[1]['Region'],\n 'Pop. Density (per sq. mi.)' : row[1]['Pop. Density (per sq. 
mi.)'],\n 'Infant mortality (per 1000 births)' : row[1]['Infant mortality (per 1000 births)'],\n 'GDP ($ per capita) dollars' : row[1]['GDP ($ per capita) dollars']\n }\n\n with open('data.json', 'w', encoding='utf8') as outfile:\n data = json.dumps(dict, indent=4, sort_keys=False, separators=(',', ': '), ensure_ascii=False)\n outfile.write(data)", "def getDataFrame(self):\n return self.df", "def as_json(self):", "def df_to_json(complete_dataset, output_path, static_columns):\n megajson = {}\n\n static_columns = [\"continent\", \"location\"] + list(static_columns)\n\n complete_dataset = complete_dataset.dropna(axis=\"rows\", subset=[\"iso_code\"])\n\n for _, row in complete_dataset.iterrows():\n\n row_iso = row[\"iso_code\"]\n row_dict_static = row.drop(\"iso_code\")[static_columns].dropna().to_dict()\n row_dict_dynamic = row.drop(\"iso_code\").drop(static_columns).dropna().to_dict()\n\n if row_iso not in megajson:\n megajson[row_iso] = row_dict_static\n megajson[row_iso][\"data\"] = [row_dict_dynamic]\n else:\n megajson[row_iso][\"data\"].append(row_dict_dynamic)\n\n with open(output_path, \"w\") as file:\n file.write(json.dumps(megajson, indent=4))", "def get_data_rows(self, as_json=False, pretty_print=False):\n if self.has_error():\n return None\n\n # Read the table\n try:\n if self.is_excel:\n msgt('Excel!')\n\n df = pd.read_excel(self.filepath)\n #error_bad_lines=False)\n else:\n df = pd.read_table(self.filepath)\n except Exception as ex_obj:\n msg(ex_obj)\n msgt('Failed to open file via pandas!')\n temp_file_helper.make_sure_file_deleted(self.filepath)\n if self.is_excel:\n self.add_error('Failed to open Excel file via pandas. [%s]' % ex_obj)\n else:\n self.add_error('<b>Probably not a tabular file!</b> Failed to open file via pandas. [%s]' % ex_obj)\n return None\n\n self.describe_as_html = df.describe().to_html()\n json_string = df.describe().to_json()\n self.describe_as_dict =json.loads(json_string, object_pairs_hook=OrderedDict)\n\n # Retrieve the columns\n self.column_names = df.columns.tolist()\n\n # Retrieve the rows\n self.data_rows = df[:self.num_preview_rows].values.tolist()\n\n #print 'rows', json.dumps(rows)\n\n # Format the response\n info_dict = OrderedDict()\n\n info_dict['total_row_count'] = len(df.index)\n info_dict['preview_row_count'] = len(self.data_rows)\n info_dict['column_names'] = self.column_names\n info_dict['rows'] = self.data_rows\n info_dict['describe_as_html'] = self.describe_as_html\n info_dict['describe_as_dict'] = self.describe_as_dict\n\n if as_json:\n if pretty_print:\n return json.dumps(info_dict, indent=4)\n return json.dumps(info_dict)\n\n return info_dict", "def process_data(self):\n structure_data = self.parse_root(self.root)\n\n dict_data = {}\n for d in structure_data:\n dict_data = {**dict_data, **d}\n df = pd.DataFrame(data=list(dict_data.values()), index=dict_data.keys()).T\n\n return df", "def addJSON(file: str, df, creat: bool):\n if creat is False :\n with open(file) as train_file:\n dict_train = json.load(train_file)\n data = pd.read_json(dict_train, orient=\"records\")\n df = pd.concat([data, df])\n \n js = df.to_json(orient='records').replace(\n \"[\\\\\\\"[\", '').replace(\"]\\\\\\\"]\", '')\n \n with open(file, 'w', encoding='utf8') as outfile:\n json.dump(js, outfile, ensure_ascii=False, indent=4)", "def dataframe(self):\n return self.generator.dataframe", "def df_to_json(tokenList):\n # If df's ouput format changes, we'll be in trouble, of course.\n # the 0 token is the name of the filesystem\n # the 1 token is the size of the 
filesystem in 1K blocks\n # the 2 token is the amount used of the filesystem\n # the 5 token is the mount point\n offset = len(tokenList) - 9\n result = {}\n fsName = tokenList[0+offset]\n fsSize = tokenList[1+offset]\n fsUsed = tokenList[2+offset]\n fsMountPoint = tokenList[5+offset]\n result[\"filesystem\"] = {}\n result[\"filesystem\"][\"name\"] = fsName\n result[\"filesystem\"][\"size\"] = float(fsSize)/1000\n result[\"filesystem\"][\"used\"] = float(fsUsed)/1000\n result[\"filesystem\"][\"mount_point\"] = fsMountPoint\n if result[\"filesystem\"][\"size\"] > 0 :\n result[\"filesystem\"][\"percentage\"] = result[\"filesystem\"][\"used\"]/result[\"filesystem\"][\"size\"]\n return result", "def __gen_datatable__(self):\n # | - __generate_data_table\n rows_list = []\n for Job_i in self.Job_list:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop, value in Job_i.job_params.items():\n entry_param_dict[prop] = value\n\n entry_param_dict[\"Job\"] = Job_i\n entry_param_dict[\"path\"] = Job_i.full_path\n entry_param_dict[\"max_revision\"] = Job_i.max_revision\n entry_param_dict[\"revision_number\"] = Job_i.revision_number\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def list_data_frames():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_data_frames\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)", "def create_cris_data_dict(df, filename, outdir):\n json_dict = {}\n for key in df.keys():\n if key != 'participant_id':\n json_dict[key] = {'Units': key.split()[-1]}\n else:\n json_dict[key] = {'Description': 'OpenNeuro ID of the subject.'}\n with open(outdir.joinpath(filename + '.json'), \"w\") as f:\n json.dump(json_dict, f, indent=4)", "def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty", "def _get_frame_data(self):\n\n def _text_formatting(bs4_tag):\n \"\"\"Simple formatting that will be called a lot\"\"\"\n return bs4_tag.get_text().replace('\\n', '')\n\n tables = Character._get_main_page(self._fd).find_all('table', {'class': 'wikitable'})\n result = {}\n for table in tables:\n rows = table.find_all('tr')\n for row in rows:\n move = row.find('th')\n\n if move is not None:\n move_small = move.find('small')\n else:\n continue\n\n if move_small is not None:\n move_name = move.get_text().replace(move_small.get_text(), ' ' + move_small.get_text()).replace(\n '\\n', '')\n else:\n move_name = _text_formatting(move)\n\n move_name = move_name.replace('+', '') # B+C fix\n fd = row.find_all('td')\n\n try:\n result.update({\n move_name: {\n \"Damage\": _text_formatting(fd[0]),\n \"Cancel\": _text_formatting(fd[1]),\n \"Attribute\": _text_formatting(fd[4]),\n \"Guard\": _text_formatting(fd[5]),\n \"Startup\": _text_formatting(fd[6]),\n \"Active\": _text_formatting(fd[7]),\n \"Recovery\": _text_formatting(fd[8]),\n \"Frame Adv\": _text_formatting(fd[9]),\n \"Blockstun\": _text_formatting(fd[11]),\n \"Invul/GP\": _text_formatting(fd[19])\n }\n })\n except (AttributeError, IndexError):\n continue\n\n return result", "def download_data_df(main=False) -> pd.DataFrame:\n temp_file: str = './temp.csv'\n json_text: str = 
download_json_data(main)\n result: pd.DataFrame = json_to_csv(json_text, temp_file)\n os.remove(temp_file)\n return result", "def toJSON(self):\r\n\r\n jsonToRet = []\r\n rowJson = []\r\n matrixJson = []\r\n\r\n if len(self.slctData) > 100:\r\n self.getSimMatSummary(100)\r\n jsonToRet.append(self.summaryOrdering)\r\n for i in range(0,len(self.simMatSmm)):\r\n for n in self.simMatSmm[i]:\r\n rowJson.append(n)\r\n matrixJson.append(rowJson)\r\n rowJson = []\r\n jsonToRet.append(matrixJson)\r\n\r\n jsonToRet.append(self.patchOrdering)\r\n # jsonToRet = []\r\n rowJson = []\r\n matrixJson = []\r\n\r\n for i in range(0,len(self.simMat)):\r\n for n in self.simMat[i]:\r\n rowJson.append(n)\r\n matrixJson.append(rowJson)\r\n rowJson = []\r\n jsonToRet.append(matrixJson)\r\n return jsonToRet", "def dataframe_to_list(df: pandas.DataFrame) -> list:\n return json.loads(df.to_json(orient=\"records\"))", "def format(df, format, features):\n\n def mapDocFormat(row):\n if format == \"csv\":\n return (row.id.encode(\"utf-8\", errors='ignore') + \",\" + \" \".join(\n [x.encode(\"utf-8\", errors='ignore') for x in [row.comments, row.caption, row.tags]])).lower()\n if format == \"tsv\":\n return (row.id.encode(\"utf-8\") + \"\\t\" + \" \".join(\n [x.encode(\"utf-8\", errors='ignore') for x in [row.comments, row.caption, row.tags]])).lower()\n if format == \"json\":\n return pyspark.sql.Row(doc=row.id.encode(\"utf-8\", errors='ignore').lower(),\n text=(''.join([x.encode(\"utf-8\", errors='ignore') for x in row]))).lower()\n\n if features:\n df = df.rdd.map(mapDocFormat)\n if format == \"json\":\n return df.toDF().toJSON()\n else:\n return df\n else:\n if format == \"csv\":\n return df.rdd.map(lambda row: (','.join(\n [x.encode(\"utf-8\", errors='ignore') for x in [row.id, row.url, row.comments, row.caption, row.tags]])).lower())\n if format == \"tsv\":\n return df.rdd.map(lambda row: ('\\t'.join(\n [x.encode(\"utf-8\", errors='ignore') for x in [row.id, row.url, row.comments, row.caption, row.tags]])).lower())\n if format == \"json\":\n return df.toJSON()", "def parse_jsons(self):\n # store all data in a pandas DataFrame\n pandas_df = pandas.DataFrame(self.__input_jsons)\n return pandas_df", "def display_data(df):\n row_length = df.shape[0]\n # iterate from 0 to the number of rows in steps of 5\n for i in range(0, row_length, 5):\n decision = input('\\nWould you like to examine the particular user trip data? 
Type \\'yes\\' or \\'no\\'\\n> ')\n if decision.lower() != 'yes':\n break\n # retrieve and convert data to json format\n # split each json row data \n # retrieving rows by iloc method\n #And use to_json which takes (orient = ‘records’ that list like [{column -> value}, … , {column -> value}]\n row_data = df.iloc[i: i + 5].to_json(orient='records', lines=True).split('\\n')\n for row in row_data:\n # pretty print each user data\n parsed_row = json.loads(row)\n #converts a Python object into a json string.\n json_row = json.dumps(parsed_row, indent=2)\n print(json_row)", "def get_data(config_path):\n config = read_params(config_path)\n data_path = config[\"data_source\"][\"s3_source\"]\n df = pd.read_json(data_path, lines=True, orient='str')\n return df", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def json_data(self):\n self.check_proof()\n return {\n \"vars\": [{'name': v.name, 'T': str(v.T)} for v in self.vars],\n \"proof\": sum([printer.export_proof_item(self.thy, item, unicode=True, highlight=True)\n for item in self.prf.items], []),\n \"report\": self.rpt.json_data(),\n \"method_sig\": self.get_method_sig()\n }", "def __data_row_to_json(self, row):\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)", "def get_data(self):\n return self.data.to_json()", "def get_data(self):\n\n # Import json data into a pandas dataframe\n df = pd.read_json(\"./nutrients.json\", lines=True)\n\n return df", "def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i 
in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df", "def get_time_series_data():\r\n # Grab the requested years and columns from the query arguments\r\n ls_year = [int(year) for year in request.args.getlist(\"n\")]\r\n ls_col = request.args.getlist(\"m\")\r\n\r\n # Generate a list of all the months we need to get\r\n all_years = [str(year) for year in range(min(ls_year), max(ls_year) + 1)]\r\n\r\n # Grab all of the wanted months by filtering for the ones we want\r\n wanted_months = reduce(\r\n lambda a, b: a | b, (app.df[\"month\"].str.contains(year) for year in all_years)\r\n )\r\n\r\n # Create a new dataframe from the one that\r\n df_new = app.df[wanted_months][[\"month\"] + ls_col]\r\n\r\n # Convert all string dates into datetime objects and then sort them\r\n df_new[\"month\"] = pd.to_datetime(df_new[\"month\"])\r\n df_new = df_new.sort_values(by=[\"month\"])\r\n\r\n # Return the dataframe as json\r\n return df_new.to_json(), 200", "def format_bgc_metadata(df,float_id):\n mdf = df[bgc_metadata_columns]\n bgc_metadata_dict = {}\n for col in list(mdf):\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip())\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip().replace(\"'\",'\"'))\n bgc_metadata_dict = json.dumps(bgc_metadata_dict) \n bgc_metadata_df = pd.DataFrame({\"float_id\": [float_id], \"Metadata_Dict\": [bgc_metadata_dict]})\n return bgc_metadata_df", "def __generate_data_table__(self):\n # | - __generate_data_table__\n rows_list = []\n for job in self.job_var_lst:\n revisions = self.job_revision_number(job)\n for revision in range(revisions + 1)[1:]:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop in job:\n entry_param_dict[prop[\"property\"]] = prop[\"value\"]\n\n entry_param_dict[\"variable_list\"] = job\n entry_param_dict[\"path\"] = self.var_lst_to_path(job)\n\n entry_param_dict[\"max_revision\"] = revisions\n entry_param_dict[\"revision_number\"] = revision\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)", "def get_datalist_fr_json(self):\n raw_data = json.load(open(self.saved_json_file, 'r'))\n for indivdual_set in raw_data['query']['results']['stats']:\n temp_dict_data = {}\n if type(indivdual_set) == str:\n #for single data\n continue # temp do not use\n for parameters in indivdual_set.keys():\n if type(indivdual_set[parameters]) == str:\n temp_dict_data[parameters] = indivdual_set[parameters]#for symbol\n elif type(indivdual_set[parameters]) == dict:\n if indivdual_set[parameters].has_key('content'):\n temp_dict_data[parameters] = indivdual_set[parameters]['content']\n\n ## append to list\n self.com_data_allstock_list.append(temp_dict_data)", "def df(self):\n return self._df", "def _jsonify(self):\n return self.experiment_record.to_ddb_record()", "def precipitation():\n\n return jsonify(prcp_df)", "def json_temp_arima(df_arima):\n # The shift_hours=1 accounts for df_arima starting its indexing from 1\n # instead of 0.\n 
df_arima = add_time_columns(df_arima, shift_hours=1)\n if len(df_arima) < 1:\n return \"{}\"\n json_str = (\n df_arima.groupby([\"sensor_id\", \"measure_name\", \"run_id\"], as_index=True)\n .apply(\n lambda x: x[\n [\n \"prediction_value\",\n \"prediction_index\",\n \"run_id\",\n \"time\",\n \"timestamp\",\n ]\n ].to_dict(orient=\"records\")\n )\n .reset_index()\n .rename(columns={0: \"Values\"})\n .to_json(orient=\"records\")\n )\n return json_str", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def make_df_from_json(json_files, out_file):\n table = [[\"name\", \n \"cik\", \n \"city\",\n \"state\",\n \"street1\",\n \"street2\",\n \"zip_code\",\n \"year_of_incorp\", \n \"min_inv\", \n \"tot_off\", \n \"tot_sold\", \n \"tot_rem\", \n \"ind_group_type\", \n \"has_non_accred\", \n \"num_non_accred\", \n \"tot_num_inv\"\n ]] \n\n for json_dict in json_files:\n\n with open(json_dict, \"rb\") as f:\n data = json.load(f)\n print(json_dict)\n\n for i, key in enumerate(data):\n # if i % 1000 == 0:\n # print(i)\n entry = data[key] \n if entry == {}:\n #print(\"missing entry {0}\".format(i))\n continue\n row = []\n\n primary_issuer = entry[\"Primary Issuer\"]\n cik = primary_issuer[\"cik\"]\n name = primary_issuer[\"entity_name\"]\n phone = primary_issuer[\"phone\"]\n year_of_incorp = primary_issuer[\"year_of_incorp\"]\n address = primary_issuer[\"address\"]\n city = address[\"city\"]\n state = address[\"state\"]\n street1 = address[\"street1\"]\n street2 = address[\"street2\"]\n zip_code = address[\"zip_code\"]\n\n secondary_issuers = entry[\"Secondary Issuers\"]\n related_people = entry[\"Related People\"]\n \n offering_data = entry[\"Offering Data\"]\n min_inv = offering_data[\"min_investment_accepted\"]\n tot_off = offering_data[\"total_offering_amount\"]\n tot_sold = offering_data[\"total_amount_sold\"]\n tot_rem = offering_data[\"total_remaining\"]\n ind_group_type = offering_data[\"ind_group_type\"]\n has_non_accred = offering_data[\"has_non_accred\"]\n num_non_accred = offering_data[\"num_non_accred\"]\n tot_num_inv = offering_data[\"tot_num_inv\"] \n\n row = [name, \n cik, \n city,\n state,\n street1,\n street2,\n zip_code,\n year_of_incorp,\n min_inv,\n tot_off,\n tot_sold,\n tot_rem,\n ind_group_type,\n has_non_accred,\n num_non_accred,\n tot_num_inv\n ]\n\n table.append(row)\n\n df = pd.DataFrame(table)\n df.to_csv(out_file)\n\n return 0", "def _get_data_as_json(self, data):\n data = self._get_data_as_df(data)\n data = data.to_json(orient=\"records\")\n char_per_line = min(len(data), self.SAMPLES_PER_LINE_DEFAULT)\n return list(map(''.join, zip(*[iter(data)] * char_per_line)))", "def dataframe(self):\n return self._df", "def data(self):\n return self.as_named_DataFrame()", "def format_data(self, params):\n return json.dumps(params)", "def convert_data(df):\n print(\"Converting history...\")\n return [ dict(row) for i, row in df.iterrows() ]", "def expected_df():\n return pd.DataFrame(\n {\n \"growth\": [0.873922, 0.814298, 0.0],\n \"gene\": [\"b2935\", \"b0723\", \"b0451\"],\n \"status\": [\"optimal\", \"optimal\", \"optimal\"],\n }\n )", "async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json", "def cNames():\n a = 
pd.DataFrame(df['Country Name'].unique(), columns=['cname']).to_json()\n r = Response(response=a,\n status=200,\n mimetype=\"application/json\")\n r.headers[\"Content-Type\"] = \"text/json; charset=utf-8\"\n return r", "def __str__(self) -> str:\n if self.data is not None:\n list_of_params = []\n for key, data_dict in self.data.to_dict(orient=\"index\").items():\n data_dict[\"index\"] = key\n list_of_params.append(data_dict)\n formated_list_of_params = self.format_params(list_of_params)\n return f\"\\n{tabulate(formated_list_of_params, headers='keys', tablefmt='fancy_grid')}\"\n else:\n return \"Empty DataFrame\"", "def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")", "def dx(dataframe, sampled=None):\n # All the metadata keys that we'll apply for just the data explorer media\n # type, `application/vnd.dataresource+json`\n scoped_metadata = {}\n\n if sampled:\n scoped_metadata[\"sampled\"] = sampled\n\n metadata = {\"application/vnd.dataresource+json\": scoped_metadata}\n\n with pd.option_context('display.html.table_schema', True):\n display(dataframe, metadata=metadata)", "def to_df(self) -> Dict[str, Any]:\n return {key: self.render_df(key) for key, value in self.__dict__.items()}", "def format(self, row):\n return json.dumps(row.print_fields)", "def frame_data(self) -> str:\n pass", "def dataframe(self):\n return self.get_target().dataframe()", "def FetchQueryResultToDF(data, col_name: List[str]) -> pd.DataFrame:\r\n result = []\r\n for row in data:\r\n to_be_append = []\r\n for col in row:\r\n to_be_append.append(col)\r\n result.append(to_be_append)\r\n df = pd.DataFrame(result, columns=col_name)\r\n print(df)\r\n return df", "def __create_data_frame(self, soup):\n self.__data_frame = pd.read_html(str(soup))[0]\n timestamp = self.__navigate_rows(soup)\n # rename dataframe columns by columns name in sqlite\n self.__data_frame = self.__data_frame.rename(\n columns=self.__columns_name)\n self.__data_frame['time'] = pd.Series(timestamp)\n self.__data_frame['chg_perc'] = self.__data_frame['chg_perc'].\\\n str.replace('%', '')\n self.__data_frame['created_date'] = datetime.now()\n # save_file(self.__name_file, self.__data_frame.to_string())", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def get_updated_dataframe():\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.io import fetch_paginated_data\n records = fetch_paginated_data(constants.API_RECORDS_ENDPOINT.value)\n df = parse_records_to_dataframe(records) # pylint: disable=invalid-name\n return df", "def _get_data_as_flattened_dataframe(self, json_lines):\n if isinstance(json_lines, pd.DataFrame):\n return json_lines\n payload_data = None\n if isinstance(json_lines, dict):\n # Glean Payload Data\n found_payload_key = None\n payloads = {}\n for payload_key in self._payload_keys:\n if payload_key in json_lines.keys():\n payload_data = json_lines[payload_key]\n if isinstance(payload_data, dict):\n payload_data = self._find_data(payload_data)\n payload_data = self._coalesce_dicts(payload_data)\n payload_data, original_df_dtypes = data_utils.json_to_dataframe(\n json_lines=payload_data,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n for column in payload_data.columns:\n payload_data.rename(\n columns={column: payload_key + 
self._key_separator + str(column)},\n inplace=True)\n payloads[payload_key] = payload_data\n \n max_payload_length = 0\n for payload in payloads:\n if len(payloads[payload]) > max_payload_length:\n payload_data = payloads[payload]\n max_payload_length = len(payloads[payload])\n found_payload_key = payload\n \n # Get the non-payload data\n flattened_json = []\n for key in json_lines:\n if key != found_payload_key:\n flattened_json = flattened_json + self._find_data(json_lines[key], path=key)\n\n # Coalesce the data together\n json_lines = self._coalesce_dicts(flattened_json)\n\n data, original_df_dtypes = data_utils.json_to_dataframe(\n json_lines=json_lines,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n self._original_df_dtypes = original_df_dtypes\n\n if payload_data is not None:\n self._metadata = data\n data = payload_data\n\n return data", "def dataframe(self, *args, **kwargs):\n\n try:\n return self.url.generator.dataframe(*args, **kwargs)\n except AttributeError:\n pass\n\n try:\n return self.url.dataframe(*args, **kwargs)\n except AttributeError:\n pass\n\n raise NotImplementedError(\"Url '{}' of type '{}' can't generate a dataframe \".format(self.url, type(self.url)))", "def to_dict_records(df):\r\n return df.to_dict('records')", "def data():\n df = gen_sliced_df()\n df = df[[\"x\", \"z_categ\", \"y\", \"residual\"]]\n new_df = df.iloc[[1, 100, 150, 200, 250, 300, 305, 400, 405, 500, 550, 609]].copy()\n return {\"df\": df, \"new_df\": new_df}", "def _get_data_as_df(self, data):\n if isinstance(data, pd.DataFrame):\n return data\n if isinstance(data, dict):\n data = [data]\n data, original_df_dtypes = data_utils.json_to_dataframe(\n json_lines=data,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n self._original_df_dtypes = original_df_dtypes\n return data", "def to_df(self):\r\n return pd.DataFrame([dict(self)])", "def _getvXXXXAsOneString(self,vXXXX=None,start=0,end=-1,dropColList=None,filterColList=None,mapFunc={},sortList=None,ascending=True,roundDct=None,fmtFunc={},index=True,header=True):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n\r\n dfContentAsOneString=None\r\n\r\n df=self.dataFrames[vXXXX]\r\n\r\n # select rows\r\n if end == -1:\r\n df=df[start:]\r\n else:\r\n df=df[start:end]\r\n\r\n # select cols \r\n colList=df.columns.values.tolist()\r\n if isinstance(dropColList,list):\r\n colListOut=[col for col in colList if col not in dropColList]\r\n else:\r\n colListOut=colList\r\n df=df.loc[:,colListOut]\r\n if filterColList!=None:\r\n df=df.filter(items=filterColList)\r\n\r\n # map cols\r\n for col,func in mapFunc.items(): \r\n if col not in df.columns:\r\n pass\r\n else:\r\n df[col]=df[col].map(func)\r\n\r\n # sort \r\n if isinstance(sortList,list):\r\n df=df.sort_values(sortList,ascending=ascending) \r\n\r\n # round \r\n if isinstance(roundDct,dict):\r\n df=df.round(roundDct) \r\n\r\n try: \r\n dfContentAsOneString=df.to_string(formatters=fmtFunc,index=index,header=header,justify='right') \r\n except MxError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.')) \r\n return dfContentAsOneString", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", 
"def get_json_data():\n return None", "def linedata():\n get_values = request.args\n pc = get_values.get('pc') is not None # Per Capita\n gr = get_values.get('gr') is not None # Growth Rate\n place_args, _ = get_place_args(get_values)\n plot_data, _ = datachart_handler.get_plot_data(place_args, pc, gr)\n return json.dumps(plot_data)", "def listings_data():\n\n stmt = db.session.query(nyc).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n df[\"latitude\"] = pd.to_numeric(df[\"latitude\"])\n df[\"longitude\"] = pd.to_numeric(df[\"longitude\"])\n df[\"accommodates\"] = pd.to_numeric(df[\"accommodates\"])\n\n data = df.to_dict(orient='index')\n # Create a dictionary entry for each row of metadata information\n # data = {}\n # for result in results:\n #\n # data[\"ID\"] = result[0]\n # data[\"LISTING_URL\"] = result[1]\n # data[\"NAME\"] = result[2]\n # data[\"HOST_ID\"] = result[3]\n # data[\"NEIGHBORHOOD\"] = result[4]\n # data[\"NEIGHBORHOOD_GROUP\"] = result[5]\n # data[\"CITY\"] = result[6]\n # data[\"ZIPCODE\"] = result[7]\n # data[\"LAT\"] = float(result[8])\n # data[\"LON\"] = float(result[9])\n #\n # print(data)\n\n return jsonify(data)", "def bubblechart(department, position):\n # Use Pandas to perform the sql query\n stmt = db.session.query(oc_salary_db).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n # print(dept.upper())\n dept_total = df.loc[(df[\"department\"] == department.upper()) & (df[\"position\"] == position.upper()), [department, position, \"2014\",\"2015\",\"2016\",\"2017\",\"2018\"]]\n # Format the data to send as json\n data = {\n \"2014\": dept_total[\"2014\"].tolist(),\n \"2015\": dept_total[\"2015\"].tolist(),\n \"2016\": dept_total[\"2016\"].tolist(),\n \"2017\": dept_total[\"2017\"].tolist(),\n \"2018\": dept_total[\"2018\"].tolist()\n }\n return jsonify(data)", "def get_file_df(filepath):\n dd = [json.loads(f) for f in open(filepath).readlines()]\n return pd.DataFrame(dd)", "def my_feature_xxx(df: pd.DataFrame):\n\n # CODE HERE\n\n return df" ]
[ "0.64720726", "0.63688964", "0.62659526", "0.62423277", "0.61731887", "0.61728024", "0.6165461", "0.610696", "0.608971", "0.608501", "0.60734797", "0.6067765", "0.60032827", "0.5975856", "0.59611905", "0.5942278", "0.5940992", "0.59368914", "0.5936283", "0.5933788", "0.5910125", "0.58526474", "0.57914597", "0.578663", "0.57680786", "0.57611877", "0.5734831", "0.5733008", "0.5713259", "0.5712131", "0.57013756", "0.5667361", "0.5653684", "0.56121963", "0.5609037", "0.56043917", "0.5593471", "0.5587373", "0.55801827", "0.5572345", "0.55614245", "0.55541474", "0.55497986", "0.55477005", "0.5536616", "0.5533723", "0.5523065", "0.5517754", "0.55160123", "0.55133444", "0.55055434", "0.54977125", "0.549696", "0.54903024", "0.5482618", "0.54810697", "0.54809046", "0.5480375", "0.5475449", "0.5472704", "0.547248", "0.5464888", "0.54643726", "0.5454899", "0.5446767", "0.5443123", "0.54307973", "0.54279673", "0.54041535", "0.54010785", "0.5390844", "0.5385566", "0.53795356", "0.5378529", "0.5377424", "0.537345", "0.5371914", "0.53675467", "0.53625745", "0.53588146", "0.535555", "0.53510463", "0.5349599", "0.5341289", "0.53411806", "0.5337197", "0.5336624", "0.53314006", "0.53307575", "0.5322711", "0.5322071", "0.53197044", "0.53190225", "0.53115416", "0.53079593", "0.5303784", "0.5301862", "0.5301385", "0.5300864", "0.52915525" ]
0.6871927
0