path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class) |
---|---|---|---|
32068790/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
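# Day-of-year cutoffs marking the last day before the validation, public LB and private LB periods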
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
df_latlong = pd.read_csv('../input/smokingstats/df_Latlong.csv')
df_latlong.head() | code |
32068790/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
df_traintest.head() | code |
32068790/cell_8 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
print(df_traintest['Date'][df_traintest['day'] == day_before_valid].values[0])
print(df_traintest['Date'][df_traintest['day'] == day_before_public].values[0])
print(df_traintest['Date'][df_traintest['day'] == day_before_private].values[0]) | code |
32068790/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
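# Build place_id as 'Country_Region/Province_State'; the except branch keeps just the country name when Province_State is missing (NaN)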
def func(x):
try:
x_new = x['Country_Region'] + '/' + x['Province_State']
except:
x_new = x['Country_Region']
return x_new
df_traintest['place_id'] = df_traintest.apply(lambda x: func(x), axis=1)
df_latlong = pd.read_csv('../input/smokingstats/df_Latlong.csv')
def func(x):
try:
x_new = x['Country/Region'] + '/' + x['Province/State']
except:
x_new = x['Country/Region']
return x_new
df_latlong['place_id'] = df_latlong.apply(lambda x: func(x), axis=1)
df_latlong = df_latlong[df_latlong['place_id'].duplicated() == False]
df_traintest = pd.merge(df_traintest, df_latlong[['place_id', 'Lat', 'Long']], on='place_id', how='left')
places = np.sort(df_traintest['place_id'].unique())
print(len(places)) | code |
32068790/cell_16 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
def func(x):
try:
x_new = x['Country_Region'] + '/' + x['Province_State']
except:
x_new = x['Country_Region']
return x_new
df_traintest['place_id'] = df_traintest.apply(lambda x: func(x), axis=1)
df_latlong = pd.read_csv('../input/smokingstats/df_Latlong.csv')
def func(x):
try:
x_new = x['Country/Region'] + '/' + x['Province/State']
except:
x_new = x['Country/Region']
return x_new
df_latlong['place_id'] = df_latlong.apply(lambda x: func(x), axis=1)
df_latlong = df_latlong[df_latlong['place_id'].duplicated() == False]
df_traintest = pd.merge(df_traintest, df_latlong[['place_id', 'Lat', 'Long']], on='place_id', how='left')
places = np.sort(df_traintest['place_id'].unique())
df_traintest2 = copy.deepcopy(df_traintest)
df_traintest2['cases/day'] = 0
df_traintest2['fatal/day'] = 0
tmp_list = np.zeros(len(df_traintest2))
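# Convert cumulative ConfirmedCases and Fatalities into daily counts per place by differencing consecutive days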
for place in places:
tmp = df_traintest2['ConfirmedCases'][df_traintest2['place_id'] == place].values
tmp[1:] -= tmp[:-1]
df_traintest2['cases/day'][df_traintest2['place_id'] == place] = tmp
tmp = df_traintest2['Fatalities'][df_traintest2['place_id'] == place].values
tmp[1:] -= tmp[:-1]
df_traintest2['fatal/day'][df_traintest2['place_id'] == place] = tmp
print(df_traintest2.shape)
df_traintest2[df_traintest2['place_id'] == 'China/Hubei'].head() | code |
32068790/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
print(df_train.shape)
df_train.tail() | code |
32068790/cell_14 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
def func(x):
try:
x_new = x['Country_Region'] + '/' + x['Province_State']
except:
x_new = x['Country_Region']
return x_new
df_traintest['place_id'] = df_traintest.apply(lambda x: func(x), axis=1)
df_latlong = pd.read_csv('../input/smokingstats/df_Latlong.csv')
def func(x):
try:
x_new = x['Country/Region'] + '/' + x['Province/State']
except:
x_new = x['Country/Region']
return x_new
df_latlong['place_id'] = df_latlong.apply(lambda x: func(x), axis=1)
df_latlong = df_latlong[df_latlong['place_id'].duplicated() == False]
df_traintest = pd.merge(df_traintest, df_latlong[['place_id', 'Lat', 'Long']], on='place_id', how='left')
print(pd.isna(df_traintest['Lat']).sum())
df_traintest[pd.isna(df_traintest['Lat'])].head() | code |
32068790/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
def func(x):
try:
x_new = x['Country_Region'] + '/' + x['Province_State']
except:
x_new = x['Country_Region']
return x_new
df_traintest['place_id'] = df_traintest.apply(lambda x: func(x), axis=1)
df_traintest[(df_traintest['day'] >= day_before_public - 3) & (df_traintest['place_id'] == 'China/Hubei')].head() | code |
32068790/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
def func(x):
try:
x_new = x['Country_Region'] + '/' + x['Province_State']
except:
x_new = x['Country_Region']
return x_new
df_traintest['place_id'] = df_traintest.apply(lambda x: func(x), axis=1)
df_latlong = pd.read_csv('../input/smokingstats/df_Latlong.csv')
def func(x):
try:
x_new = x['Country/Region'] + '/' + x['Province/State']
except:
x_new = x['Country/Region']
return x_new
df_latlong['place_id'] = df_latlong.apply(lambda x: func(x), axis=1)
df_latlong = df_latlong[df_latlong['place_id'].duplicated() == False]
df_latlong.head() | code |
32068790/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
print(df_test.shape)
df_test.head() | code |
17113130/cell_25 | [
"text_plain_output_1.png"
] | from keras.layers import Input, Embedding, Flatten, Dot, Dense
from keras.models import Model
import numpy as np
import pandas as pd
def import_data():
dataset = pd.read_csv('../input/ratings.csv')
books = pd.read_csv('../input/books.csv')
return (dataset, books)
def extract_book_and_user(dataset):
n_user = len(dataset.user_id.unique())
n_books = len(dataset.book_id.unique())
    return (n_user, n_books)
datasets, book = import_data()
n_user, n_books = extract_book_and_user(datasets)
# Assumed 80/20 train/test split (the split used in the source notebook is not shown in this cell)
from sklearn.model_selection import train_test_split
train, test = train_test_split(datasets, test_size=0.2)
def nural_net_model_function():
from keras.layers import Input, Embedding, Flatten, Dot, Dense
from keras.models import Model
book_input = Input(shape=[1], name='Book-Input')
book_embadding = Embedding(n_books + 1, 5, name='Bok-embadding')(book_input)
book_vec = Flatten(name='Flatten-books')(book_embadding)
user_input = Input(shape=[1], name='User-Input')
user_embedding = Embedding(n_user + 1, 5, name='User-Embedding')(user_input)
user_vec = Flatten(name='Flatten-Users')(user_embedding)
prod = Dot(name='Dot-Product', axes=1)([book_vec, user_vec])
x_train = [user_input, book_input]
y_train = prod
model = Model(x_train, y_train)
OPTIMIZER = 'adam'
ERROR_FUNCTION = 'mean_squared_error'
model.compile(OPTIMIZER, ERROR_FUNCTION)
model.fit([train.user_id, train.book_id], train.rating, epochs=10, verbose=1)
return model
model = nural_net_model_function()
model.save('regresssion.model.h5')
def get_unique_value():
book_data = np.array(list(set(datasets.book_id)))
return book_data
book_data = get_unique_value()
def setting_user(user_id):
user = np.array([user_id for i in range(len(book_data))])
return user
user = setting_user(1)
predictions = model.predict([user, book_data])
predictions = np.array([item[0] for item in predictions])
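# Indices of the 5 highest predicted ratings for this user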
def get_recommended_book_id(predictions):
recommended_book_ids = (-predictions).argsort()[:5]
return recommended_book_ids
recomended_book = get_recommended_book_id(predictions)
recomended_book | code |
17113130/cell_1 | [
"text_plain_output_1.png"
] | ## importing necessary packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
try:
!pip install tensorflow-gpu
import tensorflow as tf
except:
!pip install tensorflow
import tensorflow as tf | code |
17113130/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
def import_data():
dataset = pd.read_csv('../input/ratings.csv')
books = pd.read_csv('../input/books.csv')
return (dataset, books)
def extract_book_and_user(dataset):
n_user = len(dataset.user_id.unique())
n_books = len(dataset.book_id.unique())
return (n_user, n_books)
datasets, book = import_data()
n_user, n_books = extract_book_and_user(datasets)
print(n_user)
print(n_books) | code |
17113130/cell_16 | [
"text_plain_output_1.png"
import numpy as np
import pandas as pd
datasets = pd.read_csv('../input/ratings.csv')  # ratings table, as loaded by import_data() in the other cells of this notebook
def get_unique_value():
book_data = np.array(list(set(datasets.book_id)))
return book_data
book_data = get_unique_value()
def setting_user(user_id):
user = np.array([user_id for i in range(len(book_data))])
return user
user = setting_user(1)
user | code |
17113130/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
def import_data():
dataset = pd.read_csv('../input/ratings.csv')
books = pd.read_csv('../input/books.csv')
return (dataset, books)
datasets, book = import_data()
book = book[['id', 'original_title', 'authors', 'isbn', 'original_publication_year']]
book.head() | code |
17113130/cell_24 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Input, Embedding, Flatten, Dot, Dense
from keras.models import Model
import numpy as np
import pandas as pd
def import_data():
dataset = pd.read_csv('../input/ratings.csv')
books = pd.read_csv('../input/books.csv')
return (dataset, books)
datasets, book = import_data()
book = book[['id', 'original_title', 'authors', 'isbn', 'original_publication_year']]
def extract_book_and_user(dataset):
n_user = len(dataset.user_id.unique())
n_books = len(dataset.book_id.unique())
    return (n_user, n_books)
n_user, n_books = extract_book_and_user(datasets)
# Assumed 80/20 train/test split (the split used in the source notebook is not shown in this cell)
from sklearn.model_selection import train_test_split
train, test = train_test_split(datasets, test_size=0.2)
def nural_net_model_function():
from keras.layers import Input, Embedding, Flatten, Dot, Dense
from keras.models import Model
book_input = Input(shape=[1], name='Book-Input')
book_embadding = Embedding(n_books + 1, 5, name='Bok-embadding')(book_input)
book_vec = Flatten(name='Flatten-books')(book_embadding)
user_input = Input(shape=[1], name='User-Input')
user_embedding = Embedding(n_user + 1, 5, name='User-Embedding')(user_input)
user_vec = Flatten(name='Flatten-Users')(user_embedding)
prod = Dot(name='Dot-Product', axes=1)([book_vec, user_vec])
x_train = [user_input, book_input]
y_train = prod
model = Model(x_train, y_train)
OPTIMIZER = 'adam'
ERROR_FUNCTION = 'mean_squared_error'
model.compile(OPTIMIZER, ERROR_FUNCTION)
model.fit([train.user_id, train.book_id], train.rating, epochs=10, verbose=1)
return model
model = nural_net_model_function()
model.save('regresssion.model.h5')
def get_unique_value():
book_data = np.array(list(set(datasets.book_id)))
return book_data
book_data = get_unique_value()
def setting_user(user_id):
user = np.array([user_id for i in range(len(book_data))])
return user
user = setting_user(1)
predictions = model.predict([user, book_data])
predictions = np.array([item[0] for item in predictions])
def get_recommended_book_id(predictions):
recommended_book_ids = (-predictions).argsort()[:5]
return recommended_book_ids
recomended_book = get_recommended_book_id(predictions)
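# Look up the rows of the books table whose id is in the recommended list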
def get_recommended_book_name(book, recomended_book):
books_index = book['id'].isin(recomended_book)
value = book[books_index]
return value
name = get_recommended_book_name(book, recomended_book)
name | code |
17113130/cell_10 | [
"text_html_output_1.png"
] | from keras.layers import Input, Embedding, Flatten, Dot, Dense
from keras.models import Model
import pandas as pd
def import_data():
dataset = pd.read_csv('../input/ratings.csv')
books = pd.read_csv('../input/books.csv')
return (dataset, books)
def extract_book_and_user(dataset):
n_user = len(dataset.user_id.unique())
n_books = len(dataset.book_id.unique())
    return (n_user, n_books)
datasets, book = import_data()
n_user, n_books = extract_book_and_user(datasets)
# Assumed 80/20 train/test split (the split used in the source notebook is not shown in this cell)
from sklearn.model_selection import train_test_split
train, test = train_test_split(datasets, test_size=0.2)
def nural_net_model_function():
from keras.layers import Input, Embedding, Flatten, Dot, Dense
from keras.models import Model
book_input = Input(shape=[1], name='Book-Input')
book_embadding = Embedding(n_books + 1, 5, name='Bok-embadding')(book_input)
book_vec = Flatten(name='Flatten-books')(book_embadding)
user_input = Input(shape=[1], name='User-Input')
user_embedding = Embedding(n_user + 1, 5, name='User-Embedding')(user_input)
user_vec = Flatten(name='Flatten-Users')(user_embedding)
prod = Dot(name='Dot-Product', axes=1)([book_vec, user_vec])
x_train = [user_input, book_input]
y_train = prod
model = Model(x_train, y_train)
OPTIMIZER = 'adam'
ERROR_FUNCTION = 'mean_squared_error'
model.compile(OPTIMIZER, ERROR_FUNCTION)
model.fit([train.user_id, train.book_id], train.rating, epochs=10, verbose=1)
return model
model = nural_net_model_function() | code |
16154407/cell_13 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import gensim
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.shape
def read_questions(row, column_name):
return gensim.utils.simple_preprocess(str(row[column_name]).encode('utf-8'))
documents = []
for index, row in df.iterrows():
documents.append(read_questions(row, 'question1'))
if row['is_duplicate'] == 0:
documents.append(read_questions(row, 'question2'))
model = gensim.models.Word2Vec(size=150, window=10, min_count=10, sg=1, workers=10)
model.build_vocab(documents)
model.train(sentences=documents, total_examples=len(documents), epochs=model.iter)
w1 = 'phone'
model.wv.most_similar(positive=w1, topn=5)
w1 = ['women', 'rights']
w2 = ['girls']
model.wv.most_similar(positive=w1, negative=w2, topn=2)
model.wv.doesnt_match(['government', 'corruption', 'peace']) | code |
16154407/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.shape | code |
16154407/cell_11 | [
"text_plain_output_1.png"
] | import gensim
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.shape
def read_questions(row, column_name):
return gensim.utils.simple_preprocess(str(row[column_name]).encode('utf-8'))
documents = []
for index, row in df.iterrows():
documents.append(read_questions(row, 'question1'))
if row['is_duplicate'] == 0:
documents.append(read_questions(row, 'question2'))
model = gensim.models.Word2Vec(size=150, window=10, min_count=10, sg=1, workers=10)
model.build_vocab(documents)
model.train(sentences=documents, total_examples=len(documents), epochs=model.iter)
w1 = 'phone'
model.wv.most_similar(positive=w1, topn=5) | code |
16154407/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import gensim
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.shape
def read_questions(row, column_name):
return gensim.utils.simple_preprocess(str(row[column_name]).encode('utf-8'))
documents = []
for index, row in df.iterrows():
documents.append(read_questions(row, 'question1'))
if row['is_duplicate'] == 0:
documents.append(read_questions(row, 'question2'))
print("List of lists. Let's confirm: ", type(documents), ' of ', type(documents[0])) | code |
16154407/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.head() | code |
16154407/cell_10 | [
"text_html_output_1.png"
] | import gensim
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.shape
def read_questions(row, column_name):
return gensim.utils.simple_preprocess(str(row[column_name]).encode('utf-8'))
documents = []
for index, row in df.iterrows():
documents.append(read_questions(row, 'question1'))
if row['is_duplicate'] == 0:
documents.append(read_questions(row, 'question2'))
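# Skip-gram Word2Vec (sg=1): 150-dimensional vectors, 10-word window, ignoring words seen fewer than 10 times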
model = gensim.models.Word2Vec(size=150, window=10, min_count=10, sg=1, workers=10)
model.build_vocab(documents)
model.train(sentences=documents, total_examples=len(documents), epochs=model.iter) | code |
16154407/cell_12 | [
"text_plain_output_1.png"
] | import gensim
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.shape
def read_questions(row, column_name):
return gensim.utils.simple_preprocess(str(row[column_name]).encode('utf-8'))
documents = []
for index, row in df.iterrows():
documents.append(read_questions(row, 'question1'))
if row['is_duplicate'] == 0:
documents.append(read_questions(row, 'question2'))
model = gensim.models.Word2Vec(size=150, window=10, min_count=10, sg=1, workers=10)
model.build_vocab(documents)
model.train(sentences=documents, total_examples=len(documents), epochs=model.iter)
w1 = 'phone'
model.wv.most_similar(positive=w1, topn=5)
w1 = ['women', 'rights']
w2 = ['girls']
model.wv.most_similar(positive=w1, negative=w2, topn=2) | code |
32065814/cell_42 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
x = std.groupby(['State'], as_index=False).max()
x['State / Union Territory'] = x['State']
x = x.drop(['State'], axis=1)
x.sort_values(by='Positive', ascending=False).head(5)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
y = covid.groupby(['State/UnionTerritory'], as_index=False)['Cured', 'Deaths', 'Confirmed'].max()
y['State / Union Territory'] = y['State/UnionTerritory']
y = y.drop(['State/UnionTerritory'], axis=1)
master = pd.merge(pop, y, how='inner')
master = master.sort_values(by='Confirmed', ascending=False)
master2 = pd.merge(master, x)
master2 = master2.drop(['Negative', 'Positive'], axis=1)
beds = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
beds = beds.drop(['NumPrimaryHealthCenters_HMIS', 'NumCommunityHealthCenters_HMIS', 'NumSubDistrictHospitals_HMIS', 'NumDistrictHospitals_HMIS'], axis=1)
beds['TotalPublicHealthFacilities_NHP18'] = beds['NumRuralHospitals_NHP18'] + beds['NumUrbanHospitals_NHP18']
beds = beds.drop(['Sno', 'NumRuralHospitals_NHP18', 'NumUrbanHospitals_NHP18'], axis=1)
beds['TotalBeds_HMIS'] = beds['NumPublicBeds_HMIS']
beds = beds.drop(['NumPublicBeds_HMIS'], axis=1)
beds['TotalBeds_NHP18'] = beds['NumRuralBeds_NHP18'] + beds['NumUrbanBeds_NHP18']
beds = beds.drop(['NumRuralBeds_NHP18', 'NumUrbanBeds_NHP18'], axis=1)
beds_hmis = beds[['State/UT', 'TotalPublicHealthFacilities_HMIS', 'TotalBeds_HMIS']]
beds_hmis = beds_hmis[:-1]
beds_hmis['State / Union Territory'] = beds_hmis['State/UT']
beds_nhp18 = beds[['State/UT', 'TotalPublicHealthFacilities_NHP18', 'TotalBeds_NHP18']]
beds_nhp18 = beds_nhp18[:-1]
beds_nhp18['State / Union Territory'] = beds_nhp18['State/UT']
print(beds_nhp18.shape) | code |
32065814/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
age_groups = age_groups.drop(['Sno', 'TotalCases'], axis=1)
sns.barplot(x='AgeGroup', y='Deaths', data=age_groups) | code |
32065814/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
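# Keep only the leading numeric token of the Area and Density strings (drop the units)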
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
pop.head() | code |
32065814/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
print('Total COVID-19 cases in India as of 11th April 2020, 05:00 pm :', age_groups.TotalCases.sum()) | code |
32065814/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
covid.head() | code |
32065814/cell_48 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
x = std.groupby(['State'], as_index=False).max()
x['State / Union Territory'] = x['State']
x = x.drop(['State'], axis=1)
x.sort_values(by='Positive', ascending=False).head(5)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
y = covid.groupby(['State/UnionTerritory'], as_index=False)['Cured', 'Deaths', 'Confirmed'].max()
y['State / Union Territory'] = y['State/UnionTerritory']
y = y.drop(['State/UnionTerritory'], axis=1)
master = pd.merge(pop, y, how='inner')
master = master.sort_values(by='Confirmed', ascending=False)
master2 = pd.merge(master, x)
master2 = master2.drop(['Negative', 'Positive'], axis=1)
beds = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
beds = beds.drop(['NumPrimaryHealthCenters_HMIS', 'NumCommunityHealthCenters_HMIS', 'NumSubDistrictHospitals_HMIS', 'NumDistrictHospitals_HMIS'], axis=1)
beds['TotalPublicHealthFacilities_NHP18'] = beds['NumRuralHospitals_NHP18'] + beds['NumUrbanHospitals_NHP18']
beds = beds.drop(['Sno', 'NumRuralHospitals_NHP18', 'NumUrbanHospitals_NHP18'], axis=1)
beds['TotalBeds_HMIS'] = beds['NumPublicBeds_HMIS']
beds = beds.drop(['NumPublicBeds_HMIS'], axis=1)
beds['TotalBeds_NHP18'] = beds['NumRuralBeds_NHP18'] + beds['NumUrbanBeds_NHP18']
beds = beds.drop(['NumRuralBeds_NHP18', 'NumUrbanBeds_NHP18'], axis=1)
beds_hmis = beds[['State/UT', 'TotalPublicHealthFacilities_HMIS', 'TotalBeds_HMIS']]
beds_hmis = beds_hmis[:-1]
beds_hmis['State / Union Territory'] = beds_hmis['State/UT']
beds_nhp18 = beds[['State/UT', 'TotalPublicHealthFacilities_NHP18', 'TotalBeds_NHP18']]
beds_nhp18 = beds_nhp18[:-1]
beds_nhp18['State / Union Territory'] = beds_nhp18['State/UT']
hmis = pd.merge(master, beds_hmis, on='State / Union Territory')
hmis = hmis.drop(['State/UT'], axis=1)
nhp18 = pd.merge(master, beds_nhp18, on='State / Union Territory')
nhp18 = nhp18.drop(['State/UT'], axis=1)
nhp18.head() | code |
32065814/cell_2 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32065814/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
age_groups = age_groups.drop(['Sno', 'TotalCases'], axis=1)
age_groups | code |
32065814/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop.head() | code |
32065814/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
y = covid.groupby(['State/UnionTerritory'], as_index=False)['Cured', 'Deaths', 'Confirmed'].max()
y['State / Union Territory'] = y['State/UnionTerritory']
y = y.drop(['State/UnionTerritory'], axis=1)
master = pd.merge(pop, y, how='inner')
master = master.sort_values(by='Confirmed', ascending=False)
master.head() | code |
32065814/cell_47 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
x = std.groupby(['State'], as_index=False).max()
x['State / Union Territory'] = x['State']
x = x.drop(['State'], axis=1)
x.sort_values(by='Positive', ascending=False).head(5)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
y = covid.groupby(['State/UnionTerritory'], as_index=False)['Cured', 'Deaths', 'Confirmed'].max()
y['State / Union Territory'] = y['State/UnionTerritory']
y = y.drop(['State/UnionTerritory'], axis=1)
master = pd.merge(pop, y, how='inner')
master = master.sort_values(by='Confirmed', ascending=False)
master2 = pd.merge(master, x)
master2 = master2.drop(['Negative', 'Positive'], axis=1)
beds = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
beds = beds.drop(['NumPrimaryHealthCenters_HMIS', 'NumCommunityHealthCenters_HMIS', 'NumSubDistrictHospitals_HMIS', 'NumDistrictHospitals_HMIS'], axis=1)
beds['TotalPublicHealthFacilities_NHP18'] = beds['NumRuralHospitals_NHP18'] + beds['NumUrbanHospitals_NHP18']
beds = beds.drop(['Sno', 'NumRuralHospitals_NHP18', 'NumUrbanHospitals_NHP18'], axis=1)
beds['TotalBeds_HMIS'] = beds['NumPublicBeds_HMIS']
beds = beds.drop(['NumPublicBeds_HMIS'], axis=1)
beds['TotalBeds_NHP18'] = beds['NumRuralBeds_NHP18'] + beds['NumUrbanBeds_NHP18']
beds = beds.drop(['NumRuralBeds_NHP18', 'NumUrbanBeds_NHP18'], axis=1)
beds_hmis = beds[['State/UT', 'TotalPublicHealthFacilities_HMIS', 'TotalBeds_HMIS']]
beds_hmis = beds_hmis[:-1]
beds_hmis['State / Union Territory'] = beds_hmis['State/UT']
beds_nhp18 = beds[['State/UT', 'TotalPublicHealthFacilities_NHP18', 'TotalBeds_NHP18']]
beds_nhp18 = beds_nhp18[:-1]
beds_nhp18['State / Union Territory'] = beds_nhp18['State/UT']
hmis = pd.merge(master, beds_hmis, on='State / Union Territory')
hmis = hmis.drop(['State/UT'], axis=1)
hmis.head() | code |
32065814/cell_35 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
x = std.groupby(['State'], as_index=False).max()
x['State / Union Territory'] = x['State']
x = x.drop(['State'], axis=1)
x.sort_values(by='Positive', ascending=False).head(5)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
y = covid.groupby(['State/UnionTerritory'], as_index=False)['Cured', 'Deaths', 'Confirmed'].max()
y['State / Union Territory'] = y['State/UnionTerritory']
y = y.drop(['State/UnionTerritory'], axis=1)
master = pd.merge(pop, y, how='inner')
master = master.sort_values(by='Confirmed', ascending=False)
master2 = pd.merge(master, x)
master2 = master2.drop(['Negative', 'Positive'], axis=1)
master2 = master2.sort_values(by='TotalSamples', ascending=False)
master2.head() | code |
32065814/cell_43 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
x = std.groupby(['State'], as_index=False).max()
x['State / Union Territory'] = x['State']
x = x.drop(['State'], axis=1)
x.sort_values(by='Positive', ascending=False).head(5)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
y = covid.groupby(['State/UnionTerritory'], as_index=False)['Cured', 'Deaths', 'Confirmed'].max()
y['State / Union Territory'] = y['State/UnionTerritory']
y = y.drop(['State/UnionTerritory'], axis=1)
master = pd.merge(pop, y, how='inner')
master = master.sort_values(by='Confirmed', ascending=False)
master2 = pd.merge(master, x)
master2 = master2.drop(['Negative', 'Positive'], axis=1)
beds = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
beds = beds.drop(['NumPrimaryHealthCenters_HMIS', 'NumCommunityHealthCenters_HMIS', 'NumSubDistrictHospitals_HMIS', 'NumDistrictHospitals_HMIS'], axis=1)
beds['TotalPublicHealthFacilities_NHP18'] = beds['NumRuralHospitals_NHP18'] + beds['NumUrbanHospitals_NHP18']
beds = beds.drop(['Sno', 'NumRuralHospitals_NHP18', 'NumUrbanHospitals_NHP18'], axis=1)
beds['TotalBeds_HMIS'] = beds['NumPublicBeds_HMIS']
beds = beds.drop(['NumPublicBeds_HMIS'], axis=1)
beds['TotalBeds_NHP18'] = beds['NumRuralBeds_NHP18'] + beds['NumUrbanBeds_NHP18']
beds = beds.drop(['NumRuralBeds_NHP18', 'NumUrbanBeds_NHP18'], axis=1)
beds_hmis = beds[['State/UT', 'TotalPublicHealthFacilities_HMIS', 'TotalBeds_HMIS']]
beds_hmis = beds_hmis[:-1]
beds_hmis['State / Union Territory'] = beds_hmis['State/UT']
beds_nhp18 = beds[['State/UT', 'TotalPublicHealthFacilities_NHP18', 'TotalBeds_NHP18']]
beds_nhp18 = beds_nhp18[:-1]
beds_nhp18['State / Union Territory'] = beds_nhp18['State/UT']
print(beds_hmis.shape) | code |
32065814/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
x = std.groupby(['State'], as_index=False).max()
x['State / Union Territory'] = x['State']
x = x.drop(['State'], axis=1)
x.sort_values(by='Positive', ascending=False).head(5) | code |
32065814/cell_37 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
age_groups = pd.read_csv('/kaggle/input/covid19-in-india/AgeGroupDetails.csv')
std = pd.read_csv('/kaggle/input/covid19-in-india/StatewiseTestingDetails.csv')
std = std.drop(['Date'], axis=1)
x = std.groupby(['State'], as_index=False).max()
x['State / Union Territory'] = x['State']
x = x.drop(['State'], axis=1)
x.sort_values(by='Positive', ascending=False).head(5)
pop = pd.read_csv('/kaggle/input/covid19-in-india/population_india_census2011.csv')
pop['Area/km2'] = pop['Area'].str.split(expand=True)[0]
pop['Density/km2'] = pop['Density'].str.split('/', expand=True)[0]
pop = pop.drop(['Sno', 'Population', 'Rural population', 'Urban population', 'Gender Ratio', 'Area', 'Density'], axis=1)
pop.to_csv('pop.csv', index=False)
covid = pd.read_csv('/kaggle/input/covid19-in-india/covid_19_india.csv')
y = covid.groupby(['State/UnionTerritory'], as_index=False)['Cured', 'Deaths', 'Confirmed'].max()
y['State / Union Territory'] = y['State/UnionTerritory']
y = y.drop(['State/UnionTerritory'], axis=1)
master = pd.merge(pop, y, how='inner')
master = master.sort_values(by='Confirmed', ascending=False)
master2 = pd.merge(master, x)
master2 = master2.drop(['Negative', 'Positive'], axis=1)
beds = pd.read_csv('/kaggle/input/covid19-in-india/HospitalBedsIndia.csv')
beds.head() | code |
90108705/cell_21 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.boxplot(x='Pclass', y='Age', data=train) | code |
90108705/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.head() | code |
90108705/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
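# Fill missing Age values with a typical age per passenger class (1st: 37, 2nd: 29, 3rd: 24)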
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dropna(inplace=True)
test.dropna(inplace=True)
sex_train = pd.get_dummies(train['Sex'], drop_first=True)
sex_test = pd.get_dummies(test['Sex'], drop_first=True)
embark_train = pd.get_dummies(train['Embarked'], drop_first=True)
embark_test = pd.get_dummies(test['Embarked'], drop_first=True)
train = pd.concat([train, sex_train, embark_train], axis=1)
test = pd.concat([test, sex_test, embark_test], axis=1)
train.head() | code |
90108705/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90108705/cell_29 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.head() | code |
90108705/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.heatmap(train.isnull(), yticklabels=False, cbar=False) | code |
90108705/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.distplot(train['Fare'].dropna(), kde=False, bins=40) | code |
90108705/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train.dropna(inplace=True)
test.dropna(inplace=True)
train.info() | code |
90108705/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.countplot(x='Survived', data=train, palette='RdBu_r') | code |
90108705/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.countplot(x='SibSp', data=train) | code |
90108705/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.distplot(train['Age'].dropna(), kde=False, bins=30) | code |
90108705/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.countplot(x='Survived', hue='Sex', data=train, palette='RdBu_r') | code |
90108705/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.set_style('whitegrid')
sns.countplot(x='Survived', data=train, hue='Pclass') | code |
74051946/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PowerTransformer, StandardScaler,Normalizer,RobustScaler,MaxAbsScaler,MinMaxScaler,QuantileTransformer
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
import tensorflow as tf
import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
Robustscaler = make_pipeline(SimpleImputer(strategy='median', add_indicator=True), RobustScaler())
OneHotencoder = make_pipeline(SimpleImputer(strategy='most_frequent', add_indicator=True), OneHotEncoder())
OneHot_RobustScaler = make_column_transformer((OneHotencoder, cat_columns), (Robustscaler, num_columns))
y = train['target']
X = train.drop(['id', 'target'], axis=1)
OneHot_RobustScaler.fit(X)
Xpre = OneHot_RobustScaler.transform(X)
test_final = test.drop(['id'], axis=1)
test_finalpre = OneHot_RobustScaler.transform(test_final)
X_train, X_test, y_train, y_test = train_test_split(Xpre, y, test_size=0.1)
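# Step-decay learning-rate schedule: 0.01 initially, reduced at fixed epoch milestones down to 1e-05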
def lr_schedul(epoch):
x = 0.01
if epoch >= 5:
x = 0.005
if epoch >= 10:
x = 0.001
if epoch >= 15:
x = 0.0008
if epoch >= 20:
x = 0.0005
if epoch >= 30:
x = 0.0001
if epoch >= 60:
x = 1e-05
return x
lr_decay = LearningRateScheduler(lr_schedul, verbose=1)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model = tf.keras.Sequential()
model.add(layers.Dense(20, kernel_initializer='he_normal', input_shape=(n_features,), activation='relu'))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model.summary()
EPOCHS = 1000
es = EarlyStopping(monitor='val_loss', min_delta=1e-13, restore_best_weights=True, patience=10)
with tf.device('/gpu:0'):
history = model.fit(Xpre, y, batch_size=256, epochs=EPOCHS, validation_split=0.1, verbose=0, callbacks=[lr_decay, es], shuffle=True)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model_reg = tf.keras.Sequential()
model_reg.add(layers.Dense(50, input_shape=(n_features,), activation='relu'))
model_reg.add(layers.BatchNormalization())
model_reg.add(layers.Dropout(0.4))
model_reg.add(layers.Dense(30, activation='relu'))
model_reg.add(layers.Dropout(0.2))
model_reg.add(layers.Dense(5, activation='relu'))
model_reg.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model_reg.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model_reg.summary()
EPOCHS = 1000
es = EarlyStopping(monitor='val_loss', min_delta=1e-13, restore_best_weights=True, patience=10)
with tf.device('/gpu:0'):
history2 = model_reg.fit(Xpre, y, batch_size=256, epochs=EPOCHS, validation_split=0.1, verbose=0, callbacks=[lr_decay, es], shuffle=True) | code |
74051946/cell_13 | [
"text_html_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PowerTransformer, StandardScaler,Normalizer,RobustScaler,MaxAbsScaler,MinMaxScaler,QuantileTransformer
from tensorflow.keras import layers
import tensorflow as tf
import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
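# Preprocessing: one-hot encode the categorical columns and robust-scale the numeric ones (with most-frequent / median imputation)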
Robustscaler = make_pipeline(SimpleImputer(strategy='median', add_indicator=True), RobustScaler())
OneHotencoder = make_pipeline(SimpleImputer(strategy='most_frequent', add_indicator=True), OneHotEncoder())
OneHot_RobustScaler = make_column_transformer((OneHotencoder, cat_columns), (Robustscaler, num_columns))
y = train['target']
X = train.drop(['id', 'target'], axis=1)
OneHot_RobustScaler.fit(X)
Xpre = OneHot_RobustScaler.transform(X)
test_final = test.drop(['id'], axis=1)
test_finalpre = OneHot_RobustScaler.transform(test_final)
X_train, X_test, y_train, y_test = train_test_split(Xpre, y, test_size=0.1)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model = tf.keras.Sequential()
model.add(layers.Dense(20, kernel_initializer='he_normal', input_shape=(n_features,), activation='relu'))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model.summary() | code |
74051946/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
train.head() | code |
74051946/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PowerTransformer, StandardScaler,Normalizer,RobustScaler,MaxAbsScaler,MinMaxScaler,QuantileTransformer
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
import tensorflow as tf
import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
Robustscaler = make_pipeline(SimpleImputer(strategy='median', add_indicator=True), RobustScaler())
OneHotencoder = make_pipeline(SimpleImputer(strategy='most_frequent', add_indicator=True), OneHotEncoder())
OneHot_RobustScaler = make_column_transformer((OneHotencoder, cat_columns), (Robustscaler, num_columns))
y = train['target']
X = train.drop(['id', 'target'], axis=1)
OneHot_RobustScaler.fit(X)
Xpre = OneHot_RobustScaler.transform(X)
test_final = test.drop(['id'], axis=1)
test_finalpre = OneHot_RobustScaler.transform(test_final)
X_train, X_test, y_train, y_test = train_test_split(Xpre, y, test_size=0.1)
def lr_schedul(epoch):
x = 0.01
if epoch >= 5:
x = 0.005
if epoch >= 10:
x = 0.001
if epoch >= 15:
x = 0.0008
if epoch >= 20:
x = 0.0005
if epoch >= 30:
x = 0.0001
if epoch >= 60:
x = 1e-05
return x
lr_decay = LearningRateScheduler(lr_schedul, verbose=1)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model = tf.keras.Sequential()
model.add(layers.Dense(20, kernel_initializer='he_normal', input_shape=(n_features,), activation='relu'))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model.summary()
EPOCHS = 1000
es = EarlyStopping(monitor='val_loss', min_delta=1e-13, restore_best_weights=True, patience=10)
with tf.device('/gpu:0'):
history = model.fit(Xpre, y, batch_size=256, epochs=EPOCHS, validation_split=0.1, verbose=0, callbacks=[lr_decay, es], shuffle=True)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model_reg = tf.keras.Sequential()
model_reg.add(layers.Dense(50, input_shape=(n_features,), activation='relu'))
model_reg.add(layers.BatchNormalization())
model_reg.add(layers.Dropout(0.4))
model_reg.add(layers.Dense(30, activation='relu'))
model_reg.add(layers.Dropout(0.2))
model_reg.add(layers.Dense(5, activation='relu'))
model_reg.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model_reg.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model_reg.summary()
tf.keras.utils.plot_model(model=model_reg, show_shapes=True, dpi=76) | code |
74051946/cell_6 | [
"image_output_1.png"
] | import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
print(cat_columns)
print(num_columns)
print(all_columns) | code |
74051946/cell_2 | [
"image_output_1.png"
] | import warnings
import warnings
import pandas as pd
import numpy as np
import matplotlib.gridspec as gridspec
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import PowerTransformer, StandardScaler, Normalizer, RobustScaler, MaxAbsScaler, MinMaxScaler, QuantileTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.manifold import TSNE
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from datetime import datetime, date
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.model_selection import cross_val_score
import lightgbm as lgbm
from catboost import CatBoostRegressor
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.kernel_ridge import KernelRidge
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
import lightgbm as lgb
from scipy import sparse
from sklearn.neighbors import KNeighborsRegressor
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression, f_classif
from sklearn.feature_selection import mutual_info_regression
from sklearn.preprocessing import PolynomialFeatures
from itertools import combinations
from sklearn.linear_model import LinearRegression, RidgeCV
import category_encoders as ce
import warnings
import optuna
warnings.filterwarnings('ignore') | code |
74051946/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PowerTransformer, StandardScaler,Normalizer,RobustScaler,MaxAbsScaler,MinMaxScaler,QuantileTransformer
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
import tensorflow as tf
import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
Robustscaler = make_pipeline(SimpleImputer(strategy='median', add_indicator=True), RobustScaler())
OneHotencoder = make_pipeline(SimpleImputer(strategy='most_frequent', add_indicator=True), OneHotEncoder())
OneHot_RobustScaler = make_column_transformer((OneHotencoder, cat_columns), (Robustscaler, num_columns))
y = train['target']
X = train.drop(['id', 'target'], axis=1)
OneHot_RobustScaler.fit(X)
Xpre = OneHot_RobustScaler.transform(X)
test_final = test.drop(['id'], axis=1)
test_finalpre = OneHot_RobustScaler.transform(test_final)
X_train, X_test, y_train, y_test = train_test_split(Xpre, y, test_size=0.1)
def lr_schedul(epoch):
x = 0.01
if epoch >= 5:
x = 0.005
if epoch >= 10:
x = 0.001
if epoch >= 15:
x = 0.0008
if epoch >= 20:
x = 0.0005
if epoch >= 30:
x = 0.0001
if epoch >= 60:
x = 1e-05
return x
lr_decay = LearningRateScheduler(lr_schedul, verbose=1)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model = tf.keras.Sequential()
model.add(layers.Dense(20, kernel_initializer='he_normal', input_shape=(n_features,), activation='relu'))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model.summary()
EPOCHS = 1000
es = EarlyStopping(monitor='val_loss', min_delta=1e-13, restore_best_weights=True, patience=10)
with tf.device('/gpu:0'):
history = model.fit(Xpre, y, batch_size=256, epochs=EPOCHS, validation_split=0.1, verbose=0, callbacks=[lr_decay, es], shuffle=True)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model_reg = tf.keras.Sequential()
model_reg.add(layers.Dense(50, input_shape=(n_features,), activation='relu'))
model_reg.add(layers.BatchNormalization())
model_reg.add(layers.Dropout(0.4))
model_reg.add(layers.Dense(30, activation='relu'))
model_reg.add(layers.Dropout(0.2))
model_reg.add(layers.Dense(5, activation='relu'))
model_reg.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model_reg.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model_reg.summary() | code |
74051946/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PowerTransformer, StandardScaler,Normalizer,RobustScaler,MaxAbsScaler,MinMaxScaler,QuantileTransformer
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt #to plot some parameters in seaborn
import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
Robustscaler = make_pipeline(SimpleImputer(strategy='median', add_indicator=True), RobustScaler())
OneHotencoder = make_pipeline(SimpleImputer(strategy='most_frequent', add_indicator=True), OneHotEncoder())
OneHot_RobustScaler = make_column_transformer((OneHotencoder, cat_columns), (Robustscaler, num_columns))
y = train['target']
X = train.drop(['id', 'target'], axis=1)
OneHot_RobustScaler.fit(X)
Xpre = OneHot_RobustScaler.transform(X)
test_final = test.drop(['id'], axis=1)
test_finalpre = OneHot_RobustScaler.transform(test_final)
X_train, X_test, y_train, y_test = train_test_split(Xpre, y, test_size=0.1)
def lr_schedul(epoch):
x = 0.01
if epoch >= 5:
x = 0.005
if epoch >= 10:
x = 0.001
if epoch >= 15:
x = 0.0008
if epoch >= 20:
x = 0.0005
if epoch >= 30:
x = 0.0001
if epoch >= 60:
x = 1e-05
return x
lr_decay = LearningRateScheduler(lr_schedul, verbose=1)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model = tf.keras.Sequential()
model.add(layers.Dense(20, kernel_initializer='he_normal', input_shape=(n_features,), activation='relu'))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model.summary()
EPOCHS = 1000
es = EarlyStopping(monitor='val_loss', min_delta=1e-13, restore_best_weights=True, patience=10)
with tf.device('/gpu:0'):
history = model.fit(Xpre, y, batch_size=256, epochs=EPOCHS, validation_split=0.1, verbose=0, callbacks=[lr_decay, es], shuffle=True)
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_history(history):
acc = history.history['rmse']
val_acc = history.history['val_rmse']
loss = history.history['loss']
val_loss = history.history['val_loss']
x = range(1, len(acc) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, 'b', label='Training rmse')
plt.plot(x, val_acc, 'r', label='Validation rmse')
plt.title('Training and validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, 'b', label='Training loss')
plt.plot(x, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plot_history(history) | code |
74051946/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PowerTransformer, StandardScaler,Normalizer,RobustScaler,MaxAbsScaler,MinMaxScaler,QuantileTransformer
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
import tensorflow as tf
import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
Robustscaler = make_pipeline(SimpleImputer(strategy='median', add_indicator=True), RobustScaler())
OneHotencoder = make_pipeline(SimpleImputer(strategy='most_frequent', add_indicator=True), OneHotEncoder())
OneHot_RobustScaler = make_column_transformer((OneHotencoder, cat_columns), (Robustscaler, num_columns))
y = train['target']
X = train.drop(['id', 'target'], axis=1)
OneHot_RobustScaler.fit(X)
Xpre = OneHot_RobustScaler.transform(X)
test_final = test.drop(['id'], axis=1)
test_finalpre = OneHot_RobustScaler.transform(test_final)
X_train, X_test, y_train, y_test = train_test_split(Xpre, y, test_size=0.1)
def lr_schedul(epoch):
x = 0.01
if epoch >= 5:
x = 0.005
if epoch >= 10:
x = 0.001
if epoch >= 15:
x = 0.0008
if epoch >= 20:
x = 0.0005
if epoch >= 30:
x = 0.0001
if epoch >= 60:
x = 1e-05
return x
lr_decay = LearningRateScheduler(lr_schedul, verbose=1)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model = tf.keras.Sequential()
model.add(layers.Dense(20, kernel_initializer='he_normal', input_shape=(n_features,), activation='relu'))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model.summary()
EPOCHS = 1000
es = EarlyStopping(monitor='val_loss', min_delta=1e-13, restore_best_weights=True, patience=10)
with tf.device('/gpu:0'):
history = model.fit(Xpre, y, batch_size=256, epochs=EPOCHS, validation_split=0.1, verbose=0, callbacks=[lr_decay, es], shuffle=True)
loss, rmse = model.evaluate(X_test, y_test, verbose=2)
print('rmse: {}'.format(rmse)) | code |
74051946/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PowerTransformer, StandardScaler,Normalizer,RobustScaler,MaxAbsScaler,MinMaxScaler,QuantileTransformer
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
import tensorflow as tf
import pandas as pd #To work with dataset
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
cat_columns = train.drop(['id', 'target'], axis=1).select_dtypes(exclude=['int64', 'float64']).columns
num_columns = train.drop(['id', 'target'], axis=1).select_dtypes(include=['int64', 'float64']).columns
train[train.select_dtypes(['float64']).columns] = train[train.select_dtypes(['float64']).columns].apply(pd.to_numeric)
train[train.select_dtypes(['object']).columns] = train.select_dtypes(['object']).apply(lambda x: x.astype('category'))
num_columns = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']
cat_columns = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
all_columns = num_columns + cat_columns
Robustscaler = make_pipeline(SimpleImputer(strategy='median', add_indicator=True), RobustScaler())
OneHotencoder = make_pipeline(SimpleImputer(strategy='most_frequent', add_indicator=True), OneHotEncoder())
OneHot_RobustScaler = make_column_transformer((OneHotencoder, cat_columns), (Robustscaler, num_columns))
y = train['target']
X = train.drop(['id', 'target'], axis=1)
OneHot_RobustScaler.fit(X)
Xpre = OneHot_RobustScaler.transform(X)
test_final = test.drop(['id'], axis=1)
test_finalpre = OneHot_RobustScaler.transform(test_final)
X_train, X_test, y_train, y_test = train_test_split(Xpre, y, test_size=0.1)
def lr_schedul(epoch):
x = 0.01
if epoch >= 5:
x = 0.005
if epoch >= 10:
x = 0.001
if epoch >= 15:
x = 0.0008
if epoch >= 20:
x = 0.0005
if epoch >= 30:
x = 0.0001
if epoch >= 60:
x = 1e-05
return x
lr_decay = LearningRateScheduler(lr_schedul, verbose=1)
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
n_features = X_train.shape[1]
model = tf.keras.Sequential()
model.add(layers.Dense(20, kernel_initializer='he_normal', input_shape=(n_features,), activation='relu'))
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-05)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
model.summary()
EPOCHS = 1000
es = EarlyStopping(monitor='val_loss', min_delta=1e-13, restore_best_weights=True, patience=10)
with tf.device('/gpu:0'):
history = model.fit(Xpre, y, batch_size=256, epochs=EPOCHS, validation_split=0.1, verbose=0, callbacks=[lr_decay, es], shuffle=True) | code |
90120308/cell_9 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from torch import nn
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler
import numpy as np
import pandas as pd
import random
import torch
import pandas as pd
import numpy as np
import torch
from torch import nn
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
import random
random.seed(1)
torch.manual_seed(1)
np.random.seed(1)
rng = np.random.default_rng(1)
def load_data(train_path, test_path):
train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)
return (train_data, test_data)
def describe_unique_dataset(df: pd.DataFrame, show_values=False):
nunique = {}
unique = {}
nan = {}
for col in df.columns:
nunique[col] = df[col].nunique()
unique[col] = df[col].unique()
nan[col] = df[col].isna().sum()
[print(f'{item} : {nunique[item]}') for item in nunique.keys()]
[print(f'{item} : {nan[item]}') for item in nan.keys()]
if show_values == True:
[print(f'{item} : {unique[item]}') for item in unique.keys()]
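# Fill missing values: Embarked with the most frequent category, Age and Fare with the mean of their (Pclass, Embarked, Sex) group.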
def impute(data):
data_c = data.copy()
s_imputer = SimpleImputer(strategy='most_frequent')
data_c['Embarked'] = s_imputer.fit_transform(np.array(data_c['Embarked']).reshape(-1, 1))
data_c['Age'] = data_c.groupby(['Pclass', 'Embarked', 'Sex'])['Age'].transform(lambda x: x.fillna(x.mean()))
data_c['Fare'] = data_c.groupby(['Pclass', 'Embarked', 'Sex'])['Fare'].transform(lambda x: x.fillna(x.mean()))
return data_c
def ohe_data(data: pd.DataFrame, cat_name):
data_c = data.copy()
oh_encoder = OneHotEncoder(sparse=False)
transformed_data = oh_encoder.fit_transform(np.array(data_c[cat_name]).reshape(-1, 1))
df_transformed_data = pd.DataFrame(transformed_data)
df_transformed_data.columns = oh_encoder.get_feature_names_out(input_features=[cat_name])
data_c[df_transformed_data.columns] = transformed_data
return data_c
def encode(data: pd.DataFrame):
data_c = data.copy()
data_c = ohe_data(data_c, 'Embarked')
data_c = ohe_data(data_c, 'Sex')
data_c['Pclass'].replace({3: 1, 1: 3}, inplace=True)
for col in data_c.select_dtypes('object'):
data_c[col], _ = data_c[col].factorize()
return data_c
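# Derive extra features: the numeric tail of the ticket string, plus last name, honorific and first name parsed from the Name column.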
def feature_extraction(data: pd.DataFrame):
data_c = data.copy()
data_c['TicketNr'] = data_c['Ticket'].str.extract('(\\d+)$', expand=True).astype(np.float32)
data_c['TicketNr'].fillna(0, inplace=True)
name_extraction = data_c['Name'].str.extract('^(\\w+)..(\\w+)\\W+(\\w+\\s*\\w+)', expand=True)
name_extraction.columns = ['LastName', 'Hon', 'FirstName']
data_c[name_extraction.columns] = name_extraction
return data_c
def split_train_test_indices(data_length, ratio):
r_num = int(data_length * ratio)
indices = np.random.permutation(data_length)
return (indices[:r_num], indices[r_num:])
def load_data(train_path, test_path):
train_data = pd.read_csv(train_path, index_col='PassengerId')
test_data = pd.read_csv(test_path, index_col='PassengerId')
return (train_data, test_data)
train_data, test_data = load_data('../input/titanic/train.csv', '../input/titanic/test.csv')
dataset = pd.concat([train_data, test_data])
dataset = impute(dataset)
dataset = feature_extraction(dataset)
dataset = encode(dataset)
dataset.drop(columns=['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], inplace=True)
dataset = dataset.astype(np.float32)
train_data = dataset.loc[train_data.index, :]
test_data = dataset.loc[test_data.index, :]
test_data.drop(columns=['Survived'], inplace=True)
train_indices, valid_indices = split_train_test_indices(len(train_data), 0.8)
y = train_data.pop('Survived')
X = train_data
t_train_data = TensorDataset(torch.Tensor(X.to_numpy()), torch.Tensor(y.to_numpy().reshape(-1, 1)))
t_test_data = torch.Tensor(test_data.to_numpy())
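# Fully connected binary classifier: repeated BatchNorm -> Linear(2048) -> ReLU -> Dropout blocks, ending in a single sigmoid output unit.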
class BinaryLogisticNetwork(nn.Module):
def __init__(self, input_size):
super(BinaryLogisticNetwork, self).__init__()
self.input_size = input_size
self.binary_logistic = nn.Sequential(nn.BatchNorm1d(input_size), nn.Linear(input_size, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.Linear(2048, 1), nn.Sigmoid())
def forward(self, x):
out = self.binary_logistic(x)
return out
def accuracy(y_preds, y_true):
y_preds = torch.round(y_preds)
return float((y_preds == y_true).sum().float() / y_preds.shape[0])
BATCH_SIZE = 32
EPOCHS = 20
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
g = torch.Generator()
g.manual_seed(1)
train_loader = DataLoader(t_train_data, BATCH_SIZE, sampler=train_sampler, worker_init_fn=1, generator=g)
valid_loader = DataLoader(t_train_data, BATCH_SIZE, sampler=valid_sampler, worker_init_fn=1, generator=g)
model = BinaryLogisticNetwork(len(X.columns))
loss_fn = nn.functional.binary_cross_entropy
optimizer = torch.optim.Adagrad(model.parameters(), lr=0.01, eps=0.01)
def evaluate(model, valid_dl, loss_fn, metric):
losses = []
val_accuracy = []
with torch.no_grad():
for x, y in valid_dl:
preds = model(x)
loss = loss_fn(preds, y)
losses.append(loss.item())
val_accuracy.append(metric(preds, y))
return (sum(losses) / len(losses), sum(val_accuracy) / len(val_accuracy))
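# Training loop: per batch do a forward pass, binary cross-entropy loss, backward and Adagrad step (with a StepLR schedule), then evaluate on the validation loader after every epoch.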
def fit(epochs, model, loss_fn, opt, train_dl, valid_dl, metric):
history = {'Loss': [], 'Accuracy': [], 'Val_Loss': [], 'Val_Accuracy': []}
scheduler = torch.optim.lr_scheduler.StepLR(opt, 30, gamma=0.01)
for epoch in range(epochs):
loss_list = []
scores = []
for count, (x, y) in enumerate(train_dl):
preds = model(x)
loss = loss_fn(preds, y)
loss.backward()
opt.step()
opt.zero_grad()
scheduler.step()
score = accuracy(preds, y)
loss_list.append(loss.item())
scores.append(score)
val_loss, val_accuracy = evaluate(model, valid_dl, loss_fn, metric)
history['Loss'].append(sum(loss_list) / len(loss_list))
history['Accuracy'].append(sum(scores) / len(scores))
history['Val_Loss'].append(val_loss)
history['Val_Accuracy'].append(val_accuracy)
return history
history = fit(EPOCHS, model, loss_fn, optimizer, train_loader, valid_loader, accuracy) | code |
90120308/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from torch import nn
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler
import numpy as np
import pandas as pd
import random
import torch
import pandas as pd
import numpy as np
import torch
from torch import nn
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
import random
random.seed(1)
torch.manual_seed(1)
np.random.seed(1)
rng = np.random.default_rng(1)
def load_data(train_path, test_path):
train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)
return (train_data, test_data)
def describe_unique_dataset(df: pd.DataFrame, show_values=False):
nunique = {}
unique = {}
nan = {}
for col in df.columns:
nunique[col] = df[col].nunique()
unique[col] = df[col].unique()
nan[col] = df[col].isna().sum()
[print(f'{item} : {nunique[item]}') for item in nunique.keys()]
[print(f'{item} : {nan[item]}') for item in nan.keys()]
if show_values == True:
[print(f'{item} : {unique[item]}') for item in unique.keys()]
def impute(data):
data_c = data.copy()
s_imputer = SimpleImputer(strategy='most_frequent')
data_c['Embarked'] = s_imputer.fit_transform(np.array(data_c['Embarked']).reshape(-1, 1))
data_c['Age'] = data_c.groupby(['Pclass', 'Embarked', 'Sex'])['Age'].transform(lambda x: x.fillna(x.mean()))
data_c['Fare'] = data_c.groupby(['Pclass', 'Embarked', 'Sex'])['Fare'].transform(lambda x: x.fillna(x.mean()))
return data_c
def ohe_data(data: pd.DataFrame, cat_name):
data_c = data.copy()
oh_encoder = OneHotEncoder(sparse=False)
transformed_data = oh_encoder.fit_transform(np.array(data_c[cat_name]).reshape(-1, 1))
df_transformed_data = pd.DataFrame(transformed_data)
df_transformed_data.columns = oh_encoder.get_feature_names_out(input_features=[cat_name])
data_c[df_transformed_data.columns] = transformed_data
return data_c
def encode(data: pd.DataFrame):
data_c = data.copy()
data_c = ohe_data(data_c, 'Embarked')
data_c = ohe_data(data_c, 'Sex')
data_c['Pclass'].replace({3: 1, 1: 3}, inplace=True)
for col in data_c.select_dtypes('object'):
data_c[col], _ = data_c[col].factorize()
return data_c
def feature_extraction(data: pd.DataFrame):
data_c = data.copy()
data_c['TicketNr'] = data_c['Ticket'].str.extract('(\\d+)$', expand=True).astype(np.float32)
data_c['TicketNr'].fillna(0, inplace=True)
name_extraction = data_c['Name'].str.extract('^(\\w+)..(\\w+)\\W+(\\w+\\s*\\w+)', expand=True)
name_extraction.columns = ['LastName', 'Hon', 'FirstName']
data_c[name_extraction.columns] = name_extraction
return data_c
def split_train_test_indices(data_length, ratio):
r_num = int(data_length * ratio)
indices = np.random.permutation(data_length)
return (indices[:r_num], indices[r_num:])
def load_data(train_path, test_path):
train_data = pd.read_csv(train_path, index_col='PassengerId')
test_data = pd.read_csv(test_path, index_col='PassengerId')
return (train_data, test_data)
train_data, test_data = load_data('../input/titanic/train.csv', '../input/titanic/test.csv')
dataset = pd.concat([train_data, test_data])
dataset = impute(dataset)
dataset = feature_extraction(dataset)
dataset = encode(dataset)
dataset.drop(columns=['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], inplace=True)
dataset = dataset.astype(np.float32)
train_data = dataset.loc[train_data.index, :]
test_data = dataset.loc[test_data.index, :]
test_data.drop(columns=['Survived'], inplace=True)
train_indices, valid_indices = split_train_test_indices(len(train_data), 0.8)
y = train_data.pop('Survived')
X = train_data
t_train_data = TensorDataset(torch.Tensor(X.to_numpy()), torch.Tensor(y.to_numpy().reshape(-1, 1)))
t_test_data = torch.Tensor(test_data.to_numpy())
class BinaryLogisticNetwork(nn.Module):
def __init__(self, input_size):
super(BinaryLogisticNetwork, self).__init__()
self.input_size = input_size
self.binary_logistic = nn.Sequential(nn.BatchNorm1d(input_size), nn.Linear(input_size, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.BatchNorm1d(2048), nn.Linear(2048, 2048), nn.ReLU(), nn.Dropout(0.2), nn.Linear(2048, 1), nn.Sigmoid())
def forward(self, x):
out = self.binary_logistic(x)
return out
def accuracy(y_preds, y_true):
y_preds = torch.round(y_preds)
return float((y_preds == y_true).sum().float() / y_preds.shape[0])
BATCH_SIZE = 32
EPOCHS = 20
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
g = torch.Generator()
g.manual_seed(1)
train_loader = DataLoader(t_train_data, BATCH_SIZE, sampler=train_sampler, worker_init_fn=1, generator=g)
valid_loader = DataLoader(t_train_data, BATCH_SIZE, sampler=valid_sampler, worker_init_fn=1, generator=g)
model = BinaryLogisticNetwork(len(X.columns))
loss_fn = nn.functional.binary_cross_entropy
optimizer = torch.optim.Adagrad(model.parameters(), lr=0.01, eps=0.01)
def evaluate(model, valid_dl, loss_fn, metric):
losses = []
val_accuracy = []
with torch.no_grad():
for x, y in valid_dl:
preds = model(x)
loss = loss_fn(preds, y)
losses.append(loss.item())
val_accuracy.append(metric(preds, y))
return (sum(losses) / len(losses), sum(val_accuracy) / len(val_accuracy))
def fit(epochs, model, loss_fn, opt, train_dl, valid_dl, metric):
history = {'Loss': [], 'Accuracy': [], 'Val_Loss': [], 'Val_Accuracy': []}
scheduler = torch.optim.lr_scheduler.StepLR(opt, 30, gamma=0.01)
for epoch in range(epochs):
loss_list = []
scores = []
for count, (x, y) in enumerate(train_dl):
preds = model(x)
loss = loss_fn(preds, y)
loss.backward()
opt.step()
opt.zero_grad()
scheduler.step()
score = accuracy(preds, y)
loss_list.append(loss.item())
scores.append(score)
val_loss, val_accuracy = evaluate(model, valid_dl, loss_fn, metric)
history['Loss'].append(sum(loss_list) / len(loss_list))
history['Accuracy'].append(sum(scores) / len(scores))
history['Val_Loss'].append(val_loss)
history['Val_Accuracy'].append(val_accuracy)
return history
history = fit(EPOCHS, model, loss_fn, optimizer, train_loader, valid_loader, accuracy)
df_history = pd.DataFrame(history)
df_history.plot()
df_history | code |
130017473/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='weight', hue='status', multiple='stack') | code |
130017473/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='futime') | code |
130017473/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.countplot(x='male', data=data) | code |
130017473/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='weight') | code |
130017473/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
data.head() | code |
130017473/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.countplot(data=data, x='male', hue='status') | code |
130017473/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130017473/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='height') | code |
130017473/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='bmi') | code |
130017473/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='bmi', hue='status', multiple='stack') | code |
130017473/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='futime', hue='status', multiple='stack') | code |
130017473/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='height', hue='status', multiple='stack') | code |
130017473/cell_12 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='age', hue='status', multiple='stack') | code |
130017473/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/non-alcohol-fatty-liver-disease/nafld1.csv')
import seaborn as sns
sns.kdeplot(data=data, x='age') | code |
72116744/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
kfold_df = pd.read_csv('../input/braintumor-sampling/brain_tumor_kfold.csv')
kfold_df.head(4) | code |
72116744/cell_7 | [
"application_vnd.jupyter.stderr_output_27.png",
"text_plain_output_5.png",
"text_plain_output_15.png",
"text_plain_output_9.png",
"text_plain_output_20.png",
"application_vnd.jupyter.stderr_output_26.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"text_plain_output_25.png",
"text_plain_output_18.png",
"text_plain_output_3.png",
"text_plain_output_22.png",
"text_plain_output_7.png",
"text_plain_output_16.png",
"text_plain_output_8.png",
"text_plain_output_23.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_plain_output_19.png",
"image_output_2.png",
"image_output_1.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png"
] | from torch.utils.data import DataLoader, Dataset
import pandas as pd
kfold_df = pd.read_csv('../input/braintumor-sampling/brain_tumor_kfold.csv')
train_df = kfold_df[kfold_df.fold != 0]
train_ds = DataRetriever(train_df['BraTS21ID'].values, '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train', 'FLAIR', targets=train_df['MGMT_value'].values)
train_dl = DataLoader(train_ds, batch_size=1, shuffle=True, num_workers=1)
next(iter(train_dl))['X'].shape | code |
72116744/cell_3 | [
"text_html_output_1.png"
] | from pathlib import Path
import pytorch_lightning as pl
class Config:
seed = 42
img_size = 256
num_imgs = 64
lr = 2e-08
data_dir = Path('/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification')
pl.utilities.seed.seed_everything(Config.seed, workers=True) | code |
72116744/cell_12 | [
"text_plain_output_1.png"
] | from IPython.core.magic import register_cell_magic
from efficientnet_pytorch_3d import EfficientNet3D
from pathlib import Path
from pytorch_lightning.core.memory import ModelSummary
from sklearn.metrics import roc_auc_score, roc_curve, auc
from time import time
from torch.utils.data import DataLoader, Dataset
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pydicom
import pytorch_lightning as pl
import re
import sys
import time
import torch
import torch
from IPython.core.magic import register_cell_magic
import os
from pathlib import Path
@register_cell_magic
def write_and_run(line, cell):
argz = line.split()
file = argz[-1]
mode = 'w'
if len(argz) == 2 and argz[0] == '-a':
mode = 'a'
with open(file, mode) as f:
f.write(cell)
get_ipython().run_cell(cell)
Path('/kaggle/working/scripts').mkdir(exist_ok=True)
models_dir = Path('/kaggle/working/models')
models_dir.mkdir(exist_ok=True)
import os
import json
import glob
import random
import collections
import numpy as np
import pandas as pd
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-talk')
import torch
from sklearn.metrics import roc_auc_score, roc_curve, auc
from torchvision import transforms
import time
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from sklearn import model_selection as sk_model_selection
from torch.nn import functional as F
from sklearn.model_selection import StratifiedKFold
import pytorch_lightning as pl
from transformers import DeiTFeatureExtractor, DeiTForImageClassification, AutoConfig
from pytorch_lightning.core.memory import ModelSummary
import sys
sys.path.append('../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D')
from efficientnet_pytorch_3d import EfficientNet3D
class Config:
seed = 42
img_size = 256
num_imgs = 64
lr = 2e-08
data_dir = Path('/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification')
pl.utilities.seed.seed_everything(Config.seed, workers=True)
kfold_df = pd.read_csv('../input/braintumor-sampling/brain_tumor_kfold.csv')
import re
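# Read a single DICOM slice, optionally rotate it 180 degrees (used as augmentation), and resize it to Config.img_size.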
def load_dicom_image(path, rotate=False):
dicom = pydicom.read_file(path)
    data = dicom.pixel_array
if rotate:
data = cv2.rotate(data, cv2.ROTATE_180)
data = cv2.resize(data, (Config.img_size, Config.img_size))
return data
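# Build a 3D volume from the middle Config.num_imgs slices of one MRI series, zero-padding short series and min-max normalising to [0, 1].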
def load_images_3d(dp_id, mri_type='FLAIR', split='train', augment=False):
dp_dir = str(Config.data_dir / split / dp_id / mri_type / '*.dcm')
files = sorted(glob.glob(dp_dir), key=lambda var: [int(x) if x.isdigit() else x for x in re.findall('[^0-9]|[0-9]+', var)])
middle = len(files) // 2
num_imgs2 = Config.num_imgs // 2
p1 = max(0, middle - num_imgs2)
p2 = min(len(files), middle + num_imgs2)
img3d = np.stack([load_dicom_image(f, rotate=augment) for f in files[p1:p2]]).T
if img3d.shape[-1] < Config.num_imgs:
n_zero = np.zeros((Config.img_size, Config.img_size, Config.num_imgs - img3d.shape[-1]))
img3d = np.concatenate((img3d, n_zero), axis=-1)
if np.min(img3d) < np.max(img3d):
img3d = img3d - np.min(img3d)
img3d = img3d / np.max(img3d)
return np.expand_dims(img3d, 0)
train_df = kfold_df[kfold_df.fold != 0]
train_ds = DataRetriever(train_df['BraTS21ID'].values, '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train', 'FLAIR', targets=train_df['MGMT_value'].values)
train_dl = DataLoader(train_ds, batch_size=1, shuffle=True, num_workers=1)
next(iter(train_dl))['X'].shape
def calc_roc_auc(y_true, y_pred):
    try:
        return roc_auc_score(y_true, y_pred)
    except ValueError:
        # Only one class present in this batch; fall back to chance-level AUC.
        return 0.5
from time import time
class Metrics:
def __init__(self):
self.losses = []
self.reduced_losses = []
self.y_batches = []
self.y_hat_batches = []
self.roc_auc_list = []
self.train_epoch_start_time = None
self.validation_epoch_start_time = None
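# Lightning callback that collects per-batch losses and predictions, reduces them to per-epoch loss and ROC AUC, and tracks the best validation ROC AUC.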
class MetricsCallback(pl.callbacks.Callback):
def __init__(self):
self.train_metrics = Metrics()
self.validation_metrics = Metrics()
self.best_validation_roc_auc = float('-inf')
def on_train_epoch_start(self, trainer, pl_module):
self.train_epoch_start_time = time()
def on_validation_epoch_start(self, trainer, pl_module):
self.validation_epoch_start_time = time()
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
self.after_batch(self.train_metrics, outputs)
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
self.after_batch(self.validation_metrics, outputs)
def on_train_epoch_end(self, trainer, pl_module):
self.train_epoch_start_time = None
def on_validation_epoch_end(self, trainer, pl_module):
train_loss = self.get_avg_loss(self.train_metrics)
validation_loss = self.get_avg_loss(self.validation_metrics)
train_roc_auc = self.get_roc_auc(self.train_metrics)
validation_roc_auc = self.get_roc_auc(self.validation_metrics)
self.log('roc_auc', validation_roc_auc)
if validation_roc_auc > self.best_validation_roc_auc:
self.best_validation_roc_auc = validation_roc_auc
self.validation_epoch_start_time = None
def get_avg_loss(self, metrics):
avg_loss = np.array(metrics.losses).mean()
metrics.reduced_losses.append(avg_loss)
metrics.losses = []
return avg_loss
def after_batch(self, metrics, outputs):
metrics.losses.append(outputs['loss'].item())
metrics.y_batches.append(outputs['y'])
metrics.y_hat_batches.append(outputs['y_hat'])
def get_roc_auc(self, metrics):
if not metrics.y_batches:
return None
y_np = torch.hstack(metrics.y_batches).detach().cpu().numpy()
y_hat_np = torch.hstack(metrics.y_hat_batches).detach().cpu().numpy()
roc_auc = calc_roc_auc(y_np, y_hat_np)
metrics.roc_auc_list.append(roc_auc)
metrics.y_batches = []
metrics.y_hat_batches = []
return roc_auc
def plot_metrics(metrics_callback):
train_losses = metrics_callback.train_metrics.reduced_losses
validation_losses = metrics_callback.validation_metrics.reduced_losses
train_roc_aucs = metrics_callback.train_metrics.roc_auc_list
validation_roc_aucs = metrics_callback.validation_metrics.roc_auc_list
mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
mri_type_roc_aucs = []
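# For every MRI sequence, run 5-fold cross-validation: train a 3D EfficientNet-B5 (wrapped in the Lightning Model defined in another cell) for up to 10 epochs on a limited number of batches and record each fold's best validation ROC AUC.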
for mri_type in mri_types:
print('MRI TYPE:', mri_type)
roc_aucs = []
for fold_n in range(5):
print('Fold:', fold_n + 1)
train_df = kfold_df[kfold_df.fold != fold_n]
train_ds = DataRetriever(train_df['BraTS21ID'].values, '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train', mri_type=mri_type, targets=train_df['MGMT_value'].values)
val_df = kfold_df[kfold_df.fold == fold_n]
val_ds = DataRetriever(val_df['BraTS21ID'].values, '../input/rsna-miccai-brain-tumor-radiogenomic-classification/train', mri_type=mri_type, targets=val_df['MGMT_value'].values)
train_dl = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=2, pin_memory=True)
val_dl = DataLoader(val_ds, batch_size=4, shuffle=False, num_workers=2, pin_memory=True)
net = EfficientNet3D.from_name('efficientnet-b5', override_params={'num_classes': 1}, in_channels=1)
model = Model(net)
checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath='models', filename=f'model_{mri_type}_{fold_n}_' + '{epoch}_{roc_auc:.3}', monitor='roc_auc', mode='max', save_weights_only=True)
metrics_callback = MetricsCallback()
print(ModelSummary(model))
trainer = pl.Trainer(fast_dev_run=False, max_epochs=10, gpus=1, auto_lr_find=True, precision=16, limit_train_batches=5, limit_val_batches=5, num_sanity_val_steps=0, val_check_interval=1.0, callbacks=[metrics_callback, checkpoint_callback])
trainer.fit(model, train_dl, val_dl)
plot_metrics(metrics_callback)
roc_aucs.append(metrics_callback.best_validation_roc_auc)
print('roc_auc_s:', roc_aucs)
print('roc_auc mean:', np.array(roc_aucs).mean())
print('roc_auc std:', np.array(roc_aucs).std())
mri_type_roc_aucs.append(np.array(roc_aucs).mean())
print('mri_type roc_auc_s:', mri_type_roc_aucs)
print('mri_type roc_auc mean:', np.array(mri_type_roc_aucs).mean())
print('mri_type roc_auc std:', np.array(mri_type_roc_aucs).std()) | code |
74058231/cell_11 | [
"image_output_2.png",
"image_output_1.png"
] | from sklearn.metrics import classification_report, explained_variance_score, r2_score, max_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
def fill_na_in_data(dataset):
df = dataset
for column in df.columns:
column_type = df[column].dtype
if column_type == 'int64':
df[column] = df[column].fillna(0)
elif column_type == 'object':
df[column] = df[column].fillna('None')
elif column_type == 'float64':
df[column] = df[column].fillna(df[column].median())
return df
train = fill_na_in_data(train)
test = fill_na_in_data(test)
def map_non_float_values(dataset):
for cat_column in dataset.dtypes.loc[dataset.dtypes == 'O'].index:
dataset[cat_column] = dataset[cat_column].astype('category')
dataset[cat_column + '_cat'] = dataset[cat_column].cat.codes
return dataset
train = map_non_float_values(train)
test = map_non_float_values(test)
X = train.loc[:, train.dtypes != 'category'].drop('SalePrice', axis=1)
y = train.SalePrice
X_val = test.loc[:, test.dtypes != 'category']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = torch.Tensor(X_train.values)
X_test = torch.Tensor(X_test.values)
y_train = torch.Tensor(y_train.values)
y_test = torch.Tensor(y_test.values)
X_val = torch.Tensor(X_val.values)
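# Regression MLP: three hidden layers of `hidden_neurons` units with ReLU/Sigmoid activations and batch norm, ending in a single linear output for SalePrice.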
class HouseNet(torch.nn.Module):
def __init__(self, hidden_neurons):
super(HouseNet, self).__init__()
self.fc1 = torch.nn.Linear(79, hidden_neurons)
self.ac1 = torch.nn.ReLU()
self.bn1 = torch.nn.BatchNorm1d(hidden_neurons)
self.fc2 = torch.nn.Linear(hidden_neurons, hidden_neurons)
self.ac2 = torch.nn.Sigmoid()
self.bn2 = torch.nn.BatchNorm1d(hidden_neurons)
self.fc3 = torch.nn.Linear(hidden_neurons, hidden_neurons)
self.ac3 = torch.nn.Sigmoid()
self.fc4 = torch.nn.Linear(hidden_neurons, 1)
def forward(self, x):
x = self.fc1(x)
x = self.ac1(x)
x = self.bn1(x)
x = self.fc2(x)
x = self.ac2(x)
x = self.bn2(x)
x = self.fc3(x)
        x = self.ac3(x)
x = self.fc4(x)
return x
loss = torch.nn.MSELoss()
house_net = HouseNet(50)
optimizer = torch.optim.Adam(house_net.parameters(), lr=0.001, weight_decay=1e-05)
batch_size = 50
r2 = []
test_loss_history = []
val_loss_history_avg = []
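# Mini-batch training for 50 epochs: shuffle indices, step the optimiser per batch, then score the held-out split (MSE loss and R^2) after each epoch.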
for epoch in range(50):
batch_loss_history = []
order = np.random.permutation(len(X_train))
for start_index in range(0, len(X_train), batch_size):
optimizer.zero_grad()
house_net.train()
batch_indexes = order[start_index:start_index + batch_size]
X_batch = X_train[batch_indexes]
y_batch = y_train[batch_indexes]
preds = house_net.forward(X_batch)
loss_value = loss(preds.view(preds.size()[0]), y_batch)
batch_loss_history.append(loss_value)
loss_value.backward()
optimizer.step()
house_net.eval()
val_loss_history_avg.append(sum(batch_loss_history) / len(batch_loss_history))
test_preds = house_net.forward(X_test)
test_loss_history.append(loss(test_preds.view(test_preds.size()[0]), y_test))
r2.append(r2_score(list(y_test), list(test_preds)))
plt.plot(test_loss_history, label='test')
plt.plot(val_loss_history_avg, label='train loss(avg)')
plt.legend()
plt.show()
plt.plot(r2)
plt.show() | code |
74058231/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74058231/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.metrics import classification_report, explained_variance_score, r2_score, max_error
from sklearn.model_selection import train_test_split
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
def fill_na_in_data(dataset):
df = dataset
for column in df.columns:
column_type = df[column].dtype
if column_type == 'int64':
df[column] = df[column].fillna(0)
elif column_type == 'object':
df[column] = df[column].fillna('None')
elif column_type == 'float64':
df[column] = df[column].fillna(df[column].median())
return df
train = fill_na_in_data(train)
test = fill_na_in_data(test)
def map_non_float_values(dataset):
for cat_column in dataset.dtypes.loc[dataset.dtypes == 'O'].index:
dataset[cat_column] = dataset[cat_column].astype('category')
dataset[cat_column + '_cat'] = dataset[cat_column].cat.codes
return dataset
train = map_non_float_values(train)
test = map_non_float_values(test)
X = train.loc[:, train.dtypes != 'category'].drop('SalePrice', axis=1)
y = train.SalePrice
X_val = test.loc[:, test.dtypes != 'category']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = torch.Tensor(X_train.values)
X_test = torch.Tensor(X_test.values)
y_train = torch.Tensor(y_train.values)
y_test = torch.Tensor(y_test.values)
X_val = torch.Tensor(X_val.values)
class HouseNet(torch.nn.Module):
def __init__(self, hidden_neurons):
super(HouseNet, self).__init__()
self.fc1 = torch.nn.Linear(79, hidden_neurons)
self.ac1 = torch.nn.ReLU()
self.bn1 = torch.nn.BatchNorm1d(hidden_neurons)
self.fc2 = torch.nn.Linear(hidden_neurons, hidden_neurons)
self.ac2 = torch.nn.Sigmoid()
self.bn2 = torch.nn.BatchNorm1d(hidden_neurons)
self.fc3 = torch.nn.Linear(hidden_neurons, hidden_neurons)
self.ac3 = torch.nn.Sigmoid()
self.fc4 = torch.nn.Linear(hidden_neurons, 1)
def forward(self, x):
x = self.fc1(x)
x = self.ac1(x)
x = self.bn1(x)
x = self.fc2(x)
x = self.ac2(x)
x = self.bn2(x)
x = self.fc3(x)
        x = self.ac3(x)
x = self.fc4(x)
return x
loss = torch.nn.MSELoss()
house_net = HouseNet(50)
optimizer = torch.optim.Adam(house_net.parameters(), lr=0.001, weight_decay=1e-05)
batch_size = 50
r2 = []
test_loss_history = []
val_loss_history_avg = []
for epoch in range(50):
batch_loss_history = []
order = np.random.permutation(len(X_train))
for start_index in range(0, len(X_train), batch_size):
optimizer.zero_grad()
house_net.train()
batch_indexes = order[start_index:start_index + batch_size]
X_batch = X_train[batch_indexes]
y_batch = y_train[batch_indexes]
preds = house_net.forward(X_batch)
loss_value = loss(preds.view(preds.size()[0]), y_batch)
batch_loss_history.append(loss_value)
loss_value.backward()
optimizer.step()
house_net.eval()
val_loss_history_avg.append(sum(batch_loss_history) / len(batch_loss_history))
test_preds = house_net.forward(X_test)
test_loss_history.append(loss(test_preds.view(test_preds.size()[0]), y_test))
r2.append(r2_score(list(y_test), list(test_preds)))
print(r2[-1]) | code |
74058231/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.metrics import classification_report, explained_variance_score, r2_score, max_error
from sklearn.model_selection import train_test_split
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
def fill_na_in_data(dataset):
df = dataset
for column in df.columns:
column_type = df[column].dtype
if column_type == 'int64':
df[column] = df[column].fillna(0)
elif column_type == 'object':
df[column] = df[column].fillna('None')
elif column_type == 'float64':
df[column] = df[column].fillna(df[column].median())
return df
train = fill_na_in_data(train)
test = fill_na_in_data(test)
def map_non_float_values(dataset):
for cat_column in dataset.dtypes.loc[dataset.dtypes == 'O'].index:
dataset[cat_column] = dataset[cat_column].astype('category')
dataset[cat_column + '_cat'] = dataset[cat_column].cat.codes
return dataset
train = map_non_float_values(train)
test = map_non_float_values(test)
X = train.loc[:, train.dtypes != 'category'].drop('SalePrice', axis=1)
y = train.SalePrice
X_val = test.loc[:, test.dtypes != 'category']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = torch.Tensor(X_train.values)
X_test = torch.Tensor(X_test.values)
y_train = torch.Tensor(y_train.values)
y_test = torch.Tensor(y_test.values)
X_val = torch.Tensor(X_val.values)
class HouseNet(torch.nn.Module):
def __init__(self, hidden_neurons):
super(HouseNet, self).__init__()
self.fc1 = torch.nn.Linear(79, hidden_neurons)
self.ac1 = torch.nn.ReLU()
self.bn1 = torch.nn.BatchNorm1d(hidden_neurons)
self.fc2 = torch.nn.Linear(hidden_neurons, hidden_neurons)
self.ac2 = torch.nn.Sigmoid()
self.bn2 = torch.nn.BatchNorm1d(hidden_neurons)
self.fc3 = torch.nn.Linear(hidden_neurons, hidden_neurons)
self.ac3 = torch.nn.Sigmoid()
self.fc4 = torch.nn.Linear(hidden_neurons, 1)
def forward(self, x):
x = self.fc1(x)
x = self.ac1(x)
x = self.bn1(x)
x = self.fc2(x)
x = self.ac2(x)
x = self.bn2(x)
x = self.fc3(x)
        x = self.ac3(x)
x = self.fc4(x)
return x
loss = torch.nn.MSELoss()
house_net = HouseNet(50)
optimizer = torch.optim.Adam(house_net.parameters(), lr=0.001, weight_decay=1e-05)
batch_size = 50
r2 = []
test_loss_history = []
val_loss_history_avg = []
for epoch in range(50):
batch_loss_history = []
order = np.random.permutation(len(X_train))
for start_index in range(0, len(X_train), batch_size):
optimizer.zero_grad()
house_net.train()
batch_indexes = order[start_index:start_index + batch_size]
X_batch = X_train[batch_indexes]
y_batch = y_train[batch_indexes]
preds = house_net.forward(X_batch)
loss_value = loss(preds.view(preds.size()[0]), y_batch)
batch_loss_history.append(loss_value)
loss_value.backward()
optimizer.step()
house_net.eval()
val_loss_history_avg.append(sum(batch_loss_history) / len(batch_loss_history))
test_preds = house_net.forward(X_test)
test_loss_history.append(loss(test_preds.view(test_preds.size()[0]), y_test))
r2.append(r2_score(list(y_test), list(test_preds)))
predictions = house_net.forward(X_val).detach()
predictions = predictions.numpy()
output = pd.DataFrame({'Id': test.index, 'SalePrice': pd.DataFrame(predictions)[0]})
output.to_csv('my_submission.csv', index=False)
print('Your submission was successfully saved!') | code |
89139379/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
sns.countplot(x='quality', data=df) | code |
89139379/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.describe() | code |
89139379/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
scaler = MinMaxScaler()
norm_df = scaler.fit_transform(df.drop('quality', axis=1))
norm_df = pd.DataFrame(norm_df, columns=df.columns[:-1])
X = norm_df
y = df.quality
X.shape
y.shape
y.value_counts() | code |
89139379/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True) | code |
89139379/cell_29 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
scaler = MinMaxScaler()
norm_df = scaler.fit_transform(df.drop('quality', axis=1))
norm_df = pd.DataFrame(norm_df, columns=df.columns[:-1])
X = norm_df
y = df.quality
X.shape
y.shape | code |
89139379/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.info() | code |
89139379/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts() | code |
89139379/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89139379/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.head(7) | code |
89139379/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique() | code |
89139379/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
scaler = MinMaxScaler()
norm_df = scaler.fit_transform(df.drop('quality', axis=1))
norm_df = pd.DataFrame(norm_df, columns=df.columns[:-1])
X = norm_df
y = df.quality
X.shape
y.shape
X.shape | code |
89139379/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique() | code |
89139379/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
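# Scale the physico-chemical features to [0, 1]; the quality target keeps its original integer scale.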
scaler = MinMaxScaler()
norm_df = scaler.fit_transform(df.drop('quality', axis=1))
norm_df = pd.DataFrame(norm_df, columns=df.columns[:-1])
norm_df.head() | code |
89139379/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique() | code |