path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string class (1 value)
122260425/cell_25
[ "text_html_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_visibility = df[['Item_Type', 'Item_Visibility']].sort_values(by='Item_Visibility', ascending=False).reset_index()[['Item_Type', 'Item_Visibility']]
item_type_visibility = item_type_visibility[item_type_visibility['Item_Visibility'] != 0]
item_type_visibility_average = item_type_visibility.groupby('Item_Type').mean().sort_values(by='Item_Visibility', ascending=False).reset_index()
item_type_visibility_average
code
122260425/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_visibility = df[['Item_Type', 'Item_Visibility']].sort_values(by='Item_Visibility', ascending=False).reset_index()[['Item_Type', 'Item_Visibility']]
item_type_visibility = item_type_visibility[item_type_visibility['Item_Visibility'] != 0]
display(item_type_visibility.head(10))
item_type_visibility.tail(10)
code
122260425/cell_30
[ "text_html_output_2.png", "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_visibility = df[['Item_Type', 'Item_Visibility']].sort_values(by='Item_Visibility', ascending=False).reset_index()[['Item_Type', 'Item_Visibility']]
item_type_visibility = item_type_visibility[item_type_visibility['Item_Visibility'] != 0]
item_type_visibility_average = item_type_visibility.groupby('Item_Type').mean().sort_values(by='Item_Visibility', ascending=False).reset_index()
item_type_visibility_average
item_type_count = df.groupby('Item_Type').count()['Item_Identifier'].reset_index().rename(columns={'Item_Identifier': 'Number_Of_Items'}).sort_values(by='Number_Of_Items', ascending=False)
item_type_count
plt.figure(figsize=(25, 8))
sns.barplot(data=item_type_count, x='Item_Type', y='Number_Of_Items', palette='autumn')
# Labels were swapped in the original: Item_Type is on x and the count on y.
plt.xlabel('Type of Item')
plt.ylabel('Number of Items')
plt.title('Items and their types')
plt.show()
code
122260425/cell_33
[ "image_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_count = df.groupby('Item_Type').count()['Item_Identifier'].reset_index().rename(columns={'Item_Identifier': 'Number_Of_Items'}).sort_values(by='Number_Of_Items', ascending=False)
item_type_count
df_outlets = df['Outlet_Identifier'].value_counts().sort_index().reset_index().rename(columns={'index': 'Outlet_Identifier', 'Outlet_Identifier': 'Number_Of_Outlets'})
df_outlets.sort_values(by='Number_Of_Outlets', ascending=False)
code
122260425/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
sns.barplot(data=df_itemfat, x='Fat_Content', y='Number_of_items', palette='autumn')
plt.xlabel('Type of item')
plt.ylabel('Number of items')
plt.title('Items with different fat contents')
plt.show()
code
122260425/cell_6
[ "image_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
display(traindf.shape)
traindf.info()
code
122260425/cell_29
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_count = df.groupby('Item_Type').count()['Item_Identifier'].reset_index().rename(columns={'Item_Identifier': 'Number_Of_Items'}).sort_values(by='Number_Of_Items', ascending=False)
item_type_count
code
122260425/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_visibility = df[['Item_Type', 'Item_Visibility']].sort_values(by='Item_Visibility', ascending=False).reset_index()[['Item_Type', 'Item_Visibility']]
item_type_visibility = item_type_visibility[item_type_visibility['Item_Visibility'] != 0]
item_type_visibility_average = item_type_visibility.groupby('Item_Type').mean().sort_values(by='Item_Visibility', ascending=False).reset_index()
item_type_visibility_average
sns.barplot(data=item_type_visibility_average, y='Item_Type', x='Item_Visibility', palette='autumn', errorbar=None, orient='h')
plt.ylabel('Type of Item')
plt.xlabel('Visibility of item')
plt.title('Various items and their visibility')
plt.show()
code
122260425/cell_11
[ "text_html_output_2.png", "text_html_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.head()
df.tail()
df.isnull().sum()
code
122260425/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
code
122260425/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, r2_score, mean_squared_error, mean_absolute_error

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122260425/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
code
122260425/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
code
122260425/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
code
122260425/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
sns.histplot(data=df, x='Item_Weight', kde=True, color='orange')
plt.show()
sns.barplot(y='Item_Weight', x='Number_Of_Items', data=df_itemwt_sorted, palette='autumn', order=df_itemwt_sorted['Item_Weight'], orient='h')
plt.ylabel('Weight of Items')
plt.xlabel('Number of Items')
plt.title('Items with different weights')
plt.show()
code
122260425/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df['Item_Fat_Content'].value_counts()
code
122260425/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
display(traindf.head())
display(testdf.head())
code
122260425/cell_31
[ "text_html_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_count = df.groupby('Item_Type').count()['Item_Identifier'].reset_index().rename(columns={'Item_Identifier': 'Number_Of_Items'}).sort_values(by='Number_Of_Items', ascending=False)
item_type_count
df.head()
code
122260425/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
code
122260425/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd

traindf = pd.read_csv('/kaggle/input/bigmart-sales-data/Train.csv')
testdf = pd.read_csv('/kaggle/input/bigmart-sales-data/Test.csv')
traindf.dtypes
traindf.describe().round(3)
traindf.isna().sum()
df = pd.concat([traindf, testdf], axis=0)
df.shape
df.isnull().sum()
df = df.dropna(subset=['Item_Weight', 'Outlet_Size', 'Item_Outlet_Sales'])
df_itemwt = df.Item_Weight.value_counts().reset_index().rename(columns={'index': 'Item_Weight', 'Item_Weight': 'Number_Of_Items'})
df_itemwt_sorted = df_itemwt.sort_values(by='Number_Of_Items', ascending=False).head(20).reset_index(drop=True)
df_itemwt_sorted
df.loc[df['Item_Fat_Content'] == 'LF', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'low fat', 'Item_Fat_Content'] = 'Low Fat'
df.loc[df['Item_Fat_Content'] == 'reg', 'Item_Fat_Content'] = 'Regular'
df['Item_Fat_Content'].value_counts()
df_itemfat = df.Item_Fat_Content.value_counts().reset_index().rename(columns={'index': 'Fat_Content', 'Item_Fat_Content': 'Number_of_items'})
df_itemfat
item_type_visibility = df[['Item_Type', 'Item_Visibility']].sort_values(by='Item_Visibility', ascending=False).reset_index()[['Item_Type', 'Item_Visibility']]
item_type_visibility.head()
code
128030068/cell_13
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
df.isna().sum()
df.duplicated().sum()
features = df.drop('Class', axis=1)
target = df['Class'].values
nums_data = features.select_dtypes('number')
text_data = features.select_dtypes('object')
imputer = SimpleImputer(strategy='median')
nums_data_imp = imputer.fit_transform(nums_data)
text_data['Artist Name'].value_counts()
code
128030068/cell_25
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
Xtest = scaler.transform(Xtest)
lr = LogisticRegression(max_iter=1000)
lr.fit(Xtrain, ytrain)
prediction = lr.predict(Xtest)
lr = OneVsRestClassifier(LogisticRegression(max_iter=1000))
lr.fit(Xtrain, ytrain)
code
128030068/cell_4
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
sns.countplot(data=df, x='Class')
plt.show()
code
128030068/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
Xtest = scaler.transform(Xtest)
lr = LogisticRegression(max_iter=1000)
lr.fit(Xtrain, ytrain)
code
128030068/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
df.info()
code
128030068/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.head(2)
code
128030068/cell_19
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
df.isna().sum()
df.duplicated().sum()
features = df.drop('Class', axis=1)
target = df['Class'].values
nums_data = features.select_dtypes('number')
text_data = features.select_dtypes('object')
imputer = SimpleImputer(strategy='median')
nums_data_imp = imputer.fit_transform(nums_data)
nums_data_imp = pd.DataFrame(nums_data_imp, columns=imputer.get_feature_names_out())
nums_data_imp.isna().sum()
pd.Series(ytrain).value_counts()
code
128030068/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
df.isna().sum()
code
128030068/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
df.isna().sum()
df.duplicated().sum()
code
128030068/cell_15
[ "image_output_1.png" ]
from sklearn.impute import SimpleImputer
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
df.isna().sum()
df.duplicated().sum()
features = df.drop('Class', axis=1)
target = df['Class'].values
nums_data = features.select_dtypes('number')
text_data = features.select_dtypes('object')
imputer = SimpleImputer(strategy='median')
nums_data_imp = imputer.fit_transform(nums_data)
nums_data_imp = pd.DataFrame(nums_data_imp, columns=imputer.get_feature_names_out())
nums_data_imp.isna().sum()
nums_data_imp.head()
code
128030068/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df['Class'].value_counts()
code
128030068/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
Xtest = scaler.transform(Xtest)
lr = LogisticRegression(max_iter=1000)
lr.fit(Xtrain, ytrain)
prediction = lr.predict(Xtest)
# classification_report expects (y_true, y_pred); the original passed them reversed.
print(classification_report(ytest, prediction))
code
128030068/cell_12
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
df.isna().sum()
df.duplicated().sum()
features = df.drop('Class', axis=1)
target = df['Class'].values
nums_data = features.select_dtypes('number')
text_data = features.select_dtypes('object')
imputer = SimpleImputer(strategy='median')
nums_data_imp = imputer.fit_transform(nums_data)
nums_data_imp = pd.DataFrame(nums_data_imp, columns=imputer.get_feature_names_out())
nums_data_imp.isna().sum()
code
128030068/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/music-genre-classification/train.csv')
df.shape
code
2034634/cell_6
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import matplotlib.pyplot as plt
import scipy.ndimage  # missing in the original; needed for gaussian_filter / grey_dilation

def plot_analysis(ww):
    for n in range(1, 7):
        gga = scipy.ndimage.filters.gaussian_filter(ww, 2 * n * 1.0)
        ggb = scipy.ndimage.filters.gaussian_filter(ww, n * 1.0)
        xx = ggb - gga
        mm = xx == scipy.ndimage.morphology.grey_dilation(xx, size=(3, 3))
        plt.axis('off')
        plt.axis('off')
        plt.axis('off')

imgs = img_gen()
for k in range(10):
    qq = next(imgs)
    ww = qq.reshape(20, 20)
    plt.figure(figsize=(10, 5))
    plot_analysis(ww)
code
2034634/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage  # missing in the original; needed for gaussian_filter / grey_dilation

def plot_analysis(ww):
    for n in range(1, 7):
        gga = scipy.ndimage.filters.gaussian_filter(ww, 2 * n * 1.0)
        ggb = scipy.ndimage.filters.gaussian_filter(ww, n * 1.0)
        xx = ggb - gga
        mm = xx == scipy.ndimage.morphology.grey_dilation(xx, size=(3, 3))
        plt.axis('off')
        plt.axis('off')
        plt.axis('off')

imgs = img_gen()
for k in range(10):
    qq = next(imgs)
    ww = qq.reshape(20, 20)

def find_dice(ww):
    # Difference-of-Gaussians, then keep local maxima away from the border.
    gga = scipy.ndimage.filters.gaussian_filter(ww, 4.0)
    ggb = scipy.ndimage.filters.gaussian_filter(ww, 2.0)
    xx = ggb - gga
    mm = xx == scipy.ndimage.morphology.grey_dilation(xx, size=(3, 3))
    mm[0, :] = 0
    mm[-1, :] = 0
    mm[:, 0] = 0
    mm[:, -1] = 0
    return np.nonzero(mm)

plt.figure(figsize=(15, 8))
imgs = img_gen()
for k in range(50):
    qq = next(imgs)
    ww = qq.reshape(20, 20)
    plt.subplot(5, 10, k + 1)
    plt.imshow(ww)
    plt.axis('off')
    for y, x in zip(*find_dice(ww)):
        plt.plot(x, y, 'ro')
code
129011803/cell_9
[ "text_plain_output_1.png" ]
import librosa
from scipy.stats import skew
from scipy.stats import kurtosis
code
129011803/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
sounds_freq = sounds_df['class'].value_counts().sort_values()
print(sounds_freq)
code
129011803/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
folds_freq = sounds_df['fold'].value_counts().sort_index()
print(folds_freq)
code
129011803/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
folds_freq = sounds_df['fold'].value_counts().sort_index()
folds_freq.plot(kind='pie', figsize=(5, 5), title='Folds', autopct='%1.1f%%', shadow=False, fontsize=8)
code
129011803/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
plt.figure(figsize=[25, 10])
for i in range(1, 11):
    fold_df = sounds_df[sounds_df['fold'] == i]
    fold_freq = fold_df['class'].value_counts()
    plt.subplot(2, 5, i)
    fold_freq.plot(kind='pie', title=f'fold {i}', autopct='%1.1f%%', shadow=False, fontsize=8)
code
129011803/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import kurtosis
from scipy.stats import skew
import librosa
import librosa.display  # explicit import for specshow below
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
for i in range(1, 11):
    fold_df = sounds_df[sounds_df['fold'] == i]
    fold_freq = fold_df['class'].value_counts()

def get_mfcc(filename, fold):
    wave, sr = librosa.load(f'../input/urbansound8k/fold{fold}/{filename}', mono=True, sr=22050)
    wave = librosa.util.normalize(wave)
    mfccs = librosa.feature.mfcc(y=wave, sr=sr, n_mfcc=40, hop_length=int(0.0232 * sr / 2.0), n_fft=int(0.0232 * sr))
    mfccs_min = mfccs.min(axis=1)
    mfccs_max = mfccs.max(axis=1)
    mfccs_median = np.median(mfccs, axis=1)
    mfccs_mean = np.mean(mfccs, axis=1)
    mfccs_var = np.var(mfccs, axis=1)
    mfccs_skewness = skew(mfccs, axis=1)
    mfccs_kurtosis = kurtosis(mfccs, axis=1)
    mfccs_first_derivative = np.diff(mfccs, n=1, axis=1)
    mfccs_first_derivative_mean = np.mean(mfccs_first_derivative, axis=1)
    mfccs_first_derivative_var = np.var(mfccs_first_derivative, axis=1)
    mfccs_second_derivative = np.diff(mfccs, n=2, axis=1)
    mfccs_second_derivative_mean = np.mean(mfccs_second_derivative, axis=1)
    mfccs_second_derivative_var = np.var(mfccs_second_derivative, axis=1)
    mfccs_stats = np.vstack((mfccs_min, mfccs_max, mfccs_median, mfccs_mean, mfccs_var, mfccs_skewness, mfccs_kurtosis, mfccs_first_derivative_mean, mfccs_first_derivative_var, mfccs_second_derivative_mean, mfccs_second_derivative_var))
    return pd.Series([mfccs, mfccs_stats.transpose()])

plt.tight_layout()
plt.figure(figsize=[15, 10])
for i in range(0, 9):
    ax = plt.subplot(3, 3, i + 1)
    img = librosa.display.specshow(sounds_df['mfccs'][i], x_axis='time')
    ax.set(title=sounds_df['class'][i])
    plt.colorbar()
plt.tight_layout()
code
129011803/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
for i in range(1, 11):
    fold_df = sounds_df[sounds_df['fold'] == i]
    fold_freq = fold_df['class'].value_counts()
plt.tight_layout()
sounds_df.head()
code
129011803/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
sounds_df.head()
code
129011803/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
for i in range(1, 11):
    fold_df = sounds_df[sounds_df['fold'] == i]
    fold_freq = fold_df['class'].value_counts()
plt.tight_layout()
max_length = sounds_df['mfccs_stats'][0].shape
print(max_length)
code
129011803/cell_14
[ "text_plain_output_1.png" ]
from scipy.stats import kurtosis
from scipy.stats import skew
from tqdm import tqdm
import librosa
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
for i in range(1, 11):
    fold_df = sounds_df[sounds_df['fold'] == i]
    fold_freq = fold_df['class'].value_counts()

def get_mfcc(filename, fold):
    wave, sr = librosa.load(f'../input/urbansound8k/fold{fold}/{filename}', mono=True, sr=22050)
    wave = librosa.util.normalize(wave)
    mfccs = librosa.feature.mfcc(y=wave, sr=sr, n_mfcc=40, hop_length=int(0.0232 * sr / 2.0), n_fft=int(0.0232 * sr))
    mfccs_min = mfccs.min(axis=1)
    mfccs_max = mfccs.max(axis=1)
    mfccs_median = np.median(mfccs, axis=1)
    mfccs_mean = np.mean(mfccs, axis=1)
    mfccs_var = np.var(mfccs, axis=1)
    mfccs_skewness = skew(mfccs, axis=1)
    mfccs_kurtosis = kurtosis(mfccs, axis=1)
    mfccs_first_derivative = np.diff(mfccs, n=1, axis=1)
    mfccs_first_derivative_mean = np.mean(mfccs_first_derivative, axis=1)
    mfccs_first_derivative_var = np.var(mfccs_first_derivative, axis=1)
    mfccs_second_derivative = np.diff(mfccs, n=2, axis=1)
    mfccs_second_derivative_mean = np.mean(mfccs_second_derivative, axis=1)
    mfccs_second_derivative_var = np.var(mfccs_second_derivative, axis=1)
    mfccs_stats = np.vstack((mfccs_min, mfccs_max, mfccs_median, mfccs_mean, mfccs_var, mfccs_skewness, mfccs_kurtosis, mfccs_first_derivative_mean, mfccs_first_derivative_var, mfccs_second_derivative_mean, mfccs_second_derivative_var))
    return pd.Series([mfccs, mfccs_stats.transpose()])

plt.tight_layout()
tqdm.pandas()
sounds_df[['mfccs', 'mfccs_stats']] = sounds_df[['slice_file_name', 'fold']].progress_apply(lambda x: get_mfcc(*x), axis=1)
code
129011803/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
for i in range(1, 11):
    fold_df = sounds_df[sounds_df['fold'] == i]
    fold_freq = fold_df['class'].value_counts()
sounds_df.plot.hist(bins=10, column=['duration'], by='class', figsize=(5, 20))
plt.tight_layout()
code
129011803/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

sounds_df = pd.read_csv('/kaggle/input/urbansound8k/UrbanSound8K.csv')
sounds_freq = sounds_df['class'].value_counts().sort_values()
sounds_freq.plot(kind='pie', figsize=(5, 5), title='Sounds', autopct='%1.1f%%', shadow=False, fontsize=8)
code
121148889/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    pass

train_df = train_df.drop('id', axis=1)
for col in train_df.select_dtypes('object'):
    plot(train_df, col)
code
121148889/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df
print(train_df.describe(include='number'))
code
121148889/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df
print(train_df.head())
code
121148889/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    pass

train_df = train_df.drop('id', axis=1)
df_copy = df_copy.drop('id', axis=1)
dataplot = sns.heatmap(df_copy.corr())
plt.show()
code
121148889/cell_6
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler
from sklearn.inspection import PartialDependenceDisplay
from lightgbm import LGBMRegressor
code
121148889/cell_26
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    pass

train_df = train_df.drop('id', axis=1)
test_df = test_df.drop('id', axis=1)
train_y = train_df['price']
train_df = train_df.drop(['price'], axis=1)
print(train_df)
code
121148889/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    sns.histplot(dataframe[col], bins=30, kde=True)
    plt.show()

train_df = train_df.drop('id', axis=1)
for col in train_df.select_dtypes('number'):
    plot(train_df, col)
code
121148889/cell_28
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    pass

train_df = train_df.drop('id', axis=1)
test_df = test_df.drop('id', axis=1)
train_y = train_df['price']
train_df = train_df.drop(['price'], axis=1)
encoder = OneHotEncoder(handle_unknown='ignore')
OH_cols_train = pd.DataFrame(encoder.fit_transform(train_df[['cut', 'color', 'clarity']]).toarray())
OH_cols_valid = pd.DataFrame(encoder.transform(test_df[['cut', 'color', 'clarity']]).toarray())
test_df = test_df.join(OH_cols_valid)
train_df = train_df.join(OH_cols_train)
train_df = train_df.drop(['cut', 'color', 'clarity'], axis=1)
test_df = test_df.drop(['cut', 'color', 'clarity'], axis=1)
print(train_df)
code
121148889/cell_38
[ "image_output_1.png" ]
from lightgbm import LGBMRegressor
from sklearn.inspection import PartialDependenceDisplay
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    pass

train_df = train_df.drop('id', axis=1)
test_df = test_df.drop('id', axis=1)
train_y = train_df['price']
train_df = train_df.drop(['price'], axis=1)
encoder = OneHotEncoder(handle_unknown='ignore')
OH_cols_train = pd.DataFrame(encoder.fit_transform(train_df[['cut', 'color', 'clarity']]).toarray())
OH_cols_valid = pd.DataFrame(encoder.transform(test_df[['cut', 'color', 'clarity']]).toarray())
test_df = test_df.join(OH_cols_valid)
train_df = train_df.join(OH_cols_train)
train_df = train_df.drop(['cut', 'color', 'clarity'], axis=1)
test_df = test_df.drop(['cut', 'color', 'clarity'], axis=1)
df_copy = df_copy.drop('id', axis=1)
dataplot = sns.heatmap(df_copy.corr())
plt.show()
X_train, X_test, y_train, y_test = train_test_split(train_df, train_y, test_size=0.2, random_state=42)
lgbm = LGBMRegressor(random_state=10, n_estimators=2730, reg_alpha=8.432915874559963, reg_lambda=1.140459608805678, colsample_bytree=0.5820085284323611, subsample=0.3005306113635547, learning_rate=0.003848337624183948, max_depth=82, num_leaves=60, min_child_samples=43)
lgbm.fit(X_train, y_train)

def PartialDependence(model, X_test, feature_name):
    PartialDependenceDisplay.from_estimator(model, X_test, feature_name)
    plt.show()

PartialDependence(lgbm, X_test, 'y')
PartialDependence(lgbm, X_test, 'x')
code
121148889/cell_35
[ "text_plain_output_1.png" ]
from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    pass

train_df = train_df.drop('id', axis=1)
test_df = test_df.drop('id', axis=1)
train_y = train_df['price']
train_df = train_df.drop(['price'], axis=1)
encoder = OneHotEncoder(handle_unknown='ignore')
OH_cols_train = pd.DataFrame(encoder.fit_transform(train_df[['cut', 'color', 'clarity']]).toarray())
OH_cols_valid = pd.DataFrame(encoder.transform(test_df[['cut', 'color', 'clarity']]).toarray())
test_df = test_df.join(OH_cols_valid)
train_df = train_df.join(OH_cols_train)
train_df = train_df.drop(['cut', 'color', 'clarity'], axis=1)
test_df = test_df.drop(['cut', 'color', 'clarity'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(train_df, train_y, test_size=0.2, random_state=42)
lgbm = LGBMRegressor(random_state=10, n_estimators=2730, reg_alpha=8.432915874559963, reg_lambda=1.140459608805678, colsample_bytree=0.5820085284323611, subsample=0.3005306113635547, learning_rate=0.003848337624183948, max_depth=82, num_leaves=60, min_child_samples=43)
lgbm.fit(X_train, y_train)
code
121148889/cell_43
[ "text_plain_output_1.png" ]
from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df

def plot(dataframe, col):
    pass

train_df = train_df.drop('id', axis=1)
test_df = test_df.drop('id', axis=1)
train_y = train_df['price']
train_df = train_df.drop(['price'], axis=1)
encoder = OneHotEncoder(handle_unknown='ignore')
OH_cols_train = pd.DataFrame(encoder.fit_transform(train_df[['cut', 'color', 'clarity']]).toarray())
OH_cols_valid = pd.DataFrame(encoder.transform(test_df[['cut', 'color', 'clarity']]).toarray())
test_df = test_df.join(OH_cols_valid)
train_df = train_df.join(OH_cols_train)
train_df = train_df.drop(['cut', 'color', 'clarity'], axis=1)
test_df = test_df.drop(['cut', 'color', 'clarity'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(train_df, train_y, test_size=0.2, random_state=42)
lgbm = LGBMRegressor(random_state=10, n_estimators=2730, reg_alpha=8.432915874559963, reg_lambda=1.140459608805678, colsample_bytree=0.5820085284323611, subsample=0.3005306113635547, learning_rate=0.003848337624183948, max_depth=82, num_leaves=60, min_child_samples=43)
lgbm.fit(X_train, y_train)
prediction = lgbm.predict(test_df)
print(prediction)
code
121148889/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train_path = '/kaggle/input/playground-series-s3e8/train.csv'
test_path = '/kaggle/input/playground-series-s3e8/test.csv'
train_df = pd.DataFrame(pd.read_csv(train_path))
test_df = pd.DataFrame(pd.read_csv(test_path))
ids = test_df[['id']]
df_copy = train_df
print(train_df.describe(include='object'))
code
16123550/cell_4
[ "text_plain_output_1.png" ]
from keras import layers
from keras import models
from keras.applications import VGG16
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_path = '../input/train/train'
test_path = '../input/test/test'
label_frame = pd.read_csv('../input/train.csv')
test_frame = pd.read_csv('../input/sample_submission.csv')
x_train = []
x_test = []
y_train = np.array(label_frame['has_cactus'])
for fname in label_frame['id']:
    image_path = os.path.join(train_path, fname)
    pil_image = image.load_img(image_path, target_size=(32, 32, 3))
    np_image = image.img_to_array(pil_image)
    x_train.append(np_image)
for fname in test_frame['id']:
    image_path = os.path.join(test_path, fname)
    pil_image = image.load_img(image_path, target_size=(32, 32, 3))
    np_image = image.img_to_array(pil_image)
    x_test.append(np_image)
x_train = np.array(x_train)
x_train = x_train.astype('float32') / 255
x_test = np.array(x_test)
x_test = x_test.astype('float32') / 255
augmentations = ImageDataGenerator(vertical_flip=True, horizontal_flip=True, zoom_range=0.1)
augmentations.fit(x_train)

def get_model():
    base = VGG16(include_top=False, weights='imagenet', input_shape=(32, 32, 3))
    base.trainable = True
    base.summary()
    set_trainable = False
    for layer in base.layers:
        if layer.name == 'block5_conv3':
            set_trainable = True
        if set_trainable:
            layer.trainable = True
        else:
            layer.trainable = False
    model = models.Sequential()
    model.add(base)
    model.add(layers.Flatten())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
    return model

model = get_model()
"""
x_val = x_train[16600:]
y_val = y_train[16600:]

model_check_point = ModelCheckpoint('./model.h5', monitor='val_loss', save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=25)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='val_loss', patience=15)

history = model.fit(x_train[:16600], y_train[:16600], epochs=80, batch_size=250, validation_data=(x_val, y_val), callbacks=[model_check_point, reduce_lr_on_plateau])
# visualize
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
"""
model.fit_generator(augmentations.flow(x_train, y_train), epochs=100, steps_per_epoch=250)
y_predictions = model.predict(x_test)
result = pd.DataFrame({'id': pd.read_csv('../input/sample_submission.csv')['id'], 'has_cactus': y_predictions.squeeze()})
result.to_csv('submissionMax.csv', index=False, columns=['id', 'has_cactus'])
print('submit successful')
code
16123550/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_path = '../input/train/train'
test_path = '../input/test/test'
label_frame = pd.read_csv('../input/train.csv')
test_frame = pd.read_csv('../input/sample_submission.csv')
x_train = []
x_test = []
y_train = np.array(label_frame['has_cactus'])
for fname in label_frame['id']:
    image_path = os.path.join(train_path, fname)
    pil_image = image.load_img(image_path, target_size=(32, 32, 3))
    np_image = image.img_to_array(pil_image)
    x_train.append(np_image)
for fname in test_frame['id']:
    image_path = os.path.join(test_path, fname)
    pil_image = image.load_img(image_path, target_size=(32, 32, 3))
    np_image = image.img_to_array(pil_image)
    x_test.append(np_image)
x_train = np.array(x_train)
x_train = x_train.astype('float32') / 255
x_test = np.array(x_test)
x_test = x_test.astype('float32') / 255
print(x_train.shape)
print(x_test.shape)
code
16123550/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras import layers
from keras import models
from keras.applications import VGG16
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

def get_model():
    base = VGG16(include_top=False, weights='imagenet', input_shape=(32, 32, 3))
    base.trainable = True
    base.summary()
    set_trainable = False
    for layer in base.layers:
        if layer.name == 'block5_conv3':
            set_trainable = True
        if set_trainable:
            layer.trainable = True
        else:
            layer.trainable = False
    model = models.Sequential()
    model.add(base)
    model.add(layers.Flatten())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
    return model

model = get_model()
"""
x_val = x_train[16600:]
y_val = y_train[16600:]
model_check_point = ModelCheckpoint('./model.h5', monitor='val_loss', save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=25)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='val_loss', patience=15)
history = model.fit(x_train[:16600], y_train[:16600], epochs=80, batch_size=250, validation_data=(x_val, y_val), callbacks=[model_check_point, reduce_lr_on_plateau])
# visualize
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
"""
code
18153040/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.info()
code
18153040/cell_25
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.Year = pd.to_datetime(total_rape['Year'], format='%Y').dt.strftime('%Y')
total_rape['Total_Rape_per_Year'] = total_rape.groupby('Year')['Victims_of_Rape_Total'].transform('sum')
plot_total_rape = total_rape.drop_duplicates('Year', keep='first', inplace=False)
plot_total_rape
total_rape
code
18153040/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.head()
code
18153040/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.Year = pd.to_datetime(total_rape['Year'], format='%Y').dt.strftime('%Y')
total_rape['Total_Rape_per_Year'] = total_rape.groupby('Year')['Victims_of_Rape_Total'].transform('sum')
plot_total_rape = total_rape.drop_duplicates('Year', keep='first', inplace=False)
plot_total_rape
plt.figure(figsize=(15, 5))
x = plot_total_rape['Year']
y = plot_total_rape['Total_Rape_per_Year']
plt.plot(x, y)
plt.title('Number of rapes per year in all of India for the decade (2001 - 2010)')
plt.xlabel('Year')
plt.ylabel('Total_Rape_per_Year')
plt.grid(True)
plt.show()
code
18153040/cell_26
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.Year = pd.to_datetime(total_rape['Year'], format='%Y').dt.strftime('%Y')
total_rape['Total_Rape_per_Year'] = total_rape.groupby('Year')['Victims_of_Rape_Total'].transform('sum')
plot_total_rape = total_rape.drop_duplicates('Year', keep='first', inplace=False)
plot_total_rape
total_rape['Total_Rape_per_Area'] = total_rape.groupby('Area_Name')['Victims_of_Rape_Total'].transform('sum')
plot_total_area_rape = total_rape.drop_duplicates('Area_Name', keep='first', inplace=False)
code
18153040/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

print(os.listdir('../input'))
code
18153040/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
code
18153040/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.describe()
code
18153040/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.Year = pd.to_datetime(total_rape['Year'], format='%Y').dt.strftime('%Y')
total_rape['Total_Rape_per_Year'] = total_rape.groupby('Year')['Victims_of_Rape_Total'].transform('sum')
plot_total_rape = total_rape.drop_duplicates('Year', keep='first', inplace=False)
plot_total_rape
x = plot_total_rape['Year']
y = plot_total_rape['Total_Rape_per_Year']
total_rape['Total_Rape_per_Area'] = total_rape.groupby('Area_Name')['Victims_of_Rape_Total'].transform('sum')
plot_total_area_rape = total_rape.drop_duplicates('Area_Name', keep='first', inplace=False)
plt.figure(figsize=(75, 25))
x = plot_total_area_rape['Area_Name']
y = plot_total_area_rape['Total_Rape_per_Area']
plt.plot(x, y)
plt.title('Number of rapes in different parts of India for the decade (2001 - 2010)')
plt.xlabel('Name of Areas')
plt.ylabel('Total Rape per Area')
plt.grid(True)
plt.show()
code
18153040/cell_16
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.head()
code
18153040/cell_22
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
total_rape = rape_victim[rape_victim['Subgroup'] == 'Total Rape Victims']
total_rape.Year = pd.to_datetime(total_rape['Year'], format='%Y').dt.strftime('%Y')
total_rape['Total_Rape_per_Year'] = total_rape.groupby('Year')['Victims_of_Rape_Total'].transform('sum')
plot_total_rape = total_rape.drop_duplicates('Year', keep='first', inplace=False)
plot_total_rape
code
18153040/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
code
18153040/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
rape_victim = pd.read_csv('../input/20_Victims_of_rape.csv', na_filter='False')
rape_victim.columns
rape_victim.fillna('')
rape_victim.isnull().sum().sum()
code
16157191/cell_25
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
from sklearn.model_selection import train_test_split
import matplotlib
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
train = pd.read_csv('../input/cactus-images-csv/dataset/train.csv')
test = pd.read_csv('../input/cactus-images-csv/dataset/test.csv')
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
import keras
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.optimizers import Adam

def conv_layer(conv_x, filters):
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_x = Conv2D(filters, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(conv_x)
    conv_x = Dropout(0.2)(conv_x)
    return conv_x

def dense_block(block_x, filters, growth_rate, layers_in_block):
    for i in range(layers_in_block):
        each_layer = conv_layer(block_x, growth_rate)
        block_x = concatenate([block_x, each_layer], axis=-1)
        filters += growth_rate
    return (block_x, filters)

def transition_block(trans_x, tran_filters):
    trans_x = BatchNormalization()(trans_x)
    trans_x = Activation('relu')(trans_x)
    trans_x = Conv2D(tran_filters, (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=False)(trans_x)
    trans_x = AveragePooling2D((2, 2), strides=(2, 2))(trans_x)
    return (trans_x, tran_filters)

def dense_net(filters, growth_rate, classes, dense_block_size, layers_in_block):
    input_img = Input(shape=(32, 32, 3))
    x = Conv2D(24, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(input_img)
    dense_x = BatchNormalization()(x)
    dense_x = Activation('relu')(x)
    dense_x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(dense_x)
    for block in range(dense_block_size - 1):
        dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
        dense_x, filters = transition_block(dense_x, filters)
    dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
    dense_x = BatchNormalization()(dense_x)
    dense_x = Activation('relu')(dense_x)
    dense_x = GlobalAveragePooling2D()(dense_x)
    output = Dense(classes, activation='softmax')(dense_x)
    return Model(input_img, output)

from keras.utils import np_utils
from sklearn.model_selection import train_test_split
images = train.drop('label', axis=1)
images = np.asarray(images)
images = images.reshape(images.shape[0], 32, 32, 3)
label = train['label']
X_train, X_test, y_train, y_test = train_test_split(images, label.values, test_size=0.33)
Cat_test_y = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
dense_block_size = 3
layers_in_block = 4
growth_rate = 12
classes = 2
model = dense_net(growth_rate * 2, growth_rate, classes, dense_block_size, layers_in_block)
model.summary()
batch_size = 32
epochs = 10
optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_data=(X_test, Cat_test_y))
import sys
import matplotlib
sys.stdout.flush()
matplotlib.use('Agg')
matplotlib.pyplot.style.use('ggplot')
N = epochs
from sklearn import metrics
label_pred = model.predict(X_test)
pred = []
for i in range(len(label_pred)):
    pred.append(np.argmax(label_pred[i]))
Y_test = np.argmax(Cat_test_y, axis=1)
print(metrics.accuracy_score(Y_test, pred))
code
16157191/cell_4
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
print('Number of samples: ', len(df))
print('Number of Labels: ', np.unique(df.has_cactus))
code
16157191/cell_23
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
from sklearn.model_selection import train_test_split
import matplotlib
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
train = pd.read_csv('../input/cactus-images-csv/dataset/train.csv')
test = pd.read_csv('../input/cactus-images-csv/dataset/test.csv')
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
import keras
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.optimizers import Adam

def conv_layer(conv_x, filters):
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_x = Conv2D(filters, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(conv_x)
    conv_x = Dropout(0.2)(conv_x)
    return conv_x

def dense_block(block_x, filters, growth_rate, layers_in_block):
    for i in range(layers_in_block):
        each_layer = conv_layer(block_x, growth_rate)
        block_x = concatenate([block_x, each_layer], axis=-1)
        filters += growth_rate
    return (block_x, filters)

def transition_block(trans_x, tran_filters):
    trans_x = BatchNormalization()(trans_x)
    trans_x = Activation('relu')(trans_x)
    trans_x = Conv2D(tran_filters, (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=False)(trans_x)
    trans_x = AveragePooling2D((2, 2), strides=(2, 2))(trans_x)
    return (trans_x, tran_filters)

def dense_net(filters, growth_rate, classes, dense_block_size, layers_in_block):
    input_img = Input(shape=(32, 32, 3))
    x = Conv2D(24, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(input_img)
    dense_x = BatchNormalization()(x)
    dense_x = Activation('relu')(x)
    dense_x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(dense_x)
    for block in range(dense_block_size - 1):
        dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
        dense_x, filters = transition_block(dense_x, filters)
    dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
    dense_x = BatchNormalization()(dense_x)
    dense_x = Activation('relu')(dense_x)
    dense_x = GlobalAveragePooling2D()(dense_x)
    output = Dense(classes, activation='softmax')(dense_x)
    return Model(input_img, output)

from keras.utils import np_utils
from sklearn.model_selection import train_test_split
images = train.drop('label', axis=1)
images = np.asarray(images)
images = images.reshape(images.shape[0], 32, 32, 3)
label = train['label']
X_train, X_test, y_train, y_test = train_test_split(images, label.values, test_size=0.33)
Cat_test_y = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
dense_block_size = 3
layers_in_block = 4
growth_rate = 12
classes = 2
model = dense_net(growth_rate * 2, growth_rate, classes, dense_block_size, layers_in_block)
model.summary()
batch_size = 32
epochs = 10
optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_data=(X_test, Cat_test_y))
import sys
import matplotlib
sys.stdout.flush()
matplotlib.use('Agg')
matplotlib.pyplot.style.use('ggplot')
N = epochs
from sklearn import metrics
label_pred = model.predict(X_test)
pred = []
for i in range(len(label_pred)):
    pred.append(np.argmax(label_pred[i]))
Y_test = np.argmax(Cat_test_y, axis=1)
print(metrics.classification_report(Y_test, pred))
code
16157191/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import matplotlib
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
train = pd.read_csv('../input/cactus-images-csv/dataset/train.csv')
test = pd.read_csv('../input/cactus-images-csv/dataset/test.csv')
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
import keras
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.optimizers import Adam

def conv_layer(conv_x, filters):
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_x = Conv2D(filters, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(conv_x)
    conv_x = Dropout(0.2)(conv_x)
    return conv_x

def dense_block(block_x, filters, growth_rate, layers_in_block):
    for i in range(layers_in_block):
        each_layer = conv_layer(block_x, growth_rate)
        block_x = concatenate([block_x, each_layer], axis=-1)
        filters += growth_rate
    return (block_x, filters)

def transition_block(trans_x, tran_filters):
    trans_x = BatchNormalization()(trans_x)
    trans_x = Activation('relu')(trans_x)
    trans_x = Conv2D(tran_filters, (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=False)(trans_x)
    trans_x = AveragePooling2D((2, 2), strides=(2, 2))(trans_x)
    return (trans_x, tran_filters)

def dense_net(filters, growth_rate, classes, dense_block_size, layers_in_block):
    input_img = Input(shape=(32, 32, 3))
    x = Conv2D(24, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(input_img)
    dense_x = BatchNormalization()(x)
    dense_x = Activation('relu')(x)
    dense_x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(dense_x)
    for block in range(dense_block_size - 1):
        dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
        dense_x, filters = transition_block(dense_x, filters)
    dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
    dense_x = BatchNormalization()(dense_x)
    dense_x = Activation('relu')(dense_x)
    dense_x = GlobalAveragePooling2D()(dense_x)
    output = Dense(classes, activation='softmax')(dense_x)
    return Model(input_img, output)

from keras.utils import np_utils
from sklearn.model_selection import train_test_split
images = train.drop('label', axis=1)
images = np.asarray(images)
images = images.reshape(images.shape[0], 32, 32, 3)
label = train['label']
X_train, X_test, y_train, y_test = train_test_split(images, label.values, test_size=0.33)
Cat_test_y = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
dense_block_size = 3
layers_in_block = 4
growth_rate = 12
classes = 2
model = dense_net(growth_rate * 2, growth_rate, classes, dense_block_size, layers_in_block)
model.summary()
batch_size = 32
epochs = 10
optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_data=(X_test, Cat_test_y))
import sys
import matplotlib
print('Generating plots...')
sys.stdout.flush()
matplotlib.use('Agg')
matplotlib.pyplot.style.use('ggplot')
matplotlib.pyplot.figure()
N = epochs
matplotlib.pyplot.plot(np.arange(0, N), history.history['loss'], label='train_loss')
matplotlib.pyplot.plot(np.arange(0, N), history.history['val_loss'], label='val_loss')
matplotlib.pyplot.plot(np.arange(0, N), history.history['acc'], label='train_acc')
matplotlib.pyplot.plot(np.arange(0, N), history.history['val_acc'], label='val_acc')
matplotlib.pyplot.title('Cactus Image Classification')
matplotlib.pyplot.xlabel('Epoch #')
matplotlib.pyplot.ylabel('Loss/Accuracy')
matplotlib.pyplot.legend(loc='lower left')
matplotlib.pyplot.savefig('plot.png')
code
16157191/cell_6
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
sns.distplot(df.has_cactus)
code
16157191/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import seaborn as sns
print(os.listdir('../input'))
code
16157191/cell_11
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
train = pd.read_csv('../input/cactus-images-csv/dataset/train.csv')
test = pd.read_csv('../input/cactus-images-csv/dataset/test.csv')
print('TRAIN---------------------')
print('Shape: {}'.format(train.shape))
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
print('Label 0 (False): {}'.format(np.sum(train.label == 0)))
print('Label 1 (True): {}'.format(np.sum(train.label == 1)))
print('TEST----------------------')
print('Shape: {}'.format(test.shape))
code
16157191/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
train = pd.read_csv('../input/cactus-images-csv/dataset/train.csv')
test = pd.read_csv('../input/cactus-images-csv/dataset/test.csv')
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
import keras
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.optimizers import Adam

def conv_layer(conv_x, filters):
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_x = Conv2D(filters, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(conv_x)
    conv_x = Dropout(0.2)(conv_x)
    return conv_x

def dense_block(block_x, filters, growth_rate, layers_in_block):
    for i in range(layers_in_block):
        each_layer = conv_layer(block_x, growth_rate)
        block_x = concatenate([block_x, each_layer], axis=-1)
        filters += growth_rate
    return (block_x, filters)

def transition_block(trans_x, tran_filters):
    trans_x = BatchNormalization()(trans_x)
    trans_x = Activation('relu')(trans_x)
    trans_x = Conv2D(tran_filters, (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=False)(trans_x)
    trans_x = AveragePooling2D((2, 2), strides=(2, 2))(trans_x)
    return (trans_x, tran_filters)

def dense_net(filters, growth_rate, classes, dense_block_size, layers_in_block):
    input_img = Input(shape=(32, 32, 3))
    x = Conv2D(24, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(input_img)
    dense_x = BatchNormalization()(x)
    dense_x = Activation('relu')(x)
    dense_x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(dense_x)
    for block in range(dense_block_size - 1):
        dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
        dense_x, filters = transition_block(dense_x, filters)
    dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
    dense_x = BatchNormalization()(dense_x)
    dense_x = Activation('relu')(dense_x)
    dense_x = GlobalAveragePooling2D()(dense_x)
    output = Dense(classes, activation='softmax')(dense_x)
    return Model(input_img, output)

from keras.utils import np_utils
from sklearn.model_selection import train_test_split
images = train.drop('label', axis=1)
images = np.asarray(images)
images = images.reshape(images.shape[0], 32, 32, 3)
label = train['label']
X_train, X_test, y_train, y_test = train_test_split(images, label.values, test_size=0.33)
Cat_test_y = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
dense_block_size = 3
layers_in_block = 4
growth_rate = 12
classes = 2
model = dense_net(growth_rate * 2, growth_rate, classes, dense_block_size, layers_in_block)
model.summary()
batch_size = 32
epochs = 10
optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_data=(X_test, Cat_test_y))
code
16157191/cell_16
[ "text_plain_output_1.png" ]
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
train = pd.read_csv('../input/cactus-images-csv/dataset/train.csv')
test = pd.read_csv('../input/cactus-images-csv/dataset/test.csv')
train = train.drop('Unnamed: 0', axis=1)
test = test.drop('Unnamed: 0', axis=1)
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
images = train.drop('label', axis=1)
images = np.asarray(images)
images = images.reshape(images.shape[0], 32, 32, 3)
label = train['label']
X_train, X_test, y_train, y_test = train_test_split(images, label.values, test_size=0.33)
Cat_test_y = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
print('X_train shape : ', X_train.shape)
print('y_train shape : ', y_train.shape)
print('X_test shape : ', X_test.shape)
print('y_test shape : ', y_test.shape)
code
16157191/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aerial-cactus-identification/train.csv')
df.head()
code
16157191/cell_14
[ "text_html_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.models import Model
import keras
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Activation, Dropout, GlobalAveragePooling2D, BatchNormalization, concatenate, AveragePooling2D
from keras.optimizers import Adam

def conv_layer(conv_x, filters):
    # BN -> ReLU -> 3x3 conv -> dropout: the composite layer repeated inside each dense block
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)
    conv_x = Conv2D(filters, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(conv_x)
    conv_x = Dropout(0.2)(conv_x)
    return conv_x

def dense_block(block_x, filters, growth_rate, layers_in_block):
    # stack composite layers, concatenating each output onto the running feature map
    for i in range(layers_in_block):
        each_layer = conv_layer(block_x, growth_rate)
        block_x = concatenate([block_x, each_layer], axis=-1)
        filters += growth_rate
    return (block_x, filters)

def transition_block(trans_x, tran_filters):
    # 1x1 conv plus average pooling to shrink the feature map between dense blocks
    trans_x = BatchNormalization()(trans_x)
    trans_x = Activation('relu')(trans_x)
    trans_x = Conv2D(tran_filters, (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=False)(trans_x)
    trans_x = AveragePooling2D((2, 2), strides=(2, 2))(trans_x)
    return (trans_x, tran_filters)

def dense_net(filters, growth_rate, classes, dense_block_size, layers_in_block):
    # stem conv, alternating dense/transition blocks, then global pooling and a softmax head
    input_img = Input(shape=(32, 32, 3))
    x = Conv2D(24, (3, 3), kernel_initializer='he_uniform', padding='same', use_bias=False)(input_img)
    dense_x = BatchNormalization()(x)
    dense_x = Activation('relu')(x)
    dense_x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(dense_x)
    for block in range(dense_block_size - 1):
        dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
        dense_x, filters = transition_block(dense_x, filters)
    dense_x, filters = dense_block(dense_x, filters, growth_rate, layers_in_block)
    dense_x = BatchNormalization()(dense_x)
    dense_x = Activation('relu')(dense_x)
    dense_x = GlobalAveragePooling2D()(dense_x)
    output = Dense(classes, activation='softmax')(dense_x)
    return Model(input_img, output)
code
72077003/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type')['diff'].mean().plot(kind='bar')
code
72077003/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df_food = df.groupby('food_detail').agg({'id': 'count', 'diff': 'mean'}).sort_values(by='id', ascending=False).head(10).reset_index()
df_food.columns = ['food_detail', 'count', 'diff']
df_food
code
72077003/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df.groupby('food_type')['id'].count().plot(kind='bar')
code
72077003/cell_26
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df[df['retailer_type'] == 'health food grocer']['food_type'].value_counts()
code
72077003/cell_48
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df_food = df.groupby('food_detail').agg({'id': 'count', 'diff': 'mean'}).sort_values(by='id', ascending=False).head(10).reset_index()
df_r = df.groupby(['retailer_type', 'food_detail'])['diff'].mean().to_frame().reset_index()
df.groupby('organic')['diff'].sum()
df.groupby('organic')['diff'].mean()
code
72077003/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df['diff'] = df['diff'].astype('str').apply(lambda x: x.split(' ')[0]).astype('int')
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df_food = df.groupby('food_detail').agg({'id': 'count', 'diff': 'mean'}).sort_values(by='id', ascending=False).head(10).reset_index()
df_r = df.groupby(['retailer_type', 'food_detail'])['diff'].mean().to_frame().reset_index()
dup = df_r[df_r['food_detail'].duplicated()]
df_duplicates = df[df['food_detail'].apply(lambda x: x in dup['food_detail'].to_list())]
df_duplicates = df_duplicates.groupby(['retailer_type', 'food_detail']).agg({'diff': 'mean', 'id': 'count'})
df_duplicates.columns = ['diff', 'count']
df_duplicates.head()
code
72077003/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df_labels.nlargest(5, 'count')
code
72077003/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df_labels.nlargest(5, 'count')
df_labels.nlargest(5, 'count').plot.bar(y='diff')
code
72077003/cell_35
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df_food = df.groupby('food_detail').agg({'id': 'count', 'diff': 'mean'}).sort_values(by='id', ascending=False).head(10).reset_index()
df_food.columns = ['food_detail', 'count', 'diff']
df_food.sort_values(by='diff').plot.bar(x='food_detail', y='diff')
code
72077003/cell_46
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df_food = df.groupby('food_detail').agg({'id': 'count', 'diff': 'mean'}).sort_values(by='id', ascending=False).head(10).reset_index()
df_r = df.groupby(['retailer_type', 'food_detail'])['diff'].mean().to_frame().reset_index()
df.groupby('organic')['diff'].sum()
code
72077003/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
code
72077003/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df = df[df['label_date'].notna()]
df = df[(df['diff'] > df['diff'].quantile(0.05)) & (df['diff'] < df['diff'].quantile(0.95))]
df_labels = df.groupby('label_language').agg({'id': 'count', 'diff': 'mean'})
df_labels.columns = ['count', 'diff']
df.groupby('retailer_type').count().id.to_frame().sort_values(by='id', ascending=False)
df[df['retailer_type'] == 'drugstore']['food_type'].value_counts()
code
72077003/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/brooklyn-food-waste/brooklyn.csv')
df.head()
code