Column schema (one record per notebook cell):
  path              string   (length 13–17)
  screenshot_names  sequence (length 1–873)
  code              string   (length 0–40.4k)
  cell_type         class    (1 value)
106210118/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/marketing-strategy-personalised-offer/sample.csv')
df1 = pd.read_csv('../input/marketing-strategy-personalised-offer/train_data.csv')

# Every column except the last is a feature; the last column is the label
feature_list = df1.columns[:-1].values
label = [df1.columns[-1]]
print('Feature list:', feature_list)
print('Label:', label)
code
106210118/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/marketing-strategy-personalised-offer/sample.csv')
df1 = pd.read_csv('../input/marketing-strategy-personalised-offer/train_data.csv')
feature_list = df1.columns[:-1].values
label = [df1.columns[-1]]

# Column dtypes, non-null counts, and memory usage
df1.info()
code
106210118/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/marketing-strategy-personalised-offer/sample.csv')
data.head(10)
code
32071603/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns
code
32071603/cell_20
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns

# Mean AB1-BC1 difference per age group, with standard error bars
age_group = data[['AgeGroup', 'AB1-BC1']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
temp = grouped.describe()['AB1-BC1'][['count', 'std']]
temp
summary['std_err'] = temp['std'] / np.sqrt(temp['count'])
summary = summary.reindex([1, 0, 3, 2])
fig = px.bar(summary, x='AgeGroup', y='AB1-BC1', error_y='std_err', width=500, title='AB1-BC1')
fig.show()

# Accuracy per repetition for AB and BC pairs
age_group = data[['AgeGroup', 'AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
col_names = ['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']

# add standard error for each accuracy
for name in col_names:
    temp = grouped.describe()[name][['count', 'std']]
    summary['se_' + name[:2] + name[-1]] = temp['std'] / np.sqrt(temp['count'])

# get AB, BC values for each AgeGroup
child_AB = summary[summary['AgeGroup'] == 'child'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
child_BC = summary[summary['AgeGroup'] == 'child'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
ado_AB = summary[summary['AgeGroup'] == 'adolescent'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
ado_BC = summary[summary['AgeGroup'] == 'adolescent'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
y_adl_AB = summary[summary['AgeGroup'] == 'younger adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
y_adl_BC = summary[summary['AgeGroup'] == 'younger adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
o_adl_AB = summary[summary['AgeGroup'] == 'older adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
o_adl_BC = summary[summary['AgeGroup'] == 'older adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]

# get standard errors for the AB, BC values of each AgeGroup
child_AB_se = summary[summary['AgeGroup'] == 'child'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
child_BC_se = summary[summary['AgeGroup'] == 'child'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
ado_AB_se = summary[summary['AgeGroup'] == 'adolescent'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
ado_BC_se = summary[summary['AgeGroup'] == 'adolescent'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
y_adl_AB_se = summary[summary['AgeGroup'] == 'younger adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
y_adl_BC_se = summary[summary['AgeGroup'] == 'younger adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
o_adl_AB_se = summary[summary['AgeGroup'] == 'older adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
o_adl_BC_se = summary[summary['AgeGroup'] == 'older adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]

# make 2x2 subplots, one panel per age group
fig = make_subplots(rows=2, cols=2, subplot_titles=('Children', 'Adolescent', 'Younger Adults', 'Older Adults'))
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=child_AB_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=child_BC_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=ado_AB_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=ado_BC_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=y_adl_AB_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=y_adl_BC_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=o_adl_AB_se)), row=2, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=o_adl_BC_se)), row=2, col=2)
fig.update_layout(width=850, height=600)
fig.show()

# Reaction time per repetition for AB and BC pairs, same procedure as for accuracy
age_group = data[['AgeGroup', 'AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4', 'BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
col_names = ['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4', 'BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']

# add standard error for each reaction time
for name in col_names:
    temp = grouped.describe()[name][['count', 'std']]
    summary['se_' + name[:2] + name[-1]] = temp['std'] / np.sqrt(temp['count'])

# get AB, BC values for each AgeGroup
child_AB = summary[summary['AgeGroup'] == 'child'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
child_BC = summary[summary['AgeGroup'] == 'child'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
ado_AB = summary[summary['AgeGroup'] == 'adolescent'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
ado_BC = summary[summary['AgeGroup'] == 'adolescent'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
y_adl_AB = summary[summary['AgeGroup'] == 'younger adult'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
y_adl_BC = summary[summary['AgeGroup'] == 'younger adult'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
o_adl_AB = summary[summary['AgeGroup'] == 'older adult'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
o_adl_BC = summary[summary['AgeGroup'] == 'older adult'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]

# get standard errors for the AB, BC values of each AgeGroup
child_AB_se = summary[summary['AgeGroup'] == 'child'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
child_BC_se = summary[summary['AgeGroup'] == 'child'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
ado_AB_se = summary[summary['AgeGroup'] == 'adolescent'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
ado_BC_se = summary[summary['AgeGroup'] == 'adolescent'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
y_adl_AB_se = summary[summary['AgeGroup'] == 'younger adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
y_adl_BC_se = summary[summary['AgeGroup'] == 'younger adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
o_adl_AB_se = summary[summary['AgeGroup'] == 'older adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
o_adl_BC_se = summary[summary['AgeGroup'] == 'older adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]

# make 2x2 subplots, one panel per age group
fig = make_subplots(rows=2, cols=2, subplot_titles=('Children', 'Adolescent', 'Younger Adults', 'Older Adults'))
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=child_AB_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=child_BC_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=ado_AB_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=ado_BC_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=y_adl_AB_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=y_adl_BC_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=o_adl_AB_se)), row=2, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=o_adl_BC_se)), row=2, col=2)
fig.update_layout(width=850, height=600)
fig.show()

# Distribution of BC_acc_4, split by whether AB_acc_4 reached 1
data_new = data.copy()
data_new['AB_acc_4_T'] = data['AB_acc_4'] == 1
data_new['AB_acc_4_T'] = data_new['AB_acc_4_T'].apply(lambda x: 'correct' if x else 'incorrect')
fig = px.histogram(data_new, x='BC_acc_4', color='AB_acc_4_T', marginal='rug', barmode='overlay', width=700, title='BC_acc_4 distribution between AB_acc_4 = 1 or not 1')
fig.show()
code
32071603/cell_6
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns
age_group = data[['AgeGroup', 'AB1-BC1']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
temp = grouped.describe()['AB1-BC1'][['count', 'std']]
temp
summary['std_err'] = temp['std'] / np.sqrt(temp['count'])
summary = summary.reindex([1, 0, 3, 2])
fig = px.bar(summary, x='AgeGroup', y='AB1-BC1', error_y='std_err', width=500, title='AB1-BC1')
fig.show()
code
32071603/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns

# Summary statistics of BC_acc_4 for participants whose AB_acc_4 was not 1
data_new = data.copy()
data_new['AB_acc_4_T'] = data['AB_acc_4'] == 1
data_new['AB_acc_4_T'] = data_new['AB_acc_4_T'].apply(lambda x: 'correct' if x else 'incorrect')
data_new[data_new['AB_acc_4_T'] == 'incorrect'][['BC_acc_4']].describe()
code
32071603/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from scipy import stats

# List every file available under the Kaggle input directory
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32071603/cell_8
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns

# Mean AB1-BC1 difference per age group, with standard error bars
age_group = data[['AgeGroup', 'AB1-BC1']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
temp = grouped.describe()['AB1-BC1'][['count', 'std']]
temp
summary['std_err'] = temp['std'] / np.sqrt(temp['count'])
summary = summary.reindex([1, 0, 3, 2])
fig = px.bar(summary, x='AgeGroup', y='AB1-BC1', error_y='std_err', width=500, title='AB1-BC1')
fig.show()

# Accuracy per repetition for AB and BC pairs
age_group = data[['AgeGroup', 'AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
col_names = ['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']

# add standard error for each accuracy
for name in col_names:
    temp = grouped.describe()[name][['count', 'std']]
    summary['se_' + name[:2] + name[-1]] = temp['std'] / np.sqrt(temp['count'])

# get AB, BC values for each AgeGroup
child_AB = summary[summary['AgeGroup'] == 'child'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
child_BC = summary[summary['AgeGroup'] == 'child'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
ado_AB = summary[summary['AgeGroup'] == 'adolescent'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
ado_BC = summary[summary['AgeGroup'] == 'adolescent'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
y_adl_AB = summary[summary['AgeGroup'] == 'younger adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
y_adl_BC = summary[summary['AgeGroup'] == 'younger adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
o_adl_AB = summary[summary['AgeGroup'] == 'older adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
o_adl_BC = summary[summary['AgeGroup'] == 'older adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]

# get standard errors for the AB, BC values of each AgeGroup
child_AB_se = summary[summary['AgeGroup'] == 'child'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
child_BC_se = summary[summary['AgeGroup'] == 'child'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
ado_AB_se = summary[summary['AgeGroup'] == 'adolescent'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
ado_BC_se = summary[summary['AgeGroup'] == 'adolescent'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
y_adl_AB_se = summary[summary['AgeGroup'] == 'younger adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
y_adl_BC_se = summary[summary['AgeGroup'] == 'younger adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
o_adl_AB_se = summary[summary['AgeGroup'] == 'older adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
o_adl_BC_se = summary[summary['AgeGroup'] == 'older adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]

# make 2x2 subplots, one panel per age group
fig = make_subplots(rows=2, cols=2, subplot_titles=('Children', 'Adolescent', 'Younger Adults', 'Older Adults'))
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=child_AB_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=child_BC_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=ado_AB_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=ado_BC_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=y_adl_AB_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=y_adl_BC_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=o_adl_AB_se)), row=2, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=o_adl_BC_se)), row=2, col=2)
fig.update_layout(width=850, height=600)
fig.show()
code
32071603/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns
data_new = data.copy()
data_new['AB_acc_4_T'] = data['AB_acc_4'] == 1
data_new['AB_acc_4_T'] = data_new['AB_acc_4_T'].apply(lambda x: 'correct' if x else 'incorrect')
data_new['AB_acc_4_T'].value_counts()
code
32071603/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.head(5)
code
32071603/cell_17
[ "text_html_output_2.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns
data_new = data.copy()
data_new['AB_acc_4_T'] = data['AB_acc_4'] == 1
data_new['AB_acc_4_T'] = data_new['AB_acc_4_T'].apply(lambda x: 'correct' if x else 'incorrect')
data_new[data_new['AB_acc_4_T'] == 'correct'][['BC_acc_4']].describe()
code
32071603/cell_22
[ "text_plain_output_1.png" ]
from plotly.subplots import make_subplots
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns

# Mean AB1-BC1 difference per age group, with standard error bars
age_group = data[['AgeGroup', 'AB1-BC1']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
temp = grouped.describe()['AB1-BC1'][['count', 'std']]
temp
summary['std_err'] = temp['std'] / np.sqrt(temp['count'])
summary = summary.reindex([1, 0, 3, 2])
fig = px.bar(summary, x='AgeGroup', y='AB1-BC1', error_y='std_err', width=500, title='AB1-BC1')
fig.show()

# Accuracy per repetition for AB and BC pairs
age_group = data[['AgeGroup', 'AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
col_names = ['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']

# add standard error for each accuracy
for name in col_names:
    temp = grouped.describe()[name][['count', 'std']]
    summary['se_' + name[:2] + name[-1]] = temp['std'] / np.sqrt(temp['count'])

# get AB, BC values for each AgeGroup
child_AB = summary[summary['AgeGroup'] == 'child'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
child_BC = summary[summary['AgeGroup'] == 'child'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
ado_AB = summary[summary['AgeGroup'] == 'adolescent'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
ado_BC = summary[summary['AgeGroup'] == 'adolescent'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
y_adl_AB = summary[summary['AgeGroup'] == 'younger adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
y_adl_BC = summary[summary['AgeGroup'] == 'younger adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
o_adl_AB = summary[summary['AgeGroup'] == 'older adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
o_adl_BC = summary[summary['AgeGroup'] == 'older adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]

# get standard errors for the AB, BC values of each AgeGroup
child_AB_se = summary[summary['AgeGroup'] == 'child'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
child_BC_se = summary[summary['AgeGroup'] == 'child'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
ado_AB_se = summary[summary['AgeGroup'] == 'adolescent'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
ado_BC_se = summary[summary['AgeGroup'] == 'adolescent'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
y_adl_AB_se = summary[summary['AgeGroup'] == 'younger adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
y_adl_BC_se = summary[summary['AgeGroup'] == 'younger adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
o_adl_AB_se = summary[summary['AgeGroup'] == 'older adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
o_adl_BC_se = summary[summary['AgeGroup'] == 'older adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]

# make 2x2 subplots, one panel per age group
fig = make_subplots(rows=2, cols=2, subplot_titles=('Children', 'Adolescent', 'Younger Adults', 'Older Adults'))
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=child_AB_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=child_BC_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=ado_AB_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=ado_BC_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=y_adl_AB_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=y_adl_BC_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=o_adl_AB_se)), row=2, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=o_adl_BC_se)), row=2, col=2)
fig.update_layout(width=850, height=600)
fig.show()

# Reaction time per repetition for AB and BC pairs, same procedure as for accuracy
age_group = data[['AgeGroup', 'AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4', 'BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
col_names = ['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4', 'BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']

# add standard error for each reaction time
for name in col_names:
    temp = grouped.describe()[name][['count', 'std']]
    summary['se_' + name[:2] + name[-1]] = temp['std'] / np.sqrt(temp['count'])

# get AB, BC values for each AgeGroup
child_AB = summary[summary['AgeGroup'] == 'child'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
child_BC = summary[summary['AgeGroup'] == 'child'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
ado_AB = summary[summary['AgeGroup'] == 'adolescent'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
ado_BC = summary[summary['AgeGroup'] == 'adolescent'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
y_adl_AB = summary[summary['AgeGroup'] == 'younger adult'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
y_adl_BC = summary[summary['AgeGroup'] == 'younger adult'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
o_adl_AB = summary[summary['AgeGroup'] == 'older adult'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
o_adl_BC = summary[summary['AgeGroup'] == 'older adult'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]

# get standard errors for the AB, BC values of each AgeGroup
child_AB_se = summary[summary['AgeGroup'] == 'child'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
child_BC_se = summary[summary['AgeGroup'] == 'child'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
ado_AB_se = summary[summary['AgeGroup'] == 'adolescent'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
ado_BC_se = summary[summary['AgeGroup'] == 'adolescent'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
y_adl_AB_se = summary[summary['AgeGroup'] == 'younger adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
y_adl_BC_se = summary[summary['AgeGroup'] == 'younger adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
o_adl_AB_se = summary[summary['AgeGroup'] == 'older adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
o_adl_BC_se = summary[summary['AgeGroup'] == 'older adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]

# make 2x2 subplots, one panel per age group
fig = make_subplots(rows=2, cols=2, subplot_titles=('Children', 'Adolescent', 'Younger Adults', 'Older Adults'))
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=child_AB_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=child_BC_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=ado_AB_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=ado_BC_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=y_adl_AB_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=y_adl_BC_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=o_adl_AB_se)), row=2, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=o_adl_BC_se)), row=2, col=2)
fig.update_layout(width=850, height=600)
fig.show()

# Distribution of BC_acc_4, split by whether AB_acc_4 reached 1
data_new = data.copy()
data_new['AB_acc_4_T'] = data['AB_acc_4'] == 1
data_new['AB_acc_4_T'] = data_new['AB_acc_4_T'].apply(lambda x: 'correct' if x else 'incorrect')
fig = px.histogram(data_new, x='BC_acc_4', color='AB_acc_4_T', marginal='rug', barmode='overlay', width=700, title='BC_acc_4 distribution between AB_acc_4 = 1 or not 1')
fig.show()

# Mean BC1-AC difference per age group, with standard error bars
data_new = data.copy()
data_new['BC1-AC'] = data_new['BC_acc_1'] - data_new['AC_acc']
age_group = data_new[['AgeGroup', 'BC1-AC']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
temp = grouped.describe()['BC1-AC'][['count', 'std']]
temp
summary['std_err'] = temp['std'] / np.sqrt(temp['count'])
summary = summary.reindex([1, 0, 3, 2])
print(summary)
fig = px.bar(summary, x='AgeGroup', y='BC1-AC', error_y='std_err', width=500, title='(BC rep1 - AC rep1) x Age')
fig.show()
code
32071603/cell_10
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

data = pd.read_csv('/kaggle/input/associativeinference/Associative Inference.csv')
data.columns

# Mean AB1-BC1 difference per age group, with standard error bars
age_group = data[['AgeGroup', 'AB1-BC1']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
temp = grouped.describe()['AB1-BC1'][['count', 'std']]
temp
summary['std_err'] = temp['std'] / np.sqrt(temp['count'])
summary = summary.reindex([1, 0, 3, 2])
fig = px.bar(summary, x='AgeGroup', y='AB1-BC1', error_y='std_err', width=500, title='AB1-BC1')
fig.show()

# Accuracy per repetition for AB and BC pairs
age_group = data[['AgeGroup', 'AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
col_names = ['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4', 'BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']

# add standard error for each accuracy
for name in col_names:
    temp = grouped.describe()[name][['count', 'std']]
    summary['se_' + name[:2] + name[-1]] = temp['std'] / np.sqrt(temp['count'])

# get AB, BC values for each AgeGroup
child_AB = summary[summary['AgeGroup'] == 'child'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
child_BC = summary[summary['AgeGroup'] == 'child'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
ado_AB = summary[summary['AgeGroup'] == 'adolescent'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
ado_BC = summary[summary['AgeGroup'] == 'adolescent'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
y_adl_AB = summary[summary['AgeGroup'] == 'younger adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
y_adl_BC = summary[summary['AgeGroup'] == 'younger adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]
o_adl_AB = summary[summary['AgeGroup'] == 'older adult'][['AB_acc_1', 'AB_acc_2', 'AB_acc_3', 'AB_acc_4']].values[0]
o_adl_BC = summary[summary['AgeGroup'] == 'older adult'][['BC_acc_1', 'BC_acc_2', 'BC_acc_3', 'BC_acc_4']].values[0]

# get standard errors for the AB, BC values of each AgeGroup
child_AB_se = summary[summary['AgeGroup'] == 'child'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
child_BC_se = summary[summary['AgeGroup'] == 'child'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
ado_AB_se = summary[summary['AgeGroup'] == 'adolescent'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
ado_BC_se = summary[summary['AgeGroup'] == 'adolescent'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
y_adl_AB_se = summary[summary['AgeGroup'] == 'younger adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
y_adl_BC_se = summary[summary['AgeGroup'] == 'younger adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
o_adl_AB_se = summary[summary['AgeGroup'] == 'older adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
o_adl_BC_se = summary[summary['AgeGroup'] == 'older adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]

# make 2x2 subplots, one panel per age group
fig = make_subplots(rows=2, cols=2, subplot_titles=('Children', 'Adolescent', 'Younger Adults', 'Older Adults'))
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=child_AB_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=child_BC_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=ado_AB_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=ado_BC_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=y_adl_AB_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=y_adl_BC_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=o_adl_AB_se)), row=2, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=o_adl_BC_se)), row=2, col=2)
fig.update_layout(width=850, height=600)
fig.show()

# Reaction time per repetition for AB and BC pairs, same procedure as for accuracy
age_group = data[['AgeGroup', 'AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4', 'BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']]
age_group.head()
grouped = age_group.groupby('AgeGroup', as_index=False)
summary = grouped.mean()
summary
col_names = ['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4', 'BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']

# add standard error for each reaction time
for name in col_names:
    temp = grouped.describe()[name][['count', 'std']]
    summary['se_' + name[:2] + name[-1]] = temp['std'] / np.sqrt(temp['count'])

# get AB, BC values for each AgeGroup
child_AB = summary[summary['AgeGroup'] == 'child'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
child_BC = summary[summary['AgeGroup'] == 'child'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
ado_AB = summary[summary['AgeGroup'] == 'adolescent'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
ado_BC = summary[summary['AgeGroup'] == 'adolescent'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
y_adl_AB = summary[summary['AgeGroup'] == 'younger adult'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
y_adl_BC = summary[summary['AgeGroup'] == 'younger adult'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]
o_adl_AB = summary[summary['AgeGroup'] == 'older adult'][['AB_rt_1', 'AB_rt_2', 'AB_rt_3', 'AB_rt_4']].values[0]
o_adl_BC = summary[summary['AgeGroup'] == 'older adult'][['BC_rt_1', 'BC_rt_2', 'BC_rt_3', 'BC_rt_4']].values[0]

# get standard errors for the AB, BC values of each AgeGroup
child_AB_se = summary[summary['AgeGroup'] == 'child'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
child_BC_se = summary[summary['AgeGroup'] == 'child'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
ado_AB_se = summary[summary['AgeGroup'] == 'adolescent'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
ado_BC_se = summary[summary['AgeGroup'] == 'adolescent'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
y_adl_AB_se = summary[summary['AgeGroup'] == 'younger adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
y_adl_BC_se = summary[summary['AgeGroup'] == 'younger adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]
o_adl_AB_se = summary[summary['AgeGroup'] == 'older adult'][['se_AB1', 'se_AB2', 'se_AB3', 'se_AB4']].values[0]
o_adl_BC_se = summary[summary['AgeGroup'] == 'older adult'][['se_BC1', 'se_BC2', 'se_BC3', 'se_BC4']].values[0]

# make 2x2 subplots, one panel per age group
fig = make_subplots(rows=2, cols=2, subplot_titles=('Children', 'Adolescent', 'Younger Adults', 'Older Adults'))
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=child_AB_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=child_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=child_BC_se)), row=1, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=ado_AB_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=ado_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=ado_BC_se)), row=1, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=y_adl_AB_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=y_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=y_adl_BC_se)), row=2, col=1)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_AB, name='AB', line=dict(color='firebrick', width=2), error_y=dict(array=o_adl_AB_se)), row=2, col=2)
fig.add_trace(go.Scatter(x=['1', '2', '3', '4'], y=o_adl_BC, name='BC', line=dict(color='royalblue', width=2), error_y=dict(array=o_adl_BC_se)), row=2, col=2)
fig.update_layout(width=850, height=600)
fig.show()
code
32068445/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))

lotteryPath = paths[0]
lottery = pd.read_csv(lotteryPath, encoding='latin-1')
lottery

# Count how often each number appears across the six main balls and the extra ball
all_balls = {}
for i in range(1, 7):
    ball_ser = lottery['Ball ' + str(i)].value_counts()
    for key in ball_ser.keys():
        all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
ball_ser = lottery['Extra Ball'].value_counts()
for key in ball_ser.keys():
    all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
all_balls = pd.Series(all_balls)
plt.xticks(rotation=0)

# Visualize the distributions of each ball
f, axes = plt.subplots(7, 1)
f.tight_layout()
for i in range(1, 7):
    ball_dist = lottery['Ball ' + str(i)].value_counts().sort_index()
    axes[i - 1].set_title('Distribution of ball ' + str(i))
    plt.xticks(rotation=90)
    sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[i - 1], palette='PuBuGn_d')
ball_dist = lottery['Extra Ball'].value_counts().sort_index()
axes[6].set_title('Distribution of extra ball')
plt.xticks(rotation=90)
sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[6], palette='PuBuGn_d')
code
32068445/cell_4
[ "text_plain_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))

lotteryPath = paths[0]
lottery = pd.read_csv(lotteryPath, encoding='latin-1')
lottery
code
32068445/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))

lotteryPath = paths[0]
lottery = pd.read_csv(lotteryPath, encoding='latin-1')
lottery

# Count how often each number appears across the six main balls and the extra ball
all_balls = {}
for i in range(1, 7):
    ball_ser = lottery['Ball ' + str(i)].value_counts()
    for key in ball_ser.keys():
        all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
ball_ser = lottery['Extra Ball'].value_counts()
for key in ball_ser.keys():
    all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
all_balls = pd.Series(all_balls)

plt.title('Distribution of all balls')
plt.xticks(rotation=0)
sns.barplot(x=all_balls.keys(), y=all_balls.values, palette='OrRd')
code
32068445/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))

lotteryPath = paths[0]
lottery = pd.read_csv(lotteryPath, encoding='latin-1')
lottery

# Count how often each number appears across the six main balls and the extra ball
all_balls = {}
for i in range(1, 7):
    ball_ser = lottery['Ball ' + str(i)].value_counts()
    for key in ball_ser.keys():
        all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
ball_ser = lottery['Extra Ball'].value_counts()
for key in ball_ser.keys():
    all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
all_balls = pd.Series(all_balls)
plt.xticks(rotation=0)

# Visualize the distributions of each ball
f, axes = plt.subplots(7, 1)
f.tight_layout()
for i in range(1, 7):
    ball_dist = lottery['Ball ' + str(i)].value_counts().sort_index()
    axes[i - 1].set_title('Distribution of ball ' + str(i))
    plt.xticks(rotation=90)
    sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[i - 1], palette='PuBuGn_d')
ball_dist = lottery['Extra Ball'].value_counts().sort_index()
axes[6].set_title('Distribution of extra ball')
plt.xticks(rotation=90)
sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[6], palette='PuBuGn_d')

# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    df = df.dropna(axis='columns')  # drop columns with NaN
    del df['Draw Number']
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns with more than 1 unique value
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    corr = df.corr('pearson')
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1, cmap='BuGn')
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=0)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    plt.title('Correlation Matrix', fontsize=15)
    plt.show()

plotCorrelationMatrix(lottery, 8)

def getDate(strDate):
    return datetime.strptime(strDate, '%Y-%m-%d').date()

allMonthsData = []

class MonthData:
    def __init__(self, date, ballsDict):
        self.date = date
        self.ballsDict = ballsDict

def generateStatsForDraws(draws, drawDate):
    # Count each ball number (1-42) in a month's worth of draws
    if not draws.empty:
        currentBalls = {}
        del draws['Date']
        del draws['Draw Number']
        del draws['Jackpot']
        balls_list = draws.values.T.tolist()
        balls_flat_list = [item for sublist in balls_list for item in sublist]
        for i in range(1, 43):
            currentBalls[i] = balls_flat_list.count(i)
        data = MonthData(drawDate, currentBalls)
        allMonthsData.append(data)

def plotBallsInMonths(index):
    all_balls = pd.Series(allMonthsData[index].ballsDict)
    plt.xticks(rotation=0)

ball_month = pd.DataFrame()
initDate = getDate(lottery['Date'][0])
currentMonth = initDate.month
currentYear = initDate.year

def getOccurencesPerMonth():
    global ball_month
    global currentMonth
    global currentYear
    for index, draw in lottery.iterrows():
        drawDate = getDate(draw['Date'])
        if drawDate.month == currentMonth and drawDate.year == currentYear:
            ball_month = ball_month.append(draw)
        else:
            generateStatsForDraws(ball_month, drawDate)
            ball_month = pd.DataFrame()
            currentMonth = currentMonth % 12 + 1
            if currentYear != drawDate.year:
                currentYear = drawDate.year

ball_dataset = pd.DataFrame(columns=['Year', 'Ball Number', 'Occurences'])
ball_dataset['Year'] = pd.to_numeric(ball_dataset['Year'])
ball_dataset['Ball Number'] = pd.to_numeric(ball_dataset['Ball Number'])
ball_dataset['Occurences'] = pd.to_numeric(ball_dataset['Occurences'])

def generateYearStatsForDraws(draws, drawDate):
    # Append one (Year, Ball Number, Occurences) row per ball for a year's draws
    global ball_dataset
    if not draws.empty:
        currentBalls = {}
        del draws['Date']
        balls_list = draws.values.T.tolist()
        balls_flat_list = [item for sublist in balls_list for item in sublist]
        for i in range(1, 43):
            currentBalls['Year'] = int(drawDate.year)
            currentBalls['Ball Number'] = int(i)
            currentBalls['Occurences'] = int(balls_flat_list.count(i))
            ball_at_year = pd.Series(currentBalls)
            currentBalls = {}
            ball_dataset = ball_dataset.append(ball_at_year, ignore_index=True)

ball_month = pd.DataFrame()
initDate = getDate(lottery['Date'][0])
currentMonth = initDate.month
currentYear = initDate.year
for index, draw in lottery.iterrows():
    del draw['Draw Number']
    del draw['Jackpot']
    drawDate = getDate(draw['Date'])
    if drawDate.month == currentMonth and drawDate.year == currentYear:
        ball_month = ball_month.append(draw)
    else:
        currentMonth = currentMonth % 12 + 1
        if currentYear != drawDate.year:
            generateYearStatsForDraws(ball_month, drawDate)
            ball_month = pd.DataFrame()
            currentYear = drawDate.year
print(ball_dataset)
code
32068445/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))

lotteryPath = paths[0]
lottery = pd.read_csv(lotteryPath, encoding='latin-1')
lottery

# Count how often each number appears across the six main balls and the extra ball
all_balls = {}
for i in range(1, 7):
    ball_ser = lottery['Ball ' + str(i)].value_counts()
    for key in ball_ser.keys():
        all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
ball_ser = lottery['Extra Ball'].value_counts()
for key in ball_ser.keys():
    all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
all_balls = pd.Series(all_balls)
plt.xticks(rotation=0)

# Visualize the distributions of each ball
f, axes = plt.subplots(7, 1)
f.tight_layout()
for i in range(1, 7):
    ball_dist = lottery['Ball ' + str(i)].value_counts().sort_index()
    axes[i - 1].set_title('Distribution of ball ' + str(i))
    plt.xticks(rotation=90)
    sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[i - 1], palette='PuBuGn_d')
ball_dist = lottery['Extra Ball'].value_counts().sort_index()
axes[6].set_title('Distribution of extra ball')
plt.xticks(rotation=90)
sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[6], palette='PuBuGn_d')

# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    df = df.dropna(axis='columns')  # drop columns with NaN
    del df['Draw Number']
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns with more than 1 unique value
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    corr = df.corr('pearson')
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1, cmap='BuGn')
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=0)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    plt.title('Correlation Matrix', fontsize=15)
    plt.show()

plotCorrelationMatrix(lottery, 8)

allMonthsData = []

class MonthData:
    def __init__(self, date, ballsDict):
        self.date = date
        self.ballsDict = ballsDict

def generateStatsForDraws(draws, drawDate):
    # Count each ball number (1-42) in a month's worth of draws
    if not draws.empty:
        currentBalls = {}
        del draws['Date']
        del draws['Draw Number']
        del draws['Jackpot']
        balls_list = draws.values.T.tolist()
        balls_flat_list = [item for sublist in balls_list for item in sublist]
        for i in range(1, 43):
            currentBalls[i] = balls_flat_list.count(i)
        data = MonthData(drawDate, currentBalls)
        allMonthsData.append(data)

def plotBallsInMonths(index):
    all_balls = pd.Series(allMonthsData[index].ballsDict)
    plt.xticks(rotation=0)

plotBallsInMonths(50)
code
32068445/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))
code
32068445/cell_31
[ "image_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))

lotteryPath = paths[0]
lottery = pd.read_csv(lotteryPath, encoding='latin-1')
lottery

# Count how often each number appears across the six main balls and the extra ball
all_balls = {}
for i in range(1, 7):
    ball_ser = lottery['Ball ' + str(i)].value_counts()
    for key in ball_ser.keys():
        all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
ball_ser = lottery['Extra Ball'].value_counts()
for key in ball_ser.keys():
    all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
all_balls = pd.Series(all_balls)
plt.xticks(rotation=0)

# Visualize the distributions of each ball
f, axes = plt.subplots(7, 1)
f.tight_layout()
for i in range(1, 7):
    ball_dist = lottery['Ball ' + str(i)].value_counts().sort_index()
    axes[i - 1].set_title('Distribution of ball ' + str(i))
    plt.xticks(rotation=90)
    sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[i - 1], palette='PuBuGn_d')
ball_dist = lottery['Extra Ball'].value_counts().sort_index()
axes[6].set_title('Distribution of extra ball')
plt.xticks(rotation=90)
sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[6], palette='PuBuGn_d')

# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    df = df.dropna(axis='columns')  # drop columns with NaN
    del df['Draw Number']
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns with more than 1 unique value
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    corr = df.corr('pearson')
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1, cmap='BuGn')
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=0)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    plt.title('Correlation Matrix', fontsize=15)
    plt.show()

plotCorrelationMatrix(lottery, 8)

def getDate(strDate):
    return datetime.strptime(strDate, '%Y-%m-%d').date()

allMonthsData = []

class MonthData:
    def __init__(self, date, ballsDict):
        self.date = date
        self.ballsDict = ballsDict

def generateStatsForDraws(draws, drawDate):
    # Count each ball number (1-42) in a month's worth of draws
    if not draws.empty:
        currentBalls = {}
        del draws['Date']
        del draws['Draw Number']
        del draws['Jackpot']
        balls_list = draws.values.T.tolist()
        balls_flat_list = [item for sublist in balls_list for item in sublist]
        for i in range(1, 43):
            currentBalls[i] = balls_flat_list.count(i)
        data = MonthData(drawDate, currentBalls)
        allMonthsData.append(data)

def plotBallsInMonths(index):
    all_balls = pd.Series(allMonthsData[index].ballsDict)
    plt.xticks(rotation=0)

ball_month = pd.DataFrame()
initDate = getDate(lottery['Date'][0])
currentMonth = initDate.month
currentYear = initDate.year

def getOccurencesPerMonth():
    global ball_month
    global currentMonth
    global currentYear
    for index, draw in lottery.iterrows():
        drawDate = getDate(draw['Date'])
        if drawDate.month == currentMonth and drawDate.year == currentYear:
            ball_month = ball_month.append(draw)
        else:
            generateStatsForDraws(ball_month, drawDate)
            ball_month = pd.DataFrame()
            currentMonth = currentMonth % 12 + 1
            if currentYear != drawDate.year:
                currentYear = drawDate.year

ball_dataset = pd.DataFrame(columns=['Year', 'Ball Number', 'Occurences'])
ball_dataset['Year'] = pd.to_numeric(ball_dataset['Year'])
ball_dataset['Ball Number'] = pd.to_numeric(ball_dataset['Ball Number'])
ball_dataset['Occurences'] = pd.to_numeric(ball_dataset['Occurences'])

def generateYearStatsForDraws(draws, drawDate):
    # Append one (Year, Ball Number, Occurences) row per ball for a year's draws
    global ball_dataset
    if not draws.empty:
        currentBalls = {}
        del draws['Date']
        balls_list = draws.values.T.tolist()
        balls_flat_list = [item for sublist in balls_list for item in sublist]
        for i in range(1, 43):
            currentBalls['Year'] = int(drawDate.year)
            currentBalls['Ball Number'] = int(i)
            currentBalls['Occurences'] = int(balls_flat_list.count(i))
            ball_at_year = pd.Series(currentBalls)
            currentBalls = {}
            ball_dataset = ball_dataset.append(ball_at_year, ignore_index=True)

ball_month = pd.DataFrame()
initDate = getDate(lottery['Date'][0])
currentMonth = initDate.month
currentYear = initDate.year
for index, draw in lottery.iterrows():
    del draw['Draw Number']
    del draw['Jackpot']
    drawDate = getDate(draw['Date'])
    if drawDate.month == currentMonth and drawDate.year == currentYear:
        ball_month = ball_month.append(draw)
    else:
        currentMonth = currentMonth % 12 + 1
        if currentYear != drawDate.year:
            generateYearStatsForDraws(ball_month, drawDate)
            ball_month = pd.DataFrame()
            currentYear = drawDate.year

# Heatmap of how often each ball appeared in each year
balls = ball_dataset.pivot(index='Ball Number', columns='Year', values='Occurences')
f, ax = plt.subplots(figsize=(18, 18))
plt.title('Occurence of Each Ball per Year')
sns.heatmap(balls, annot=True, fmt='d', linewidths=0.0, ax=ax)
code
32068445/cell_12
[ "text_html_output_1.png" ]
import copy
import os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime, timedelta, date
from statistics import mean
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation
from sklearn.preprocessing import StandardScaler

sns.set(rc={'figure.figsize': (11, 8)})
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
plt.rcParams['axes.grid'] = False

# Collect the paths of all files under the Kaggle input directory
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))

lotteryPath = paths[0]
lottery = pd.read_csv(lotteryPath, encoding='latin-1')
lottery

# Count how often each number appears across the six main balls and the extra ball
all_balls = {}
for i in range(1, 7):
    ball_ser = lottery['Ball ' + str(i)].value_counts()
    for key in ball_ser.keys():
        all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
ball_ser = lottery['Extra Ball'].value_counts()
for key in ball_ser.keys():
    all_balls[key] = all_balls.get(key, 0) + ball_ser[key]
all_balls = pd.Series(all_balls)
plt.xticks(rotation=0)

# Visualize the distributions of each ball
f, axes = plt.subplots(7, 1)
f.tight_layout()
for i in range(1, 7):
    ball_dist = lottery['Ball ' + str(i)].value_counts().sort_index()
    axes[i - 1].set_title('Distribution of ball ' + str(i))
    plt.xticks(rotation=90)
    sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[i - 1], palette='PuBuGn_d')
ball_dist = lottery['Extra Ball'].value_counts().sort_index()
axes[6].set_title('Distribution of extra ball')
plt.xticks(rotation=90)
sns.barplot(x=ball_dist.keys(), y=ball_dist.values, ax=axes[6], palette='PuBuGn_d')

# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    df = df.dropna(axis='columns')  # drop columns with NaN
    del df['Draw Number']
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns with more than 1 unique value
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    corr = df.corr('pearson')
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1, cmap='BuGn')
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=0)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    plt.title('Correlation Matrix', fontsize=15)
    plt.show()

plotCorrelationMatrix(lottery, 8)
code
73089289/cell_9
[ "text_html_output_1.png" ]
from keras.applications.xception import Xception, preprocess_input, decode_predictions
from skimage.io import imread_collection, imread
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
path = '../input/pneumoniamulti/pneumonia-multi2/'
image_names = os.listdir(path)
gray_images = [imread(path + str(name) + '') for name in image_names]
images = np.zeros((len(gray_images), gray_images[0].shape[0], gray_images[0].shape[1], 3))
for i, im in enumerate(gray_images):
    for j in range(3):
        images[i, :, :, j] = im
pretrained = Xception(weights='imagenet', include_top=False, pooling='avg')
pretrained.summary()
x = preprocess_input(images)
features = pretrained.predict(x)
nome_file = 'xcption_covid_sseg'
matriz_csv = pd.DataFrame(features)
matriz_csv
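# NOTE: a minimal sketch, not from the original notebook -- the cell builds
# `matriz_csv` and a file stem `nome_file` but never writes them out; the
# presumable next step (exact file name is an assumption) would be:
# matriz_csv.to_csv(nome_file + '.csv', index=False)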
code
73089289/cell_4
[ "text_plain_output_1.png" ]
from keras.applications.xception import Xception, preprocess_input, decode_predictions
pretrained = Xception(weights='imagenet', include_top=False, pooling='avg')
code
73089289/cell_2
[ "text_plain_output_1.png" ]
from skimage.io import imread_collection, imread
import os
path = '../input/pneumoniamulti/pneumonia-multi2/'
image_names = os.listdir(path)
gray_images = [imread(path + str(name) + '') for name in image_names]
print('The database has {} segmented images'.format(len(gray_images)))
code
73089289/cell_7
[ "text_plain_output_1.png" ]
from keras.applications.xception import Xception, preprocess_input, decode_predictions
from skimage.io import imread_collection, imread
import numpy as np
import numpy as np
import os
path = '../input/pneumoniamulti/pneumonia-multi2/'
image_names = os.listdir(path)
gray_images = [imread(path + str(name) + '') for name in image_names]
images = np.zeros((len(gray_images), gray_images[0].shape[0], gray_images[0].shape[1], 3))
for i, im in enumerate(gray_images):
    for j in range(3):
        images[i, :, :, j] = im
pretrained = Xception(weights='imagenet', include_top=False, pooling='avg')
pretrained.summary()
x = preprocess_input(images)
features = pretrained.predict(x)
features
code
73089289/cell_5
[ "text_plain_output_1.png" ]
from keras.applications.xception import Xception, preprocess_input, decode_predictions
pretrained = Xception(weights='imagenet', include_top=False, pooling='avg')
pretrained.summary()
code
18124360/cell_13
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from efficientnet import EfficientNetB5
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.models import Model, Sequential
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sys
test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
test_df['id_code'] = test_df['id_code'].apply(lambda x: x + '.png')
diag_text = ['Normal', 'Mild', 'Moderate', 'Severe', 'Proliferative']
num_classes = 5

def display_raw_images(df, columns=4, rows=3):
    fig = plt.figure(figsize=(5 * columns, 4 * rows))
    for i in range(columns * rows):
        image_name = df.loc[i, 'id_code']
        img = cv2.imread(f'../input/aptos2019-blindness-detection/test_images/{image_name}')[..., [2, 1, 0]]
        fig.add_subplot(rows, columns, i + 1)
        plt.imshow(img)
    plt.tight_layout()

display_raw_images(test_df)
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import EfficientNetB5
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, Callback
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.models import Model, Sequential
from keras.optimizers import Adam

def create_resnet50_model(input_shape, n_out):
    base_model = ResNet50(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_inception_v3_model(input_shape, n_out):
    base_model = InceptionV3(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_xception_model(input_shape, n_out):
    base_model = Xception(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_densenet121_model(input_shape, n_out):
    base_model = DenseNet121(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/densenet-keras/DenseNet-BC-121-32-no-top.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_effnetB5_model(input_shape, n_out):
    # Note: sys.path.append() returns None, so this builds the backbone with
    # weights=None (random init); pretrained weights are loaded further below.
    base_model = EfficientNetB5(weights=sys.path.append(os.path.abspath('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')), include_top=False, input_shape=input_shape)
    model = Sequential()
    model.add(base_model)
    model.add(Dropout(0.25))
    model.add(Dense(1024))
    model.add(LeakyReLU())
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

IMAGE_HEIGHT = 340
IMAGE_WIDTH = 340
model = create_effnetB5_model(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), n_out=num_classes)
model.summary()
PRETRAINED_MODEL = '../input/efficientnetb5-blindness-detector/blindness_detector_bestqwk.h5'
if os.path.exists(PRETRAINED_MODEL):
    model.load_weights(PRETRAINED_MODEL)
from tqdm import tqdm_notebook as tqdm
submit = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
predicted = []
for i, name in tqdm(enumerate(submit['id_code'])):
    path = os.path.join('../input/aptos2019-blindness-detection/test_images/', name + '.png')
    image = cv2.imread(path)
    image = cv2.resize(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
    X = np.array(image[np.newaxis] / 255)
    raw_prediction = model.predict(X) > 0.5
    prediction = raw_prediction.astype(int).sum(axis=1) - 1
    predicted.append(prediction[0])
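# NOTE: not from the original notebook -- the `> 0.5 ... sum(axis=1) - 1` decode
# above assumes ordinal, cumulatively encoded targets: sigmoid scores such as
# [0.9, 0.8, 0.7, 0.2, 0.1] threshold to [1, 1, 1, 0, 0], which sums to 3, so
# the predicted grade is 3 - 1 = 2 ('Moderate').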
code
18124360/cell_2
[ "text_plain_output_1.png" ]
import tensorflow
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cv2
import os
import sys
print(os.listdir('../input'))
code
18124360/cell_11
[ "image_output_1.png" ]
from efficientnet import EfficientNetB5
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.models import Model, Sequential
import os
import pandas as pd
import sys
test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
test_df['id_code'] = test_df['id_code'].apply(lambda x: x + '.png')
diag_text = ['Normal', 'Mild', 'Moderate', 'Severe', 'Proliferative']
num_classes = 5
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import EfficientNetB5
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, Callback
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.models import Model, Sequential
from keras.optimizers import Adam

def create_resnet50_model(input_shape, n_out):
    base_model = ResNet50(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_inception_v3_model(input_shape, n_out):
    base_model = InceptionV3(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_xception_model(input_shape, n_out):
    base_model = Xception(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_densenet121_model(input_shape, n_out):
    base_model = DenseNet121(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/densenet-keras/DenseNet-BC-121-32-no-top.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_effnetB5_model(input_shape, n_out):
    base_model = EfficientNetB5(weights=sys.path.append(os.path.abspath('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')), include_top=False, input_shape=input_shape)
    model = Sequential()
    model.add(base_model)
    model.add(Dropout(0.25))
    model.add(Dense(1024))
    model.add(LeakyReLU())
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

IMAGE_HEIGHT = 340
IMAGE_WIDTH = 340
model = create_effnetB5_model(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), n_out=num_classes)
model.summary()
PRETRAINED_MODEL = '../input/efficientnetb5-blindness-detector/blindness_detector_bestqwk.h5'
if os.path.exists(PRETRAINED_MODEL):
    print('Restoring model from ' + PRETRAINED_MODEL)
    model.load_weights(PRETRAINED_MODEL)
else:
    print('No pretrained model found. Using fresh model.')
code
18124360/cell_7
[ "text_html_output_1.png" ]
import os
import sys
print(os.listdir('../input/efficientnet/efficientnet-master/efficientnet-master/efficientnet'))
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import EfficientNetB5
code
18124360/cell_15
[ "text_plain_output_1.png" ]
from efficientnet import EfficientNetB5
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.models import Model, Sequential
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sys
test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
test_df['id_code'] = test_df['id_code'].apply(lambda x: x + '.png')
diag_text = ['Normal', 'Mild', 'Moderate', 'Severe', 'Proliferative']
num_classes = 5

def display_raw_images(df, columns=4, rows=3):
    fig = plt.figure(figsize=(5 * columns, 4 * rows))
    for i in range(columns * rows):
        image_name = df.loc[i, 'id_code']
        img = cv2.imread(f'../input/aptos2019-blindness-detection/test_images/{image_name}')[..., [2, 1, 0]]
        fig.add_subplot(rows, columns, i + 1)
        plt.imshow(img)
    plt.tight_layout()

display_raw_images(test_df)
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import EfficientNetB5
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, Callback
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.models import Model, Sequential
from keras.optimizers import Adam

def create_resnet50_model(input_shape, n_out):
    base_model = ResNet50(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_inception_v3_model(input_shape, n_out):
    base_model = InceptionV3(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_xception_model(input_shape, n_out):
    base_model = Xception(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_densenet121_model(input_shape, n_out):
    base_model = DenseNet121(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/densenet-keras/DenseNet-BC-121-32-no-top.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_effnetB5_model(input_shape, n_out):
    base_model = EfficientNetB5(weights=sys.path.append(os.path.abspath('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')), include_top=False, input_shape=input_shape)
    model = Sequential()
    model.add(base_model)
    model.add(Dropout(0.25))
    model.add(Dense(1024))
    model.add(LeakyReLU())
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

IMAGE_HEIGHT = 340
IMAGE_WIDTH = 340
model = create_effnetB5_model(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), n_out=num_classes)
model.summary()
PRETRAINED_MODEL = '../input/efficientnetb5-blindness-detector/blindness_detector_bestqwk.h5'
if os.path.exists(PRETRAINED_MODEL):
    model.load_weights(PRETRAINED_MODEL)
from tqdm import tqdm_notebook as tqdm
submit = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
predicted = []
for i, name in tqdm(enumerate(submit['id_code'])):
    path = os.path.join('../input/aptos2019-blindness-detection/test_images/', name + '.png')
    image = cv2.imread(path)
    image = cv2.resize(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
    X = np.array(image[np.newaxis] / 255)
    raw_prediction = model.predict(X) > 0.5
    prediction = raw_prediction.astype(int).sum(axis=1) - 1
    predicted.append(prediction[0])
submit['diagnosis'] = predicted
submit.to_csv('submission.csv', index=False)
submit.head(10)
code
18124360/cell_10
[ "text_plain_output_1.png" ]
from efficientnet import EfficientNetB5
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.models import Model, Sequential
import os
import pandas as pd
import sys
test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
test_df['id_code'] = test_df['id_code'].apply(lambda x: x + '.png')
diag_text = ['Normal', 'Mild', 'Moderate', 'Severe', 'Proliferative']
num_classes = 5
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import EfficientNetB5
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, Callback
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, LeakyReLU
from keras.applications import DenseNet121, ResNet50, InceptionV3, Xception
from keras.models import Model, Sequential
from keras.optimizers import Adam

def create_resnet50_model(input_shape, n_out):
    base_model = ResNet50(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_inception_v3_model(input_shape, n_out):
    base_model = InceptionV3(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_xception_model(input_shape, n_out):
    base_model = Xception(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/keras-pretrained-models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_densenet121_model(input_shape, n_out):
    base_model = DenseNet121(weights=None, include_top=False, input_shape=input_shape)
    base_model.load_weights('../input/densenet-keras/DenseNet-BC-121-32-no-top.h5')
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

def create_effnetB5_model(input_shape, n_out):
    base_model = EfficientNetB5(weights=sys.path.append(os.path.abspath('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')), include_top=False, input_shape=input_shape)
    model = Sequential()
    model.add(base_model)
    model.add(Dropout(0.25))
    model.add(Dense(1024))
    model.add(LeakyReLU())
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(n_out, activation='sigmoid'))
    return model

IMAGE_HEIGHT = 340
IMAGE_WIDTH = 340
model = create_effnetB5_model(input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), n_out=num_classes)
model.summary()
code
105174125/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
import layoutparser as lp
import matplotlib.pyplot as plt
from PIL import Image
from pdf2image import convert_from_path
from paddleocr import PaddleOCR, draw_ocr
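# NOTE: a minimal usage sketch, not from the original notebook; the file name
# and arguments below are illustrative assumptions:
# pages = convert_from_path('document.pdf')           # PDF pages -> PIL images
# ocr = PaddleOCR(use_angle_cls=True, lang='en')      # text detector + recogniser
# result = ocr.ocr(np.array(pages[0]), cls=True)      # boxes, text, confidences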
code
104122172/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
df.fillna
sns.scatterplot(x='Rank', y='Country', data=df, hue='Continent')
plt.legend(bbox_to_anchor=(1, 1), loc=2)
plt.show()
code
104122172/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
code
104122172/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.info()
code
104122172/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.describe()
code
104122172/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
df.fillna
sns.histplot(x='World Population Percentage', data=df)
plt.show()
code
104122172/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
104122172/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
code
104122172/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
code
104122172/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
df.fillna
sns.pairplot(df, hue='Country', height=2)
code
104122172/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
df.fillna
# df is overwritten with random data before plotting the heatmap
df = np.random.rand(10, 12)
ax = sns.heatmap(df)
code
104122172/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.head()
code
104122172/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
df.fillna
sns.pairplot(df, hue='Continent', height=2)
code
104122172/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
df.fillna
code
104122172/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
df.isnull().sum()
df.notnull()
df.dropna()
df.fillna
sns.boxplot(x='Rank', y='Country', data=df)
plt.show()
code
104122172/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/world-population-dataset/world_population.csv')
df.shape
code
328803/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import math
import pandas as pd
names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 500]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']

def ambiguity_measure(grouped_frame):
    return 2 * (1 - grouped_frame.max() / grouped_frame.sum())

ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name']))
names_vs_years = ambiguity_data.unstack(level='Year')
yearly_ambiguity = ambiguity_data.groupby(level='Year')
print('Average ambiguity: %s' % str(ambiguity_data.mean()))
print('Average by year: %s' % str(yearly_ambiguity.mean()))
print('Most ambiguous by year: %s' % str(yearly_ambiguity.idxmax().apply(lambda x: x[1])))
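# NOTE: not from the original notebook -- ambiguity_measure scores how evenly a
# name's count splits across the grouped rows for one (Year, Name) pair, e.g.
# 600 girls and 400 boys named Casey:
#   2 * (1 - max(600, 400) / (600 + 400)) = 2 * (1 - 0.6) = 0.8
# so 0 means a single-sex name and 1 means a perfect 50/50 split.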
code
328803/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import math
import pandas as pd
names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 500]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']

def ambiguity_measure(grouped_frame):
    return 2 * (1 - grouped_frame.max() / grouped_frame.sum())

ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name']))
names_vs_years = ambiguity_data.unstack(level='Year')
yearly_ambiguity = ambiguity_data.groupby(level='Year')
potentially_ambiguous_names = names_vs_years[(names_vs_years > 0).any(axis=1)]
potentially_ambiguous_names.transpose().plot(figsize=(20, 10))
code
106203709/cell_13
[ "text_plain_output_1.png" ]
import nltk
import pandas as pd
df = pd.read_csv('/kaggle/input/online-retails-sale-dataset/Online Retail.csv')
import nltk
sc = df.copy()[['StockCode', 'Description']]
sc.dropna(inplace=True)
sc.Description = sc.Description.str.lower()
items = sc.groupby('StockCode').Description.unique()
items = list(zip(items.index.tolist(), items.values.tolist()))
clean_items = []
for x in items:
    temp = x[1]
    if len(temp) > 1:
        clean_items.append((x[0], x[1][0]))
    else:
        clean_items.append(x)
descs = [x[1].tolist() for x in clean_items]
tks = [nltk.word_tokenize(x[0]) for x in descs]
descs_p = []
for x in tks:
    temp = nltk.pos_tag(x)
    new_desc = []
    for y in temp:
        word, tag = y
        if tag.startswith('N'):
            new_desc.append(word)
        else:
            pass
    descs_p.append(new_desc)
print(descs_p[:25])
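# NOTE: not from the original notebook -- nltk.pos_tag assigns Penn Treebank
# tags, so `tag.startswith('N')` keeps the noun variants (NN, NNS, NNP, NNPS).
# Indicatively (exact tags depend on the tagger model):
# nltk.pos_tag(['red', 'hanging', 'heart', 'holder'])
#   -> [('red', 'JJ'), ('hanging', 'VBG'), ('heart', 'NN'), ('holder', 'NN')]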
code
106203709/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/online-retails-sale-dataset/Online Retail.csv')
for x in df.columns:
    print(x)
print('\n')
print(df.size)
code
106203709/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/online-retails-sale-dataset/Online Retail.csv')
print('Number of Unique Items {}'.format(df.StockCode.nunique()))
print('\n')
print(df.Description)
code
106203709/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/online-retails-sale-dataset/Online Retail.csv')
print('Number of duplicates: {}'.format(df.duplicated().sum()))
print('\n')
print(df.isnull().sum())
code
106203709/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/online-retails-sale-dataset/Online Retail.csv')
print(df.CustomerID.nunique())
print('\n')
print(df.Country.value_counts())
code
104121398/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
sns.scatterplot(x=df['age'], y=df['sex'], hue=df['target'])
code
104121398/cell_25
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=41)
classifier.fit(X_train, y_train)
code
104121398/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.info()
code
104121398/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
df[df['age'] >= 50]['target'].value_counts() * 100 / df.shape[0]
df[df['age'] < 50]['target'].value_counts() * 100 / df.shape[0]
code
104121398/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.describe()
code
104121398/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
df[df['age'] >= 50]['target'].value_counts() * 100 / df.shape[0]
df[df['age'] < 50]['target'].value_counts() * 100 / df.shape[0]
X = df.iloc[:, :12]
y = df.iloc[:, 12]
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(n_estimators=100)
rf_predictions = cross_val_predict(rf_model, X, y, cv=5)
print(confusion_matrix(y, rf_predictions))
rf_scores = cross_val_score(rf_model, X, y, scoring='recall', cv=5)
print('recall:', rf_scores.mean())
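# NOTE: not from the original notebook -- recall is a reasonable choice here
# because a missed diagnosis (false negative) is costlier than a false alarm.
# From a confusion matrix laid out as [[TN, FP], [FN, TP]]:
#   recall = TP / (TP + FN), e.g. TP=120, FN=30 gives 120 / 150 = 0.8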
code
104121398/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
classifier = LogisticRegression(random_state=41)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
code
104121398/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
df[df['sex'] == 1]['target'].value_counts()
code
104121398/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
df[df['age'] >= 50]['target'].value_counts() * 100 / df.shape[0]
code
104121398/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df['sex'].value_counts()
code
104121398/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
sns.countplot(x='sex', data=df)
code
104121398/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
females = df[df['sex'] == 1]['age'].value_counts()
females
plt.figure(figsize=(15, 15))
sns.barplot(x=females.index, y=females.values)
code
104121398/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
males = df[df['sex'] == 0]['age'].value_counts()
males
code
104121398/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.head()
code
104121398/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
females = df[df['sex'] == 1]['age'].value_counts()
females
males = df[df['sex'] == 0]['age'].value_counts()
males
plt.figure(figsize=(15, 15))
sns.barplot(x=males.index, y=males.values)
code
104121398/cell_31
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
classifier = LogisticRegression(random_state=41)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(criterion='entropy', max_depth=5)
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn import metrics
metrics.accuracy_score(y_test, y_pred)
code
104121398/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
females = df[df['sex'] == 1]['age'].value_counts()
females
code
104121398/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True)
code
104121398/cell_27
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
classifier = LogisticRegression(random_state=41)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
from sklearn.metrics import classification_report
cr = classification_report(y_test, y_pred)
print(cr)
code
104121398/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
df.drop(['exang'], axis=1, inplace=True)
df[df['sex'] == 0]['target'].value_counts()
code
104121398/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/heartdisease-dataset/heart.csv')
print(df.isnull().sum())
print(df.isnull().values.any())
print(df.isnull().values.sum())
code
106204307/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sentence_transformers import SentenceTransformer, util
from typing import List, Union
import PIL
import clip
import os
import requests
import torch
from typing import List, Union
import torch
import clip
import PIL
from PIL import Image
import requests
import numpy as np
import os
from sentence_transformers import SentenceTransformer, util

class ZeroShotImageClassification:

    def __init__(self, *args, **kwargs):
        """
        Load CLIP models based on either language needs or vision backbone needs.
        With english labelling users have the liberty to choose different vision backbones.
        Multi-lingual labelling is only supported with ViT as vision backbone.

        Args:
            Model (`str`, *optional*, defaults to `ViT-B/32`):
                Any one of the CNN or Transformer based pretrained models can be used as Vision backbone.
                `RN50`, `RN101`, `RN50x4`, `RN50x16`, `RN50x64`, `ViT-B/32`, `ViT-B/16`, `ViT-L/14`
            Lang (`str`, *optional*, defaults to `en`):
                Any one of the language codes below:
                ar, bg, ca, cs, da, de, el, es, et, fa, fi, fr, fr-ca, gl, gu, he, hi, hr, hu, hy,
                id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, pt, pt-br, ro,
                ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh-cn, zh-tw.
        """
        if 'lang' in kwargs:
            self.lang = kwargs['lang']
        else:
            self.lang = 'en'
        lang_codes = self.available_languages()
        if self.lang not in lang_codes:
            raise Exception('Language code {} not valid, supported codes are {} '.format(self.lang, lang_codes))
            return
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        if self.lang == 'en':
            model_tag = 'ViT-B/32'
            if 'model' in kwargs:
                model_tag = kwargs['model']
            print('Loading OpenAI CLIP model {} ...'.format(model_tag))
            self.model, self.preprocess = clip.load(model_tag, device=device)
            print('Label language {} ...'.format(self.lang))
        else:
            model_tag = 'clip-ViT-B-32'
            print('Loading sentence transformer model {} ...'.format(model_tag))
            self.model = SentenceTransformer('clip-ViT-B-32', device=device)
            self.text_model = SentenceTransformer('sentence-transformers/clip-ViT-B-32-multilingual-v1', device=device)
            print('Label language {} ...'.format(self.lang))

    def available_models(self):
        """Returns the names of available CLIP models"""
        return clip.available_models()

    def available_languages(self):
        """Returns the codes of available languages"""
        codes = 'ar, bg, ca, cs, da, de, en, el, es, et, fa, fi, fr, fr-ca, gl, gu, he, hi, hr, hu, \n hy, id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, pt, pt-br, \n ro, ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh-cn, zh-tw'
        return set([code.strip() for code in codes.split(',')])

    def _load_image(self, image: str) -> 'PIL.Image.Image':
        """
        Loads `image` to a PIL Image.

        Args:
            image (`str`):
                The image to convert to the PIL Image format.
        Returns:
            `PIL.Image.Image`: A PIL Image.
        """
        if isinstance(image, str):
            if image.startswith('http://') or image.startswith('https://'):
                image = PIL.Image.open(requests.get(image, stream=True).raw)
            elif os.path.isfile(image):
                image = PIL.Image.open(image)
            else:
                raise ValueError(f'Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path')
        elif isinstance(image, PIL.Image.Image):
            image = image
        else:
            raise ValueError('Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image.')
        image = PIL.ImageOps.exif_transpose(image)
        image = image.convert('RGB')
        return image

    def __call__(self, image: str, candidate_labels: Union[str, List[str]], *args, **kwargs):
        """
        Classify the image using the candidate labels given.

        Args:
            image (`str`):
                Fully Qualified path of a local image or URL of image
            candidate_labels (`str` or `List[str]`):
                The set of possible class labels to classify each sequence into. Can be a single label,
                a string of comma-separated labels, or a list of labels.
            hypothesis_template (`str`, *optional*, defaults to `"A photo of {}."`, if lang is default / `en`):
                The template used to turn each label into a string. This template must include a {} or
                similar syntax for the candidate label to be inserted into the template.
            top_k (`int`, *optional*, defaults to 5):
                The number of top labels that will be returned by the pipeline. If the provided number is
                higher than the number of labels available in the model configuration, it will default to
                the number of labels.
        Return:
            A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
            - **image** (`str`) -- The image for which this is the output.
            - **labels** (`List[str]`) -- The labels sorted by order of likelihood.
            - **scores** (`List[float]`) -- The probabilities for each of the labels.
        """
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        if self.lang == 'en':
            if 'hypothesis_template' in kwargs:
                hypothesis_template = kwargs['hypothesis_template']
            else:
                hypothesis_template = 'A photo of {}'
            if isinstance(candidate_labels, str):
                labels = [hypothesis_template.format(candidate_label) for candidate_label in candidate_labels.split(',')]
            else:
                labels = [hypothesis_template.format(candidate_label) for candidate_label in candidate_labels]
        else:
            if 'hypothesis_template' in kwargs:
                hypothesis_template = kwargs['hypothesis_template']
            else:
                hypothesis_template = '{}'
            if isinstance(candidate_labels, str):
                labels = [hypothesis_template.format(candidate_label) for candidate_label in candidate_labels.split(',')]
            else:
                labels = [hypothesis_template.format(candidate_label) for candidate_label in candidate_labels]
        if 'top_k' in kwargs:
            top_k = kwargs['top_k']
        else:
            top_k = len(labels)
        if str(type(self.model)) == "<class 'clip.model.CLIP'>":
            img = self.preprocess(self._load_image(image)).unsqueeze(0).to(device)
            text = clip.tokenize(labels).to(device)
            image_features = self.model.encode_image(img)
            text_features = self.model.encode_text(text)
        else:
            image_features = torch.tensor(self.model.encode(self._load_image(image)))
            text_features = torch.tensor(self.text_model.encode(labels))
        sim_scores = util.cos_sim(text_features, image_features)
        out = []
        for sim_score in sim_scores:
            out.append(sim_score.item() * 100)
        probs = torch.tensor([out])
        probs = probs.softmax(dim=-1).cpu().numpy()
        scores = list(probs.flatten())
        sorted_sl = sorted(zip(scores, candidate_labels), key=lambda t: t[0], reverse=True)
        scores, candidate_labels = zip(*sorted_sl)
        preds = {}
        preds['image'] = image
        preds['scores'] = scores
        preds['labels'] = candidate_labels
        return preds
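# NOTE: a minimal, hypothetical usage sketch for the class above, not from the
# original notebook (file name and labels are illustrative):
# zsic = ZeroShotImageClassification()                 # english labels, ViT-B/32
# preds = zsic(image='dog.jpg', candidate_labels=['dog', 'cat', 'horse'])
# print(preds['labels'][0], preds['scores'][0])        # top label and its score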
code
129024099/cell_6
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
dfx = df.copy()
cat = []
num = []
for n, d in dfx.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
dfx = df.copy()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
for i in cat:
    dfx[i] = le.fit_transform(dfx[i])
ss = StandardScaler()
for i in num:
    dfx[i] = ss.fit_transform(dfx[[i]])
import matplotlib.pyplot as plt
import seaborn as sns
corr = dfx.corr()
matrix = np.triu(corr)
X = df.drop(['price'], axis=1)
y = df[['price']]
cat = []
num = []
for n, d in X.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
le = LabelEncoder()
for i in cat:
    X[i] = le.fit_transform(X[i])
ss = StandardScaler()
for i in num:
    X[i] = ss.fit_transform(X[[i]])
for i in cat:
    X[i] = ss.fit_transform(X[[i]])
X.head()
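# NOTE: not from the original notebook -- label-encoding categoricals and then
# standard-scaling the integer codes imposes an arbitrary ordering on the
# categories; a common alternative sketch would be one-hot encoding:
# X_alt = pd.get_dummies(df.drop(['price'], axis=1), columns=cat)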
code
129024099/cell_2
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
dfx = df.copy()
cat = []
num = []
for n, d in dfx.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
dfx = df.copy()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
for i in cat:
    dfx[i] = le.fit_transform(dfx[i])
ss = StandardScaler()
for i in num:
    dfx[i] = ss.fit_transform(dfx[[i]])
import matplotlib.pyplot as plt
import seaborn as sns
corr = dfx.corr()
matrix = np.triu(corr)
plt.figure(figsize=(17, 7))
sns.heatmap(corr, annot=True, mask=matrix, fmt='.2f', cmap='inferno')
code
129024099/cell_1
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
df.head()
code
129024099/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pprint
import seaborn as sns
import pandas as pd
import numpy as np
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
dfx = df.copy()
cat = []
num = []
for n, d in dfx.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
dfx = df.copy()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
for i in cat:
    dfx[i] = le.fit_transform(dfx[i])
ss = StandardScaler()
for i in num:
    dfx[i] = ss.fit_transform(dfx[[i]])
import matplotlib.pyplot as plt
import seaborn as sns
corr = dfx.corr()
matrix = np.triu(corr)
X = df.drop(['price'], axis=1)
y = df[['price']]
cat = []
num = []
for n, d in X.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
le = LabelEncoder()
for i in cat:
    X[i] = le.fit_transform(X[i])
ss = StandardScaler()
for i in num:
    X[i] = ss.fit_transform(X[[i]])
for i in cat:
    X[i] = ss.fit_transform(X[[i]])
import pprint
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error

def model_eval(m, X, y):
    a, d, s, f = train_test_split(X, y)
    m.fit(a, s)
    g = m.predict(d)

def model_cv(m, X, y):
    scoring = ['neg_mean_absolute_error', 'neg_root_mean_squared_error', 'r2']
    scores = cross_validate(m, X, y, scoring=scoring, cv=4, return_train_score=False)

X1 = X.copy()
X1['enginesize'].loc[np.random.randint(153, size=30)] = np.nan
X1.isnull().sum()
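# NOTE: not from the original notebook -- model_eval and model_cv compute
# predictions and scores but never report them (any prints appear to have been
# lost in this dump); inside the functions one could, for example, add:
#   print('MAE:', mean_absolute_error(f, g)); print('R2:', r2_score(f, g))
#   pprint.pprint({k: v.mean() for k, v in scores.items()})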
code
129024099/cell_3
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
dfx = df.copy()
cat = []
num = []
for n, d in dfx.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
dfx = df.copy()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
for i in cat:
    dfx[i] = le.fit_transform(dfx[i])
ss = StandardScaler()
for i in num:
    dfx[i] = ss.fit_transform(dfx[[i]])
import matplotlib.pyplot as plt
import seaborn as sns
corr = dfx.corr()
matrix = np.triu(corr)
abs(corr['price']).sort_values(ascending=False)
code
129024099/cell_10
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pprint
import seaborn as sns
import pandas as pd
import numpy as np
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
dfx = df.copy()
cat = []
num = []
for n, d in dfx.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
dfx = df.copy()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
for i in cat:
    dfx[i] = le.fit_transform(dfx[i])
ss = StandardScaler()
for i in num:
    dfx[i] = ss.fit_transform(dfx[[i]])
import matplotlib.pyplot as plt
import seaborn as sns
corr = dfx.corr()
matrix = np.triu(corr)
X = df.drop(['price'], axis=1)
y = df[['price']]
cat = []
num = []
for n, d in X.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
le = LabelEncoder()
for i in cat:
    X[i] = le.fit_transform(X[i])
ss = StandardScaler()
for i in num:
    X[i] = ss.fit_transform(X[[i]])
for i in cat:
    X[i] = ss.fit_transform(X[[i]])
import pprint
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error

def model_eval(m, X, y):
    a, d, s, f = train_test_split(X, y)
    m.fit(a, s)
    g = m.predict(d)

def model_cv(m, X, y):
    scoring = ['neg_mean_absolute_error', 'neg_root_mean_squared_error', 'r2']
    scores = cross_validate(m, X, y, scoring=scoring, cv=4, return_train_score=False)

X1 = X.copy()
X1['enginesize'].loc[np.random.randint(153, size=30)] = np.nan
X1.isnull().sum()
es_mean = X1['enginesize'].mean()
X11 = X1.copy()
X11 = X11.fillna(value=0)
X12 = X1.copy()
X12 = X12.fillna(value=es_mean)
des = ['without null values :', 'null values replaced by 0', 'null values replaced by mean']
# NOTE: `lr` is used below but never defined in this dump; assumed (not from the
# source) to be the LinearRegression instance from an earlier cell:
lr = LinearRegression()
j = 0
for i in [X, X11, X12]:
    print(des[j])
    j += 1
    model_eval(lr, i, y)
    print('cv\t:')
    model_cv(lr, i, y)
    print()
code
129024099/cell_5
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
df = pd.read_csv('/kaggle/input/car-price-prediction/CarPrice_Assignment.csv')
dfx = df.copy()
cat = []
num = []
for n, d in dfx.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
dfx = df.copy()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
for i in cat:
    dfx[i] = le.fit_transform(dfx[i])
ss = StandardScaler()
for i in num:
    dfx[i] = ss.fit_transform(dfx[[i]])
import matplotlib.pyplot as plt
import seaborn as sns
corr = dfx.corr()
matrix = np.triu(corr)
X = df.drop(['price'], axis=1)
y = df[['price']]
cat = []
num = []
for n, d in X.items():
    if d.dtype == 'object':
        cat.append(n)
    else:
        num.append(n)
print(f'categorical columns : {cat}')
print(f'numerical columns : {num}')
code
34126027/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import to_categorical, plot_model
DROPOUT_RATE = 0.3
CONV_ACTIVATION = 'relu'
img_in = Input(shape=(48, 48, 1))
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(img_in)
X = BatchNormalization()(X)
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(1024, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(512, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
out = Dense(7, activation='softmax')(X)
model = Model(inputs=img_in, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['categorical_accuracy'])
model.summary()
plot_model(model, show_shapes=True, show_layer_names=False)
code
34126027/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.models import Model
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.utils import to_categorical, plot_model
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
code
34126027/cell_19
[ "image_output_2.png", "image_output_1.png" ]
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical, plot_model
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt  # NOTE: added -- plt is used below but matplotlib was missing from this cell's dump
data_fer = pd.read_csv('../input/fer2013/fer2013.csv')
idx_to_emotion_fer = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
X_fer_train, y_fer_train = np.rollaxis(data_fer[data_fer.Usage == 'Training'][['pixels', 'emotion']].values, -1)
X_fer_train = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_train]).reshape((-1, 48, 48))
y_fer_train = y_fer_train.astype('int8')
X_fer_test_public, y_fer_test_public = np.rollaxis(data_fer[data_fer.Usage == 'PublicTest'][['pixels', 'emotion']].values, -1)
X_fer_test_public = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_public]).reshape((-1, 48, 48))
y_fer_test_public = y_fer_test_public.astype('int8')
X_fer_test_private, y_fer_test_private = np.rollaxis(data_fer[data_fer.Usage == 'PrivateTest'][['pixels', 'emotion']].values, -1)
X_fer_test_private = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_private]).reshape((-1, 48, 48))
y_fer_test_private = y_fer_test_private.astype('int8')
BATCH_SIZE = 128
X_train = X_fer_train.reshape((-1, 48, 48, 1))
X_val = X_fer_test_public.reshape((-1, 48, 48, 1))
X_test = X_fer_test_private.reshape((-1, 48, 48, 1))
y_train = to_categorical(y_fer_train, 7)
y_val = to_categorical(y_fer_test_public, 7)
y_test = to_categorical(y_fer_test_private, 7)
train_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True)
val_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False)
train_datagen.fit(X_train)
val_datagen.fit(X_train)
train_flow = train_datagen.flow(X_train, y_train, batch_size=BATCH_SIZE)
val_flow = val_datagen.flow(X_val, y_val, batch_size=BATCH_SIZE, shuffle=False)
test_flow = val_datagen.flow(X_test, y_test, batch_size=1, shuffle=False)
DROPOUT_RATE = 0.3
CONV_ACTIVATION = 'relu'
img_in = Input(shape=(48, 48, 1))
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(img_in)
X = BatchNormalization()(X)
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(1024, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(512, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
out = Dense(7, activation='softmax')(X)
model = Model(inputs=img_in, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['categorical_accuracy'])
model.summary()
early_stopping = EarlyStopping(monitor='val_categorical_accuracy', mode='max', verbose=1, patience=20)
checkpoint_loss = ModelCheckpoint('best_loss_weights.h5', verbose=1, monitor='val_loss', save_best_only=True, mode='min')
checkpoint_acc = ModelCheckpoint('best_accuracy_weights.h5', verbose=1, monitor='val_categorical_accuracy', save_best_only=True, mode='max')
lr_reduce = ReduceLROnPlateau(monitor='val_categorical_accuracy', mode='max', factor=0.5, patience=5, min_lr=1e-07, cooldown=1, verbose=1)
history = model.fit_generator(train_flow, steps_per_epoch=X_train.shape[0] // BATCH_SIZE, epochs=150, validation_data=val_flow, validation_steps=X_val.shape[0] // BATCH_SIZE, callbacks=[early_stopping, checkpoint_acc, checkpoint_loss, lr_reduce])
model.load_weights('best_loss_weights.h5')
y_pred = model.predict_generator(test_flow, steps=X_test.shape[0])
y_pred_cat = np.argmax(y_pred, axis=1)
y_true_cat = np.argmax(test_flow.y, axis=1)
report = classification_report(y_true_cat, y_pred_cat)
print(report)
conf = confusion_matrix(y_true_cat, y_pred_cat, normalize='true')
labels = idx_to_emotion_fer.values()
_, ax = plt.subplots(figsize=(8, 6))
ax = sns.heatmap(conf, annot=True, cmap='YlGnBu', xticklabels=labels, yticklabels=labels)
plt.show()
model.load_weights('best_accuracy_weights.h5')
y_pred = model.predict_generator(test_flow, steps=X_test.shape[0])
y_pred_cat = np.argmax(y_pred, axis=1)
y_true_cat = np.argmax(test_flow.y, axis=1)
report = classification_report(y_true_cat, y_pred_cat)
print(report)
conf = confusion_matrix(y_true_cat, y_pred_cat, normalize='true')
labels = idx_to_emotion_fer.values()
_, ax = plt.subplots(figsize=(8, 6))
ax = sns.heatmap(conf, annot=True, cmap='YlGnBu', xticklabels=labels, yticklabels=labels)
plt.show()
code
34126027/cell_15
[ "image_output_1.png" ]
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical, plot_model
import matplotlib.pyplot as plt # required by the training-curve plots below
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_fer = pd.read_csv('../input/fer2013/fer2013.csv')
# FER2013 stores each 48x48 face as a space-separated pixel string; parse each Usage split into arrays
X_fer_train, y_fer_train = np.rollaxis(data_fer[data_fer.Usage == 'Training'][['pixels', 'emotion']].values, -1)
X_fer_train = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_train]).reshape((-1, 48, 48))
y_fer_train = y_fer_train.astype('int8')
X_fer_test_public, y_fer_test_public = np.rollaxis(data_fer[data_fer.Usage == 'PublicTest'][['pixels', 'emotion']].values, -1)
X_fer_test_public = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_public]).reshape((-1, 48, 48))
y_fer_test_public = y_fer_test_public.astype('int8')
X_fer_test_private, y_fer_test_private = np.rollaxis(data_fer[data_fer.Usage == 'PrivateTest'][['pixels', 'emotion']].values, -1)
X_fer_test_private = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_private]).reshape((-1, 48, 48))
y_fer_test_private = y_fer_test_private.astype('int8')
BATCH_SIZE = 128
X_train = X_fer_train.reshape((-1, 48, 48, 1))
X_val = X_fer_test_public.reshape((-1, 48, 48, 1))
X_test = X_fer_test_private.reshape((-1, 48, 48, 1))
y_train = to_categorical(y_fer_train, 7)
y_val = to_categorical(y_fer_test_public, 7)
y_test = to_categorical(y_fer_test_private, 7)
# light geometric augmentation on the training stream only
train_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True)
val_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False)
train_datagen.fit(X_train)
val_datagen.fit(X_train)
train_flow = train_datagen.flow(X_train, y_train, batch_size=BATCH_SIZE)
val_flow = val_datagen.flow(X_val, y_val, batch_size=BATCH_SIZE, shuffle=False)
test_flow = val_datagen.flow(X_test, y_test, batch_size=1, shuffle=False)
DROPOUT_RATE = 0.3
CONV_ACTIVATION = 'relu'
# VGG-style Conv2D/BatchNormalization blocks, each closed by max-pooling and dropout
img_in = Input(shape=(48, 48, 1))
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(img_in)
X = BatchNormalization()(X)
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(1024, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(512, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
out = Dense(7, activation='softmax')(X)
model = Model(inputs=img_in, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['categorical_accuracy'])
model.summary()
early_stopping = EarlyStopping(monitor='val_categorical_accuracy', mode='max', verbose=1, patience=20)
checkpoint_loss = ModelCheckpoint('best_loss_weights.h5', verbose=1, monitor='val_loss', save_best_only=True, mode='min')
checkpoint_acc = ModelCheckpoint('best_accuracy_weights.h5', verbose=1, monitor='val_categorical_accuracy', save_best_only=True, mode='max')
lr_reduce = ReduceLROnPlateau(monitor='val_categorical_accuracy', mode='max', factor=0.5, patience=5, min_lr=1e-07, cooldown=1, verbose=1)
history = model.fit_generator(train_flow, steps_per_epoch=X_train.shape[0] // BATCH_SIZE, epochs=150, validation_data=val_flow, validation_steps=X_val.shape[0] // BATCH_SIZE, callbacks=[early_stopping, checkpoint_acc, checkpoint_loss, lr_reduce])
# training curves: accuracy, then loss
plt.plot(history.history['categorical_accuracy'])
plt.plot(history.history['val_categorical_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
code
34126027/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_fer = pd.read_csv('../input/fer2013/fer2013.csv')
data_fer.head()
code
34126027/cell_17
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical, plot_model
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score
import matplotlib.pyplot as plt # required by plt.subplots/plt.show below
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_fer = pd.read_csv('../input/fer2013/fer2013.csv')
idx_to_emotion_fer = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
# parse the space-separated pixel strings of each Usage split into arrays
X_fer_train, y_fer_train = np.rollaxis(data_fer[data_fer.Usage == 'Training'][['pixels', 'emotion']].values, -1)
X_fer_train = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_train]).reshape((-1, 48, 48))
y_fer_train = y_fer_train.astype('int8')
X_fer_test_public, y_fer_test_public = np.rollaxis(data_fer[data_fer.Usage == 'PublicTest'][['pixels', 'emotion']].values, -1)
X_fer_test_public = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_public]).reshape((-1, 48, 48))
y_fer_test_public = y_fer_test_public.astype('int8')
X_fer_test_private, y_fer_test_private = np.rollaxis(data_fer[data_fer.Usage == 'PrivateTest'][['pixels', 'emotion']].values, -1)
X_fer_test_private = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_private]).reshape((-1, 48, 48))
y_fer_test_private = y_fer_test_private.astype('int8')
BATCH_SIZE = 128
X_train = X_fer_train.reshape((-1, 48, 48, 1))
X_val = X_fer_test_public.reshape((-1, 48, 48, 1))
X_test = X_fer_test_private.reshape((-1, 48, 48, 1))
y_train = to_categorical(y_fer_train, 7)
y_val = to_categorical(y_fer_test_public, 7)
y_test = to_categorical(y_fer_test_private, 7)
train_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True)
val_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False)
train_datagen.fit(X_train)
val_datagen.fit(X_train)
train_flow = train_datagen.flow(X_train, y_train, batch_size=BATCH_SIZE)
val_flow = val_datagen.flow(X_val, y_val, batch_size=BATCH_SIZE, shuffle=False)
test_flow = val_datagen.flow(X_test, y_test, batch_size=1, shuffle=False)
DROPOUT_RATE = 0.3
CONV_ACTIVATION = 'relu'
# VGG-style Conv2D/BatchNormalization blocks, each closed by max-pooling and dropout
img_in = Input(shape=(48, 48, 1))
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(img_in)
X = BatchNormalization()(X)
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(1024, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(512, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
out = Dense(7, activation='softmax')(X)
model = Model(inputs=img_in, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['categorical_accuracy'])
model.summary()
early_stopping = EarlyStopping(monitor='val_categorical_accuracy', mode='max', verbose=1, patience=20)
checkpoint_loss = ModelCheckpoint('best_loss_weights.h5', verbose=1, monitor='val_loss', save_best_only=True, mode='min')
checkpoint_acc = ModelCheckpoint('best_accuracy_weights.h5', verbose=1, monitor='val_categorical_accuracy', save_best_only=True, mode='max')
lr_reduce = ReduceLROnPlateau(monitor='val_categorical_accuracy', mode='max', factor=0.5, patience=5, min_lr=1e-07, cooldown=1, verbose=1)
history = model.fit_generator(train_flow, steps_per_epoch=X_train.shape[0] // BATCH_SIZE, epochs=150, validation_data=val_flow, validation_steps=X_val.shape[0] // BATCH_SIZE, callbacks=[early_stopping, checkpoint_acc, checkpoint_loss, lr_reduce])
# evaluate the checkpoint with the lowest validation loss on the private test split
model.load_weights('best_loss_weights.h5')
y_pred = model.predict_generator(test_flow, steps=X_test.shape[0])
y_pred_cat = np.argmax(y_pred, axis=1)
y_true_cat = np.argmax(test_flow.y, axis=1)
report = classification_report(y_true_cat, y_pred_cat)
print(report)
conf = confusion_matrix(y_true_cat, y_pred_cat, normalize='true')
labels = idx_to_emotion_fer.values()
_, ax = plt.subplots(figsize=(8, 6))
ax = sns.heatmap(conf, annot=True, cmap='YlGnBu', xticklabels=labels, yticklabels=labels)
plt.show()
code
34126027/cell_14
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical, plot_model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_fer = pd.read_csv('../input/fer2013/fer2013.csv')
X_fer_train, y_fer_train = np.rollaxis(data_fer[data_fer.Usage == 'Training'][['pixels', 'emotion']].values, -1)
X_fer_train = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_train]).reshape((-1, 48, 48))
y_fer_train = y_fer_train.astype('int8')
X_fer_test_public, y_fer_test_public = np.rollaxis(data_fer[data_fer.Usage == 'PublicTest'][['pixels', 'emotion']].values, -1)
X_fer_test_public = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_public]).reshape((-1, 48, 48))
y_fer_test_public = y_fer_test_public.astype('int8')
X_fer_test_private, y_fer_test_private = np.rollaxis(data_fer[data_fer.Usage == 'PrivateTest'][['pixels', 'emotion']].values, -1)
X_fer_test_private = np.array([np.fromstring(x, dtype='uint8', sep=' ') for x in X_fer_test_private]).reshape((-1, 48, 48))
y_fer_test_private = y_fer_test_private.astype('int8')
BATCH_SIZE = 128
X_train = X_fer_train.reshape((-1, 48, 48, 1))
X_val = X_fer_test_public.reshape((-1, 48, 48, 1))
X_test = X_fer_test_private.reshape((-1, 48, 48, 1))
y_train = to_categorical(y_fer_train, 7)
y_val = to_categorical(y_fer_test_public, 7)
y_test = to_categorical(y_fer_test_private, 7)
train_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True)
val_datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False)
train_datagen.fit(X_train)
val_datagen.fit(X_train)
train_flow = train_datagen.flow(X_train, y_train, batch_size=BATCH_SIZE)
val_flow = val_datagen.flow(X_val, y_val, batch_size=BATCH_SIZE, shuffle=False)
test_flow = val_datagen.flow(X_test, y_test, batch_size=1, shuffle=False)
DROPOUT_RATE = 0.3
CONV_ACTIVATION = 'relu'
img_in = Input(shape=(48, 48, 1))
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(img_in)
X = BatchNormalization()(X)
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(1024, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(512, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
out = Dense(7, activation='softmax')(X)
model = Model(inputs=img_in, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['categorical_accuracy'])
model.summary()
early_stopping = EarlyStopping(monitor='val_categorical_accuracy', mode='max', verbose=1, patience=20)
checkpoint_loss = ModelCheckpoint('best_loss_weights.h5', verbose=1, monitor='val_loss', save_best_only=True, mode='min')
checkpoint_acc = ModelCheckpoint('best_accuracy_weights.h5', verbose=1, monitor='val_categorical_accuracy', save_best_only=True, mode='max')
lr_reduce = ReduceLROnPlateau(monitor='val_categorical_accuracy', mode='max', factor=0.5, patience=5, min_lr=1e-07, cooldown=1, verbose=1)
history = model.fit_generator(train_flow, steps_per_epoch=X_train.shape[0] // BATCH_SIZE, epochs=150, validation_data=val_flow, validation_steps=X_val.shape[0] // BATCH_SIZE, callbacks=[early_stopping, checkpoint_acc, checkpoint_loss, lr_reduce])
code
34126027/cell_12
[ "text_html_output_1.png" ]
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
DROPOUT_RATE = 0.3
CONV_ACTIVATION = 'relu'
img_in = Input(shape=(48, 48, 1))
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(img_in)
X = BatchNormalization()(X)
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(1024, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(512, activation='relu')(X)
X = Dropout(DROPOUT_RATE)(X)
out = Dense(7, activation='softmax')(X)
model = Model(inputs=img_in, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['categorical_accuracy'])
model.summary()
code
2025290/cell_42
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestcat = housetest.select_dtypes(include=[object])
housetestnum.isnull().sum()
housetestnum.isnull().sum()
housetestcat.isnull().sum()
le = LabelEncoder()
housetestnum['MSSubClass'] = le.fit_transform(housetestnum['MSSubClass'].values)
housetestnum['OverallQual'] = le.fit_transform(housetestnum['OverallQual'].values)
housetestnum['OverallCond'] = le.fit_transform(housetestnum['OverallCond'].values)
housetestnum['YearBuilt'] = le.fit_transform(housetestnum['YearBuilt'].values)
housetestnum['YearRemodAdd'] = le.fit_transform(housetestnum['YearRemodAdd'].values)
housetestnum['YrSold'] = le.fit_transform(housetestnum['YrSold'].values)
housetestnum['GarageYrBlt'] = le.fit_transform(housetestnum['GarageYrBlt'].values)
housetestcat1 = housetestcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housetestcat1.isnull().sum()
housetestcat1['MSZoning'] = le.fit_transform(housetestcat1['MSZoning'].astype(str))
code
2025290/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
lircross = cross_val_score(LiR, X, y, cv=10)
code
2025290/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
code
2025290/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
code
2025290/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
predictedprice
code
2025290/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
code
2025290/cell_34
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
houseprice = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/train.csv')
houseprice.isnull().sum()
housedfnum = houseprice.select_dtypes(include=[np.number])
housedfcat = houseprice.select_dtypes(include=[object])
le = LabelEncoder()
housedfnum['MSSubClass'] = le.fit_transform(housedfnum['MSSubClass'].values)
housedfnum['OverallQual'] = le.fit_transform(housedfnum['OverallQual'].values)
housedfnum['OverallCond'] = le.fit_transform(housedfnum['OverallCond'].values)
housedfnum['YearBuilt'] = le.fit_transform(housedfnum['YearBuilt'].values)
housedfnum['YearRemodAdd'] = le.fit_transform(housedfnum['YearRemodAdd'].values)
housedfnum['YrSold'] = le.fit_transform(housedfnum['YrSold'].values)
housedfnum['GarageYrBlt'] = le.fit_transform(housedfnum['GarageYrBlt'].values)
housedfcat1 = housedfcat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
housedfcat2 = housedfcat1.apply(le.fit_transform)
housefinal = pd.concat([housedfnum, housedfcat2], axis=1)
housefinal.shape
LiR = LinearRegression()
y = housefinal['SalePrice']
X = housefinal.drop(['Id', 'SalePrice'], axis=1)
LiR.fit(X, y)
LiR.score(X, y)
predictedprice = LiR.predict(X)
priceresidual = housefinal.SalePrice - predictedprice
np.sqrt(np.mean(priceresidual ** 2))
housetest = pd.read_csv('C:/Users/hp/Desktop/KAGGLE/Ames Houses DATA/test.csv')
housetest.isnull().sum()
housetestnum = housetest.select_dtypes(include=[np.number])
housetestnum.isnull().sum()
housetestnum.isnull().sum()
code