Dataset columns:
path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 class)
90153288/cell_20
[ "text_html_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv')

df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']])
df_mr.tempo = df_mr.tempo.apply(lambda row: str(row).replace('?', '0') if row == '?' else str(row))
df_mr.tempo = df_mr.tempo.apply(pd.to_numeric, errors='coerce')
df_mr = df_mr.dropna()

x = df_mr[['instrumentalness', 'tempo']]
y = df_mr['popularity']

regr = linear_model.LinearRegression()
regr.fit(x, y)
code
90153288/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90153288/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv') for col in df.columns: print(f'{col}:', df.dtypes[col])
code
90153288/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv') import seaborn as sns import matplotlib.pyplot as plt df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']]) df_mr.tempo = df_mr.tempo.apply(lambda row: str(row).replace('?', '0') if row == '?' else str(row)) df_mr.tempo = df_mr.tempo.apply(pd.to_numeric, errors='coerce') df_mr = df_mr.dropna() x = df_mr[['instrumentalness', 'tempo']] y = df_mr['popularity'] from sklearn import linear_model regr = linear_model.LinearRegression() regr.fit(x, y) def fit(x, a): f = a[2] * x * x + a[1] * x + a[0] return f def grad(x, a): g = 2 * a[2] * x + a[1] return g x = df_mr[['tempo', 'instrumentalness']] y = df_mr['popularity'] f = fit(x, y) x = df_mr[['tempo']] y = df_mr['popularity'] def find_theta(X, y): m = X.shape[0] X = np.append(X, np.ones((m,1)), axis=1) theta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y)) return theta def predict(X): X = np.append(X, np.ones((X.shape[0],1)), axis=1) preds = np.dot(X, theta) return preds theta = find_theta(x, y) print(theta) preds = predict(x) fig = plt.figure(figsize=(8,6)) plt.plot(x, y, 'y.') plt.plot(x, preds, 'c-') plt.xlabel('Input') plt.ylabel('target') x = df_mr[['tempo', 'instrumentalness']] y = df_mr['popularity'] regr2 = linear_model.LinearRegression() regr2.fit(x.values, y) regr2.predict([[2, 0.555]])
code
90153288/cell_28
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv') import seaborn as sns import matplotlib.pyplot as plt df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']]) df_mr.tempo = df_mr.tempo.apply(lambda row: str(row).replace('?', '0') if row == '?' else str(row)) df_mr.tempo = df_mr.tempo.apply(pd.to_numeric, errors='coerce') df_mr = df_mr.dropna() x = df_mr[['instrumentalness', 'tempo']] y = df_mr['popularity'] def fit(x, a): f = a[2] * x * x + a[1] * x + a[0] return f def grad(x, a): g = 2 * a[2] * x + a[1] return g x = df_mr[['tempo', 'instrumentalness']] y = df_mr['popularity'] f = fit(x, y) plt.scatter(x, f) plt.plot(x, f) plt.xlabel('X') plt.ylabel('f(X)')
code
90153288/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv') import seaborn as sns import matplotlib.pyplot as plt df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']]) df_mr.head(100)
code
90153288/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv')

df_mr = df.drop(columns=[i for i in df.columns if i not in ['popularity', 'tempo', 'instrumentalness']])
df_mr.tempo = df_mr.tempo.apply(lambda row: str(row).replace('?', '0') if row == '?' else str(row))
df_mr.tempo = df_mr.tempo.apply(pd.to_numeric, errors='coerce')
df_mr = df_mr.dropna()

print(df_mr.dtypes.tempo)
df_mr.head(100)
code
90153288/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv') import seaborn as sns import matplotlib.pyplot as plt plt.figure(figsize=[10, 5]) sns.barplot(x=df.music_genre, y=df.popularity)
code
90153288/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv') import seaborn as sns import matplotlib.pyplot as plt sns.lmplot(x='speechiness', y='popularity', data=df, ci=None, scatter=False)
code
90153288/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/prediction-of-music-genre/music_genre.csv') print(df.shape) df.head()
code
130009806/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv')
fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv')

# create a DataFrame with the count of farmers markets by state
state_farms = pd.DataFrame(fm['State'].value_counts())
# rename the column 'State' to 'Number of farmers markets'
state_farms.rename(columns={'State': 'Number of farmers markets'}, inplace=True)

# create a bar chart
fig = px.bar(
    state_farms,
    x=state_farms.index,
    y='Number of farmers markets',
    color='Number of farmers markets',  # assign colors based on the number of farmers markets
    color_continuous_scale='Blues',     # set the color scale
    title='Number of Farmers Markets by State'
)
# change visual style
fig.update_layout(
    xaxis_title='State',
    yaxis_title='Number of Farmers Markets',
    plot_bgcolor='white',  # set the background color of the plot
    showlegend=False       # hide the legend
)
# show the bar chart
fig.show()

fig = go.Figure(data=go.Scattergeo(lon=fm['x'], lat=fm['y'], mode='markers',
                                   marker=dict(size=1, opacity=1, reversescale=True, autocolorscale=False, symbol=0,
                                               line=dict(width=1, color='rgba(102, 102, 102)'),
                                               colorscale='icefire', cmin=0)))
fig.update_layout(title='US farmers markets', geo_scope='usa')

categories = ['Coffee', 'Beans', 'Fruits', 'Grains', 'Juices', 'Mushrooms', 'PetFood', 'Tofu', 'WildHarvested']
category_counts = fm[categories].eq('Y').sum()
df_category_totals = pd.DataFrame({'Category': category_counts.index, 'Count': category_counts.values})
df_category_totals = df_category_totals.sort_values('Count', ascending=False)
fig = px.bar(df_category_totals, x='Category', y='Count', color='Category',
             title='Frequency of Categories in Markets',
             labels={'Category': 'Category', 'Count': 'Count'},
             color_discrete_sequence=px.colors.qualitative.Safe)
fig.show()
code
130009806/cell_13
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv') fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv') import plotly.express as px state_farms = pd.DataFrame(fm['State'].value_counts()) state_farms.rename(columns={'State': 'Number of farmers markets'}, inplace=True) fig = px.bar(state_farms, x=state_farms.index, y='Number of farmers markets', color='Number of farmers markets', color_continuous_scale='Blues', title='Number of Farmers Markets by State') fig.update_layout(xaxis_title='State', yaxis_title='Number of Farmers Markets', plot_bgcolor='white', showlegend=False) fig.show()
code
130009806/cell_30
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.express as px import plotly.graph_objects as go ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv') fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv') import plotly.express as px # create a DataFrame with the count of farmers markets by state state_farms = pd.DataFrame(fm['State'].value_counts()) # rename the column 'State' to 'Number of farmers markets' state_farms.rename(columns={'State':'Number of farmers markets'}, inplace = True) # create a bar chart fig = px.bar( state_farms, x=state_farms.index, y='Number of farmers markets', color='Number of farmers markets', # Assign colors based on the number of farmers markets color_continuous_scale='Blues', # Set the color scale title='Number of Farmers Markets by State' ) # change visual style fig.update_layout( xaxis_title='State', yaxis_title='Number of Farmers Markets', plot_bgcolor='white', # Set the background color of the plot showlegend=False # Hide the legend ) # show the bar chart fig.show() import plotly.graph_objects as go fig = go.Figure(data=go.Scattergeo(lon=fm['x'], lat=fm['y'], mode='markers', marker=dict(size=1, opacity=1, reversescale=True, autocolorscale=False, symbol=0, line=dict(width=1, color='rgba(102, 102, 102)'), colorscale='icefire', cmin=0))) fig.update_layout(title='US farmers markets', geo_scope='usa') # create a collection of strings categories = ['Coffee', 'Beans', 'Fruits', 'Grains', 'Juices', 'Mushrooms', 'PetFood', 'Tofu', 'WildHarvested'] # count the occurrences of "Y"(yes) for each category to see how many markets have those categories category_counts = fm[categories].eq('Y').sum() # convert the category totals to a DataFrame for plotting df_category_totals = pd.DataFrame({'Category': category_counts.index, 'Count': category_counts.values}) # sort the DataFrame by count in descending order df_category_totals = df_category_totals.sort_values('Count', ascending=False) # plot the bar chart fig = px.bar(df_category_totals, x='Category', y='Count', color='Category', title='Frequency of Categories in Markets', labels={'Category': 'Category', 'Count': 'Count'}, color_discrete_sequence=px.colors.qualitative.Safe) fig.show() def convert_dollar_number(data): if type(data) == float: return data elif type(data) == str and '$' in data: return int(''.join(data.split('$')[1].split(','))) elif type(data) == str and '$' not in data: return int(''.join(data.split(','))) for i in range(3, 8): ci[ci.columns[i]] = ci[ci.columns[i]].apply(convert_dollar_number) state_level = ci[['State', 'median household income', 'population', 'number of households']].groupby('State').agg({'median household income': 'mean', 'population': 'mean', 'number of households': 'mean'}) markets = pd.concat([state_level, no_markets_per_state], axis=1) markets.head()
code
130009806/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv') fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv') def convert_dollar_number(data): if type(data) == float: return data elif type(data) == str and '$' in data: return int(''.join(data.split('$')[1].split(','))) elif type(data) == str and '$' not in data: return int(''.join(data.split(','))) for i in range(3, 8): ci[ci.columns[i]] = ci[ci.columns[i]].apply(convert_dollar_number) print('\n{:^30}'.format('Change Data Format')) print('*' * 120)
code
130009806/cell_11
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv') fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv') fm.head()
code
130009806/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130009806/cell_32
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv')
fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv')

# create a DataFrame with the count of farmers markets by state
state_farms = pd.DataFrame(fm['State'].value_counts())
# rename the column 'State' to 'Number of farmers markets'
state_farms.rename(columns={'State': 'Number of farmers markets'}, inplace=True)

# create a bar chart
fig = px.bar(state_farms, x=state_farms.index, y='Number of farmers markets',
             color='Number of farmers markets',  # assign colors based on the number of farmers markets
             color_continuous_scale='Blues',     # set the color scale
             title='Number of Farmers Markets by State')
# change visual style
fig.update_layout(xaxis_title='State', yaxis_title='Number of Farmers Markets',
                  plot_bgcolor='white',  # set the background color of the plot
                  showlegend=False)      # hide the legend
# show the bar chart
fig.show()

fig = go.Figure(data=go.Scattergeo(lon=fm['x'], lat=fm['y'], mode='markers',
                                   marker=dict(size=1, opacity=1, reversescale=True, autocolorscale=False, symbol=0,
                                               line=dict(width=1, color='rgba(102, 102, 102)'),
                                               colorscale='icefire', cmin=0)))
fig.update_layout(title='US farmers markets', geo_scope='usa')

# create a collection of strings
categories = ['Coffee', 'Beans', 'Fruits', 'Grains', 'Juices', 'Mushrooms', 'PetFood', 'Tofu', 'WildHarvested']
# count the occurrences of "Y" (yes) for each category to see how many markets have those categories
category_counts = fm[categories].eq('Y').sum()
# convert the category totals to a DataFrame for plotting
df_category_totals = pd.DataFrame({'Category': category_counts.index, 'Count': category_counts.values})
# sort the DataFrame by count in descending order
df_category_totals = df_category_totals.sort_values('Count', ascending=False)
# plot the bar chart
fig = px.bar(df_category_totals, x='Category', y='Count', color='Category',
             title='Frequency of Categories in Markets',
             labels={'Category': 'Category', 'Count': 'Count'},
             color_discrete_sequence=px.colors.qualitative.Safe)
fig.show()

def convert_dollar_number(data):
    if type(data) == float:
        return data
    elif type(data) == str and '$' in data:
        return int(''.join(data.split('$')[1].split(',')))
    elif type(data) == str and '$' not in data:
        return int(''.join(data.split(',')))

for i in range(3, 8):
    ci[ci.columns[i]] = ci[ci.columns[i]].apply(convert_dollar_number)

state_level = ci[['State', 'median household income', 'population', 'number of households']].groupby('State').agg(
    {'median household income': 'mean', 'population': 'mean', 'number of households': 'mean'})

# no_markets_per_state is assumed to be built in an earlier cell of this notebook (not included in this record)
markets = pd.concat([state_level, no_markets_per_state], axis=1)

mean_population = markets['population'].mean()
markets['population'].fillna(mean_population, inplace=True)

fig = px.scatter(data_frame=markets, x='median household income', y='no. farmers markets',
                 size='population', color='population', color_continuous_scale='Greens',
                 trendline='ols', trendline_color_override='red', hover_data=markets.columns,
                 title='Relationship between median household income and no. farmers markets.')
fig.show()
code
130009806/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv')
fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv')

# create a DataFrame with the count of farmers markets by state
state_farms = pd.DataFrame(fm['State'].value_counts())
# rename the column 'State' to 'Number of farmers markets'
state_farms.rename(columns={'State': 'Number of farmers markets'}, inplace=True)

# create a bar chart
fig = px.bar(state_farms, x=state_farms.index, y='Number of farmers markets',
             color='Number of farmers markets',  # assign colors based on the number of farmers markets
             color_continuous_scale='Blues',     # set the color scale
             title='Number of Farmers Markets by State')
# change visual style
fig.update_layout(xaxis_title='State', yaxis_title='Number of Farmers Markets',
                  plot_bgcolor='white',  # set the background color of the plot
                  showlegend=False)      # hide the legend
# show the bar chart
fig.show()

fig = go.Figure(data=go.Scattergeo(lon=fm['x'], lat=fm['y'], mode='markers',
                                   marker=dict(size=1, opacity=1, reversescale=True, autocolorscale=False, symbol=0,
                                               line=dict(width=1, color='rgba(102, 102, 102)'),
                                               colorscale='icefire', cmin=0)))
fig.update_layout(title='US farmers markets', geo_scope='usa')
fig.show()
code
130009806/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv') fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv') ci.head()
code
130009806/cell_36
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go

ci = pd.read_csv('../input/farmers-markets-in-the-united-states/wiki_county_info.csv')
fm = pd.read_csv('../input/farmers-markets-in-the-united-states/farmers_markets_from_usda.csv')

# create a DataFrame with the count of farmers markets by state
state_farms = pd.DataFrame(fm['State'].value_counts())
# rename the column 'State' to 'Number of farmers markets'
state_farms.rename(columns={'State': 'Number of farmers markets'}, inplace=True)

# create a bar chart
fig = px.bar(state_farms, x=state_farms.index, y='Number of farmers markets',
             color='Number of farmers markets',  # assign colors based on the number of farmers markets
             color_continuous_scale='Blues',     # set the color scale
             title='Number of Farmers Markets by State')
# change visual style
fig.update_layout(xaxis_title='State', yaxis_title='Number of Farmers Markets',
                  plot_bgcolor='white',  # set the background color of the plot
                  showlegend=False)      # hide the legend
# show the bar chart
fig.show()

fig = go.Figure(data=go.Scattergeo(lon=fm['x'], lat=fm['y'], mode='markers',
                                   marker=dict(size=1, opacity=1, reversescale=True, autocolorscale=False, symbol=0,
                                               line=dict(width=1, color='rgba(102, 102, 102)'),
                                               colorscale='icefire', cmin=0)))
fig.update_layout(title='US farmers markets', geo_scope='usa')

# create a collection of strings
categories = ['Coffee', 'Beans', 'Fruits', 'Grains', 'Juices', 'Mushrooms', 'PetFood', 'Tofu', 'WildHarvested']
# count the occurrences of "Y" (yes) for each category to see how many markets have those categories
category_counts = fm[categories].eq('Y').sum()
# convert the category totals to a DataFrame for plotting
df_category_totals = pd.DataFrame({'Category': category_counts.index, 'Count': category_counts.values})
# sort the DataFrame by count in descending order
df_category_totals = df_category_totals.sort_values('Count', ascending=False)
# plot the bar chart
fig = px.bar(df_category_totals, x='Category', y='Count', color='Category',
             title='Frequency of Categories in Markets',
             labels={'Category': 'Category', 'Count': 'Count'},
             color_discrete_sequence=px.colors.qualitative.Safe)
fig.show()

def convert_dollar_number(data):
    if type(data) == float:
        return data
    elif type(data) == str and '$' in data:
        return int(''.join(data.split('$')[1].split(',')))
    elif type(data) == str and '$' not in data:
        return int(''.join(data.split(',')))

for i in range(3, 8):
    ci[ci.columns[i]] = ci[ci.columns[i]].apply(convert_dollar_number)

state_level = ci[['State', 'median household income', 'population', 'number of households']].groupby('State').agg(
    {'median household income': 'mean', 'population': 'mean', 'number of households': 'mean'})

# no_markets_per_state is assumed to be built in an earlier cell of this notebook (not included in this record)
markets = pd.concat([state_level, no_markets_per_state], axis=1)

# replace NaN values in the 'population' column with the column mean
mean_population = markets['population'].mean()
markets['population'].fillna(mean_population, inplace=True)

# plot the scatter plot
fig = px.scatter(
    data_frame=markets,
    x='median household income',
    y='no. farmers markets',
    size='population',
    color='population',
    color_continuous_scale='Greens',
    trendline='ols',
    trendline_color_override='red',
    hover_data=markets.columns,
    title='Relationship between median household income and no. farmers markets.'
)
fig.show()

markets['median household income'].fillna(0, inplace=True)
fig = px.scatter(data_frame=markets, x='population', y='no. farmers markets', trendline='ols',
                 size='median household income', color='median household income',
                 color_continuous_scale='Blues', hover_data=markets.columns,
                 title='Relationship between population and no. farmers markets per state.')
fig.show()
code
1006479/cell_7
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') df_store.head()
code
1006479/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv')
code
1006479/cell_5
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') df_train.head()
code
73075563/cell_13
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score from sklearn.metrics import matthews_corrcoef from sklearn.tree import DecisionTreeClassifier model = DecisionTreeClassifier(random_state=0) model.fit(X_train, y_train) print(matthews_corrcoef(y_test, model.predict(X_test))) print(accuracy_score(y_test, model.predict(X_test)))
code
73075563/cell_9
[ "text_plain_output_1.png" ]
import hypertools as hyp import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv') data.isnull().sum() data.groupby('DEATH_EVENT').size() labels = data['DEATH_EVENT'] hyp.plot(data, '.', hue=labels, reduce='PCA', normalize='within', legend=labels.unique().tolist())
code
73075563/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv') data.head()
code
73075563/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv') data.isnull().sum() data.describe()
code
73075563/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73075563/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv') data.isnull().sum() data.groupby('DEATH_EVENT').size()
code
73075563/cell_18
[ "text_plain_output_1.png" ]
from keras import callbacks
from keras.layers import Dense, BatchNormalization, Dropout, LSTM
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.metrics import accuracy_score
from sklearn.metrics import matthews_corrcoef
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import numpy as np  # linear algebra
import torch
import torch.nn as nn

# X_train, y_train, X_train_2, y_train_2 and X_val are assumed to be created in earlier notebook cells
model = DecisionTreeClassifier(random_state=0)
model.fit(X_train, y_train)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)

early_stopping = callbacks.EarlyStopping(min_delta=0.001, patience=20, restore_best_weights=True)

model = Sequential()
model.add(Dense(units=9, kernel_initializer='uniform', activation='relu', input_dim=12))
model.add(Dense(units=9, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=7, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=5, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=32, epochs=500, validation_split=0.2, callbacks=[early_stopping])
val_accuracy = np.mean(history.history['val_accuracy'])

class HeartFailureSimpleNNModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(12, 9)
        self.ReLU1 = nn.ReLU()
        self.linear2 = nn.Linear(9, 9)
        self.ReLU2 = nn.ReLU()
        self.linear3 = nn.Linear(9, 7)
        self.ReLU3 = nn.ReLU()
        self.linear4 = nn.Linear(7, 5)
        self.ReLU4 = nn.ReLU()
        self.linear5 = nn.Linear(5, 1)

    def forward(self, x):
        lin1_out = self.linear1(x)
        ReLU_out1 = self.ReLU1(lin1_out)
        ReLU_out2 = self.ReLU2(self.linear2(ReLU_out1))
        ReLU_out3 = self.ReLU3(self.linear3(ReLU_out2))
        ReLU_out4 = self.ReLU4(self.linear4(ReLU_out3))
        return self.linear5(ReLU_out4).sigmoid_()

net = HeartFailureSimpleNNModel()
criterion = nn.BCELoss()
num_epochs = 500
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

input_tensor = torch.from_numpy(X_train_2).type(torch.FloatTensor)
label_tensor = torch.from_numpy(y_train_2)
label_tensor = label_tensor.float()

for epoch in range(num_epochs):
    output = net(input_tensor.float())
    loss = criterion(output, label_tensor.unsqueeze(1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

validation_tensor = torch.from_numpy(X_val).type(torch.FloatTensor)
print(input_tensor)
print(validation_tensor)
out_probs = net(input_tensor).detach().numpy()
print(out_probs)
out_classes = np.round(out_probs)
print(out_classes)
print(label_tensor)
validation_out_probs = net(validation_tensor).detach().numpy()
print(validation_out_probs)
code
73075563/cell_8
[ "text_plain_output_1.png" ]
import hypertools as hyp import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv') data.isnull().sum() data.groupby('DEATH_EVENT').size() labels = data['DEATH_EVENT'] hyp.plot(data, '.', hue=labels, reduce='PCA', legend=labels.unique().tolist())
code
73075563/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import sklearn from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import matthews_corrcoef from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split import hypertools as hyp import seaborn as sns import numpy as np import torch !pip install captum from captum.attr import IntegratedGradients
code
73075563/cell_17
[ "text_html_output_1.png" ]
import torch import torch import torch.nn as nn import torch import torch.nn as nn class HeartFailureSimpleNNModel(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(12, 9) self.ReLU1 = nn.ReLU() self.linear2 = nn.Linear(9, 9) self.ReLU2 = nn.ReLU() self.linear3 = nn.Linear(9, 7) self.ReLU3 = nn.ReLU() self.linear4 = nn.Linear(7, 5) self.ReLU4 = nn.ReLU() self.linear5 = nn.Linear(5, 1) def forward(self, x): lin1_out = self.linear1(x) ReLU_out1 = self.ReLU1(lin1_out) ReLU_out2 = self.ReLU2(self.linear2(ReLU_out1)) ReLU_out3 = self.ReLU3(self.linear3(ReLU_out2)) ReLU_out4 = self.ReLU4(self.linear4(ReLU_out3)) return self.linear5(ReLU_out4).sigmoid_() net = HeartFailureSimpleNNModel() criterion = nn.BCELoss() num_epochs = 500 optimizer = torch.optim.Adam(net.parameters(), lr=0.001) input_tensor = torch.from_numpy(X_train_2).type(torch.FloatTensor) label_tensor = torch.from_numpy(y_train_2) label_tensor = label_tensor.float() for epoch in range(num_epochs): output = net(input_tensor.float()) loss = criterion(output, label_tensor.unsqueeze(1)) optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 20 == 0: print('Epoch {}/{} => Loss: {:.2f}'.format(epoch + 1, num_epochs, loss.item()))
code
73075563/cell_14
[ "text_plain_output_1.png" ]
from keras import callbacks from keras.layers import Dense, BatchNormalization, Dropout, LSTM from keras.models import Sequential from sklearn.metrics import accuracy_score from sklearn.metrics import matthews_corrcoef from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np import numpy as np # linear algebra model = DecisionTreeClassifier(random_state=0) model.fit(X_train, y_train) from keras.layers import Dense, BatchNormalization, Dropout, LSTM from keras.models import Sequential from keras.utils import to_categorical from keras import callbacks from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train = scaler.fit_transform(X_train) early_stopping = callbacks.EarlyStopping(min_delta=0.001, patience=20, restore_best_weights=True) model = Sequential() model.add(Dense(units=9, kernel_initializer='uniform', activation='relu', input_dim=12)) model.add(Dense(units=9, kernel_initializer='uniform', activation='relu')) model.add(Dense(units=7, kernel_initializer='uniform', activation='relu')) model.add(Dense(units=5, kernel_initializer='uniform', activation='relu')) model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) history = model.fit(X_train, y_train, batch_size=32, epochs=500, validation_split=0.2, callbacks=[early_stopping]) val_accuracy = np.mean(history.history['val_accuracy']) print('\n%s: %.2f%%' % ('val_accuracy', val_accuracy * 100))
code
73075563/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv') data.isnull().sum() data.groupby('DEATH_EVENT').size() corr = data.corr() sns.heatmap(corr)
code
73075563/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/heart-failure-clinical-data/heart_failure_clinical_records_dataset.csv') data.isnull().sum()
code
105206902/cell_42
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.fit_transform(X_test) from sklearn.ensemble import RandomForestClassifier rfcfl = RandomForestClassifier(random_state=0) rfcfl.fit(X_train, y_train) rfcfl.score(X_test, y_test)
code
105206902/cell_21
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] dftotal['attribute_0'].value_counts()
code
105206902/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.describe()
code
105206902/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] dftotal['product_code'].value_counts()
code
105206902/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftest.head()
code
105206902/cell_34
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] f = plt.figure(figsize=(20, 10)) sns.heatmap(dftotal.corr()); corrtable = dftotal.corr() print('the minimum value for loading for failure cases is {} and maximum value is {}'.format(dftotal[dftotal['failure'] == 1]['loading'].min(), dftotal[dftotal['failure'] == 1]['loading'].max())) print('the minimum value for loading for failure cases is {} and maximum value is {}'.format(dftotal[dftotal['failure'] == 0]['loading'].min(), dftotal[dftotal['failure'] == 0]['loading'].max()))
code
105206902/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] dftotal['attribute_1'].value_counts()
code
105206902/cell_44
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.fit_transform(X_test) from sklearn.svm import SVC svclassifier = SVC(kernel='linear') svclassifier.fit(X_train, y_train) y_pred = svclassifier.predict(X_test) svclassifier.score(X_test, y_test)
code
105206902/cell_6
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) print(dftrain.shape) print(dftest.shape) print(dftotal.shape)
code
105206902/cell_40
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.fit_transform(X_test) from sklearn.linear_model import LogisticRegression lrclf = LogisticRegression(random_state=0) lrclf.fit(X_train, y_train) y_predict = lrclf.predict(X_test) lrclf.score(X_test, y_test)
code
105206902/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] f = plt.figure(figsize=(20, 10)) sns.heatmap(dftotal.corr()); corrtable = dftotal.corr() corrtable.head()
code
105206902/cell_26
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] from sklearn.preprocessing import LabelEncoder labelencoder = LabelEncoder() dftotal['product_code'] = labelencoder.fit_transform(dftotal['product_code']) dftotal['product_code'].value_counts()
code
105206902/cell_48
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv')
dftotal = dftrain.append(dftest)
dftotal.reset_index(inplace=True)
dftotal.columns
dftotal.isnull().sum()
dftotal = dftotal[dftotal['loading'].notna()]

f = plt.figure(figsize=(20, 10))
sns.heatmap(dftotal.corr());
corrtable = dftotal.corr()

dftrainset = dftotal[~dftotal['failure'].isnull()]
dftestset = dftotal[dftotal['failure'].isnull()]
X = dftrainset[['id', 'product_code', 'loading', 'attribute_0', 'attribute_1', 'attribute_2', 'attribute_3', 'measurement_0', 'measurement_1', 'measurement_2', 'measurement_3', 'measurement_4', 'measurement_5', 'measurement_6', 'measurement_7', 'measurement_8', 'measurement_9', 'measurement_10', 'measurement_11', 'measurement_12', 'measurement_13', 'measurement_14', 'measurement_15', 'measurement_16', 'measurement_17']]
y = dftrainset['failure']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

pca = PCA(n_components=4)
X = dftotal[['loading', 'attribute_0', 'attribute_1', 'attribute_2', 'attribute_3', 'measurement_0', 'measurement_1', 'measurement_2', 'measurement_3', 'measurement_4', 'measurement_5', 'measurement_6', 'measurement_7', 'measurement_8', 'measurement_9', 'measurement_10', 'measurement_11', 'measurement_12', 'measurement_13', 'measurement_14', 'measurement_15', 'measurement_16', 'measurement_17']]
y = dftotal['failure']
principalComponents = pca.fit_transform(X)
principalDf = pd.DataFrame(data=principalComponents, columns=['principal component 1', 'principal component 2', 'principal component 3', 'principal component 4'])
dfpcatotal = pd.concat([principalDf, dftrainset[['failure', 'id', 'product_code']]], axis=1)
dfpcatotal.head()
code
105206902/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] print(dftotal.shape)
code
105206902/cell_7
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.info()
code
105206902/cell_45
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] f = plt.figure(figsize=(20, 10)) sns.heatmap(dftotal.corr()); corrtable = dftotal.corr() dftotal.head()
code
105206902/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal = dftotal[dftotal['loading'].notna()] f = plt.figure(figsize=(20, 10)) sns.heatmap(dftotal.corr())
code
105206902/cell_15
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum()
code
105206902/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum() dftotal.isnull().sum() dftotal.head()
code
105206902/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftrain.head()
code
105206902/cell_31
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv')
dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv')
dftotal = dftrain.append(dftest)
dftotal.reset_index(inplace=True)
dftotal.columns
dftotal.isnull().sum()
dftotal = dftotal[dftotal['loading'].notna()]

f = plt.figure(figsize=(20, 10))
sns.heatmap(dftotal.corr());
corrtable = dftotal.corr()

# report feature pairs whose correlation exceeds 0.4 (ignoring the diagonal)
irow, icol = corrtable.shape
for row in range(0, irow):
    for col in range(0, icol):
        corval = corrtable.iloc[row, col]
        if corval > 0.4:
            if corrtable.index[row] != corrtable.columns[col]:
                print('correlation with {} and {} is {}'.format(corrtable.index[row], corrtable.columns[col], corrtable.iloc[row, col]))
code
105206902/cell_10
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns
code
105206902/cell_12
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.reset_index(inplace=True) dftotal.columns dftotal.isnull().sum()
code
105206902/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/tabular-playground-series-aug-2022/train.csv') dftest = pd.read_csv('../input/tabular-playground-series-aug-2022/test.csv') dftotal = dftrain.append(dftest) dftotal.head()
code
89132929/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') lista['price'] = lista['price'].str.lstrip('$') lista['price'] = lista['price'].str.replace(',', '') lista['price'] = pd.to_numeric(lista['price'], downcast='float') lista['host_since'] = pd.to_datetime(lista['host_since']) calendar['price'] = calendar['price'].str.lstrip('$') calendar['price'] = calendar['price'].str.replace(',', '') calendar['price'] = pd.to_numeric(calendar['price'], downcast='float') calendar['adjusted_price'] = calendar['adjusted_price'].str.lstrip('$') calendar['adjusted_price'] = calendar['adjusted_price'].str.replace(',', '') calendar['adjusted_price'] = pd.to_numeric(calendar['adjusted_price'], downcast='float') bins = range(0, 400, 20) plt.plot bins = range(0, 370, 20) plt.plot bins = range(0, 1000, 100) plt.plot plt.hist(lista['room_type'], histtype='bar', rwidth=1.2, color='grey') plt.xlabel('Tipo de Acomodação') plt.ylabel('Contagem') plt.plot plt.show()
code
89132929/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') precmax = max(lista['price']) precmin = min(lista['price']) precmed = np.mean(lista['price']) print(f'O preço máximo é {precmax}. O preço mínimo é {precmin}. O preço médio é {precmed}')
code
89132929/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') reviews.head(n=6)
code
89132929/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') Tipos = lista['room_type'].unique() print(Tipos)
code
89132929/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import folium
import seaborn as sns

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89132929/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') lista['price'] = lista['price'].str.lstrip('$') lista['price'] = lista['price'].str.replace(',', '') lista['price'] = pd.to_numeric(lista['price'], downcast='float') lista['host_since'] = pd.to_datetime(lista['host_since']) calendar['price'] = calendar['price'].str.lstrip('$') calendar['price'] = calendar['price'].str.replace(',', '') calendar['price'] = pd.to_numeric(calendar['price'], downcast='float') calendar['adjusted_price'] = calendar['adjusted_price'].str.lstrip('$') calendar['adjusted_price'] = calendar['adjusted_price'].str.replace(',', '') calendar['adjusted_price'] = pd.to_numeric(calendar['adjusted_price'], downcast='float') bins = range(0, 400, 20) plt.hist(lista['minimum_nights'], bins, histtype='bar', rwidth=1.2) plt.xlabel('Mínimas noites') plt.ylabel('Contagem') plt.plot plt.axvline(calendar['minimum_nights'].mean(), linestyle='dashed', color='red') plt.show()
code
89132929/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') lista['price'] = lista['price'].str.lstrip('$') lista['price'] = lista['price'].str.replace(',', '') lista['price'] = pd.to_numeric(lista['price'], downcast='float') lista['host_since'] = pd.to_datetime(lista['host_since']) calendar['price'] = calendar['price'].str.lstrip('$') calendar['price'] = calendar['price'].str.replace(',', '') calendar['price'] = pd.to_numeric(calendar['price'], downcast='float') calendar['adjusted_price'] = calendar['adjusted_price'].str.lstrip('$') calendar['adjusted_price'] = calendar['adjusted_price'].str.replace(',', '') calendar['adjusted_price'] = pd.to_numeric(calendar['adjusted_price'], downcast='float') bins = range(0, 400, 20) plt.plot bins = range(0, 370, 20) plt.hist(lista['availability_365'], bins, histtype='bar', rwidth=1.2, color='green') plt.xlabel('Disponibilidade em 365 dias') plt.ylabel('Contagem') plt.plot plt.axvline(lista['availability_365'].mean(), linestyle='dashed', color='red') plt.show()
code
89132929/cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') lista['price'] = lista['price'].str.lstrip('$') lista['price'] = lista['price'].str.replace(',', '') lista['price'] = pd.to_numeric(lista['price'], downcast='float') lista['price'].head(6)
code
89132929/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') lista['price'] = lista['price'].str.lstrip('$') lista['price'] = lista['price'].str.replace(',', '') lista['price'] = pd.to_numeric(lista['price'], downcast='float') lista['host_since'] = pd.to_datetime(lista['host_since']) calendar['price'] = calendar['price'].str.lstrip('$') calendar['price'] = calendar['price'].str.replace(',', '') calendar['price'] = pd.to_numeric(calendar['price'], downcast='float') calendar['adjusted_price'] = calendar['adjusted_price'].str.lstrip('$') calendar['adjusted_price'] = calendar['adjusted_price'].str.replace(',', '') calendar['adjusted_price'] = pd.to_numeric(calendar['adjusted_price'], downcast='float') bins = range(0, 400, 20) plt.plot bins = range(0, 370, 20) plt.plot bins = range(0, 1000, 100) plt.plot plt.plot plt.scatter(lista['room_type'], lista['price']) plt.xlabel('Tipo de Acomodação') plt.ylabel('Preço') plt.plot plt.show()
code
89132929/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') lista['price'] = lista['price'].str.lstrip('$') lista['price'] = lista['price'].str.replace(',', '') lista['price'] = pd.to_numeric(lista['price'], downcast='float') lista['host_since'] = pd.to_datetime(lista['host_since']) calendar['price'] = calendar['price'].str.lstrip('$') calendar['price'] = calendar['price'].str.replace(',', '') calendar['price'] = pd.to_numeric(calendar['price'], downcast='float') calendar['adjusted_price'] = calendar['adjusted_price'].str.lstrip('$') calendar['adjusted_price'] = calendar['adjusted_price'].str.replace(',', '') calendar['adjusted_price'] = pd.to_numeric(calendar['adjusted_price'], downcast='float') bins = range(0, 400, 20) plt.plot bins = range(0, 370, 20) plt.plot bins = range(0, 1000, 100) plt.hist(lista['price'], bins, histtype='bar', rwidth=100, color='yellow') plt.xlabel('Preços') plt.ylabel('Contagem') plt.plot plt.axvline(lista['price'].mean(), linestyle='dashed', color='blue') plt.show()
code
89132929/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') Tipos_Propriedades = lista['property_type'].unique() print(Tipos_Propriedades)
code
89132929/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) lista = pd.read_csv('/kaggle/input/airbnb-gz/listings_gz.csv') calendar = pd.read_csv('/kaggle/input/airbnb-gz/calendar_gz.csv') reviews = pd.read_csv('/kaggle/input/airbnb-gz/reviews_gz.csv') mapa = plt.imread('/kaggle/input/mapamelbourne/Melbourne.png') lista['price'] = lista['price'].str.lstrip('$') lista['price'] = lista['price'].str.replace(',', '') lista['price'] = pd.to_numeric(lista['price'], downcast='float') lista['host_since'] = pd.to_datetime(lista['host_since']) calendar['price'] = calendar['price'].str.lstrip('$') calendar['price'] = calendar['price'].str.replace(',', '') calendar['price'] = pd.to_numeric(calendar['price'], downcast='float') calendar['adjusted_price'] = calendar['adjusted_price'].str.lstrip('$') calendar['adjusted_price'] = calendar['adjusted_price'].str.replace(',', '') calendar['adjusted_price'] = pd.to_numeric(calendar['adjusted_price'], downcast='float') calendar.head(n=6)
code
122249887/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import pandas as pd df = pd.read_csv('../input/spaceship-titanic/train.csv') test_df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv') useless_cols = ['PassengerId', 'Name', 'Cabin'] df.drop(columns=useless_cols, inplace=True) import seaborn as sns corr_matrix = df.corr() high_corr = set() for i in range(len(corr_matrix.columns)): for j in range(i): if abs(corr_matrix.iloc[i, j]) > 0.7: colname = corr_matrix.columns[i] high_corr.add(colname) print(high_corr)
code
122249887/cell_7
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')

useless_cols = ['PassengerId', 'Name', 'Cabin']
df.drop(columns=useless_cols, inplace=True)
corr_matrix = df.corr()

X = df.drop(columns=['Transported'])
y = df.Transported
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

numerical_cols = [cname for cname in X.columns if X[cname].dtype in ['int64', 'float64']]
numerical_transformer = Pipeline(steps=[('imputer', SimpleImputer()), ('scaler', StandardScaler())])
categorical_cols = [cname for cname in X.columns if X[cname].nunique() < 10 and X[cname].dtype == 'object']
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])

pipe = Pipeline(steps=[('preprocessor', preprocessor), ('model', LogisticRegression())])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)

scores = cross_val_score(pipe, X, y, cv=5)
print('Cross-validation scores:', scores)
# cross_val_score for a classifier defaults to accuracy, so the mean is a CV accuracy, not an MAE
print('Mean CV accuracy:', scores.mean())
code
122249887/cell_8
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import pandas as pd
import seaborn as sns
import pandas as pd
df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
useless_cols = ['PassengerId', 'Name', 'Cabin']
df.drop(columns=useless_cols, inplace=True)
import seaborn as sns
corr_matrix = df.corr()
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
X = df.drop(columns=['Transported'])
y = df.Transported
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
numerical_cols = [cname for cname in X.columns if X[cname].dtype in ['int64', 'float64']]
numerical_transformer = Pipeline(steps=[('imputer', SimpleImputer()), ('scaler', StandardScaler())])
categorical_cols = [cname for cname in X.columns if X[cname].nunique() < 10 and X[cname].dtype == 'object']
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])
pipe = Pipeline(steps=[('preprocessor', preprocessor), ('model', LogisticRegression())])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
code
122249887/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
useless_cols = ['PassengerId', 'Name', 'Cabin']
df.drop(columns=useless_cols, inplace=True)
import seaborn as sns
corr_matrix = df.corr()
sns.heatmap(corr_matrix, cmap='coolwarm', annot=True)
code
130002260/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
average_calories = df.groupby('type')['calories'].mean()
correlation = df[['calories', 'fat', 'carb', 'fiber', 'protein']].corr()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    corr_value = correlation.loc['calories', nutrient]
average_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
print('Food Item Types with the Highest Average Nutrient Content:')
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    max_nutrient_type = average_nutrients[nutrient].idxmax()
    max_nutrient_value = average_nutrients[nutrient].max()
    print(f'- Highest Average {nutrient.capitalize()}: {max_nutrient_type} ({max_nutrient_value:.2f} grams)')
code
130002260/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
print(f'Food Item with Highest Fat: {highest_fat_item} ({highest_fat_value} grams)')
print(f'Food Item with Highest Carbohydrates: {highest_carb_item} ({highest_carb_value} grams)')
print(f'Food Item with Highest Fiber: {highest_fiber_item} ({highest_fiber_value} grams)')
print(f'Food Item with Highest Protein: {highest_protein_item} ({highest_protein_value} grams)')
code
130002260/cell_25
[ "text_plain_output_1.png" ]
from pprint import pprint
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pprint
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
average_calories = df.groupby('type')['calories'].mean()
correlation = df[['calories', 'fat', 'carb', 'fiber', 'protein']].corr()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    corr_value = correlation.loc['calories', nutrient]
average_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    max_nutrient_type = average_nutrients[nutrient].idxmax()
    max_nutrient_value = average_nutrients[nutrient].max()
mean_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
nutrient_columns = mean_nutrients.columns
for nutrient in nutrient_columns:
    nutrient_values = mean_nutrients[nutrient]
    plt.xticks(rotation=45)
# Calculate summary statistics for each nutrient by food item type
summary_statistics = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].describe()
# Use pprint for pretty printing
pp = pprint.PrettyPrinter()
# Print the summary statistics
print("Summary Statistics for Nutrient Values by Food Item Type:")
pp.pprint(summary_statistics)
mean_fat_by_type = df.groupby('type')['fat'].mean()
pp = pprint.PrettyPrinter()
print('Mean Fat Content by Food Item Type:')
pp.pprint(mean_fat_by_type)
code
130002260/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
average_calories = df.groupby('type')['calories'].mean()
correlation = df[['calories', 'fat', 'carb', 'fiber', 'protein']].corr()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    corr_value = correlation.loc['calories', nutrient]
average_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    max_nutrient_type = average_nutrients[nutrient].idxmax()
    max_nutrient_value = average_nutrients[nutrient].max()
mean_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
nutrient_columns = mean_nutrients.columns
for nutrient in nutrient_columns:
    nutrient_values = mean_nutrients[nutrient]
    plt.figure()
    plt.bar(nutrient_values.index, nutrient_values)
    plt.xlabel('Food Item Type')
    plt.ylabel('Mean ' + nutrient.capitalize() + ' (grams)')
    plt.title('Mean ' + nutrient.capitalize() + ' Content by Food Item Type')
    plt.xticks(rotation=45)
    plt.show()
code
130002260/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
code
130002260/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
print(f'Highest Calorie Food Item: {highest_calorie_item} ({highest_calorie_value} calories)')
print(f'Lowest Calorie Food Item: {lowest_calorie_item} ({lowest_calorie_value} calories)')
code
130002260/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
average_calories = df.groupby('type')['calories'].mean()
correlation = df[['calories', 'fat', 'carb', 'fiber', 'protein']].corr()
print('Correlation between Calorie Content and Nutrients:')
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    corr_value = correlation.loc['calories', nutrient]
    print(f'- {nutrient.capitalize()}: {corr_value:.2f}')
code
130002260/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib.pyplot as plt
import pprint
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130002260/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.info()
code
130002260/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
code
130002260/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
filtered_items = df[(df['fiber'] > 0) & (df['fat'] < 5) & (df['carb'] < 30)]
if filtered_items.empty:
    print('No food items are particularly rich in fiber but low in fat and carbohydrates.')
else:
    print('Food items that are particularly rich in fiber but low in fat and carbohydrates:')
    for item in filtered_items['item']:
        print(f'- {item}')
code
130002260/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
average_calories = df.groupby('type')['calories'].mean()
print('Average Calorie Content by Food Item Type:')
for food_type, avg_calories in average_calories.items():
    print(f'- {food_type}: {avg_calories:.2f} calories')
code
130002260/cell_24
[ "text_plain_output_1.png" ]
from pprint import pprint
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pprint
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
average_calories = df.groupby('type')['calories'].mean()
correlation = df[['calories', 'fat', 'carb', 'fiber', 'protein']].corr()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    corr_value = correlation.loc['calories', nutrient]
average_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    max_nutrient_type = average_nutrients[nutrient].idxmax()
    max_nutrient_value = average_nutrients[nutrient].max()
mean_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
nutrient_columns = mean_nutrients.columns
for nutrient in nutrient_columns:
    nutrient_values = mean_nutrients[nutrient]
    plt.xticks(rotation=45)
summary_statistics = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].describe()
pp = pprint.PrettyPrinter()
print('Summary Statistics for Nutrient Values by Food Item Type:')
pp.pprint(summary_statistics)
code
130002260/cell_27
[ "text_plain_output_1.png" ]
from pprint import pprint
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pprint
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df.describe
df.isnull().sum()
highest_calorie_item = df.loc[df['calories'].idxmax(), 'item']
highest_calorie_value = df['calories'].max()
lowest_calorie_item = df.loc[df['calories'].idxmin(), 'item']
lowest_calorie_value = df['calories'].min()
highest_fat_item = df.loc[df['fat'].idxmax(), 'item']
highest_fat_value = df['fat'].max()
highest_carb_item = df.loc[df['carb'].idxmax(), 'item']
highest_carb_value = df['carb'].max()
highest_fiber_item = df.loc[df['fiber'].idxmax(), 'item']
highest_fiber_value = df['fiber'].max()
highest_protein_item = df.loc[df['protein'].idxmax(), 'item']
highest_protein_value = df['protein'].max()
average_calories = df.groupby('type')['calories'].mean()
correlation = df[['calories', 'fat', 'carb', 'fiber', 'protein']].corr()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    corr_value = correlation.loc['calories', nutrient]
average_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
for nutrient in ['fat', 'carb', 'fiber', 'protein']:
    max_nutrient_type = average_nutrients[nutrient].idxmax()
    max_nutrient_value = average_nutrients[nutrient].max()
mean_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
nutrient_columns = mean_nutrients.columns
for nutrient in nutrient_columns:
    nutrient_values = mean_nutrients[nutrient]
    plt.xticks(rotation=45)
# Calculate summary statistics for each nutrient by food item type
summary_statistics = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].describe()
# Use pprint for pretty printing
pp = pprint.PrettyPrinter()
# Print the summary statistics
print("Summary Statistics for Nutrient Values by Food Item Type:")
pp.pprint(summary_statistics)
mean_fat_by_type = df.groupby('type')['fat'].mean()
pp = pprint.PrettyPrinter()
mean_nutrients = df.groupby('type')[['fat', 'carb', 'fiber', 'protein']].mean()
overall_mean_nutrients = df[['fat', 'carb', 'fiber', 'protein']].mean()
nutrient_differences = mean_nutrients.subtract(overall_mean_nutrients, axis='columns')
print('Notable Differences in Nutritional Profile across Food Item Categories:')
print(nutrient_differences)
code
130002260/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/starbucks-nutrition/starbucks.csv', index_col=0)
df
code
88100087/cell_23
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KBinsDiscretizer
import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv', usecols=['Age', 'Fare', 'Survived'])
df.dropna(inplace=True)
X = df.iloc[:, 1:]
y = df.iloc[:, 0]
kbin_age = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
kbin_fare = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
trf = ColumnTransformer([('first', kbin_age, [0]), ('second', kbin_fare, [1])])
X_train_trf = trf.fit_transform(X_train)
X_test_trf = trf.transform(X_test)
output = pd.DataFrame({'age': X_train['Age'], 'age_trf': X_train_trf[:, 0], 'fare': X_train['Fare'], 'fare_trf': X_train_trf[:, 1]})
output['age_labels'] = pd.cut(x=X_train['Age'], bins=trf.named_transformers_['first'].bin_edges_[0].tolist())
output['fare_labels'] = pd.cut(x=X_train['Fare'], bins=trf.named_transformers_['second'].bin_edges_[0].tolist())
df = pd.read_csv('/kaggle/input/titanic/train.csv')[['Age', 'Fare', 'SibSp', 'Parch', 'Survived']]
trf = ColumnTransformer([('bin', Binarizer(copy=False), ['family'])], remainder='passthrough')
X_train_trf = trf.fit_transform(X_train)
X_test_trf = trf.transform(X_test)
pd.DataFrame(X_train_trf, columns=['family', 'Age', 'Fare'])
code
88100087/cell_1
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88100087/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv', usecols=['Age', 'Fare', 'Survived'])
df.head()
code
88100087/cell_17
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import KBinsDiscretizer
import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv', usecols=['Age', 'Fare', 'Survived'])
df.dropna(inplace=True)
X = df.iloc[:, 1:]
y = df.iloc[:, 0]
kbin_age = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
kbin_fare = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
trf = ColumnTransformer([('first', kbin_age, [0]), ('second', kbin_fare, [1])])
X_train_trf = trf.fit_transform(X_train)
X_test_trf = trf.transform(X_test)
output = pd.DataFrame({'age': X_train['Age'], 'age_trf': X_train_trf[:, 0], 'fare': X_train['Fare'], 'fare_trf': X_train_trf[:, 1]})
output['age_labels'] = pd.cut(x=X_train['Age'], bins=trf.named_transformers_['first'].bin_edges_[0].tolist())
output['fare_labels'] = pd.cut(x=X_train['Fare'], bins=trf.named_transformers_['second'].bin_edges_[0].tolist())
df = pd.read_csv('/kaggle/input/titanic/train.csv')[['Age', 'Fare', 'SibSp', 'Parch', 'Survived']]
df.drop(columns=['SibSp', 'Parch'], inplace=True)
df.head()
code
88100087/cell_14
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import KBinsDiscretizer
import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv', usecols=['Age', 'Fare', 'Survived'])
df.dropna(inplace=True)
X = df.iloc[:, 1:]
y = df.iloc[:, 0]
kbin_age = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
kbin_fare = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
trf = ColumnTransformer([('first', kbin_age, [0]), ('second', kbin_fare, [1])])
X_train_trf = trf.fit_transform(X_train)
X_test_trf = trf.transform(X_test)
output = pd.DataFrame({'age': X_train['Age'], 'age_trf': X_train_trf[:, 0], 'fare': X_train['Fare'], 'fare_trf': X_train_trf[:, 1]})
output['age_labels'] = pd.cut(x=X_train['Age'], bins=trf.named_transformers_['first'].bin_edges_[0].tolist())
output['fare_labels'] = pd.cut(x=X_train['Fare'], bins=trf.named_transformers_['second'].bin_edges_[0].tolist())
df = pd.read_csv('/kaggle/input/titanic/train.csv')[['Age', 'Fare', 'SibSp', 'Parch', 'Survived']]
df.head()
code
88100087/cell_12
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import KBinsDiscretizer
import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv', usecols=['Age', 'Fare', 'Survived'])
kbin_age = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
kbin_fare = KBinsDiscretizer(n_bins=15, encode='ordinal', strategy='quantile')
trf = ColumnTransformer([('first', kbin_age, [0]), ('second', kbin_fare, [1])])
X_train_trf = trf.fit_transform(X_train)
X_test_trf = trf.transform(X_test)
output = pd.DataFrame({'age': X_train['Age'], 'age_trf': X_train_trf[:, 0], 'fare': X_train['Fare'], 'fare_trf': X_train_trf[:, 1]})
output.sample(5)
code
105194628/cell_2
[ "text_plain_output_1.png" ]
input1 = int(input('Enter a integer'))
if input1 > 0:
    print('The number is postive')
elif input1 < 0:
    print('The number is not positive')
else:
    print('The number is zero')
code
105194628/cell_7
[ "text_plain_output_1.png" ]
n1 = int(input('Enter your number 1'))
n2 = int(input('Enter your number 2'))
n3 = int(input('Enter your number 3'))
if n1 > n2 and n3:
    print(n1, 'It is the maximun value')
elif n2 > n1 and n3:
    print(n2, 'It is the maximun value')
elif n3 > n1 and n2:
    print(n3, 'It is the maximun value')
code
105194628/cell_8
[ "text_plain_output_1.png" ]
n1 = int(input('Enter your number 1'))
n2 = int(input('Enter your number 2'))
n3 = int(input('Enter your number 3'))
max = n1
if max < n2:
    max = n2
if max < n3:
    max = n3
print(max)
code