Dataset columns (from the dataset viewer header):

  path               string    lengths 13 to 17
  screenshot_names   sequence  lengths 1 to 873
  code               string    lengths 0 to 40.4k
  cell_type          string    1 distinct value ("code")
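Each record below pairs a notebook cell path (notebook id / cell name) with the screenshots of its rendered output and its source code. A minimal sketch of traversing such records, assuming they are available as plain Python dicts with the four fields above (the sample values are illustrative, not actual rows):

import pprint
from collections import defaultdict

# illustrative records mirroring the schema above
records = [
    {'path': '104120795/cell_4', 'screenshot_names': ['text_plain_output_1.png'],
     'code': "print('hello')", 'cell_type': 'code'},
    {'path': '104120795/cell_6', 'screenshot_names': ['image_output_1.png'],
     'code': 'x = 1', 'cell_type': 'code'},
]

# group cell names by notebook id (the path prefix before '/')
by_notebook = defaultdict(list)
for rec in records:
    notebook_id, cell = rec['path'].split('/')
    by_notebook[notebook_id].append(cell)
pprint.pprint(dict(by_notebook))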
104120795/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
print(df.shape)
code
104120795/cell_23
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_9.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.info()
code
104120795/cell_20
[ "text_plain_output_1.png" ]
from collections import Counter

import matplotlib.pyplot as plt  # visualization
import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes
import seaborn as sns  # statistical visualizations and aesthetics

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

for feat in features:
    skew = df[feat].skew()

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

corr = df[features].corr()
plt.figure(figsize=(16, 16))
sns.heatmap(corr, cbar=True, square=True, annot=True, fmt='.2f', annot_kws={'size': 15},
            xticklabels=features, yticklabels=features, alpha=0.7, cmap='coolwarm')
plt.show()
code
104120795/cell_6
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_9.png" ]
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes
code
104120795/cell_18
[ "text_html_output_1.png" ]
from collections import Counter

import matplotlib.pyplot as plt  # visualization
import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes
import seaborn as sns  # statistical visualizations and aesthetics

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

for feat in features:
    skew = df[feat].skew()

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

plt.figure(figsize=(8, 8))
sns.pairplot(df[features], palette='coolwarm')
plt.show()
code
104120795/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)
df['Type'].value_counts()
code
104120795/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes
df.describe()
code
104120795/cell_16
[ "text_plain_output_1.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df[features].plot(figsize=(8, 6), kind='box')
code
104120795/cell_38
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_9.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes
from scipy.stats import boxcox  # needed below; the import was missing from the extracted cell

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)

features_boxcox = []
for feature in features:
    bc_transformed, _ = boxcox(df[feature] + 1)
    features_boxcox.append(bc_transformed)
features_boxcox = np.column_stack(features_boxcox)
df_bc = pd.DataFrame(data=features_boxcox, columns=features)
df_bc['Type'] = df['Type']

for feature in features:
    delta = np.abs(df_bc[feature].skew() / df[feature].skew())
    if delta < 1.0:
        print('Feature %s is less skewed after a Box-Cox transform' % feature)
    else:
        print('Feature %s is more skewed after a Box-Cox transform' % feature)
code
104120795/cell_31
[ "image_output_1.png" ]
from collections import Counter

import matplotlib.pyplot as plt  # visualization
import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes
import seaborn as sns  # statistical visualizations and aesthetics

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

for feat in features:
    skew = df[feat].skew()

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

corr = df[features].corr()
outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)

for feat in features:
    skew = df[feat].skew()

sns.countplot(df['Type'])
plt.show()
code
104120795/cell_14
[ "text_html_output_1.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

print('The dataset contains %d observations with more than 2 outliers' % len(outlier_hunt(df[features])))
code
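For reference, the 1.5 x IQR rule that outlier_hunt applies per column, shown on a tiny self-contained array (not part of the notebook):

import numpy as np

x = np.array([1, 2, 3, 4, 5, 100])          # 100 is an obvious outlier
Q1, Q3 = np.percentile(x, 25), np.percentile(x, 75)
IQR = Q3 - Q1
lo, hi = Q1 - 1.5 * IQR, Q3 + 1.5 * IQR
print(x[(x < lo) | (x > hi)])                # -> [100]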
104120795/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter

import matplotlib.pyplot as plt  # visualization
import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes
import seaborn as sns  # statistical visualizations and aesthetics

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

for feat in features:
    skew = df[feat].skew()

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

corr = df[features].corr()
outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)

for feat in features:
    skew = df[feat].skew()
    sns.distplot(df[feat], kde=False, label='Skew = %.3f' % skew, bins=30)
    plt.legend(loc='best')
    plt.show()
code
104120795/cell_37
[ "text_plain_output_1.png" ]
from collections import Counter

import matplotlib.pyplot as plt  # visualization
import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes
import seaborn as sns  # statistical visualizations and aesthetics
from scipy.stats import boxcox  # needed below; the import was missing from the extracted cell

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

for feat in features:
    skew = df[feat].skew()

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

corr = df[features].corr()
outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)

for feat in features:
    skew = df[feat].skew()

features_boxcox = []
for feature in features:
    bc_transformed, _ = boxcox(df[feature] + 1)
    features_boxcox.append(bc_transformed)
features_boxcox = np.column_stack(features_boxcox)
df_bc = pd.DataFrame(data=features_boxcox, columns=features)
df_bc['Type'] = df['Type']

for feature in features:
    fig, ax = plt.subplots(1, 2, figsize=(7, 3.5))
    ax[0].hist(df[feature], color='blue', bins=30, alpha=0.3,
               label='Skew = %s' % str(round(df[feature].skew(), 3)))
    ax[0].set_title(str(feature))
    ax[0].legend(loc=0)
    ax[1].hist(df_bc[feature], color='red', bins=30, alpha=0.3,
               label='Skew = %s' % str(round(df_bc[feature].skew(), 3)))
    ax[1].set_title(str(feature) + ' after a Box-Cox transformation')
    ax[1].legend(loc=0)
    plt.show()
code
104120795/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # visualization
import pandas as pd  # read and wrangle dataframes
import seaborn as sns  # statistical visualizations and aesthetics

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

for feat in features:
    skew = df[feat].skew()
    sns.distplot(df[feat], kde=False, label='Skew = %.3f' % skew, bins=30)
    plt.legend(loc='best')
    plt.show()
code
104120795/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # read and wrangle dataframes

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.head(15)
code
104120795/cell_36
[ "text_plain_output_1.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # read and wrangle dataframes
from scipy.stats import boxcox  # needed below; the import was missing from the extracted cell

df = pd.read_csv('../input/glass/glass.csv')
features = df.columns[:-1].tolist()
df.dtypes

def outlier_hunt(df):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than 2 outliers.
    """
    outlier_indices = []
    for col in df.columns.tolist():
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((k for k, v in outlier_indices.items() if v > 2))
    return multiple_outliers

outlier_indices = outlier_hunt(df[features])
df = df.drop(outlier_indices).reset_index(drop=True)

features_boxcox = []
for feature in features:
    bc_transformed, _ = boxcox(df[feature] + 1)
    features_boxcox.append(bc_transformed)
features_boxcox = np.column_stack(features_boxcox)
df_bc = pd.DataFrame(data=features_boxcox, columns=features)
df_bc['Type'] = df['Type']
df_bc.describe()
code
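The Box-Cox cells above rely on scipy.stats.boxcox; a self-contained sketch of the call on illustrative positive data (when lmbda is omitted, SciPy fits and returns it alongside the transformed values):

import numpy as np
from scipy.stats import boxcox

data = np.array([0.1, 0.5, 1.0, 2.0, 8.0])   # illustrative; Box-Cox needs positive input
transformed, lmbda = boxcox(data + 1)         # the +1 shift mirrors the notebook
print(lmbda)
print(transformed)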
2044446/cell_42
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import datetime
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any()
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

tony = df[df['username'] == 'TonyElumeluFDN']
corp = ' '.join(df['tweet '])
corp = corp.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corp)
plt.axis('off')

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
day_order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
df2 = df[['tweet_id', 'date', 'time', 'tweet ', 'retweets', 'username']]
df2['month'] = df2['date'].apply(lambda x: month_order[int(x.split('-')[1]) - 1])
month_df = pd.DataFrame(df2['month'].value_counts()).reset_index()
month_df.columns = ['month', 'tweets']

def getday(x):
    year, month, day = (int(i) for i in x.split('-'))
    answer = datetime.date(year, month, day).weekday()
    return day_order[answer]

df['day'] = df['date'].apply(getday)
day_df = pd.DataFrame(df['day'].value_counts()).reset_index()
day_df.columns = ['day', 'tweets']

mesting = df2[df2['username'] == 'MESTAfrica']
month_mest = pd.DataFrame(mesting['month'].value_counts()).reset_index()
month_mest.columns = ['month', 'tweets']
month_mest.head()
code
2044446/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
plt.figure(figsize=(12, 8))
sns.countplot(data=df, y='username')
code
2044446/cell_25
[ "text_html_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

tony = df[df['username'] == 'TonyElumeluFDN']
corp = ' '.join(df['tweet '])  # note: joined from all tweets (df), not the tony subset
corp = corp.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corp)
plt.figure(figsize=(12, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
code
2044446/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df.head()
code
2044446/cell_34
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any()
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
day_order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
df2 = df[['tweet_id', 'date', 'time', 'tweet ', 'retweets', 'username']]
df2['month'] = df2['date'].apply(lambda x: month_order[int(x.split('-')[1]) - 1])
month_df = pd.DataFrame(df2['month'].value_counts()).reset_index()
month_df.columns = ['month', 'tweets']
code
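A side note on the month counts above: value_counts() orders by frequency, which is why the barplots later pass order=month_order. A small self-contained illustration of putting such counts in calendar order (sample data only):

import pandas as pd

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
               'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
counts = pd.Series(['Mar', 'Jan', 'Mar', 'Feb']).value_counts()
print(counts.reindex(month_order, fill_value=0))  # calendar order, absent months as 0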
2044446/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])  # note: joined from all tweets (df), not the mest subset
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.figure(figsize=(12, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
code
2044446/cell_33
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
day_order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
df2 = df[['tweet_id', 'date', 'time', 'tweet ', 'retweets', 'username']]
df2.head()
code
2044446/cell_44
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import datetime
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any()
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

tony = df[df['username'] == 'TonyElumeluFDN']
corp = ' '.join(df['tweet '])
corp = corp.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corp)
plt.axis('off')

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
day_order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
df2 = df[['tweet_id', 'date', 'time', 'tweet ', 'retweets', 'username']]
df2['month'] = df2['date'].apply(lambda x: month_order[int(x.split('-')[1]) - 1])
month_df = pd.DataFrame(df2['month'].value_counts()).reset_index()
month_df.columns = ['month', 'tweets']

def getday(x):
    year, month, day = (int(i) for i in x.split('-'))
    answer = datetime.date(year, month, day).weekday()
    return day_order[answer]

df['day'] = df['date'].apply(getday)
day_df = pd.DataFrame(df['day'].value_counts()).reset_index()
day_df.columns = ['day', 'tweets']

mesting = df2[df2['username'] == 'MESTAfrica']
month_mest = pd.DataFrame(mesting['month'].value_counts()).reset_index()
month_mest.columns = ['month', 'tweets']
month_mest['tweets'].sum()
code
2044446/cell_20
[ "text_html_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.figure(figsize=(12, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
code
2044446/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any()
code
2044446/cell_29
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

mest[mest['retweets'] == 2157]
code
2044446/cell_41
[ "application_vnd.jupyter.stderr_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import datetime
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any()
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

tony = df[df['username'] == 'TonyElumeluFDN']
corp = ' '.join(df['tweet '])
corp = corp.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corp)
plt.axis('off')

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
day_order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
df2 = df[['tweet_id', 'date', 'time', 'tweet ', 'retweets', 'username']]
df2['month'] = df2['date'].apply(lambda x: month_order[int(x.split('-')[1]) - 1])
month_df = pd.DataFrame(df2['month'].value_counts()).reset_index()
month_df.columns = ['month', 'tweets']

def getday(x):
    year, month, day = (int(i) for i in x.split('-'))
    answer = datetime.date(year, month, day).weekday()
    return day_order[answer]

df['day'] = df['date'].apply(getday)
day_df = pd.DataFrame(df['day'].value_counts()).reset_index()
day_df.columns = ['day', 'tweets']

mesting = df2[df2['username'] == 'MESTAfrica']
month_mest = pd.DataFrame(mesting['month'].value_counts()).reset_index()
month_mest.columns = ['month', 'tweets']
plt.figure(figsize=(12, 6))
plt.title('MESTAfrica Tweets Per Month')
sns.barplot(x='month', y='tweets', data=month_mest, order=month_order)
code
2044446/cell_2
[ "text_html_output_1.png" ]
from subprocess import check_output

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
import datetime

print(check_output(['ls', '../input']).decode('utf8'))
code
2044446/cell_32
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']
df2.head()
code
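The cell above carves 'created_at' into date and time with string splits; a sketch of the same extraction via pandas' datetime accessor, on illustrative values (not the dataset's actual timestamps):

import pandas as pd

s = pd.Series(['2018-03-05 14:22:01', '2018-03-06 09:10:45'])  # illustrative
dt = pd.to_datetime(s)
print(dt.dt.date)            # the 'date' part
print(dt.dt.time)            # the 'time' part
print(dt.dt.strftime('%b'))  # 'Mar', matching the month_order abbreviations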
2044446/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df.describe()
code
2044446/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
code
2044446/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]
code
2044446/cell_35
[ "text_html_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any()
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

tony = df[df['username'] == 'TonyElumeluFDN']
corp = ' '.join(df['tweet '])
corp = corp.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corp)
plt.axis('off')

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
day_order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
df2 = df[['tweet_id', 'date', 'time', 'tweet ', 'retweets', 'username']]
df2['month'] = df2['date'].apply(lambda x: month_order[int(x.split('-')[1]) - 1])
month_df = pd.DataFrame(df2['month'].value_counts()).reset_index()
month_df.columns = ['month', 'tweets']

plt.figure(figsize=(12, 6))
plt.title('All Tweets Per Day')  # note: the chart is actually per month despite the title
sns.barplot(x='month', y='tweets', data=month_df, order=month_order)
code
2044446/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df[df['retweets'] == 79537]
code
2044446/cell_27
[ "text_html_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

mest.describe()
code
2044446/cell_37
[ "text_html_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import datetime
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any()
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]

corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')

mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')

tony = df[df['username'] == 'TonyElumeluFDN']
corp = ' '.join(df['tweet '])
corp = corp.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corp)
plt.axis('off')

df2 = df
df2['date'] = df2['created_at'].map(lambda x: x.split(' ')[0])
df2['time'] = df2['created_at'].map(lambda x: x.split(' ')[-1])
del df2['created_at']

month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
day_order = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
df2 = df[['tweet_id', 'date', 'time', 'tweet ', 'retweets', 'username']]
df2['month'] = df2['date'].apply(lambda x: month_order[int(x.split('-')[1]) - 1])
month_df = pd.DataFrame(df2['month'].value_counts()).reset_index()
month_df.columns = ['month', 'tweets']

def getday(x):
    year, month, day = (int(i) for i in x.split('-'))
    answer = datetime.date(year, month, day).weekday()
    return day_order[answer]

df['day'] = df['date'].apply(getday)
day_df = pd.DataFrame(df['day'].value_counts()).reset_index()
day_df.columns = ['day', 'tweets']

plt.figure(figsize=(12, 6))
plt.title('All Tweets Per Day')
sns.barplot(x='day', y='tweets', data=day_df, order=day_order)
code
2044446/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df.info()
code
88075343/cell_21
[ "text_html_output_1.png" ]
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1 = df1[df1['Order Date'].str[0:2] != 'Or']
df1['sales'] = 0
df1['sales'] = df1['Price Each'].astype('float') * df1['Quantity Ordered'].astype('float')
# assumed step (missing from the extracted cell): derive 'Month' from the first
# two characters of 'Order Date', which the groupby below relies on
df1['Month'] = df1['Order Date'].str[0:2].astype('int32')
results = df1.groupby('Month').sum()['sales']

months = range(1, 13)
plt.bar(months, results)
plt.xticks(months)
plt.ylabel('Sales')
plt.xlabel('Month Number')
plt.ticklabel_format(style='plain')
plt.show()
code
88075343/cell_30
[ "text_html_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1 = df1[df1['Order Date'].str[0:2] != 'Or']
df1['sales'] = 0
df1['sales'] = df1['Price Each'].astype('float') * df1['Quantity Ordered'].astype('float')
# assumed step (missing from the extracted cell): derive 'Month' from 'Order Date'
df1['Month'] = df1['Order Date'].str[0:2].astype('int32')
results = df1.groupby('Month').sum()['sales']

def getCity(str):
    return str.split(',')[1]

def getState(str):
    return str.split(',')[2].split(' ')[1]

df1['Purchase Address'].apply(lambda x: x.split(',')[2].split(' ')[1])
df1['city'] = df1['Purchase Address'].apply(lambda x: f'{getCity(x)} ({getState(x)})')
results = df1.groupby('city').sum()
results
code
88075343/cell_6
[ "text_html_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape
df1.head()
code
88075343/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
df.head()
code
88075343/cell_19
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1 = df1[df1['Order Date'].str[0:2] != 'Or']
df1['sales'] = 0
df1['sales'] = df1['Price Each'].astype('float') * df1['Quantity Ordered'].astype('float')
df1.head()
code
88075343/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88075343/cell_7
[ "image_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1.head()
code
88075343/cell_28
[ "image_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1 = df1[df1['Order Date'].str[0:2] != 'Or']
df1['sales'] = 0
df1['sales'] = df1['Price Each'].astype('float') * df1['Quantity Ordered'].astype('float')
# assumed step (missing from the extracted cell): derive 'Month' from 'Order Date'
df1['Month'] = df1['Order Date'].str[0:2].astype('int32')
results = df1.groupby('Month').sum()['sales']

def getCity(str):
    return str.split(',')[1]

def getState(str):
    return str.split(',')[2].split(' ')[1]

df1['Purchase Address'].apply(lambda x: x.split(',')[2].split(' ')[1])
df1['city'] = df1['Purchase Address'].apply(lambda x: f'{getCity(x)} ({getState(x)})')
df1.head()
code
88075343/cell_15
[ "text_html_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1 = df1[df1['Order Date'].str[0:2] != 'Or']
df1['sales'] = 0
df1.head()
code
88075343/cell_24
[ "text_html_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1 = df1[df1['Order Date'].str[0:2] != 'Or']
df1['sales'] = 0
df1['sales'] = df1['Price Each'].astype('float') * df1['Quantity Ordered'].astype('float')
# assumed step (missing from the extracted cell): derive 'Month' from 'Order Date'
df1['Month'] = df1['Order Date'].str[0:2].astype('int32')
results = df1.groupby('Month').sum()['sales']
df1.head()
code
88075343/cell_27
[ "text_html_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape

nan_df = df1[df1.isna().any(axis=1)]
df1 = df1.dropna(how='all')
df1 = df1[df1['Order Date'].str[0:2] != 'Or']
df1['sales'] = 0
df1['sales'] = df1['Price Each'].astype('float') * df1['Quantity Ordered'].astype('float')
# assumed step (missing from the extracted cell): derive 'Month' from 'Order Date'
df1['Month'] = df1['Order Date'].str[0:2].astype('int32')
results = df1.groupby('Month').sum()['sales']
df1['Purchase Address'].apply(lambda x: x.split(',')[2].split(' ')[1])
code
88075343/cell_5
[ "text_html_output_1.png" ]
import os

import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/sales-dataset/Sales_April_2019.csv')
files = [file for file in os.listdir('../input/sales-dataset')]
df = pd.DataFrame()
for file in files:
    df1 = pd.read_csv('../input/sales-dataset/' + file)
    df = pd.concat([df, df1])
df.to_csv('all_data.csv', index=False)
df1 = pd.read_csv('all_data.csv')
df1.shape
code
128022780/cell_13
[ "text_plain_output_1.png" ]
x = 4

x = 2
y = 902385873792631
z = -4938686

x = 2.1
y = 2.0
z = -45.69

x = 3 + 4j
y = 4j
z = -4j

print(type(x))
print(type(y))
code
128022780/cell_4
[ "text_plain_output_1.png" ]
x = 4
print(type(x))
code
128022780/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
x = 4

x = 2
y = 902385873792631
z = -4938686

print(type(x))
print(type(y))
print(type(z))
code
128022780/cell_18
[ "text_plain_output_1.png" ]
import random

# note: randrange(1, 1) is an empty range (start == stop) and raises ValueError
print(random.randrange(1, 1))
code
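Since randrange requires stop > start (stop itself is excluded), a working variant with illustrative bounds:

import random

print(random.randrange(1, 10))  # a random integer from 1 to 9 inclusive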
128022780/cell_16
[ "text_plain_output_1.png" ]
x = 4

x = 2
y = 902385873792631
z = -4938686

x = 2.1
y = 2.0
z = -45.69

x = 3 + 4j
y = 4j
z = -4j

x = 1
y = 4.4
z = 1j

a = float(x)
b = int(y)
c = complex(x)

print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
code
128022780/cell_10
[ "text_plain_output_1.png" ]
x = 4

x = 2
y = 902385873792631
z = -4938686

x = 2.1
y = 2.0
z = -45.69

print(type(x))
print(type(y))
print(type(x))  # note: likely intended print(type(z)); all three values are floats regardless
code
16124219/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
df_.head()
code
16124219/cell_34
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
# note: Turno, Dia, Mês, Dia_Semana, Hora and Data are engineered from
# pickup_datetime in a notebook cell not included in this extract
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)
df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
df_.head()
code
16124219/cell_40
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    # haversine distance in km (R_terra is the Earth's radius)
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)
y.min()
y[0:5]
code
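The target above is transformed with log(y + 1); numpy's log1p/expm1 pair is the exact equivalent and inverts cleanly after prediction. A self-contained round trip with illustrative durations:

import numpy as np

y_raw = np.array([455.0, 663.0, 2124.0])  # trip durations in seconds (illustrative)
y_log = np.log1p(y_raw)                   # same as np.log(y_raw + 1)
y_back = np.expm1(y_log)                  # inverse transform after predicting
print(np.allclose(y_back, y_raw))         # True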
16124219/cell_65
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)

# assumed split (missing from the extracted cell); test_size and random_state are illustrative
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

models = []
models.append(('Linear', LinearRegression()))
models.append(('GBooting', GradientBoostingRegressor()))
models.append(('RFR', RandomForestRegressor(n_estimators=10)))
models.append(('DTR', DecisionTreeRegressor()))

def rmsle(y_pred, y_test):
    assert len(y_test) == len(y_pred)  # names corrected from the undefined ytest/ypred
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_test)) ** 2))

rmse_calc = []
rmsle = []  # note: this list shadows the rmsle function defined above
for nome, model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    rmse_calc.append((nome, np.sqrt(mean_squared_error(y_test, y_pred))))
    rmsle.append((nome, np.sqrt(mean_squared_log_error(y_test, y_pred))))
rmsle
code
16124219/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
df_.head()
code
16124219/cell_60
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import xgboost as xgb

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)

# assumed split (missing from the extracted cell); test_size and random_state are illustrative
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

models = []
models.append(('Linear', LinearRegression()))
models.append(('GBooting', GradientBoostingRegressor()))
models.append(('RFR', RandomForestRegressor(n_estimators=10)))
models.append(('DTR', DecisionTreeRegressor()))

def rmsle(y_pred, y_test):
    assert len(y_test) == len(y_pred)  # names corrected from the undefined ytest/ypred
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_test)) ** 2))

rmse_calc = []
rmsle = []  # note: this list shadows the rmsle function defined above
for nome, model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    rmse_calc.append((nome, np.sqrt(mean_squared_error(y_test, y_pred))))
    rmsle.append((nome, np.sqrt(mean_squared_log_error(y_test, y_pred))))

dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
watchlist = [(dtrain, 'train'), (dtest, 'valid')]
xgb_pars = {'min_child_weight': 10, 'eta': 0.03, 'colsample_bytree': 0.3, 'max_depth': 10,
            'subsample': 0.8, 'lambda': 0.5, 'nthread': -1, 'booster': 'gbtree', 'silent': 1,
            'eval_metric': 'rmse', 'objective': 'reg:linear'}
model = xgb.train(xgb_pars, dtrain, 1000, watchlist, early_stopping_rounds=90, maximize=False, verbose_eval=100)
y_pred = model.predict(dtest)
y_pred
code
16124219/cell_64
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)

# assumed split (missing from the extracted cell); test_size and random_state are illustrative
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

models = []
models.append(('Linear', LinearRegression()))
models.append(('GBooting', GradientBoostingRegressor()))
models.append(('RFR', RandomForestRegressor(n_estimators=10)))
models.append(('DTR', DecisionTreeRegressor()))

def rmsle(y_pred, y_test):
    assert len(y_test) == len(y_pred)  # names corrected from the undefined ytest/ypred
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_test)) ** 2))

rmse_calc = []
rmsle = []  # note: this list shadows the rmsle function defined above
for nome, model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    rmse_calc.append((nome, np.sqrt(mean_squared_error(y_test, y_pred))))
    rmsle.append((nome, np.sqrt(mean_squared_log_error(y_test, y_pred))))
rmse_calc
code
16124219/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
df.head()
code
16124219/cell_45
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # needed for plt.show(); the import was missing from the extracted cell
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)

corr = df_.corr()
sns.heatmap(corr)
plt.show()
code
16124219/cell_58
[ "text_plain_output_1.png" ]
import xgboost as xgb

# X_train, y_train, X_test and y_test are assumed to come from earlier cells of this notebook
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
watchlist = [(dtrain, 'train'), (dtest, 'valid')]
xgb_pars = {'min_child_weight': 10, 'eta': 0.03, 'colsample_bytree': 0.3, 'max_depth': 10,
            'subsample': 0.8, 'lambda': 0.5, 'nthread': -1, 'booster': 'gbtree', 'silent': 1,
            'eval_metric': 'rmse', 'objective': 'reg:linear'}
model = xgb.train(xgb_pars, dtrain, 1000, watchlist, early_stopping_rounds=90, maximize=False, verbose_eval=100)
code
16124219/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
print(df.shape)
code
16124219/cell_47
[ "image_output_1.png" ]
X_train
code
16124219/cell_66
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import xgboost as xgb

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)

# assumed split (missing from the extracted cell); test_size and random_state are illustrative
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

models = []
models.append(('Linear', LinearRegression()))
models.append(('GBooting', GradientBoostingRegressor()))
models.append(('RFR', RandomForestRegressor(n_estimators=10)))
models.append(('DTR', DecisionTreeRegressor()))

def rmsle(y_pred, y_test):
    assert len(y_test) == len(y_pred)  # names corrected from the undefined ytest/ypred
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_test)) ** 2))

rmse_calc = []
rmsle = []  # note: this list shadows the rmsle function defined above
for nome, model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    rmse_calc.append((nome, np.sqrt(mean_squared_error(y_test, y_pred))))
    rmsle.append((nome, np.sqrt(mean_squared_log_error(y_test, y_pred))))

dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
watchlist = [(dtrain, 'train'), (dtest, 'valid')]
xgb_pars = {'min_child_weight': 10, 'eta': 0.03, 'colsample_bytree': 0.3, 'max_depth': 10,
            'subsample': 0.8, 'lambda': 0.5, 'nthread': -1, 'booster': 'gbtree', 'silent': 1,
            'eval_metric': 'rmse', 'objective': 'reg:linear'}
model = xgb.train(xgb_pars, dtrain, 1000, watchlist, early_stopping_rounds=90, maximize=False, verbose_eval=100)
y_pred = model.predict(dtest)
rmse_xgb = np.sqrt(mean_squared_error(y_test, y_pred))
rmsle_xgb = np.sqrt(mean_squared_log_error(y_test, y_pred))
rmsle_xgb
code
16124219/cell_43
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)

# X_train and X_test are assumed to come from an earlier train_test_split cell.
std = StandardScaler()
X_train_str = std.fit_transform(X_train)
X_test_str = std.transform(X_test)
df_std = std.fit_transform(df_)
df_std = pd.DataFrame(df_std)
df_std.head()
code
16124219/cell_46
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)
corr = df_.corr()
corr
code
16124219/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt  # needed for the plotting loop below

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
# 'Turno', 'Dia', 'Mês' and 'Dia_Semana' are engineered in an earlier cell of this notebook
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
for c in cols:
    plt.figure()
    plt.title(c)
    time[c].plot(kind='hist')
    plt.show()
code
16124219/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
df_['passenger_count'].unique()
code
16124219/cell_53
[ "text_html_output_1.png" ]
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)

models = []
models.append(('Linear', LinearRegression()))
models.append(('GBooting', GradientBoostingRegressor()))
models.append(('RFR', RandomForestRegressor(n_estimators=10)))
models.append(('DTR', DecisionTreeRegressor()))

def rmsle(y_pred, y_test):
    assert len(y_test) == len(y_pred)
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_test)) ** 2))

# X_train, X_test, y_train, y_test are assumed to come from a
# train_test_split performed in an earlier cell of this notebook.
rmse_calc = []
rmsle_scores = []  # kept distinct from the rmsle() helper above so the list does not shadow it
for nome, model in models:
    print(nome)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    rmse_calc.append((nome, np.sqrt(mean_squared_error(y_test, y_pred))))
    rmsle_scores.append((nome, np.sqrt(mean_squared_log_error(y_test, y_pred))))
code
16124219/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)
df_.head()
code
16124219/cell_37
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df_ = df.drop(['id', 'vendor_id', 'store_and_fwd_flag'], axis=1)
time = df_.set_index('trip_duration')
cols = ['passenger_count', 'Turno', 'Dia', 'Mês', 'Dia_Semana']
df_.drop(['dropoff_datetime', 'pickup_datetime', 'Hora', 'Data'], axis=1, inplace=True)

def distancia(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    R_terra = 6371
    inicio_lat, inicio_lon, fim_lat, fim_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlat = fim_lat - inicio_lat
    dlon = fim_lon - inicio_lon
    d = np.sin(dlat / 2.0) ** 2 + np.cos(inicio_lat) * np.cos(fim_lat) * np.sin(dlon / 2.0) ** 2
    return 2 * R_terra * np.arcsin(np.sqrt(d))

df_['Duração'] = df_['trip_duration']
df_.drop(['trip_duration'], axis=1, inplace=True)
X = df_.values[:, :-1]
y = np.log(df_.values[:, -1] + 1)
y.min()
code
16111538/cell_9
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
train['SalePrice'].hist(bins=50)
y = train['SalePrice'].reset_index(drop=True)
code
16111538/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.describe()
code
16111538/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['SalePrice'].hist(bins=50)
code
16111538/cell_10
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x.describe()
code
16111538/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe()
code
121150515/cell_9
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeRegressor

# X and y are assumed to come from an earlier feature-preparation cell.
dt = DecisionTreeRegressor()
cross_val_score(dt, X, y, cv=5).mean()
code
121150515/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('/kaggle/input/playground-series-s3e9/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/playground-series-s3e9/test.csv', index_col='id')
submission = pd.read_csv('/kaggle/input/playground-series-s3e9/sample_submission.csv', index_col='id')
heatmap = sns.heatmap(train.corr(), vmin=-1, vmax=1, annot=True, cmap='BrBG')
code
121150515/cell_11
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, cross_val_score

rf = RandomForestRegressor(max_depth=6, random_state=73, n_estimators=90)
print(cross_val_score(rf, X, y, cv=5).mean())
code
121150515/cell_7
[ "text_plain_output_1.png" ]
from sklearn.linear_model import RidgeCV

ridge = RidgeCV(cv=5).fit(X, y)
ridge.score(X, y)
code
121150515/cell_8
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LassoCV

lasso = LassoCV(cv=5).fit(X, y)
lasso.score(X, y)
code
121150515/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score

lr = LinearRegression()
cross_val_score(lr, X, y, cv=5).mean()
code
121150515/cell_10
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import KNeighborsRegressor

knn = KNeighborsRegressor(n_neighbors=9)
cross_val_score(knn, X, y, cv=5).mean()
code
121150515/cell_12
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_score
import xgboost as xgb

model = xgb.XGBRegressor(max_depth=5, n_estimators=10, random_state=73)
print(cross_val_score(model, X, y, cv=5).mean())
code
32065347/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

spotify_filepath = '../input/data-for-datavis/spotify.csv'
spotify_data = pd.read_csv(spotify_filepath, index_col='Date', parse_dates=True)
spotify_data.sample(10)
code
32065347/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32065347/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('dark')
spotify_filepath = '../input/data-for-datavis/spotify.csv'
spotify_data = pd.read_csv(spotify_filepath, index_col='Date', parse_dates=True)
spotify_data.sample(10)
plt.figure(figsize=(12, 6))
sns.lineplot(data=spotify_data)
code
32065347/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('dark')
spotify_filepath = '../input/data-for-datavis/spotify.csv'
spotify_data = pd.read_csv(spotify_filepath, index_col='Date', parse_dates=True)
spotify_data.sample(10)
ign_filepath = '../input/data-for-datavis/ign_scores.csv'
ign_data = pd.read_csv(ign_filepath, index_col='Platform')
plt.figure(figsize=(8, 6))
sns.barplot(x=ign_data['Racing'], y=ign_data.index)
plt.xlabel('')
plt.title('Average Score for Racing Games, by Platform')
code
32065347/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('dark')
spotify_filepath = '../input/data-for-datavis/spotify.csv'
spotify_data = pd.read_csv(spotify_filepath, index_col='Date', parse_dates=True)
spotify_data.sample(10)
ign_filepath = '../input/data-for-datavis/ign_scores.csv'
ign_data = pd.read_csv(ign_filepath, index_col='Platform')
plt.figure(figsize=(8, 6))
sns.heatmap(data=ign_data, annot=True)
code
89142938/cell_3
[ "text_plain_output_1.png" ]
def factorial(n):
    # iterative factorial: multiplies 1 * 2 * ... * n
    result = 1
    for i in range(1, n + 1):
        result = result * i
    return result

factorial(5)
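# Quick cross-check against the standard library (hypothetical usage, not
# part of the original notebook):
import math
assert factorial(5) == math.factorial(5) == 120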
code
105180553/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from google.colab import drive

drive.mount('/content/drive')
code
128044935/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('./train.csv')
train_data.head()
code
128044935/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128049433/cell_21
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100

dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)

def get_dataset(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
    # split a batched tf.data dataset into train/validation/test partitions
    ds_size = len(ds)
    if shuffle:
        ds = ds.shuffle(shuffle_size, seed=8)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    train_ds = ds.take(train_size)
    val_ds = ds.skip(train_size).take(val_size)
    test_ds = ds.skip(train_size).skip(val_size)
    return (train_ds, val_ds, test_ds)

# The split itself appears to happen in an earlier notebook cell; it is
# reproduced here so that train_ds/val_ds/test_ds are defined before use.
train_ds, val_ds, test_ds = get_dataset(dataset)

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

resize_and_rescale = tf.keras.Sequential([
    layers.experimental.preprocessing.Resizing(256, 256),
    layers.experimental.preprocessing.Rescaling(1.0 / 255)])
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.3)])

n_classes = 4
input_shape = (BATCH_SIZE, 256, 256, 3)
model = models.Sequential([
    resize_and_rescale,
    data_augmentation,
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax')])
model.build(input_shape=input_shape)
model.summary()
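# A plausible next step (hypothetical; the compile/fit cells are not included
# in this extract):
# model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])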
code
128049433/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import tensorflow as tf

IMAGE_SIZE = (256, 256)
BATCH_SIZE = 32
CHANNELS = 3
EPOCHES = 100

dataset = tf.keras.preprocessing.image_dataset_from_directory(
    '/kaggle/input/corn-or-maize-leaf-disease-dataset/data',
    shuffle=True, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
code