File size: 17,945 Bytes
e6adc05 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 |
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import pandas as pd\n",
"import glob\n",
"import re"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def combine_excel_files(directory_path, output_csv='combined_mcas_data.csv', output_parquet='combined_mcas_data.parquet'):\n",
"    \"\"\"\n",
"    Combine all Excel files in a directory into a single dataset with proper headers.\n",
"\n",
"    Converts the combined data to both CSV and Parquet formats. District name,\n",
"    district code, subject and year are kept as strings; every other column is\n",
"    coerced to numeric (thousands separators such as '1,908' are stripped first).\n",
"\n",
"    Parameters:\n",
"    directory_path (str): Path to the directory containing Excel files\n",
"    output_csv (str): Name of the output CSV file\n",
"    output_parquet (str): Name of the output Parquet file\n",
"    \"\"\"\n",
"\n",
"    def is_string_col(col):\n",
"        \"\"\"Return True when a column must stay textual (names, codes, subject).\"\"\"\n",
"        col_str = str(col).upper()\n",
"        if 'DISTRICT NAME' in col_str or 'SUBJECT' in col_str:\n",
"            return True\n",
"        return 'DISTRICT' in col_str and ('CODE' in col_str or 'ID' in col_str or 'NUMBER' in col_str)\n",
"\n",
"    # Get list of all Excel files in the directory\n",
"    all_files = glob.glob(os.path.join(directory_path, \"*.xlsx\"))\n",
"    if not all_files:\n",
"        print(\"No Excel files were successfully processed\")\n",
"        return\n",
"\n",
"    # First pass - examine the first file to locate the real header row\n",
"    sample_df = pd.read_excel(all_files[0], header=None, nrows=10)\n",
"    print(f\"Preview of first file ({all_files[0]}):\")\n",
"    print(sample_df.head(10))\n",
"\n",
"    # A header row is one where many columns are populated; prefer the row\n",
"    # that carries the district/school name label.\n",
"    non_empty_counts = sample_df.notna().sum(axis=1)\n",
"    potential_header_rows = non_empty_counts[non_empty_counts > 5].index.tolist()\n",
"    if potential_header_rows:\n",
"        header_row = potential_header_rows[1] if len(potential_header_rows) > 1 else potential_header_rows[0]\n",
"        for i in potential_header_rows:\n",
"            row_text = str(sample_df.iloc[i].values).upper()\n",
"            if 'DISTRICT NAME' in row_text or 'SCHOOL NAME' in row_text:\n",
"                header_row = i\n",
"                break\n",
"        print(f\"Detected header row at index {header_row}: {sample_df.iloc[header_row].tolist()}\")\n",
"    else:\n",
"        header_row = 0\n",
"        print(\"Could not detect header row, using first row\")\n",
"\n",
"    dfs = []\n",
"    all_columns_sets = []\n",
"    first_file_columns = None  # column order of first readable file, used for final ordering\n",
"\n",
"    for file in all_files:\n",
"        try:\n",
"            # Extract the year from filenames like NextGenMCAS_2018.xlsx\n",
"            year_match = re.search(r'NextGenMCAS_(\\d{4})\\.xlsx', os.path.basename(file))\n",
"            year = year_match.group(1) if year_match else \"Unknown\"\n",
"\n",
"            # Peek at the header only, so identifier-like columns can be read\n",
"            # as strings (preserves leading zeros in district codes).\n",
"            temp_df = pd.read_excel(file, header=header_row, nrows=0)\n",
"            string_cols = [col for col in temp_df.columns if is_string_col(col)]\n",
"\n",
"            df = pd.read_excel(file, header=header_row, dtype={col: str for col in string_cols})\n",
"\n",
"            # Clean column names: strip whitespace and embedded newlines\n",
"            df.columns = [col.strip().replace('\\n', ' ') if isinstance(col, str) else str(col) for col in df.columns]\n",
"\n",
"            if first_file_columns is None:\n",
"                first_file_columns = df.columns.tolist()\n",
"\n",
"            # Add year column as string\n",
"            df['Year'] = str(year)\n",
"\n",
"            all_columns_sets.append(set(df.columns))\n",
"            dfs.append(df)\n",
"            print(f\"Successfully processed: {file} (Year: {year})\")\n",
"        except Exception as e:\n",
"            print(f\"Error processing {file}: {e}\")\n",
"\n",
"    if not dfs:\n",
"        print(\"No Excel files were successfully processed\")\n",
"        return\n",
"\n",
"    # Keep only the columns shared by every file, in first-file order\n",
"    # (indexing with a raw set would make the column order non-deterministic).\n",
"    common_columns = set.intersection(*all_columns_sets)\n",
"    print(f\"Common columns across all files: {common_columns}\")\n",
"    common_columns.add('Year')\n",
"    ordered_common = [col for col in first_file_columns if col in common_columns]\n",
"    if 'Year' not in ordered_common:\n",
"        ordered_common.append('Year')\n",
"    dfs = [df[ordered_common] for df in dfs]\n",
"\n",
"    combined_df = pd.concat(dfs, ignore_index=True)\n",
"\n",
"    # Remove rows that are repeated header rows from other sheets: a row is a\n",
"    # header row when a cell equals its own column name (case-insensitive).\n",
"    header_mask = pd.Series(False, index=combined_df.index)\n",
"    for col in ordered_common:\n",
"        if col != 'Year':  # Year was added by us, never a sheet header\n",
"            header_mask |= combined_df[col].astype(str).str.strip().str.lower() == col.lower()\n",
"    print(f\"Removing {int(header_mask.sum())} possible header rows\")\n",
"    combined_df = combined_df[~header_mask]\n",
"\n",
"    # Classify columns: identifiers stay strings, everything else goes numeric\n",
"    district_col = None\n",
"    string_columns = ['Year']\n",
"    for col in combined_df.columns:\n",
"        if is_string_col(col):\n",
"            string_columns.append(col)\n",
"            if 'DISTRICT NAME' in col.upper():\n",
"                district_col = col\n",
"\n",
"    for col in combined_df.columns:\n",
"        if col not in string_columns:\n",
"            # Strip thousands separators (e.g. '1,908') before coercing;\n",
"            # otherwise those values silently become NaN.\n",
"            cleaned = combined_df[col].astype(str).str.replace(',', '', regex=False)\n",
"            combined_df[col] = pd.to_numeric(cleaned, errors='coerce')\n",
"\n",
"    # Reorder columns to put DISTRICT NAME and Year first\n",
"    if district_col:\n",
"        remaining_cols = [col for col in combined_df.columns if col not in (district_col, 'Year')]\n",
"        combined_df = combined_df[[district_col, 'Year'] + remaining_cols]\n",
"\n",
"    # Export to CSV\n",
"    csv_path = os.path.join(directory_path, output_csv)\n",
"    combined_df.to_csv(csv_path, index=False)\n",
"    print(f\"Successfully created CSV: {csv_path}\")\n",
"\n",
"    # Export to Parquet\n",
"    parquet_path = os.path.join(directory_path, output_parquet)\n",
"    combined_df.to_parquet(parquet_path, index=False)\n",
"    print(f\"Successfully created Parquet: {parquet_path}\")\n",
"\n",
"    print(f\"Combined {len(dfs)} Excel files with {len(combined_df)} total rows\")\n",
"    print(f\"Final columns: {', '.join(combined_df.columns)}\")\n",
"\n",
"    # Print data types for verification\n",
"    print(\"\\nData types in final dataset:\")\n",
"    for col, dtype in combined_df.dtypes.items():\n",
"        print(f\"{col}: {dtype}\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Preview of first file (/fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2018.xlsx):\n",
" 0 1 2 \\\n",
"0 2018 Next Generation MCAS Achievement Results NaN NaN \n",
"1 District Name District Code Subject \n",
"2 Abby Kelley Foster Charter Public (District) 04450000 ELA \n",
"3 Abby Kelley Foster Charter Public (District) 04450000 MATH \n",
"4 Abington 00010000 ELA \n",
"5 Abington 00010000 MATH \n",
"6 Academy Of the Pacific Rim Charter Public (Dis... 04120000 ELA \n",
"7 Academy Of the Pacific Rim Charter Public (Dis... 04120000 MATH \n",
"8 Acton-Boxborough 06000000 ELA \n",
"9 Acton-Boxborough 06000000 MATH \n",
"\n",
" 3 4 5 6 7 8 9 \\\n",
"0 NaN NaN NaN NaN NaN NaN NaN \n",
"1 M+E # M+E % E # E % M # M % PM # \n",
"2 313 44 31 4 282 39 347 \n",
"3 266 37 20 3 246 34 390 \n",
"4 554 55 90 9 464 46 374 \n",
"5 482 48 46 5 436 44 440 \n",
"6 138 44 15 5 123 39 151 \n",
"7 103 33 10 3 93 30 175 \n",
"8 1,908 73 522 20 1,386 53 622 \n",
"9 1,882 72 574 22 1,308 50 619 \n",
"\n",
" 10 11 12 13 14 \\\n",
"0 NaN NaN NaN NaN NaN \n",
"1 PM % NM # NM % No. of Students Included Avg. Scaled Score \n",
"2 48 56 8 716 497.3 \n",
"3 54 61 9 717 494.6 \n",
"4 37 75 7 1,003 502.3 \n",
"5 44 79 8 1,001 498.7 \n",
"6 48 26 8 315 497.3 \n",
"7 56 37 12 315 492.3 \n",
"8 24 92 4 2,622 513.3 \n",
"9 24 118 5 2,619 512.7 \n",
"\n",
" 15 16 \n",
"0 NaN NaN \n",
"1 SGP Included In SGP \n",
"2 48.7 586 \n",
"3 52.9 587 \n",
"4 50.2 784 \n",
"5 47.3 785 \n",
"6 57.7 289 \n",
"7 52.9 289 \n",
"8 58.9 2,069 \n",
"9 58.8 2,073 \n",
"Detected header row at index 1: ['District Name', 'District Code', 'Subject', 'M+E #', 'M+E %', 'E #', 'E %', 'M #', 'M %', 'PM #', 'PM %', 'NM #', 'NM %', 'No. of Students Included', 'Avg. Scaled Score', 'SGP', 'Included In SGP']\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2018.xlsx (Year: 2018)\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2017.xlsx (Year: 2017)\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2020.xlsx (Year: 2020)\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2024.xlsx (Year: 2024)\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2021.xlsx (Year: 2021)\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2022.xlsx (Year: 2022)\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2023.xlsx (Year: 2023)\n",
"Successfully processed: /fsx/avijit/projects/datacommonsMA/mcas_results/NextGenMCAS_2019.xlsx (Year: 2019)\n",
"Common columns across all files: {'E %', 'M+E #', 'PM #', 'SGP', 'No. of Students Included', 'NM #', 'District Code', 'Avg. Scaled Score', 'M #', 'Year', 'Included In SGP', 'PM %', 'E #', 'NM %', 'M+E %', 'Subject', 'M %', 'District Name'}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/tmp/ipykernel_2144425/3133257492.py:108: FutureWarning: The behavior of DataFrame concatenation with empty or all-NA entries is deprecated. In a future version, this will no longer exclude empty or all-NA columns when determining the result dtypes. To retain the old behavior, exclude the relevant entries before the concat operation.\n",
" combined_df = pd.concat(dfs, ignore_index=True)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Removing 0 possible header rows\n",
"Successfully created CSV: /fsx/avijit/projects/datacommonsMA/mcas_results/combined_mcas_data.csv\n",
"Successfully created Parquet: /fsx/avijit/projects/datacommonsMA/mcas_results/combined_mcas_data.parquet\n",
"Combined 8 Excel files with 6741 total rows\n",
"Final columns: District Name, Year, District Code, Subject, M+E #, M+E %, E #, E %, M #, M %, PM #, PM %, NM #, NM %, No. of Students Included, Avg. Scaled Score, SGP, Included In SGP\n",
"\n",
"Data types in final dataset:\n",
"District Name: object\n",
"Year: object\n",
"District Code: object\n",
"Subject: object\n",
"M+E #: float64\n",
"M+E %: int64\n",
"E #: float64\n",
"E %: int64\n",
"M #: float64\n",
"M %: int64\n",
"PM #: float64\n",
"PM %: int64\n",
"NM #: float64\n",
"NM %: int64\n",
"No. of Students Included: float64\n",
"Avg. Scaled Score: float64\n",
"SGP: float64\n",
"Included In SGP: float64\n"
]
}
],
"source": [
"# NOTE(review): hardcoded absolute path - only valid on this machine; consider\n",
"# making it configurable (env var or a config cell constant).\n",
"data_folder = \"/fsx/avijit/projects/datacommonsMA/mcas_results\"\n",
"combine_excel_files(data_folder)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "py312",
"language": "python",
"name": "py312"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
|