{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import torch\n", "from PIL import Image\n", "from transformers import AutoProcessor, AutoModelForVision2Seq\n", "import re\n", "import html\n", "from threading import Thread\n", "from transformers.generation.streamers import TextIteratorStreamer\n", "from docling_core.types.doc import DoclingDocument\n", "from docling_core.types.doc.document import DocTagsDocument\n", "import fitz\n", "import os\n", "import pandas as pd\n", "import json\n", "from tqdm import tqdm" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "# Load model and processor\n", "processor = AutoProcessor.from_pretrained(\"ds4sd/SmolDocling-256M-preview\")\n", "model = AutoModelForVision2Seq.from_pretrained(\n", " \"ds4sd/SmolDocling-256M-preview\", \n", " torch_dtype=torch.bfloat16\n", ").to(\"cuda\" if torch.cuda.is_available() else \"cpu\")" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "def image_to_markdown(image, query_text=\"Convert this page to docling.\"):\n", " \"\"\"\n", " Convert an input image to markdown output using SmolDocling model\n", " \n", " Parameters:\n", " image: Input image file in RGB\n", " query_text (str): Query text to guide the conversion (default: \"Convert this page to docling.\")\n", " \n", " Returns:\n", " str: Markdown output of the converted image\n", " \"\"\"\n", " \n", " # Special handling for code or OTSL content\n", " if \"OTSL\" in query_text or \"code\" in query_text:\n", " # Add padding to image as in the original code\n", " width, height = image.size\n", " pad_w = int(width * 0.1) # 10% padding\n", " pad_h = int(height * 0.1) # 10% padding\n", " corner_pixel = image.getpixel((0, 0))\n", " from PIL import ImageOps\n", " image = ImageOps.expand(image, border=(pad_w, pad_h, pad_w, pad_h), fill=corner_pixel)\n", " \n", " # Prepare input for the model\n", " resulting_messages = [\n", " {\n", " \"role\": \"user\",\n", " \"content\": [{\"type\": \"image\"}] + [\n", " {\"type\": \"text\", \"text\": query_text}\n", " ]\n", " }\n", " ]\n", " \n", " prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)\n", " inputs = processor(text=prompt, images=[[image]], return_tensors=\"pt\").to(model.device)\n", " \n", " # Generate output using streamer for better memory management\n", " streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=False)\n", " generation_args = dict(inputs, streamer=streamer, max_new_tokens=8192)\n", " \n", " thread = Thread(target=model.generate, kwargs=generation_args)\n", " thread.start()\n", " \n", " # Collect the generated output\n", " full_output = \"\"\n", " for new_text in streamer:\n", " full_output += new_text\n", " \n", " # Clean up the output\n", " cleaned_output = full_output.replace(\"\", \"\").strip()\n", " \n", " # Process doctags if present\n", " if any(tag in cleaned_output for tag in [\"\", \"\", \"\", \"\", \"\"]):\n", " doctag_output = cleaned_output\n", " \n", " # Handle chart tags\n", " if \"\" in doctag_output:\n", " doctag_output = doctag_output.replace(\"\", \"\").replace(\"\", \"\")\n", " doctag_output = re.sub(r'()(?!.*)<[^>]+>', r'\\1', doctag_output)\n", " \n", " # Create document and convert to markdown\n", " doc = DoclingDocument(name=\"Document\")\n", " doctags_doc = DocTagsDocument.from_doctags_and_image_pairs([doctag_output], [image])\n", " doc.load_from_doctags(doctags_doc)\n", " \n", " return doc.export_to_markdown()\n", " \n", " # Return the cleaned output if no doctags are present\n", " return cleaned_output" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "def process_pdfs_folder(pdf_folder, output_folder):\n", " \"\"\"\n", " Process all PDFs in a folder, converting each page to markdown immediately and saving results as JSON.\n", " \n", " Parameters:\n", " pdf_folder (str): Path to folder containing PDFs\n", " output_folder (str): Path to save output JSON files\n", " \"\"\"\n", " # Create output folder if it doesn't exist\n", " if not os.path.exists(output_folder):\n", " os.makedirs(output_folder)\n", " \n", " # Get all PDF files in the folder\n", " pdf_files = [f for f in os.listdir(pdf_folder) if f.lower().endswith('.pdf')]\n", " \n", " # Process each PDF file\n", " for pdf_file in tqdm(pdf_files, desc=\"Processing PDFs\"):\n", " pdf_path = os.path.join(pdf_folder, pdf_file)\n", " pdf_name = os.path.splitext(pdf_file)[0]\n", " output_json = os.path.join(output_folder, f\"{pdf_name}.json\")\n", " \n", " # Initialize an empty list to store the data\n", " pdf_data = []\n", " \n", " try:\n", " # Open the PDF\n", " pdf_document = fitz.open(pdf_path)\n", " total_pages = pdf_document.page_count\n", " \n", " print(f\"Processing {pdf_file} ({total_pages} pages)\")\n", " \n", " # Process each page one by one\n", " for page_number in tqdm(range(total_pages), desc=f\"Pages in {pdf_file}\", leave=False):\n", " try:\n", " # Get the page\n", " page = pdf_document[page_number]\n", " \n", " # Convert page to image\n", " pixmap = page.get_pixmap()\n", " image = Image.frombytes(\"RGB\", [pixmap.width, pixmap.height], pixmap.samples)\n", " \n", " # Convert image to markdown immediately\n", " markdown_text = image_to_markdown(image)\n", " \n", " # Display first 100 characters for verification\n", " preview = markdown_text[:100].replace('\\n', ' ')\n", " print(f\"Page {page_number+1}/{total_pages}: {preview}...\")\n", " \n", " # Add to data list\n", " page_data = {\n", " 'pdf_name': pdf_name,\n", " 'slide_number': page_number+1,\n", " 'markdown_text': markdown_text\n", " }\n", " pdf_data.append(page_data)\n", " \n", " # Save JSON after each page\n", " with open(output_json, 'w', encoding='utf-8') as jsonfile:\n", " json.dump(pdf_data, jsonfile, ensure_ascii=False, indent=2)\n", " \n", " except Exception as e:\n", " error_msg = f\"Error processing page {page_number+1} from {pdf_file}: {e}\"\n", " print(error_msg)\n", " # Add error info to data\n", " error_data = {\n", " 'pdf_name': pdf_name,\n", " 'slide_number': page_number+1,\n", " 'markdown_text': f\"ERROR: {str(e)}\"\n", " }\n", " pdf_data.append(error_data)\n", " \n", " # Save JSON after error\n", " with open(output_json, 'w', encoding='utf-8') as jsonfile:\n", " json.dump(pdf_data, jsonfile, ensure_ascii=False, indent=2)\n", " \n", " # Close the PDF after processing\n", " pdf_document.close()\n", " \n", " except Exception as e:\n", " error_msg = f\"Error opening PDF {pdf_file}: {e}\"\n", " print(error_msg)\n", " error_data = {\n", " 'pdf_name': pdf_name,\n", " 'slide_number': 1,\n", " 'markdown_text': f\"ERROR: Failed to process PDF: {str(e)}\"\n", " }\n", " pdf_data.append(error_data)\n", " \n", " # Save JSON after PDF error\n", " with open(output_json, 'w', encoding='utf-8') as jsonfile:\n", " json.dump(pdf_data, jsonfile, ensure_ascii=False, indent=2)\n", " \n", " print(f\"Processing complete. Results saved to {output_folder}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# input_folder = \"/fsx/avijit/projects/datacommonsMA/labormarketreports/pdfs\" ## First convert the ppts to pdf\n", "# output_folder = \"/fsx/avijit/projects/datacommonsMA/labormarketreports/processed_reports\"\n", "\n", "input_folder = \"/fsx/avijit/projects/datacommonsMA/occupational_injury_reports/pdfs\"\n", "output_folder = \"/fsx/avijit/projects/datacommonsMA/occupational_injury_reports/processed_reports\"\n", "\n", "process_pdfs_folder(input_folder,output_folder)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def combine_json_files(folder_path, output_file=\"combined_results.json\"):\n", " \"\"\"\n", " Read individual JSON files from a folder and simply concatenate them,\n", " changing \"pdf_name\" to \"report name\" in each entry.\n", " \n", " Args:\n", " folder_path (str): Path to the folder containing JSON files\n", " output_file (str): Path to save the combined JSON file\n", " \n", " Returns:\n", " list: The combined data list\n", " \"\"\"\n", " import json\n", " from pathlib import Path\n", " \n", " # Initialize data list\n", " combined_data = []\n", " \n", " # Get all JSON files in the folder\n", " folder_path = Path(folder_path)\n", " json_files = list(folder_path.glob(\"*.json\"))\n", " \n", " if not json_files:\n", " print(f\"No JSON files found in {folder_path}\")\n", " return []\n", " \n", " print(f\"Found {len(json_files)} JSON files in {folder_path}\")\n", " \n", " # Read each JSON file\n", " for json_file in json_files:\n", " try:\n", " with open(json_file, \"r\", encoding=\"utf-8\") as f:\n", " file_data = json.load(f)\n", " \n", " # Handle both list and single object formats\n", " if isinstance(file_data, list):\n", " items = file_data\n", " else:\n", " items = [file_data]\n", " \n", " # Rename pdf_name to report name in each item\n", " for item in items:\n", " if \"pdf_name\" in item:\n", " item[\"report name\"] = item.pop(\"pdf_name\")\n", " item[\"page number\"] = item.pop(\"slide_number\")\n", " \n", " # Add to combined data\n", " combined_data.extend(items)\n", " \n", " except Exception as e:\n", " print(f\"Error reading {json_file}: {e}\")\n", " \n", " # Write to file\n", " with open(output_file, \"w\", encoding=\"utf-8\") as f:\n", " json.dump(combined_data, output_folder+'/'+f, indent=2, ensure_ascii=False)\n", " \n", " print(f\"Combined {len(combined_data)} items into {output_file}\")\n", " return combined_data" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 7 JSON files in /fsx/avijit/projects/datacommonsMA/occupational_injury_reports/processed_reports\n", "Error reading /fsx/avijit/projects/datacommonsMA/occupational_injury_reports/processed_reports/combined_reports.json: Extra data: line 73 column 1 (char 109380)\n", "Combined 78 items into occupational_injury_combined_reports.json\n" ] } ], "source": [ "combined_data = combine_json_files(output_folder, \"occupational_injury_combined_reports.json\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "py312", "language": "python", "name": "py312" } }, "nbformat": 4, "nbformat_minor": 2 }