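"""Scrape NVIDIA GPU specification pages and export the results in many formats.

Pages are fetched with Selenium when it is available (for JavaScript-heavy
pages) and with plain requests otherwise; specs are extracted from spec
tables, free-text patterns, headings, and JSON-LD, then written out as CSV,
JSON, Excel, Parquet, Avro, and several other formats.
"""
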
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import json
import re
import logging
import sqlite3
import pickle

import fastavro
import msgpack
import yaml
from scipy.io import savemat
from datasets import Dataset

# Function to scrape data and return as a Hugging Face Dataset
def scrape_data(url):
    response = requests.get(url, timeout=30)
    soup = BeautifulSoup(response.text, 'html.parser')
    
    # Example scraping logic (to be customized)
    data = []
    for item in soup.find_all('div', class_='gpu-item'):
        gpu_info = {
            'gpu_name': item.find('h2').text,
            'architecture': item.find('span', class_='architecture').text,
            'memory_size': item.find('span', class_='memory-size').text,
            # Add more fields as necessary
        }
        data.append(gpu_info)
    
    # Convert to Hugging Face Dataset
    return Dataset.from_list(data)
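
# Illustrative usage of scrape_data (hypothetical URL; the CSS selectors above
# must be customized to match the target page's markup):
#   ds = scrape_data("https://example.com/gpu-list")
#   print(ds[0])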


# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Try to import Selenium components - they'll be used if available
try:
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.chrome.service import Service
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from webdriver_manager.chrome import ChromeDriverManager
    SELENIUM_AVAILABLE = True
    logger.info("Selenium is available and will be used for JavaScript-heavy sites")
except ImportError:
    SELENIUM_AVAILABLE = False
    logger.warning("Selenium not available. Install with: pip install selenium webdriver-manager")

class NvidiaGpuScraper:
    def __init__(self, use_selenium=True):
        self.use_selenium = use_selenium and SELENIUM_AVAILABLE
        self.driver = self._setup_driver() if self.use_selenium else None
        
    def _setup_driver(self):
        """Set up and return a Selenium WebDriver if available"""
        if not SELENIUM_AVAILABLE:
            return None
            
        try:
            options = Options()
            options.add_argument('--headless')
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument('--disable-gpu')
            options.add_argument("--window-size=1920,1080")
            options.add_argument("--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36")
            
            service = Service(ChromeDriverManager().install())
            driver = webdriver.Chrome(service=service, options=options)
            return driver
        except Exception as e:
            logger.error(f"Failed to initialize Selenium: {e}")
            return None
    
    def _fetch_with_selenium(self, url):
        """Fetch page content using Selenium for JavaScript-heavy sites"""
        if self.driver is None:
            return None
            
        try:
            logger.info(f"Fetching with Selenium: {url}")
            self.driver.get(url)
            # Wait for the page to load completely
            WebDriverWait(self.driver, 20).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            
            # Scroll down to load lazy content
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
            time.sleep(1)
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)  # Additional wait for dynamic content
            
            # Expand any "See more specifications" buttons if they exist
            try:
                see_more_buttons = self.driver.find_elements(By.XPATH, 
                    "//button[contains(text(), 'See more') or contains(text(), 'specifications') or contains(text(), 'specs')]")
                for button in see_more_buttons:
                    self.driver.execute_script("arguments[0].click();", button)
                    time.sleep(1)
            except Exception as e:
                logger.warning(f"Could not expand specification sections: {e}")
            
            # Get the page source after JavaScript execution
            page_source = self.driver.page_source
            return BeautifulSoup(page_source, 'html.parser')
        except Exception as e:
            logger.error(f"Selenium error for {url}: {e}")
            return None
    
    def _fetch_with_requests(self, url):
        """Fetch page content using requests library"""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Referer': 'https://www.google.com/',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0',
        }
        
        for attempt in range(3):
            try:
                logger.info(f"Fetching with requests: {url}")
                response = requests.get(url, timeout=30, headers=headers)
                response.raise_for_status()
                return BeautifulSoup(response.content, 'html.parser')
            except requests.exceptions.RequestException as e:
                wait_time = 2 ** attempt
                logger.warning(f"Request error for {url}: {e}. Retrying in {wait_time} seconds...")
                time.sleep(wait_time)
        
        return None
    
    def fetch_page(self, url):
        """Fetch page content, trying Selenium first if available"""
        if not url:
            raise ValueError("The URL provided is empty.")
        if self.use_selenium:
            soup = self._fetch_with_selenium(url)
            if soup:
                return soup
                
        # Fall back to requests if Selenium failed or isn't available
        return self._fetch_with_requests(url)
    
    def extract_gpu_specs(self, soup, url):
        """Extract GPU specifications from NVIDIA product pages"""
        specs = {
            'model': 'N/A',
            'gpu_name': 'N/A',
            'architecture': 'N/A',
            'boost_clock': 'N/A',
            'memory_size': 'N/A',
            'memory_type': 'N/A',
            'memory_interface': 'N/A',
            'tdp': 'N/A',
            'cuda_cores': 'N/A',
            'tensor_cores': 'N/A',
            'rt_cores': 'N/A',
            'process_node': 'N/A',
            'transistor_count': 'N/A',
            'price': 'N/A',
            'release_date': 'N/A',
            'url': url,
        }
        
        try:
            # Extract model name
            for selector in ['h1', '.product-title', '.product-name', '.prod-title']:
                title_element = soup.select_one(selector)
                if title_element and title_element.text.strip():
                    specs['model'] = title_element.text.strip()
                    # Try to extract GPU name (e.g., RTX 4090)
                    gpu_match = re.search(r'(GTX|RTX)\s+(\d{3,4}\s*(?:Ti|SUPER)?)',
                                          specs['model'], re.IGNORECASE)
                    if gpu_match:
                        specs['gpu_name'] = f"{gpu_match.group(1)} {gpu_match.group(2)}".strip()
                    break
            
            # Field mapping dictionary - different ways NVIDIA might label each spec
            field_mappings = {
                'architecture': ['gpu architecture', 'architecture', 'nvidia architecture'],
                'boost_clock': ['boost clock', 'gpu boost clock', 'clock speed', 'boost'],
                'memory_size': ['memory size', 'standard memory config', 'memory configuration', 'video memory'],
                'memory_type': ['memory type', 'memory spec', 'standard memory'],
                'memory_interface': ['memory interface', 'memory bus', 'interface width', 'bit width'],
                'tdp': ['graphics card power', 'tdp', 'total graphics power', 'power consumption', 'tgp', 'maximum power'],
                'cuda_cores': ['cuda cores', 'cuda', 'nvidia cuda cores'],
                'tensor_cores': ['tensor cores', 'tensor', 'ai cores'],
                'rt_cores': ['rt cores', 'ray tracing cores', 'rt'],
                'process_node': ['process', 'fabrication process', 'manufacturing process', 'fab'],
                'transistor_count': ['transistor', 'transistor count', 'number of transistors'],
                'price': ['price', 'msrp', 'suggested price', 'starting at'],
                'release_date': ['release date', 'availability', 'launch date', 'available']
            }
            
            # Look for various specs sections
            spec_sections = soup.select('.specs-section, .tech-specs, .product-specs, .specs, .spec-table, .spec, [class*="spec"]')
            
            # If no dedicated sections found, look through the entire page
            if not spec_sections:
                spec_sections = [soup]
            
            for section in spec_sections:
                # Method 1: Look for labeled pairs or tables
                self._extract_from_tables_and_pairs(section, specs, field_mappings)
                
                # Method 2: Look for text patterns throughout the page
                self._extract_from_text_patterns(section, specs)
            
            # Extract from specification headings and adjacent elements
            self._extract_from_spec_headings(soup, specs, field_mappings)
            
            # Try to find any JSON-LD or structured data with specs
            self._extract_from_json_ld(soup, specs)
            
            # Clean and standardize specs
            self._clean_specs(specs)
            
            logger.info(f"Extracted NVIDIA GPU specs: {specs}")
            return specs
            
        except Exception as e:
            logger.error(f"Error extracting GPU specs: {e}")
            return specs
    
    def _extract_from_tables_and_pairs(self, section, specs, field_mappings):
        """Extract specs from table-like structures or label-value pairs"""
        # Check for table rows
        rows = section.select('tr, .spec-row, .specs-row, [class*="row"]')
        for row in rows:
            cells = row.select('th, td, .spec-label, .spec-value, .specs-label, .specs-value')
            if len(cells) >= 2:
                header = cells[0].text.strip().lower()
                value = cells[1].text.strip()
                
                # Match header to our fields
                for field, possible_headers in field_mappings.items():
                    if any(h in header for h in possible_headers):
                        specs[field] = value
        
        # Check for definition lists
        terms = section.select('dt, .term, .specs-term')
        for term in terms:
            header = term.text.strip().lower()
            # find_next_sibling takes tag names or attribute filters, not CSS selectors
            value_el = term.find_next_sibling('dd') or term.find_next_sibling(class_=re.compile(r'definition'))
            if value_el:
                value = value_el.text.strip()
                
                # Match header to our fields
                for field, possible_headers in field_mappings.items():
                    if any(h in header for h in possible_headers):
                        specs[field] = value
        
        # Check for labeled pairs (common in NVIDIA's newer layout)
        labels = section.select('.specs-label, .spec-label, .specs-name, .label, [class*="label"]')
        for label in labels:
            header = label.text.strip().lower()
            # Try to find the adjacent value element
            value_el = label.find_next_sibling(class_=re.compile(r'value'))
            if value_el:
                value = value_el.text.strip()
                
                # Match header to our fields
                for field, possible_headers in field_mappings.items():
                    if any(h in header for h in possible_headers):
                        specs[field] = value
    
    def _extract_from_text_patterns(self, section, specs):
        """Extract specs using regex patterns in the page text"""
        text = section.get_text(' ', strip=True)
        
        # Extract CUDA cores
        cuda_matches = re.search(r'(\d[\d,]+)\s*(?:nvidia)?\s*cuda\s*cores', text, re.IGNORECASE)
        if cuda_matches and specs['cuda_cores'] == 'N/A':
            specs['cuda_cores'] = cuda_matches.group(1)
        
        # Extract Tensor cores
        tensor_matches = re.search(r'(\d+)\s*(?:nvidia)?\s*tensor\s*cores', text, re.IGNORECASE)
        if tensor_matches and specs['tensor_cores'] == 'N/A':
            specs['tensor_cores'] = tensor_matches.group(1)
        
        # Extract RT cores
        rt_matches = re.search(r'(\d+)\s*(?:nvidia)?\s*rt\s*cores', text, re.IGNORECASE)
        if rt_matches and specs['rt_cores'] == 'N/A':
            specs['rt_cores'] = rt_matches.group(1)
        
        # Extract memory size
        mem_matches = re.search(r'(\d+)\s*GB\s*(G?DDR\d+X?)', text, re.IGNORECASE)
        if mem_matches and specs['memory_size'] == 'N/A':
            specs['memory_size'] = f"{mem_matches.group(1)} GB"
            if specs['memory_type'] == 'N/A':
                specs['memory_type'] = mem_matches.group(2)
        
        # Extract boost clock
        clock_matches = re.search(r'boost\s*clock\s*(?:up\s*to)?\s*:?\s*([\d.]+)\s*(MHz|GHz)', text, re.IGNORECASE)
        if clock_matches and specs['boost_clock'] == 'N/A':
            # Capture the unit directly from the text instead of guessing it
            specs['boost_clock'] = f"{clock_matches.group(1)} {clock_matches.group(2)}"
        
        # Extract memory interface
        interface_matches = re.search(r'(\d+)[\s-]*bit(?:\s*memory)?\s*(?:interface|bus)', text, re.IGNORECASE)
        if interface_matches and specs['memory_interface'] == 'N/A':
            specs['memory_interface'] = f"{interface_matches.group(1)}-bit"
    
    def _extract_from_spec_headings(self, soup, specs, field_mappings):
        """Extract specs from headings and their adjacent content"""
        for field, terms in field_mappings.items():
            if specs[field] != 'N/A':  # Skip if already found
                continue
                
            for term in terms:
                # Look for headings containing the term (soupsieve supports
                # :-soup-contains(); the jQuery-style :contains() does not work)
                headers = soup.select(
                    f'h1:-soup-contains("{term}"), h2:-soup-contains("{term}"), '
                    f'h3:-soup-contains("{term}"), h4:-soup-contains("{term}"), '
                    f'h5:-soup-contains("{term}")')
                
                for header in headers:
                    # Look at next sibling or child for the value
                    value_el = header.find_next()
                    if value_el:
                        specs[field] = value_el.text.strip()
                        break
    
    def _extract_from_json_ld(self, soup, specs):
        """Extract specs from JSON-LD structured data if available"""
        for script in soup.select('script[type="application/ld+json"]'):
            try:
                data = json.loads(script.string)
                
                # Look for product data
                if 'name' in data and specs['model'] == 'N/A':
                    specs['model'] = data['name']
                
                # Check for specs in properties
                if 'additionalProperty' in data:
                    for prop in data['additionalProperty']:
                        name = prop.get('name', '').lower()
                        value = prop.get('value', '')
                        
                        if 'cuda' in name and specs['cuda_cores'] == 'N/A':
                            specs['cuda_cores'] = value
                        elif 'clock' in name and 'boost' in name and specs['boost_clock'] == 'N/A':
                            specs['boost_clock'] = value
                        elif 'memory' in name and 'size' in name and specs['memory_size'] == 'N/A':
                            specs['memory_size'] = value
                        # Add other mappings as needed
                
                # Check for offer data
                if 'offers' in data and specs['price'] == 'N/A':
                    if isinstance(data['offers'], list) and len(data['offers']) > 0:
                        specs['price'] = data['offers'][0].get('price', 'N/A')
                    elif isinstance(data['offers'], dict):
                        specs['price'] = data['offers'].get('price', 'N/A')
            except (json.JSONDecodeError, TypeError, KeyError):
                # skip malformed or empty JSON-LD blocks
                continue
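
    # Illustrative JSON-LD shape the method above looks for (schema.org Product;
    # real pages vary and may omit any of these fields):
    # {"@type": "Product", "name": "GeForce RTX ...",
    #  "additionalProperty": [{"name": "CUDA Cores", "value": "..."}],
    #  "offers": {"price": "..."}}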
    
    def _clean_specs(self, specs):
        """Clean and standardize the extracted specs"""
        # Clean CUDA cores (remove commas)
        if specs['cuda_cores'] != 'N/A':
            specs['cuda_cores'] = specs['cuda_cores'].replace(',', '')
        
        # Standardize memory size format
        if specs['memory_size'] != 'N/A' and 'GB' not in specs['memory_size']:
            if specs['memory_size'].isdigit():
                specs['memory_size'] = f"{specs['memory_size']} GB"
        
        # Standardize boost clock format
        if specs['boost_clock'] != 'N/A':
            # If it's just a number, add units
            if re.match(r'^\d+(\.\d+)?$', specs['boost_clock']):
                value = float(specs['boost_clock'])
                if value > 100:  # Likely MHz
                    specs['boost_clock'] = f"{value} MHz"
                else:  # Likely GHz
                    specs['boost_clock'] = f"{value} GHz"
    
    def scrape_gpu(self, url):
        """Scrape a single GPU product page"""
        if not url:
            raise ValueError("The URL provided is empty.")
        soup = self.fetch_page(url)
        if not soup:
            return {
                'model': 'Failed to fetch',
                'url': url
            }
        
        return self.extract_gpu_specs(soup, url)
    
    def scrape_multiple_gpus(self, urls):
        """Scrape multiple GPU product pages"""
        if not urls:
            raise ValueError("The list of URLs is empty.")
        results = []
        
        for url in urls:
            try:
                specs = self.scrape_gpu(url)
                results.append(specs)
                # Be polite with a delay between requests
                time.sleep(2)
            except Exception as e:
                logger.error(f"Error processing {url}: {e}")
                results.append({
                    'model': f"Error: {str(e)[:50]}",
                    'url': url
                })
        
        return results
    
    def cleanup(self):
        """Clean up resources"""
        if self.driver:
            self.driver.quit()
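
# Minimal standalone usage sketch (assumes chromedriver is obtainable when
# use_selenium=True; set use_selenium=False to use plain requests only):
#
#   scraper = NvidiaGpuScraper(use_selenium=False)
#   try:
#       print(scraper.scrape_gpu("https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/"))
#   finally:
#       scraper.cleanup()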

# Main execution function
def main():
    # NVIDIA GPU product URLs - focused on specific product pages
    nvidia_urls = [
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/",
    ]
    
    # Create the scraper and run
    scraper = NvidiaGpuScraper(use_selenium=SELENIUM_AVAILABLE)
    
    try:
        # Scrape the GPUs
        results = scraper.scrape_multiple_gpus(nvidia_urls)
        
        # Create and save DataFrame
        df = pd.DataFrame(results)
        df.to_csv('nvidia_gpus.csv', index=False)
        df.to_json('nvidia_gpus.json', orient='records')  # JSON array; NDJSON is written below
        try:
            df.to_excel('nvidia_gpus.xlsx', index=False)  # needs openpyxl or xlsxwriter
        except Exception as e:
            logger.warning(f"Failed to save as Excel: {e}")
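
        # Optional dependencies assumed by the exporters in this section:
        # pyarrow (parquet/orc/feather), tables (hdf5), lxml (xml), PyYAML,
        # scipy, msgpack, fastavro, pyreadstat (sav/xpt). Each save is wrapped
        # in try/except so a missing library only logs a warning.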
        
        # Save DataFrame in various formats
        try:
            df.to_parquet('nvidia_gpus.parquet')
        except Exception as e:
            logger.warning(f"Failed to save as Parquet: {e}")

        try:
            # Cast all values to strings so the records match the all-string
            # schema; rows from failed scrapes otherwise carry float NaN values
            records = df.astype(str).to_dict(orient='records')
            # Define Avro schema: every column is a nullable string
            schema = {
                'type': 'record',
                'name': 'GPU',
                'fields': [
                    {'name': col, 'type': ['string', 'null']} for col in df.columns
                ]
            }
            # Write to Avro file
            with open('nvidia_gpus.avro', 'wb') as avro_file:
                fastavro.writer(avro_file, schema, records)
        except Exception as e:
            logger.warning(f"Failed to save as Avro: {e}")

        try:
            df.to_orc('nvidia_gpus.orc')
        except Exception as e:
            logger.warning(f"Failed to save as ORC: {e}")

        try:
            df.to_hdf('nvidia_gpus.h5', key='df', mode='w')
        except Exception as e:
            logger.warning(f"Failed to save as HDF5: {e}")

        try:
            with sqlite3.connect('nvidia_gpus.db') as conn:
                df.to_sql('gpus', conn, if_exists='replace', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as SQLite: {e}")

        try:
            df.to_xml('nvidia_gpus.xml')
        except Exception as e:
            logger.warning(f"Failed to save as XML: {e}")

        try:
            with open('nvidia_gpus.yaml', 'w') as yaml_file:
                yaml.dump(df.to_dict(orient='records'), yaml_file)
        except Exception as e:
            logger.warning(f"Failed to save as YAML: {e}")

        try:
            with open('nvidia_gpus.pkl', 'wb') as pickle_file:
                pickle.dump(df, pickle_file)
        except Exception as e:
            logger.warning(f"Failed to save as Pickle: {e}")

        try:
            savemat('nvidia_gpus.mat', {'gpus': df.to_dict(orient='records')})
        except Exception as e:
            logger.warning(f"Failed to save as MAT: {e}")

        try:
            df.to_csv('nvidia_gpus.tsv', sep='\t', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as TSV: {e}")

        try:
            df.to_json('nvidia_gpus.ndjson', orient='records', lines=True)
        except Exception as e:
            logger.warning(f"Failed to save as NDJSON: {e}")

        try:
            # pandas has no ARFF writer; emit a minimal header by hand, then the rows as CSV
            with open('nvidia_gpus.arff', 'w') as arff_file:
                arff_file.write('@RELATION gpus\n')
                arff_file.writelines(f'@ATTRIBUTE {col} STRING\n' for col in df.columns)
                arff_file.write('@DATA\n')
                df.to_csv(arff_file, index=False, header=False)
        except Exception as e:
            logger.warning(f"Failed to save as ARFF: {e}")

        try:
            # Convert DataFrame to dictionary
            data = df.to_dict(orient='records')
            # Write to MessagePack file
            with open('nvidia_gpus.msgpack', 'wb') as msgpack_file:
                msgpack.pack(data, msgpack_file)
        except Exception as e:
            logger.warning(f"Failed to save as MessagePack: {e}")

        try:
            # pandas has no Protocol Buffers writer (the old code silently wrote a
            # pickle with a .protobuf extension); save Arrow/Feather instead
            df.to_feather('nvidia_gpus.feather')
        except Exception as e:
            logger.warning(f"Failed to save as Feather: {e}")

        try:
            # Stata has a real writer in pandas; version 118 supports Unicode strings
            df.to_stata('nvidia_gpus.dta', write_index=False, version=118)
        except Exception as e:
            logger.warning(f"Failed to save as Stata DTA: {e}")

        try:
            # pandas cannot write SAS or SPSS files; pyreadstat (if installed) can
            import pyreadstat
            pyreadstat.write_xport(df, 'nvidia_gpus.xpt')  # SAS transport file
            pyreadstat.write_sav(df, 'nvidia_gpus.sav')    # SPSS
        except Exception as e:
            logger.warning(f"Failed to save as SAS/SPSS: {e}")

        print("\nResults:")
        print("\nResults:")
        print(df)
        
        # Print summary
        successful = sum(1 for spec in results
                         if spec.get('model') not in ('N/A', 'Failed to fetch')
                         and not str(spec.get('model', '')).startswith('Error'))
        print(f"\nSummary: Successfully scraped {successful} out of {len(results)} NVIDIA GPUs")
        
        return df
        
    finally:
        # Always clean up resources
        scraper.cleanup()

if __name__ == "__main__":
    main()