|
import io
from typing import Dict, Optional

import requests
import torch
from PIL import Image
|
|
|
|
|
REQUESTS_HEADERS = { |
|
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" |
|
} |
|
|
|
|
|
def download_image_as_pil(url: str, timeout: int = 10) -> Optional[Image.Image]:
    """Download an image from *url* and return it as a PIL Image.

    Args:
        url: HTTP(S) URL of the image to fetch.
        timeout: Per-request timeout in seconds.

    Returns:
        The decoded ``PIL.Image.Image`` on success, or ``None`` if the
        request fails, the server does not answer with HTTP 200, or the
        payload cannot be decoded as an image (best-effort semantics).
    """
    try:
        response = requests.get(url, headers=REQUESTS_HEADERS, timeout=timeout)
        if response.status_code == 200:
            # Buffer the full body: Image.open(response.raw) is unreliable
            # because the raw stream is not content-decoded (gzip/deflate)
            # and would hold the connection open while PIL reads lazily.
            return Image.open(io.BytesIO(response.content))
    except (requests.RequestException, OSError):
        # RequestException covers network/timeout errors; OSError covers
        # PIL's UnidentifiedImageError and truncated-data failures.
        return None
    # Non-200 response: no image to return.
    return None
|
|
|
|
|
def analyze_model_parameters(model: torch.nn.Module) -> Dict:
    """Summarize a model's parameters by count, dtype, and memory usage.

    Args:
        model: Any ``torch.nn.Module``.

    Returns:
        A dict with:
            - ``total_params``: total number of parameter elements.
            - ``param_types``: per-dtype ``{count, percentage, memory_mb}``,
              keyed by ``str(dtype)`` (e.g. ``"torch.float32"``).
            - ``device_info``: device of the first parameter (``None`` for a
              parameterless model), CUDA availability, and — when CUDA is
              available — device name and memory statistics for device 0.
    """
    total_params = 0
    param_type_counts: Dict[torch.dtype, int] = {}
    param_type_bytes: Dict[torch.dtype, int] = {}
    device = None

    for param in model.parameters():
        numel = param.numel()
        total_params += numel
        dtype = param.dtype
        param_type_counts[dtype] = param_type_counts.get(dtype, 0) + numel
        # element_size() works for every dtype; torch.finfo would raise
        # TypeError on integer dtypes (e.g. int8 quantized weights).
        param_type_bytes[dtype] = (
            param_type_bytes.get(dtype, 0) + numel * param.element_size()
        )
        if device is None:
            # Remember the first parameter's device instead of calling
            # next(model.parameters()), which raises StopIteration on a
            # parameterless model.
            device = param.device

    results = {
        "total_params": total_params,
        "param_types": {},
        "device_info": {
            "device": device,
            "cuda_available": torch.cuda.is_available(),
        },
    }

    for dtype, count in param_type_counts.items():
        # Guard the empty-model case so we never divide by zero.
        percentage = (count / total_params) * 100 if total_params else 0.0
        results["param_types"][str(dtype)] = {
            "count": count,
            "percentage": percentage,
            "memory_mb": param_type_bytes[dtype] / (1024 * 1024),
        }

    if torch.cuda.is_available():
        results["device_info"].update({
            "cuda_device": torch.cuda.get_device_name(0),
            "cuda_memory_allocated_mb": torch.cuda.memory_allocated(0) / 1024**2,
            "cuda_memory_cached_mb": torch.cuda.memory_reserved(0) / 1024**2,
        })

    return results