File size: 1,884 Bytes
88e0bae |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 |
import io
from typing import Dict, Optional

import requests
import torch
from PIL import Image
# Browser-like User-Agent sent with every image download; many image hosts
# refuse or throttle requests that carry a generic HTTP-client UA.
REQUESTS_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
def download_image_as_pil(url: str, timeout: int = 10) -> "Optional[Image.Image]":
    """Download *url* and return it as a PIL image, or ``None`` on failure.

    Best-effort: any network error, non-200 status, or undecodable image
    results in ``None`` rather than an exception.

    Args:
        url: Direct link to the image resource.
        timeout: Per-request timeout in seconds passed to ``requests.get``.

    Returns:
        The decoded ``PIL.Image.Image``, or ``None`` if the download or
        decode failed.
    """
    try:
        response = requests.get(
            url, headers=REQUESTS_HEADERS, timeout=timeout
        )
        if response.status_code == 200:
            # Use the fully-read, transfer-decoded body. The previous
            # Image.open(response.raw) read the raw socket stream, which
            # bypasses Content-Encoding (gzip/deflate) decoding and can
            # hand PIL a corrupt or truncated stream.
            return Image.open(io.BytesIO(response.content))
    except (requests.RequestException, OSError):
        # requests.RequestException: network/timeout/invalid-URL errors.
        # OSError: covers PIL's UnidentifiedImageError and truncated reads.
        pass
    return None
def analyze_model_parameters(model: torch.nn.Module) -> Dict:
    """Summarize parameter count, dtype mix, and memory footprint of *model*.

    Args:
        model: Any ``torch.nn.Module``.

    Returns:
        A dict with:
            - ``total_params``: total number of parameter elements.
            - ``param_types``: ``{str(dtype): {"count", "percentage",
              "memory_mb"}}`` per parameter dtype.
            - ``device_info``: device of the first parameter (``None`` for a
              parameterless model) and CUDA availability; when CUDA is
              available, also device name and allocated/reserved memory (MB).
    """
    total_params = 0
    param_type_counts: Dict[torch.dtype, int] = {}
    first_device = None
    for param in model.parameters():
        if first_device is None:
            first_device = param.device
        n = param.numel()
        total_params += n
        param_type_counts[param.dtype] = param_type_counts.get(param.dtype, 0) + n

    results: Dict = {
        "total_params": total_params,
        "param_types": {},
        "device_info": {
            # None for a parameterless model; the previous
            # next(model.parameters()) raised StopIteration in that case.
            "device": first_device,
            "cuda_available": torch.cuda.is_available(),
        },
    }
    for dtype, count in param_type_counts.items():
        # Guard against zero-size parameter sets (e.g. only empty tensors).
        percentage = (count / total_params) * 100 if total_params else 0.0
        # element_size() is valid for every dtype; the previous
        # torch.finfo(dtype) raised TypeError for integer/bool parameters.
        bytes_per_element = torch.empty((), dtype=dtype).element_size()
        memory_mb = (count * bytes_per_element) / (1024 * 1024)
        results["param_types"][str(dtype)] = {
            "count": count,
            "percentage": percentage,
            "memory_mb": memory_mb,
        }
    if torch.cuda.is_available():
        results["device_info"].update({
            "cuda_device": torch.cuda.get_device_name(0),
            "cuda_memory_allocated_mb": torch.cuda.memory_allocated(0) / 1024**2,
            "cuda_memory_cached_mb": torch.cuda.memory_reserved(0) / 1024**2,
        })
    return results