# NOTE(review): dropped spurious "from cgitb import enable" — an accidental
# IDE auto-import; nothing in this module used it (the class attribute
# MemProfiler.enable shadows it anyway), and the cgitb module was deprecated
# in Python 3.11 and removed in 3.13.
import torch

from .device import *


class MemProfiler:
    """Report CUDA memory usage, either on demand or as a context manager.

    Disabled by default; set ``MemProfiler.enable = True`` to turn the
    reporting on. Used as a context manager, it snapshots the allocated
    bytes on ``__enter__`` and prints the delta on ``__exit__``.
    """

    # Global switch: when False, print_memory_stats is a no-op.
    enable = False

    @staticmethod
    def print_memory_stats(prefix, last_allocated=None, device=None):
        """Print current (and optionally delta) CUDA memory statistics.

        Args:
            prefix: Label prepended to the printed line.
            last_allocated: Previous ``torch.cuda.memory_allocated`` reading
                in bytes; when given, the change since then is also printed.
            device: CUDA device to query; defaults to ``default()`` from the
                sibling ``.device`` module.
        """
        if not MemProfiler.enable:
            return
        if device is None:
            device = default()
        allocated = torch.cuda.memory_allocated(device)
        if last_allocated is not None:
            delta = (allocated - last_allocated) / 1024 / 1024
            delta_str = (f'{-delta:.2f}MB is freed, ' if delta < 0
                         else f'{delta:.2f}MB is allocated, ')
        else:
            delta_str = ''
        # Reuse the cached reading instead of querying the allocator a second
        # time, so the printed total is always consistent with the delta above.
        print(f'{prefix}: {delta_str}currently PyTorch allocates '
              f'{allocated/1024/1024:.2f}MB and '
              f'reserves {torch.cuda.memory_reserved(device)/1024/1024:.2f}MB memory')

    def __init__(self, name, device=None) -> None:
        """Create a profiling scope.

        Args:
            name: Label used as the print prefix on exit.
            device: CUDA device to profile; defaults to ``default()``.
        """
        self.name = name
        self.device = device or default()
        # Allocation snapshot (bytes) taken on __enter__.
        self.alloc0 = 0

    def __enter__(self):
        self.alloc0 = torch.cuda.memory_allocated(self.device)
        return self

    def __exit__(self, exc_type, exc_val, exc_traceback):
        # Report growth since __enter__; returns None, so exceptions
        # raised inside the with-block are not suppressed.
        MemProfiler.print_memory_stats(self.name, self.alloc0, self.device)