@@ -80,7 +80,7 @@ def get_basic_vram_info(device: Optional[torch.device] = None) -> Dict[str, Any]
         elif not isinstance(device, torch.device):
             device = torch.device(device)
         free_memory, total_memory = torch.cuda.mem_get_info(device)
-    elif torch.mps.is_available():
+    elif hasattr(torch, 'mps') and callable(getattr(torch.mps, 'is_available', None)) and torch.mps.is_available():
         # MPS doesn't support per-device queries or mem_get_info
         # Use system memory as proxy
         mem = psutil.virtual_memory()
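The same three-part guard is repeated at every MPS branch in this diff; a follow-up (not part of this PR) could hoist it into a single helper. A minimal sketch, where the name mps_is_available() is hypothetical:

    import torch

    # Hypothetical helper, not in this PR: centralizes the defensive probe so
    # call sites stay short. Older PyTorch builds may lack the torch.mps module
    # entirely, or expose it without is_available(), hence hasattr/getattr.
    def mps_is_available() -> bool:
        mps_module = getattr(torch, "mps", None)
        probe = getattr(mps_module, "is_available", None)
        return callable(probe) and probe()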
@@ -100,7 +100,7 @@ def get_basic_vram_info(device: Optional[torch.device] = None) -> Dict[str, Any]
 # Initial VRAM check at module load
 vram_info = get_basic_vram_info(device=None)
 if "error" not in vram_info:
-    backend = "MPS" if torch.mps.is_available() else "CUDA"
+    backend = "MPS" if (hasattr(torch, 'mps') and callable(getattr(torch.mps, 'is_available', None)) and torch.mps.is_available()) else "CUDA"
     print(f"📊 Initial {backend} memory: {vram_info['free_gb']:.2f} GB free / {vram_info['total_gb']:.2f} GB total")
 else:
     print(f"⚠️ Memory check failed: {vram_info['error']} - No available backend!")
@@ -129,7 +129,7 @@ def get_vram_usage(device: Optional[torch.device] = None, debug: Optional['Debug']
         reserved = torch.cuda.memory_reserved(device) / (1024 ** 3)
         max_allocated = torch.cuda.max_memory_allocated(device) / (1024 ** 3)
         return allocated, reserved, max_allocated
-    elif torch.mps.is_available():
+    elif hasattr(torch, 'mps') and callable(getattr(torch.mps, 'is_available', None)) and torch.mps.is_available():
         # MPS doesn't support per-device queries - uses global memory tracking
         allocated = torch.mps.current_allocated_memory() / (1024 ** 3)
         reserved = torch.mps.driver_allocated_memory() / (1024 ** 3)
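Both counters used in this hunk are public PyTorch APIs. For reference, a minimal standalone probe, using the long-standing torch.backends.mps.is_available() spelling:

    import torch

    # Probe the two process-global MPS counters used above; MPS has no
    # per-device equivalent of torch.cuda.mem_get_info().
    if torch.backends.mps.is_available():
        allocated_gb = torch.mps.current_allocated_memory() / (1024 ** 3)
        reserved_gb = torch.mps.driver_allocated_memory() / (1024 ** 3)
        print(f"MPS allocated {allocated_gb:.2f} GiB, driver reserved {reserved_gb:.2f} GiB")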
@@ -235,11 +235,11 @@ def clear_memory(debug: Optional['Debug'] = None, deep: bool = False, force: bool
     if free_ratio < 0.05:
         should_clear = True
         if debug:
-            backend = "MPS" if torch.mps.is_available() else "VRAM"
+            backend = "MPS" if (hasattr(torch, 'mps') and callable(getattr(torch.mps, 'is_available', None)) and torch.mps.is_available()) else "VRAM"
             debug.log(f"{backend} pressure: {mem_info['free_gb']:.2f} GB free of {mem_info['total_gb']:.2f} GB", category="memory")

     # For non-MPS systems, also check system RAM separately
-    if not should_clear and not torch.mps.is_available():
+    if not should_clear and not (hasattr(torch, 'mps') and callable(getattr(torch.mps, 'is_available', None)) and torch.mps.is_available()):
         mem = psutil.virtual_memory()
         if mem.available < mem.total * 0.05:
             should_clear = True
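For reference, the two-tier pressure heuristic in this hunk rewritten as a pure function; the mem_info keys and the 5% threshold come from the diff, while under_memory_pressure() is an illustrative name and mps_is_available() is the hypothetical helper sketched earlier:

    import psutil

    def under_memory_pressure(mem_info: dict) -> bool:
        if mem_info["free_gb"] / mem_info["total_gb"] < 0.05:
            return True  # backend memory (VRAM or unified) is nearly exhausted
        if not mps_is_available():
            # Non-MPS hosts: VRAM and system RAM are separate pools, so check RAM too
            mem = psutil.virtual_memory()
            return mem.available < mem.total * 0.05
        return False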
@@ -265,7 +265,7 @@ def clear_memory(debug: Optional['Debug'] = None, deep: bool = False, force: bool
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
         torch.cuda.ipc_collect()
-    elif torch.mps.is_available():
+    elif hasattr(torch, 'mps') and callable(getattr(torch.mps, 'is_available', None)) and torch.mps.is_available():
         torch.mps.empty_cache()

     if debug:
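With the hypothetical helper from above, this branch could collapse to the sketch below; release_cached_blocks() is an illustrative name, and all torch calls are public APIs:

    import torch

    def release_cached_blocks() -> None:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()  # return cached allocator blocks to the driver
            torch.cuda.ipc_collect()  # reap CUDA IPC handles left by dead processes
        elif mps_is_available():
            torch.mps.empty_cache()   # MPS equivalent; frees inactive cached memory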
@@ -302,7 +302,7 @@ def clear_memory(debug: Optional['Debug'] = None, deep: bool = False, force: bool
         handle = _os_memory_lib.GetCurrentProcess()
         _os_memory_lib.SetProcessWorkingSetSize(handle, -1, -1)

-    elif torch.mps.is_available():
+    elif hasattr(torch, 'mps') and callable(getattr(torch.mps, 'is_available', None)) and torch.mps.is_available():
         # macOS with MPS
         import ctypes  # Import only when needed
         import ctypes.util
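The hunk is truncated after the imports, so the body of the macOS branch is not shown here. Purely as an assumption about intent: a common way to ask the macOS allocator to release free pages via ctypes is malloc_zone_pressure_relief() from the system C library:

    import ctypes
    import ctypes.util

    # Assumed illustration, NOT the PR's hidden lines: ask the default malloc
    # zone to return free pages to the OS (NULL zone = all zones, goal=0 means
    # "release as much as possible").
    libc = ctypes.CDLL(ctypes.util.find_library("c"))
    libc.malloc_zone_pressure_relief.restype = ctypes.c_size_t
    libc.malloc_zone_pressure_relief.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
    libc.malloc_zone_pressure_relief(None, 0)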