Skip to content

Commit a7bb895

Browse files
committed
feat(depth-estimation): wire HardwareEnv for multi-backend GPU support + pin torch/torchvision versions
- Replace basic _select_device() with full HardwareEnv.detect() from skills/lib/env_config.py
- Supports: NVIDIA CUDA, AMD ROCm, Apple MPS/Neural Engine, Intel OpenVINO/NPU, CPU
- Pin torch~=2.7.0 and torchvision~=0.22.0 to prevent pip resolver conflicts
- Move torch/torchvision above depth-anything-v2 in requirements.txt for install order
- Expose self.env (HardwareEnv) to subclasses for GPU name, memory, backend info
- Include backend and gpu_name in ready event for Aegis UI display
1 parent 2cfba37 commit a7bb895

2 files changed

Lines changed: 80 additions & 18 deletions

File tree

skills/transformation/depth-estimation/requirements.txt

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
1-
# Depth Estimation
1+
# Depth Estimation — Privacy Transform Skill
2+
# NOTE: torch and torchvision MUST be version-paired.
3+
# Loose ranges cause pip to flip between incompatible versions.
4+
torch~=2.7.0
5+
torchvision~=0.22.0
26
depth-anything-v2>=0.1.0
3-
torch>=2.0.0
4-
torchvision>=0.15.0
57
numpy>=1.24.0
68
opencv-python-headless>=4.8.0
79
Pillow>=10.0.0

skills/transformation/depth-estimation/scripts/transform_base.py

Lines changed: 75 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ def transform_frame(self, image, metadata):
4646

4747
import sys
4848
import json
49+
import os
4950
import signal
5051
import time
5152
import argparse
@@ -55,6 +56,52 @@ def transform_frame(self, image, metadata):
5556
from pathlib import Path
5657

5758

59+
# ═══════════════════════════════════════════════════════════════════════════════
# Hardware detection — reuse env_config.py from skills/lib/
# ═══════════════════════════════════════════════════════════════════════════════

_script_dir = Path(__file__).resolve().parent
_lib_candidates = [
    _script_dir,                                          # bundled alongside script
    _script_dir.parent.parent.parent.parent / "lib",      # repo layout: skills/lib/
    _script_dir.parent / "lib",                           # skill-level lib/
]
_env_config_loaded = False
for _lib_path in _lib_candidates:
    if (_lib_path / "env_config.py").exists():
        sys.path.insert(0, str(_lib_path))
        from env_config import HardwareEnv  # noqa: E402
        _env_config_loaded = True
        break

if not _env_config_loaded:
    # Minimal fallback — auto-detect via PyTorch only.  Mirrors the attribute
    # surface of the shared HardwareEnv (presumably — confirm against
    # skills/lib/env_config.py) so run() and subclasses can use either one.
    class HardwareEnv:  # type: ignore[no-redef]
        def __init__(self):
            self.backend = "cpu"        # compute backend label: cuda/rocm/mps/cpu
            self.device = "cpu"         # torch device string for model placement
            self.gpu_name = ""          # human-readable GPU name (shown in UI)
            self.gpu_memory_mb = 0      # total GPU memory, MiB (0 = unknown/CPU)
            self.export_format = "none"
            self.framework_ok = False   # True once torch imported successfully

        @staticmethod
        def detect():
            """Best-effort probe: CUDA/ROCm first, then Apple MPS, else CPU."""
            env = HardwareEnv()
            try:
                import torch
                # torch imported — the inference framework is usable even on CPU.
                env.framework_ok = True
                if torch.cuda.is_available():
                    # ROCm builds of torch expose AMD GPUs through the CUDA
                    # API; torch.version.hip is set only on HIP/ROCm builds.
                    env.backend = "rocm" if getattr(torch.version, "hip", None) else "cuda"
                    env.device = "cuda"
                    # Populate the display fields run() emits in the init
                    # message and ready event.
                    env.gpu_name = torch.cuda.get_device_name(0)
                    props = torch.cuda.get_device_properties(0)
                    env.gpu_memory_mb = props.total_memory // (1024 * 1024)
                elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
                    env.backend = "mps"
                    env.device = "mps"
            except ImportError:
                # No torch at all — stay on the CPU defaults.
                pass
            return env

        def to_dict(self):
            return {"backend": self.backend, "device": self.device}
58105
# ═══════════════════════════════════════════════════════════════════════════════
59106
# Performance Tracker
60107
# ═══════════════════════════════════════════════════════════════════════════════
@@ -152,6 +199,7 @@ class TransformSkillBase(ABC):
152199

153200
def __init__(self):
154201
self.device = "cpu"
202+
self.env = None # HardwareEnv — populated in run()
155203
self.config = {}
156204
self.perf = PerfTracker()
157205
self._running = True
@@ -206,18 +254,26 @@ def run(self):
206254
"""Parse args, load model, enter stdin loop."""
207255
args = self._parse_args()
208256
self.config = self._load_config(args)
209-
self.device = self._select_device(self.config.get("device", "auto"))
257+
258+
# Hardware detection — full multi-backend probe
259+
device_pref = self.config.get("device", "auto")
260+
self.env = self._detect_hardware(device_pref)
261+
self.device = self.env.device
210262

211263
# Load model
212264
try:
213-
_emit({"event": "progress", "stage": "init", "message": "Loading model..."})
265+
gpu_msg = f"{self.env.gpu_name} ({self.env.backend})" if self.env.gpu_name else self.env.backend
266+
_emit({"event": "progress", "stage": "init", "message": f"Hardware: {gpu_msg}"})
267+
_emit({"event": "progress", "stage": "model", "message": "Loading model..."})
214268
t0 = time.perf_counter()
215269
ready_fields = self.load_model(self.config)
216270
self.perf.model_load_ms = (time.perf_counter() - t0) * 1000
217271

218272
ready_event = {
219273
"event": "ready",
220274
"model_load_ms": round(self.perf.model_load_ms, 1),
275+
"backend": self.env.backend,
276+
"gpu": self.env.gpu_name,
221277
**ready_fields,
222278
}
223279
_emit(ready_event)
@@ -348,7 +404,6 @@ def _parse_args(self):
348404
return parser.parse_args()
349405

350406
def _load_config(self, args) -> dict:
351-
import os
352407
env_params = os.environ.get("AEGIS_SKILL_PARAMS")
353408
if env_params:
354409
try:
@@ -363,15 +418,20 @@ def _load_config(self, args) -> dict:
363418
return {"device": args.device}
364419

365420
@staticmethod
def _detect_hardware(device_pref: str = "auto") -> HardwareEnv:
    """
    Full hardware detection using shared env_config.py.

    Supports: NVIDIA CUDA, AMD ROCm, Apple MPS/Neural Engine,
    Intel OpenVINO/NPU, CPU fallback.

    Args:
        device_pref: "auto" to accept the detected device, or an explicit
            torch device string (e.g. "cuda", "mps", "cpu") to force one.

    Returns:
        A HardwareEnv with .backend, .device, .gpu_name, etc.
    """
    env = HardwareEnv.detect()

    # Honour explicit device preference.
    if device_pref != "auto":
        if device_pref != env.device:
            # The user redirected away from the detected device, so the
            # probed backend label no longer applies; fall back to naming
            # the backend after the forced device.
            env.backend = device_pref
        # Otherwise keep the detected backend: forcing "cuda" on a ROCm
        # build must not relabel backend "rocm" → "cuda" in the UI.
        env.device = device_pref

    return env

0 commit comments

Comments (0)