Commit 6db1047

Merge pull request #146 from SharpAI/develop
Develop
2 parents: 314242d + 70a182c

File tree: 4 files changed (+392 additions, -29 deletions)
skills/detection/yolo-detection-2026/deploy.bat

Lines changed: 10 additions & 6 deletions
@@ -120,9 +120,13 @@ if exist "%SKILL_DIR%\scripts\env_config.py" (
     if defined ENV_CONFIG_DIR (
         echo %LOG_PREFIX% Detecting hardware via env_config.py...>&2

-        REM Run env_config detection via Python
-        for /f "tokens=*" %%B in ('"%VPYTHON%" -c "import sys; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(env.backend)" 2^>nul') do (
-            set "DETECTED_BACKEND=%%B"
+        REM Run env_config detection via Python (temp file avoids for /f quoting issues)
+        set "ENV_CONFIG_DIR=!ENV_CONFIG_DIR!"
+        set "_TMPBACK=%TEMP%\_aegis_detect_backend.txt"
+        "%VPYTHON%" -c "import sys,os; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(env.backend)" > "!_TMPBACK!" 2>nul
+        if exist "!_TMPBACK!" (
+            set /p DETECTED_BACKEND=<"!_TMPBACK!"
+            del "!_TMPBACK!" 2>nul
         )

         REM Validate backend value (Windows: only cuda, intel, cpu are realistic)
@@ -187,7 +191,7 @@ if "!BACKEND!" neq "cpu" (
     echo %LOG_PREFIX% Pre-converting model to optimized format for !BACKEND!...>&2
     echo {"event": "progress", "stage": "optimize", "message": "Converting model for !BACKEND! (~30-120s)..."}

-    "%VPYTHON%" -c "import sys; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1
+    "%VPYTHON%" -c "import sys,os; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1

     if !errorlevel! equ 0 (
         echo {"event": "progress", "stage": "optimize", "message": "Model optimization complete"}
@@ -199,7 +203,7 @@ if "!BACKEND!" neq "cpu" (
     echo %LOG_PREFIX% Pre-converting model to ONNX for CPU...>&2
     echo {"event": "progress", "stage": "optimize", "message": "Converting model for cpu (~30-120s)..."}

-    "%VPYTHON%" -c "import sys; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1
+    "%VPYTHON%" -c "import sys,os; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1

     if !errorlevel! equ 0 (
         echo {"event": "progress", "stage": "optimize", "message": "Model optimization complete"}
@@ -212,7 +216,7 @@ if "!BACKEND!" neq "cpu" (
 REM ─── Step 6: Verify installation ───────────────────────────────────────────

 echo %LOG_PREFIX% Verifying installation...>&2
-"%VPYTHON%" -c "import sys, json; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(json.dumps(env.to_dict(), indent=2))" 2>&1
+"%VPYTHON%" -c "import sys,os,json; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(json.dumps(env.to_dict(), indent=2))" 2>&1

 echo {"event": "complete", "backend": "!BACKEND!", "message": "YOLO 2026 skill installed (!BACKEND! backend)"}
 echo %LOG_PREFIX% Done! Backend: !BACKEND!>&2
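
For readability, a minimal standalone sketch of the inline detection snippet that deploy.bat now runs and captures through the temp file; it assumes ENV_CONFIG_DIR is set to the directory containing env_config.py, exactly as the batch script sets it beforehand.

# Sketch of the one-liner deploy.bat runs; ENV_CONFIG_DIR must point at the
# directory that contains env_config.py (the batch script exports it first).
import os
import sys

sys.path.insert(0, os.environ["ENV_CONFIG_DIR"])
from env_config import HardwareEnv

env = HardwareEnv.detect()
print(env.backend)  # "cuda", "intel", or "cpu"; deploy.bat reads this line from the temp file

Redirecting the output to a temp file sidesteps the for /f quoting issues the REM comment mentions, and reading the path from os.environ['ENV_CONFIG_DIR'] instead of interpolating !ENV_CONFIG_DIR! into the -c string keeps the inline Python free of path-escaping concerns.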

skills/detection/yolo-detection-2026/scripts/detect.py

Lines changed: 15 additions & 6 deletions
@@ -240,14 +240,22 @@ def main():
         target_classes = [c.strip() for c in target_classes.split(",")]

     # ── Hardware detection & optimized model loading ──
+    emit({"event": "progress", "stage": "init", "message": "Detecting compute hardware..."})
     env = HardwareEnv.detect()
     perf = PerfTracker(interval=PERF_STATS_INTERVAL)

+    gpu_msg = f"{env.gpu_name} ({env.backend})" if env.gpu_name else env.backend
+    emit({"event": "progress", "stage": "init", "message": f"Hardware: {gpu_msg}"})
+
     try:
+        emit({"event": "progress", "stage": "model", "message": f"Loading {model_name} model ({env.export_format} format)..."})
         model, model_format = env.load_optimized(model_name, use_optimized=use_optimized)
         perf.model_load_ms = env.load_ms
         perf.export_ms = env.export_ms

+        if env.export_ms > 0:
+            emit({"event": "progress", "stage": "model", "message": f"Model optimized in {env.export_ms:.0f}ms"})
+
         ready_event = {
             "event": "ready",
             "model": f"yolo2026{model_size[0]}",
@@ -268,18 +276,19 @@ def main():
         emit({"event": "error", "message": f"Failed to load model: {e}", "retriable": False})
         sys.exit(1)

-    # Graceful shutdown
-    running = True
+    # Graceful shutdown — exit immediately with code 0.
+    # The stdin read loop blocks, so setting a flag doesn't work;
+    # we must exit in the signal handler to avoid being killed (code null).
     def handle_signal(signum, frame):
-        nonlocal running
-        running = False
+        sig_name = "SIGTERM" if signum == signal.SIGTERM else "SIGINT"
+        log(f"Received {sig_name}, shutting down gracefully")
+        perf.emit_final()
+        sys.exit(0)
     signal.signal(signal.SIGTERM, handle_signal)
     signal.signal(signal.SIGINT, handle_signal)

     # Main loop: read frames from stdin, output detections to stdout
     for line in sys.stdin:
-        if not running:
-            break

         line = line.strip()
         if not line:
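
The shutdown comment above is the key behavioral change: a process blocked in for line in sys.stdin never returns to check a running flag, so the handler itself has to exit. A minimal sketch of that pattern, where emit_final() is an illustrative stand-in for the real script's perf.emit_final():

# Exit-in-handler pattern: the blocking stdin loop cannot poll a flag,
# so the signal handler flushes final state and exits with code 0 itself.
import signal
import sys

def emit_final():
    print("final stats", file=sys.stderr)  # placeholder for the real perf summary

def handle_signal(signum, frame):
    emit_final()
    sys.exit(0)  # code 0 so the parent sees a clean shutdown, not a kill (code null)

signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)

for line in sys.stdin:  # blocks between frames; a flag check here would never run mid-read
    pass                # process one frame per line in the real script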

skills/lib/env_config.py

Lines changed: 65 additions & 17 deletions
@@ -133,31 +133,79 @@ def detect() -> "HardwareEnv":
         return env

     def _try_cuda(self) -> bool:
-        """Detect NVIDIA GPU via nvidia-smi and torch."""
-        if not shutil.which("nvidia-smi"):
-            return False
+        """Detect NVIDIA GPU via nvidia-smi (with Windows path search) and WMI fallback."""
+        nvidia_smi = shutil.which("nvidia-smi")
+
+        # Windows: check well-known paths if not on PATH
+        if not nvidia_smi and platform.system() == "Windows":
+            for candidate in [
+                Path(os.environ.get("PROGRAMFILES", r"C:\Program Files"))
+                / "NVIDIA Corporation" / "NVSMI" / "nvidia-smi.exe",
+                Path(os.environ.get("WINDIR", r"C:\Windows"))
+                / "System32" / "nvidia-smi.exe",
+            ]:
+                if candidate.is_file():
+                    nvidia_smi = str(candidate)
+                    _log(f"Found nvidia-smi at {nvidia_smi}")
+                    break
+
+        if nvidia_smi:
+            try:
+                result = subprocess.run(
+                    [nvidia_smi, "--query-gpu=name,memory.total,driver_version",
+                     "--format=csv,noheader,nounits"],
+                    capture_output=True, text=True, timeout=10,
+                )
+                if result.returncode == 0:
+                    line = result.stdout.strip().split("\n")[0]
+                    parts = [p.strip() for p in line.split(",")]
+                    if len(parts) >= 3:
+                        self.backend = "cuda"
+                        self.device = "cuda"
+                        self.gpu_name = parts[0]
+                        self.gpu_memory_mb = int(float(parts[1]))
+                        self.driver_version = parts[2]
+                        self.detection_details["nvidia_smi"] = line
+                        _log(f"NVIDIA GPU: {self.gpu_name} ({self.gpu_memory_mb}MB, driver {self.driver_version})")
+                        return True
+            except (subprocess.TimeoutExpired, FileNotFoundError, ValueError) as e:
+                _log(f"nvidia-smi probe failed: {e}")
+
+        # Windows WMI fallback: detect NVIDIA GPU even without nvidia-smi on PATH
+        if platform.system() == "Windows":
+            return self._try_cuda_wmi()
+
+        return False
+
+    def _try_cuda_wmi(self) -> bool:
+        """Windows-only: detect NVIDIA GPU via WMI (wmic)."""
         try:
             result = subprocess.run(
-                ["nvidia-smi", "--query-gpu=name,memory.total,driver_version",
-                 "--format=csv,noheader,nounits"],
+                ["wmic", "path", "win32_VideoController", "get",
+                 "Name,AdapterRAM,DriverVersion", "/format:csv"],
                 capture_output=True, text=True, timeout=10,
            )
            if result.returncode != 0:
                return False

-            line = result.stdout.strip().split("\n")[0]
-            parts = [p.strip() for p in line.split(",")]
-            if len(parts) >= 3:
-                self.backend = "cuda"
-                self.device = "cuda"
-                self.gpu_name = parts[0]
-                self.gpu_memory_mb = int(float(parts[1]))
-                self.driver_version = parts[2]
-                self.detection_details["nvidia_smi"] = line
-                _log(f"NVIDIA GPU: {self.gpu_name} ({self.gpu_memory_mb}MB, driver {self.driver_version})")
-                return True
+            for line in result.stdout.strip().split("\n"):
+                if "NVIDIA" in line.upper():
+                    parts = [p.strip() for p in line.split(",")]
+                    # CSV format: Node,AdapterRAM,DriverVersion,Name
+                    if len(parts) >= 4:
+                        self.backend = "cuda"
+                        self.device = "cuda"
+                        self.gpu_name = parts[3]
+                        try:
+                            self.gpu_memory_mb = int(int(parts[1]) / (1024 * 1024))
+                        except (ValueError, IndexError):
+                            pass
+                        self.driver_version = parts[2] if len(parts) > 2 else ""
+                        self.detection_details["wmi"] = line
+                        _log(f"NVIDIA GPU (WMI): {self.gpu_name} ({self.gpu_memory_mb}MB)")
+                        return True
         except (subprocess.TimeoutExpired, FileNotFoundError, ValueError) as e:
-            _log(f"nvidia-smi probe failed: {e}")
+            _log(f"WMI probe failed: {e}")
         return False

     def _try_rocm(self) -> bool:
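
To make the WMI fallback concrete, here is a small worked example of how one wmic /format:csv row maps onto the fields set above; the sample row is illustrative, not captured from a real machine.

# Illustrative row: wmic /format:csv prefixes the node name and emits the
# requested properties alphabetically, giving Node,AdapterRAM,DriverVersion,Name.
sample = "DESKTOP-EXAMPLE,8589934592,31.0.15.3623,NVIDIA GeForce RTX 3070"

parts = [p.strip() for p in sample.split(",")]
gpu_name = parts[3]                                 # "NVIDIA GeForce RTX 3070"
gpu_memory_mb = int(int(parts[1]) / (1024 * 1024))  # AdapterRAM is bytes -> 8192
driver_version = parts[2]                           # "31.0.15.3623"
print(gpu_name, gpu_memory_mb, driver_version)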
