# ─────────────────────────────────────────────────────────────────────────────
# Coral Edge TPU Model Compiler
#
# Compiles YOLO 2026 nano to Google Coral Edge TPU .tflite format using
# ultralytics' built-in format="edgetpu" export pipeline.
#
# Per https://docs.ultralytics.com/guides/coral-edge-tpu-on-raspberry-pi/:
#   model.export(format="edgetpu") handles the full pipeline:
#   .pt → ONNX → onnx2tf SavedModel → TFLite INT8 → edgetpu_compiler
#
# MUST run on linux/amd64 — edgetpu_compiler is x86_64 Linux only.
# On Apple Silicon or Windows, Docker Desktop handles QEMU emulation.
# ─────────────────────────────────────────────────────────────────────────────

# Pinned to amd64 on purpose: edgetpu_compiler is published only for x86_64
# Linux, so the image must be built/run under emulation on other hosts.
# Fix: the tag was broken as "python:3.11 -slim" (stray space) — the tag and
# its "-slim" suffix must be a single token.
FROM --platform=linux/amd64 python:3.11-slim

# Image metadata (single LABEL instruction; values unchanged).
LABEL maintainer="Aegis-AI / DeepCamera" \
      description="Compiles YOLO 2026 to Google Coral Edge TPU .tflite"

2821# ── System deps ───────────────────────────────────────────────────────────────
2922RUN apt-get update && apt-get install -y --no-install-recommends \
@@ -35,7 +28,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
3528 libglib2.0-0 \
3629 && rm -rf /var/lib/apt/lists/*
3730
# ── edgetpu_compiler from Google Coral apt (x86_64 only) ─────────────────────
3932RUN curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg \
4033 | gpg --dearmor -o /usr/share/keyrings/coral-edgetpu.gpg \
4134 && echo "deb [signed-by=/usr/share/keyrings/coral-edgetpu.gpg] \
@@ -45,31 +38,17 @@ RUN curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg \
4538 && apt-get install -y --no-install-recommends edgetpu-compiler \
4639 && rm -rf /var/lib/apt/lists/*
4740
# ── Python deps ───────────────────────────────────────────────────────────────
# Only ultralytics plus a numpy pin are installed here; ultralytics manages its
# own export toolchain versions (onnx2tf, tensorflow-cpu, onnxslim, etc.) and
# auto-installs them when format="edgetpu" export is invoked.
# NOTE(review): that auto-install happens at export time inside the container,
# so the first compile run needs network access — confirm this is acceptable.
RUN pip install --no-cache-dir \
        "numpy>=1.24.0,<2.0" \
        "ultralytics>=8.3.0"

6746
# ── Compile entrypoint script ─────────────────────────────────────────────────
# Destination is relative to WORKDIR, so this lands at /compile/compile_model.py.
WORKDIR /compile
COPY scripts/compile_model.py ./compile_model.py

7150
# ── Output volume (mount the skill's models/ directory here) ──────────────────
# Nothing writes to this path at build time, so declaring the VOLUME here is
# safe (build-time writes after a VOLUME declaration would be discarded).
# Fix: normalized the garbled JSON-array spacing ("…" ] → "…"]).
VOLUME ["/compile/output"]

7453
# ── Entrypoint ────────────────────────────────────────────────────────────────