# ─────────────────────────────────────────────────────────────────────────────
# Coral Edge TPU Model Compiler
#
# Converts YOLO 2026 nano (.pt) → ONNX → TFLite INT8 → Edge TPU (.tflite)
#
# IMPORTANT: Must run on linux/amd64. edgetpu_compiler only exists for
# x86_64 Linux. On Apple Silicon or Windows, Docker Desktop handles emulation
# via QEMU automatically — just pass --platform linux/amd64.
#
# Why tensorflow 2.13?
#   edgetpu_compiler v16.0 only understands TFLite flatbuffer schema ≤ v3.
#   tensorflow-cpu 2.13.1 is the last version that produces schema v3.
#   TF 2.14+ produces schema v6 → "Invalid model" error from the compiler.
#
# Why NOT onnx2tf?
#   onnx2tf ≥ 1.17 + TF 2.13 hits a TF-Keras symbolic-tensor TypeError
#   during INT8 calibration. We bypass it entirely by using the ONNX Runtime
#   directly to run calibration, then drive TFLiteConverter ourselves.
#   This is the approach used in compile_model.py's two-stage pipeline.
# ─────────────────────────────────────────────────────────────────────────────

# Base image: python:3.9-slim.
# 3.9 (not 3.11) because the Coral toolchain (pycoral / tflite calibration
# stack pinned below) tops out at Python 3.9. --platform forces x86_64 since
# edgetpu_compiler ships only for amd64 Linux (emulated via QEMU elsewhere).
FROM --platform=linux/amd64 python:3.9-slim

LABEL maintainer="Aegis-AI / DeepCamera"
LABEL description="Compiles YOLO 2026 nano to Google Coral Edge TPU .tflite"
@@ -25,7 +36,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
2536 && rm -rf /var/lib/apt/lists/*
2637
# ── Google Coral edgetpu_compiler (x86_64 Debian package) ────────────────────
# Official Coral apt repository — edgetpu_compiler is only available here.
2939RUN curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg \
3040 | gpg --dearmor -o /usr/share/keyrings/coral-edgetpu.gpg \
3141 && echo "deb [signed-by=/usr/share/keyrings/coral-edgetpu.gpg] \
@@ -36,22 +46,30 @@ RUN curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg \
3646 && rm -rf /var/lib/apt/lists/*
3747
# ── Python deps ───────────────────────────────────────────────────────────────
# Pipeline: ultralytics exports ONNX, then we convert ONNX→TFLite INT8
# using onnx-tf (not onnx2tf) + tensorflow 2.13 + tflite-support calibrator.
#
# tensorflow-cpu==2.13.1        → TFLite schema v3, compatible with edgetpu_compiler v16
# tensorflow_probability==0.21  → last TFP release built against TF 2.13
# keras==2.13.1                 → keep Keras 2 in lockstep with TF 2.13 (avoid Keras 3)
# onnx-tf==1.10.0               → stable ONNX→TF SavedModel, works with TF 2.13
# onnx>=1.14,<1.17              → opset range onnx-tf 1.10 understands
# onnxruntime==1.17.3           → ONNX inference for INT8 representative dataset generation
# ultralytics                   → YOLO .pt download + ONNX export only (not tflite export)
# numpy<2                       → required by tensorflow 2.13
RUN pip install --no-cache-dir \
      "tensorflow-cpu==2.13.1" \
      "tensorflow_probability==0.21.0" \
      "keras==2.13.1" \
      "onnx-tf==1.10.0" \
      "onnx>=1.14.0,<1.17.0" \
      "onnxruntime==1.17.3" \
      "ultralytics>=8.3.0" \
      "numpy>=1.24.0,<2.0" \
      "Pillow>=10.0.0"
4767
# ── Copy compile script ───────────────────────────────────────────────────────
# All work happens under /compile; the script lands beside the output dir.
WORKDIR /compile
COPY scripts/compile_model.py ./compile_model.py
5171
# ── Output volume ─────────────────────────────────────────────────────────────
# Mount the skill's models/ directory here at runtime so the compiled
# .tflite files land on the host:
#   docker run -v /path/to/models:/compile/output ...
VOLUME ["/compile/output"]
5674
# ── Entrypoint ────────────────────────────────────────────────────────────────