Skip to content

Commit ab7916b

Browse files
committed
Update
1 parent 9a5acc6 commit ab7916b

File tree

2 files changed

+29
-1
lines changed

2 files changed

+29
-1
lines changed

examples/DeepLabv3/config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
IMAGENET_STD = [0.229, 0.224, 0.225]
3030

3131
# Device Configuration
32-
DEVICE_PREFERENCE = 'cpu' # 'cuda' or 'cpu'
32+
DEVICE_PREFERENCE = 'cuda' # 'cuda' or 'cpu'
3333

3434
# UI Configuration
3535
WINDOW_CONFIG = {
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
import onnx
2+
from onnxruntime.quantization import quantize_dynamic, QuantType
3+
from pathlib import Path
4+
import config
5+
6+
def quantize_model(input_model_path, output_model_path):
    """Apply dynamic (post-training) quantization to an ONNX model.

    Dynamic quantization converts the model's weights to 8-bit integers
    on disk; activations are quantized on the fly at inference time, so
    no calibration dataset is needed.

    Args:
        input_model_path: Path to the float32 ONNX model to quantize.
        output_model_path: Destination path for the quantized model.

    Returns:
        True if quantization succeeded, False if it raised an exception.
    """
    print(f"Quantizing model {input_model_path}...")

    try:
        quantize_dynamic(
            # str(): callers pass pathlib.Path objects, but older
            # onnxruntime releases only accept string paths here.
            model_input=str(input_model_path),
            model_output=str(output_model_path),
            weight_type=QuantType.QUInt8
        )
        print(f"✅ Quantization successful! Saved to {output_model_path}")
        return True
    except Exception as e:
        # Best-effort tooling script: report the failure and let the
        # caller decide what to do via the boolean return value.
        print(f"❌ Quantization failed: {e}")
        return False
20+
21+
if __name__ == "__main__":
22+
input_path = config.BASE_DIR / "web_app" / "document_detector.onnx"
23+
output_path = config.BASE_DIR / "web_app" / "document_detector_quant.onnx"
24+
25+
if not input_path.exists():
26+
print(f"Error: Input model {input_path} not found. Please run export_onnx.py first.")
27+
else:
28+
quantize_model(input_path, output_path)

0 commit comments

Comments
 (0)