File tree Expand file tree Collapse file tree 2 files changed +29
-1
lines changed
Expand file tree Collapse file tree 2 files changed +29
-1
lines changed Original file line number Diff line number Diff line change 2929IMAGENET_STD = [0.229 , 0.224 , 0.225 ]
3030
3131# Device Configuration
32- DEVICE_PREFERENCE = 'cpu ' # 'cuda' or 'cpu'
32+ DEVICE_PREFERENCE = 'cuda ' # 'cuda' or 'cpu'
3333
3434# UI Configuration
3535WINDOW_CONFIG = {
Original file line number Diff line number Diff line change 1+ import onnx
2+ from onnxruntime .quantization import quantize_dynamic , QuantType
3+ from pathlib import Path
4+ import config
5+
def quantize_model(input_model_path, output_model_path):
    """Apply dynamic UInt8 quantization to an ONNX model.

    Loads the model at *input_model_path*, quantizes its weights to
    unsigned 8-bit integers with onnxruntime's dynamic quantizer, and
    writes the quantized model to *output_model_path*.

    Returns:
        True if quantization completed, False if it raised.
    """
    print(f"Quantizing model {input_model_path}...")

    succeeded = False
    try:
        quantize_dynamic(
            model_input=input_model_path,
            model_output=output_model_path,
            weight_type=QuantType.QUInt8,
        )
    except Exception as e:
        # Best-effort CLI script: report the failure instead of crashing.
        print(f"❌ Quantization failed: {e}")
    else:
        print(f"✅ Quantization successful! Saved to {output_model_path}")
        succeeded = True
    return succeeded
20+
if __name__ == "__main__":
    # Paths mirror export_onnx.py's output location under the web app.
    model_dir = config.BASE_DIR / "web_app"
    input_path = model_dir / "document_detector.onnx"
    output_path = model_dir / "document_detector_quant.onnx"

    if input_path.exists():
        quantize_model(input_path, output_path)
    else:
        print(f"Error: Input model {input_path} not found. Please run export_onnx.py first.")
You can’t perform that action at this time.
0 commit comments