Skip to content

Commit 803d79b

Browse files
authored
Add files via upload
1 parent 6071912 commit 803d79b

1 file changed

Lines changed: 38 additions & 0 deletions

File tree

quantization.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
import sys
2+
import coremltools
3+
from coremltools.models.neural_network.quantization_utils import *
4+
5+
6+
def quantize(file, bits, functions):
    """
    Quantize a Core ML model once per (function, bit) combination.

    Each quantized variant is written next to the original as
    "<model_name>_<function>_<bit>.mlmodel".

    file : Core ML file to process (example : mymodel.mlmodel)
    bits : Array of bits per weight (example : [16,8,6,4,2,1])
    functions : Array of distribution functions (example : ["linear", "linear_lut", "kmeans"])

    Returns None. Files that do not end in ".mlmodel" are skipped silently.
    """
    # We only consider .mlmodel files.
    if not file.endswith(".mlmodel"):
        return
    # Strip only the final extension so dotted stems ("my.model.mlmodel")
    # are preserved — the original split(".")[0] truncated them to "my".
    model_name = file.rsplit(".", 1)[0]
    model = coremltools.models.MLModel(file)
    for function in functions:
        for bit in bits:
            print("--------------------------------------------------------------------")
            print(f"Processing {model_name} for {bit}-bits with {function} function")
            # Flush so progress is visible before the (slow) quantization call.
            sys.stdout.flush()
            quantized_model = quantize_weights(model, bit, function)
            sys.stdout.flush()
            quantized_model.author = "Alexis Creuzot"
            quantized_model.short_description = str(bit)+"-bit per quantized weight, using "+function+"."
            quantized_model.save(model_name+"_"+function+"_"+str(bit)+".mlmodel")
34+
35+
# Launch quantization only when executed as a script (not on import),
# so the module can be imported for its quantize() helper without side effects.
if __name__ == "__main__":
    quantize("CNNEmotions.mlmodel",
             [6, 4, 3, 2],
             ["linear"])

0 commit comments

Comments
 (0)