import argparse
import json
import logging
import os
import sys
from io import StringIO

import h5py as h5
import numpy as np

parser = argparse.ArgumentParser()
# NOTE nargs='+' is an assumption; the script later reads test_param[0] as a
# single whitespace-separated string of parameters.
parser.add_argument(
    '--test_param',
    type=str,
    required=True,
    nargs='+',
    help='All the param list to test. e.g. ${RECIPE_NAME_0} ${GRANULARITY_0} ${DTYPE_0} ${RECIPE_NAME_1} ${GRANULARITY_1} ${DTYPE_1}..')
parser.add_argument(
    '--bin_dir', type=str, required=True,
    help='Directory path where test files are generated')
parser.add_argument(
    '--source_dir', type=str, required=True,
    help='Directory path where expected outputs exist')
parser.add_argument('--mode', type=str, required=True, help='Mode to test')
args = parser.parse_args()
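
# Illustrative invocation of this script (the recipe name and paths below are
# hypothetical, not taken from an actual test configuration):
#
#   python3 <this_script> \
#       --test_param "Conv2D_000 channel uint8 Conv2D_000 channel int16" \
#       --bin_dir ./bin --source_dir ./source --mode quantization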

modes_to_expected_folder = {
    'fake_quantization': 'fake_quantization',
    'mixed_fake_quantization': 'fake_quantization',
    'record_minmax': 'record_minmax',
    'parallel_record_minmax': 'record_minmax',
    'quantization': 'quantization',
    'mixed_quantization': 'quantization',
    'weights_only_quantization': 'wo_quantization'
}

modes_to_input_h5_suffix = {
    'fake_quantization': 'fake_quantized.circle.h5',
    'mixed_fake_quantization': 'fake_quantized.mixed.circle.h5',
    'record_minmax': 'minmax_recorded.circle.h5',
    'parallel_record_minmax': 'parallel_minmax_recorded.circle.h5',
    'quantization': 'quantized.circle.h5',
    'mixed_quantization': 'quantized.mixed.circle.h5',
    'weights_only_quantization': 'wo_quantized.circle.h5'
}
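
# How the tables above are used further down (the model name is hypothetical):
# with --mode quantization, the testcase 'Conv2D_000.channel.uint8' is read from
#   <bin_dir>/Conv2D_000.channel.uint8.quantized.circle.h5
# and compared against the JSON files under
#   <source_dir>/expected_outputs/Conv2D_000/channel/uint8/quantization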

test_param = args.test_param
bin_dir = args.bin_dir
source_dir = args.source_dir
mode = args.mode

if mode in ['mixed_fake_quantization', 'parallel_record_minmax', 'mixed_quantization']:
    source_dir += '_config'

log_format = '%(levelname)s: %(message)s'
formatter = logging.Formatter(log_format)
streamer = StringIO()
stream_handler = logging.StreamHandler(stream=streamer)
stream_handler.setFormatter(formatter)
logging.basicConfig(handlers=[stream_handler])
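
# Design note: log output is routed into an in-memory StringIO buffer instead of
# stderr. After each testcase the buffer is inspected; any logged error marks the
# testcase as failed, and the buffer is then swapped for a fresh one (see the
# bottom of the per-testcase loop below).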

if mode not in modes_to_expected_folder.keys():
    raise SystemExit("Unsupported mode. --mode should be one of " +
                     str(modes_to_expected_folder.keys()))


def compare_fake_quantization(tensor, tensor_name, expect_dir):
    with open(expect_dir + "/" + tensor_name + ".json", "r") as expect_file:
        json_load = json.load(expect_file)
    expected_weights = np.array(json_load["weights"])
    input_weights = tensor["weights"][:]
    if not np.allclose(input_weights, expected_weights, rtol=1.e-5, atol=1.e-5):
        logging.error("Fake-quantized weights of " + tensor_name + " (" +
                      str(input_weights) + ") do not match with expected value (" +
                      str(expected_weights) + ").")


def compare_record_minmax(tensor, tensor_name, expect_dir):
    with open(expect_dir + "/" + tensor_name + ".json", "r") as expect_file:
        json_load = json.load(expect_file)
    expected_min = np.array(json_load["min"])
    expected_max = np.array(json_load["max"])
    input_min = tensor["min"][:]
    input_max = tensor["max"][:]
    if not np.allclose(input_min, expected_min, rtol=1.e-5, atol=1.e-5):
        logging.error("Recorded min of " + tensor_name + " (" + str(input_min) +
                      ") does not match with expected value (" + str(expected_min) +
                      ").")
    if not np.allclose(input_max, expected_max, rtol=1.e-5, atol=1.e-5):
        logging.error("Recorded max of " + tensor_name + " (" + str(input_max) +
                      ") does not match with expected value (" + str(expected_max) +
                      ").")


def compare_quantization(tensor, tensor_name, expect_dir):
    with open(expect_dir + "/" + tensor_name + ".json", "r") as expect_file:
        json_load = json.load(expect_file)
    for key in json_load:
        if key == "weights":
            expected_weights = np.array(json_load["weights"])
            input_weights = tensor["weights"][()]
            # Integer weights are compared with an absolute tolerance.
            # NOTE The concrete tolerance values are assumptions; int64 weights
            # (e.g. biases of int16-quantized models) get a looser bound.
            abs_tolerance = 1
            if tensor["weights"].dtype == 'int64':
                abs_tolerance = 5
            if not np.allclose(input_weights, expected_weights, rtol=0,
                               atol=abs_tolerance):
                logging.error("Quantized weights of " + tensor_name + " (" +
                              str(input_weights) +
                              ") do not match with expected value (" +
                              str(expected_weights) + ").")
        if key == "scale":
            expected_scale = np.array(json_load["scale"])
            input_scale = tensor["scale"][:]
            if not np.allclose(input_scale, expected_scale, rtol=1.e-5, atol=1.e-5):
                logging.error("Quantized scale of " + tensor_name + " (" +
                              str(input_scale) + ") do not match with expected value (" +
                              str(expected_scale) + ").")
        if key == "zero_point":
            expected_zero_point = np.array(json_load["zero_point"])
            input_zero_point = tensor["zero_point"][:]
            # NOTE atol=1 below is an assumption: zero points may be off by one.
            if not np.allclose(input_zero_point, expected_zero_point, rtol=0, atol=1):
                logging.error("Quantized zero_point of " + tensor_name + " (" +
                              str(input_zero_point) +
                              ") do not match with expected value (" +
                              str(expected_zero_point) + ").")
149 """Yield successive n-sized chunks from the list"""
150 for i
in range(0, len(lst), n):
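
# For example:
#   list(chunks(['A', 'channel', 'uint8', 'B', 'layer', 'int16'], 3))
#   == [['A', 'channel', 'uint8'], ['B', 'layer', 'int16']]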


failed_log = dict()
inputs = test_param[0].split()
# Each testcase is described by three parameters: recipe name, granularity, dtype
PARAM_SET_SIZE = 3
assert (len(inputs) % PARAM_SET_SIZE) == 0
inputs = list(chunks(inputs, PARAM_SET_SIZE))

for idx in range(len(inputs)):
    model_name = inputs[idx][0]
    granularity = inputs[idx][1]
    dtype = inputs[idx][2]

    testcase = f'{model_name}.{granularity}.{dtype}'
    test_result_file = os.path.join(bin_dir, testcase)
    input_h5 = f'{test_result_file}.{modes_to_input_h5_suffix[mode]}'
    with h5.File(input_h5, 'r') as input:
        for tensor_name in input.keys():
            expect_dir = f'{source_dir}/expected_outputs/{model_name}/{granularity}/{dtype}/{modes_to_expected_folder[mode]}'
            if os.path.isfile(expect_dir + "/" + tensor_name + ".json"):
                tensor = input[tensor_name]
                if mode in ["fake_quantization", "mixed_fake_quantization"]:
                    compare_fake_quantization(tensor, tensor_name, expect_dir)
                elif mode in ["record_minmax", "parallel_record_minmax"]:
                    compare_record_minmax(tensor, tensor_name, expect_dir)
                elif mode in ["quantization", "mixed_quantization"]:
                    compare_quantization(tensor, tensor_name, expect_dir)
                elif mode == "weights_only_quantization":
                    # Only the tensor named "ker" (kernel weights) is checked here
                    if tensor_name == "ker":
                        compare_quantization(tensor, tensor_name, expect_dir)
                else:
                    raise SystemExit("Unsupported mode.")

    # Any error logged while comparing this testcase's tensors marks it as failed
    if len(streamer.getvalue()) > 0:
        failed_log[testcase] = streamer.getvalue().rstrip()

    # Start a fresh log buffer for the next testcase
    streamer = StringIO()
    stream_handler.setStream(streamer)

failed_number = len(failed_log)
if failed_number != 0:
    for testcase in failed_log:
        print(f'- {testcase}')
        print(failed_log[testcase])

# Exit with the number of failed testcases so that any failure is non-zero
sys.exit(failed_number)