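"""Convert the vision_transformer.onnx encoder to an RKNN model for the RK3588 NPU."""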
import os
import argparse
from sys import exit

import cv2
import numpy as np
from rknn.api import RKNN

# Run from the script's directory so the relative model and dataset paths resolve.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Input resolutions and batch sizes used to build the dynamic-input shape list.
image_sizes = [[448, 448]]
batch_sizes = [1]

def convert_encoder():
    rknn = RKNN(verbose=True)

    ONNX_MODEL = "vision_transformer.onnx"
    RKNN_MODEL = ONNX_MODEL.replace(".onnx", ".rknn")
    DATASET = "dataset.txt"
    QUANTIZE = False

    # One NCHW input shape per batch-size / image-size combination, for dynamic input.
    input_shapes = [[[batch_size, 3, image_size[0], image_size[1]]]
                    for batch_size in batch_sizes for image_size in image_sizes]
    print(input_shapes)

    print('--> Config model')
    rknn.config(quantized_algorithm='normal', quantized_method='channel', target_platform='rk3588',
                optimization_level=3, mean_values=[128, 128, 128], std_values=[128, 128, 128],
                dynamic_input=input_shapes)
    print('done')

    print('--> Loading model')
    ret = rknn.load_onnx(model=ONNX_MODEL)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    print('--> Building model')
    ret = rknn.build(do_quantization=QUANTIZE, dataset=DATASET, rknn_batch_size=None)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    print('--> Export RKNN model')
    ret = rknn.export_rknn(RKNN_MODEL)
    if ret != 0:
        print('Export RKNN model failed!')
        exit(ret)
    print('done')

    # Initialize the runtime on a connected RK3588 board to check that the exported model loads.
    rknn.init_runtime(target='rk3588')
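    # Optional sanity check, left here as an illustrative sketch rather than part of the
    # conversion flow: with an RK3588 board attached, the converted model could be run on
    # a sample image. "test.jpg" is a placeholder file name, not something this script provides.
    # img = cv2.imread("test.jpg")
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # img = cv2.resize(img, (image_sizes[0][1], image_sizes[0][0]))
    # outputs = rknn.inference(inputs=[img])
    # print([o.shape for o in outputs])
    # rknn.release()  # typical cleanup once the toolkit object is no longer needed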
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("model", type=str, help="model to convert", choices=["encoder", "all"], nargs='?')
    args = parser.parse_args()

    # No positional argument means "convert everything".
    if args.model is None:
        args.model = "all"

    if args.model == "encoder":
        convert_encoder()
    elif args.model == "all":
        convert_encoder()
    else:
        # Unreachable in practice: argparse already restricts the argument to the choices above.
        print(f"Unknown model: {args.model}")
        exit(1)
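# Usage (run from this directory; <script> stands for this file's name):
#   python <script>.py encoder    # convert just the encoder
#   python <script>.py            # no argument behaves like "all"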