pesi
/

rtmo / fix_input_batch_size_all.sh
Luigi's picture
Make ONNX models compatible with ONNXruntime's TensorrtExecutionProvider
b75f05d
raw
history blame
1.18 kB
#!/bin/bash
#
# Regenerate every ONNX model in MODEL_DIR with a fixed input batch size
# (variants for batch 1, 2, and 4), making the models compatible with
# ONNXruntime's TensorrtExecutionProvider.
#
# Requires: fix_input_batch_size.py reachable from the working directory,
# and a `python` interpreter on PATH.
set -euo pipefail

# Directory containing the original ONNX models
MODEL_DIR="."
# Directory to save the modified ONNX models
OUTPUT_DIR="."
# Path to the Python script that rewrites the model's input batch dimension
PYTHON_SCRIPT="fix_input_batch_size.py"

# Ensure output directory exists
mkdir -p "$OUTPUT_DIR"

# Make an unmatched glob expand to nothing instead of the literal "./*.onnx"
shopt -s nullglob

# Loop through each ONNX model in the directory
for model in "$MODEL_DIR"/*.onnx; do
  echo "Processing $model..."
  filename=$(basename -- "$model")
  base="${filename%.*}"

  # One variant per target batch size. The original script duplicated this
  # stanza three times and passed --batch_size 1 for the batch-2 model
  # (copy/paste bug); looping over the sizes fixes it and prevents a repeat.
  for batch_size in 1 2 4; do
    output_model="$OUTPUT_DIR/${base}_batch${batch_size}.onnx"
    python "$PYTHON_SCRIPT" "$model" "$output_model" --batch_size "$batch_size"
    echo "Generated model with batch size ${batch_size}: $output_model"
  done
done

echo "Batch size modification complete."