File size: 3,203 Bytes
04e6b4c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6d38c83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import tensorflow as tf

# Load the TFLite model and verify the flatbuffer is well-formed.
tflite_model_path = 'model.tflite'
interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
interpreter.allocate_tensors()

# NOTE(review): TensorFlow has no public API to turn a .tflite flatbuffer back
# into a SavedModel. The original code here was doubly broken:
#   * `tf.lite.TFLiteConverter.from_saved_model(...)` converts in the OPPOSITE
#     direction (SavedModel -> TFLite), and was pointed at a directory that
#     does not exist yet.
#   * `tf.saved_model.save(interpreter, ...)` raises, because
#     `tf.lite.Interpreter` is not a Trackable object.
# Either keep/regenerate the original SavedModel the .tflite was exported
# from, or skip the SavedModel step entirely and convert the .tflite straight
# to ONNX:
#   python -m tf2onnx.convert --tflite model.tflite --output model.onnx --opset 11
saved_model_dir = 'saved_model'


# Install the converter and the inference runtime.
pip install tf2onnx
# Fix: the PyPI package is named "onnxruntime" — `pip install onnx_runtime` fails.
pip install onnxruntime

# Convert the TensorFlow SavedModel to ONNX (opset 11).
python -m tf2onnx.convert --saved-model saved_model --output model.onnx --opset 11


import onnxruntime as ort
import numpy as np
from PIL import Image

# Run a single image through the exported ONNX model.
onnx_model_path = 'model.onnx'
session = ort.InferenceSession(onnx_model_path)

# Preprocess: force 3-channel RGB first — JPEGs can decode as grayscale/CMYK
# and PNGs as RGBA, which would give the model the wrong channel count — then
# resize to the model's input resolution.
image_path = 'image.jpg'
image = Image.open(image_path).convert('RGB').resize((320, 320))  # assumes a 320x320 input — TODO confirm
image_data = np.array(image).astype('float32')
# NOTE(review): no pixel normalization is applied here. If the model was
# trained on [0, 1] or mean/std-normalized inputs, scale image_data
# accordingly before running inference.
image_data = np.expand_dims(image_data, axis=0)  # add batch dimension -> (1, H, W, C)

# Feed the tensor under the model's first input name and run the graph.
input_name = session.get_inputs()[0].name
output = session.run(None, {input_name: image_data})

# `output` is the list of raw model outputs (e.g. boxes, scores, labels).
print(output)


import onnxruntime as ort
import numpy as np
from PIL import Image

# Load the ONNX model once; the session is reused for every inference below.
onnx_model_path = 'model.onnx'
session = ort.InferenceSession(onnx_model_path)

def preprocess_image(image_path, input_size=(320, 320)):
    """Load one image and shape it for the model.

    Opens *image_path*, forces 3-channel RGB (guards against grayscale,
    RGBA, or CMYK sources that would otherwise change the channel count),
    resizes to *input_size*, and returns a float32 array of shape
    (1, height, width, channels) ready to be stacked into a batch.

    NOTE(review): no pixel normalization is applied; add scaling here if
    the model expects [0, 1] or mean/std-normalized input.
    """
    image = Image.open(image_path).convert('RGB').resize(input_size)
    image_data = np.asarray(image, dtype='float32')
    return np.expand_dims(image_data, axis=0)  # add batch dimension

# Batch inference over a list of image files.
image_paths = ['image1.jpg', 'image2.jpg', 'image3.jpg']  # files to score
batch_size = len(image_paths)

# Stack the per-image (1, H, W, C) arrays into one (N, H, W, C) batch tensor.
batch_images = np.vstack([preprocess_image(path) for path in image_paths])

# The model's first input receives the whole batch in a single run() call.
input_name = session.get_inputs()[0].name
outputs = session.run(None, {input_name: batch_images})

# Unpack the three output tensors: per-image scores, bounding boxes, labels.
# (Assumes the exported graph emits them in exactly this order — verify
# against the model if detections look wrong.)
scores_batch, bboxes_batch, labels_batch = outputs[0], outputs[1], outputs[2]

# Only detections whose confidence clears this threshold are reported.
score_threshold = 0.5

for i in range(batch_size):
    # Pull this image's slice out of each batched output.
    scores, bboxes, labels = scores_batch[i], bboxes_batch[i], labels_batch[i]

    # Indices of the detections that pass the confidence cut-off.
    keep = np.where(scores > score_threshold)

    print(f"Image {i+1}:")
    print("Filtered Scores:", scores[keep])
    print("Filtered Bounding Boxes:", bboxes[keep])
    print("Filtered Labels:", labels[keep])
    print('---')