diff --git a/models/ResNet50/pokedex_ResNet50.onnx b/models/ResNet50/pokedex_ResNet50.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0730f25196dc2551aefbbec09ce54e69200fe9bb
Binary files /dev/null and b/models/ResNet50/pokedex_ResNet50.onnx differ
diff --git a/python/convert_onnx.py b/python/convert_onnx.py
index 846a0b8b66d6be8285407ce952242c4d0400c942..a90d6d1b941da7cd367a300c02670a3393ddd9ff 100644
--- a/python/convert_onnx.py
+++ b/python/convert_onnx.py
@@ -1,6 +1,7 @@
 import tensorflow as tf
 import tf2onnx
 import argparse
+import keras
 
 # --- WHAT ? ---
 parser = argparse.ArgumentParser(description="WHAT ?!")
@@ -20,16 +21,16 @@ elif args.model == "2":
     size_2=(1, 256, 256, 3)
 
 # --- Load Sequential model ---
-seq_model = tf.keras.models.load_model(h5_path, compile=False)
+seq_model = keras.models.load_model(h5_path, compile=False)
 
 # --- Create input layer with same shape ---
-inputs = tf.keras.Input(shape=size, name="input")
+inputs = keras.Input(shape=size, name="input")
 
 # --- Call the Sequential model as a function ---
 outputs = seq_model(inputs)
 
 # --- Wrap in Functional model ---
-model = tf.keras.Model(inputs=inputs, outputs=outputs)
+model = keras.Model(inputs=inputs, outputs=outputs)
 
 # --- Convert to ONNX ---
 spec = (tf.TensorSpec(size_2, tf.float32, name="input"),)
diff --git a/python/hailo_compile.py b/python/hailo_compile.py
deleted file mode 100644
index 2f25e2c51e3d0b8edd916bf30457d1d53b2551c7..0000000000000000000000000000000000000000
--- a/python/hailo_compile.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os
-import subprocess
-import argparse
-import onnx
-
-parser = argparse.ArgumentParser(description="Convert, parse, optimize and compile model for Hailo.")
-parser.add_argument("--model", choices=["1", "2"], required=True, help="1 = ResNet50, 2 = Xception")
-parser.add_argument("--calib", default="--use-random-calib-set", help="Calibration data path or '--use-random-calib-set'")
-args = parser.parse_args()
-
-def get_onnx_io_names(onnx_path):
-    model = onnx.load(onnx_path)
-    input_name = model.graph.input[0].name
-    output_name = model.graph.output[0].name
-    return input_name, output_name
-
-# Paths
-if args.model == "1":
-    base_model_path = "../models/ResNet50"
-    model = "pokedex_ResNet50"
-elif args.model == "2":
-    base_model_path = "../models/Xception"
-    model = "pokedex_Xception"
-
-onnx_path = os.path.join(base_model_path, f"pokedex_{model}.onnx")
-har_path = os.path.join(base_model_path, f"{model}.har")
-optimized_har_path = os.path.join(base_model_path, f"{model}_optimized.har")
-hef_path = os.path.join(base_model_path, f"{model}.hef")
-
-# Node names
-start_node, end_node = get_onnx_io_names(onnx_path)
-print(f"-- Using start_node: {start_node}, end_node: {end_node}")
-
-# Step 1: Parse
-print(f"-- Parsing {onnx_path}...")
-subprocess.run([
-    "hailo", "parser", "onnx", onnx_path,
-    "--start-node-names", start_node,
-    "--end-nodes-names", end_node,
-    "--hw-arch", "hailo8l"
-])
-
-# Step 2: Optimize
-print(f"-- Optimizing to {optimized_har_path}...")
-optimize_cmd = [
-    "hailo", "optimize", har_path,
-    "--hw-arch", "hailo8l",
-    "--use-random-calib-set"
-]
-subprocess.run(optimize_cmd)
-
-# Step 3: Compile
-print(f"-- Compiling to {hef_path}...")
-subprocess.run([
-    "hailo", "compiler", optimized_har_path,
-    "--hw-arch", "hailo8l",
-    "--performance",
-])
-print("-- Done.")
diff --git a/python/pokedex_test.py b/python/pokedex_test.py
index 011ab2951b78163e7f483a2f91350766749eab0a..6a3b9668832f8856e6b3fd5ce29b68eff5a50c55 100644
--- a/python/pokedex_test.py
+++ b/python/pokedex_test.py
@@ -1,5 +1,5 @@
-from tensorflow import keras
 import tensorflow as tf
+import keras
 import matplotlib.pyplot as plt
 import numpy as np
 import os
@@ -7,74 +7,70 @@ import random
 import json
 import argparse
 
-# --- WHAT ? ---
-parser = argparse.ArgumentParser(description="WHAT ?!")
+# --- Parse CLI arguments ---
+parser = argparse.ArgumentParser(description="Test trained Pokémon model.")
 parser.add_argument("--model", choices=["1", "2"], required=True, help="1 = ResNet50, 2 = Xception")
 args = parser.parse_args()
 
-# Paths
+# --- Paths ---
 if args.model == "1":
     h5_path = "../models/ResNet50/pokedex_ResNet50.h5"
     json_path = "../models/ResNet50/class_names.json"
-    size = (224,224)
+    size = (224, 224)
 elif args.model == "2":
     h5_path = "../models/Xception/pokedex_Xception.h5"
     json_path = "../models/Xception/class_names.json"
-    size = (256,256)
+    size = (256, 256)
+
+base_path = "../Combined_Dataset"
 
-# --- Load class names from JSON ---
+# --- Load class names ---
 with open(json_path, "r") as f:
     class_names = json.load(f)
-    class_names = [class_names[i] for i in range(len(class_names))] # convert to list
+    class_names = [class_names[i] for i in range(len(class_names))]
 
-# --- Load trained model ---
-model = keras.models.load_model(h5_path)
+# --- Load model (NO COMPILE to avoid 'reduction=auto' bug) ---
+model = keras.models.load_model(h5_path, compile=False)
 
-# --- Paths ---
-base_path = "../Combined_Dataset"
-
-# --- Prepare 2x2 Plot ---
+# --- 2x2 Random Image Test ---
 plt.figure(figsize=(10, 10))
 
 for i in range(4):
-    # Pick random class and image
-    random_class = random.choice(class_names)
-    class_folder = os.path.join(base_path, random_class)
-    random_image = random.choice([
+    # Pick random Pokémon class & image
+    true_class = random.choice(class_names)
+    class_folder = os.path.join(base_path, true_class)
+    img_file = random.choice([
        f for f in os.listdir(class_folder)
        if f.lower().endswith(('.png', '.jpg', '.jpeg'))
     ])
-    img_path = os.path.join(class_folder, random_image)
+    img_path = os.path.join(class_folder, img_file)
 
-    # --- Load & Preprocess Image ---
-    img = keras.utils.load_img(img_path, target_size=size)  # resize to match model input
+    # Load and preprocess image
+    img = keras.utils.load_img(img_path, target_size=size)
     img_array = keras.utils.img_to_array(img)
-    img_array = img_array / 255.0  # normalize if your model expects it
-    img_array = tf.expand_dims(img_array, 0)
+    img_array = tf.expand_dims(img_array, 0)  # [1, height, width, 3]
 
-    # --- Predict ---
+    # Predict
     predictions = model.predict(img_array, verbose=0)
-    probabilities = tf.nn.softmax(predictions[0])
-    predicted_class_index = np.argmax(probabilities)
-    predicted_label = class_names[predicted_class_index]
-    confidence = 100 * probabilities[predicted_class_index]
-
-    top_5_indices = np.argsort(probabilities)[-5:][::-1]
-    print("\nTop 5 predictions:")
-    for idx in top_5_indices:
-        print(f"{class_names[idx]:<20}: {probabilities[idx]:.4f}")
-
+    probabilities = tf.nn.softmax(predictions[0]).numpy()
+    predicted_index = np.argmax(probabilities)
+    predicted_label = class_names[predicted_index]
+    confidence = 100 * probabilities[predicted_index]
+    is_correct = predicted_label == true_class
 
-    # Compare with actual
-    is_correct = predicted_label == random_class
+    # Show top 5
+    print(f"\n Image: {img_file} | True: {true_class}")
+    print("-- Top 5 predictions:")
+    for idx in np.argsort(probabilities)[-5:][::-1]:
+        print(f"{class_names[idx]:<20}: {probabilities[idx]*100:.2f}%")
 
-    # --- Plot ---
+    # Plot
     ax = plt.subplot(2, 2, i + 1)
     plt.imshow(img)
     plt.axis("off")
     plt.title(
         f"Pred: {predicted_label}\n"
-        f"True: {random_class}\n"
+        f"True: {true_class}\n"
         f"{'YES' if is_correct else 'NO'} | {confidence:.1f}%",
         fontsize=10
     )
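
Reviewer note (not part of the changeset): convert_onnx.py wraps the Sequential .h5 model in a Functional model and exports it with tf2onnx, and the resulting pokedex_ResNet50.onnx is committed above. A minimal sketch for sanity-checking that export against the original Keras weights using onnxruntime follows; the paths, the (1, 224, 224, 3) input shape, and the working directory are assumptions taken from the ResNet50 branch of these scripts, not code in this diff.

# Hypothetical verification script (assumes it is run from python/, like the scripts above).
import numpy as np
import onnxruntime as ort
import keras

onnx_path = "../models/ResNet50/pokedex_ResNet50.onnx"
h5_path = "../models/ResNet50/pokedex_ResNet50.h5"

# Random batch matching the exported ResNet50 signature (1, 224, 224, 3)
x = np.random.rand(1, 224, 224, 3).astype(np.float32)

# Run the ONNX graph with onnxruntime
session = ort.InferenceSession(onnx_path)
input_name = session.get_inputs()[0].name
onnx_out = session.run(None, {input_name: x})[0]

# Run the original Keras model on the same batch (compile=False, as in pokedex_test.py)
keras_model = keras.models.load_model(h5_path, compile=False)
keras_out = keras_model.predict(x, verbose=0)

# The two outputs should agree up to floating-point tolerance if the export is faithful
print("max abs diff:", np.abs(onnx_out - keras_out).max())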