Rifqi Abdillah • Lutfi Hidayati
Published © GPL3+

BisindoMate - Indonesian Sign Language Translator

BisindoMate empowers deaf individuals by providing accessible sign language interpretation.

Advanced • Full instructions provided • Over 1 day • 202

Things used in this project

Hardware components

AMD Ryzen AI PCs
×1
Universal USB webcam
×1

Software apps and online services

Python
Keras / TensorFlow
OpenCV
AMD Vitis Unified Software Platform

Story


Code

TestHandLandmarks.py

Python
Detects hands from the webcam feed and draws MediaPipe hand landmarks
import cv2
import mediapipe as mp

# Initialize MediaPipe hands module
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=False,
                       max_num_hands=2,
                       min_detection_confidence=0.5,
                       min_tracking_confidence=0.5)
mp_draw = mp.solutions.drawing_utils

# Open the webcam
cap = cv2.VideoCapture(0)

while cap.isOpened():
    success, image = cap.read()
    if not success:
        print("Ignoring empty camera frame.")
        continue

    # Convert the BGR image to RGB
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Process the image and find hands
    results = hands.process(image_rgb)

    # Draw the hand annotations on the image
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_draw.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)

    # Display the image
    cv2.imshow('Hand Tracking', image)

    # Exit on pressing 'q'
    if cv2.waitKey(5) & 0xFF == ord('q'):
        break

# Release the webcam and close the window
cap.release()
cv2.destroyAllWindows()
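
The landmark output above can also be used to prepare training images. The snippet below is a minimal sketch (not part of the original project files; the crop_hand helper and its margin are illustrative) showing how a detected hand could be cropped from the frame and resized to the 150x150 input that CreateModel.py expects:

import cv2

# Illustrative helper (an assumption, not from the original code): crop the
# hand region from a BGR frame using the landmark bounding box, then resize
# it to the model's 150x150 input size.
def crop_hand(image, hand_landmarks, margin=20, size=(150, 150)):
    h, w, _ = image.shape
    xs = [lm.x * w for lm in hand_landmarks.landmark]
    ys = [lm.y * h for lm in hand_landmarks.landmark]
    x1, y1 = max(int(min(xs)) - margin, 0), max(int(min(ys)) - margin, 0)
    x2, y2 = min(int(max(xs)) + margin, w), min(int(max(ys)) + margin, h)
    return cv2.resize(image[y1:y2, x1:x2], size)

Calling cv2.imwrite on the result inside the landmark loop would accumulate one class-labelled training image per frame.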

CreateModel.py

Python
Builds and trains the CNN classifier with Keras / TensorFlow
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers

# Define the number of output classes
num_classes = 4

# Create a Sequential model
model = Sequential([
    layers.Rescaling(1./255, input_shape=(150, 150, 3)),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Conv2D(128, 3, activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(num_classes)
])

# Compile the model with the Adam optimizer, sparse categorical cross-entropy loss, and accuracy metric
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Define a custom callback to stop training when validation accuracy reaches above 99%
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if logs.get('val_accuracy', 0) > 0.99:
            print("\nAccuracy has reached > 99%!")
            self.model.stop_training = True

# Instantiate the callback
callbacks = myCallback()

# Set the number of epochs
epochs = 5

# Train the model with training and validation datasets, including the custom callback
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs,
    callbacks=[callbacks]
)
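
CreateModel.py assumes that train_ds and val_ds already exist. A minimal way to build them is sketched below; the dataset directory, split ratio, and batch size are assumptions, and the integer labels produced by default match the sparse categorical cross-entropy loss used above:

import tensorflow as tf

# A minimal sketch, assuming the dataset lives in class-labelled subfolders
# (e.g. dataset/A, dataset/B, ...). The path, split, and batch size are
# illustrative.
data_dir = "dataset"

train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(150, 150),  # matches the model's input shape
    batch_size=32)

val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(150, 150),
    batch_size=32)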

ConvertModel.py

Python
Converts the trained Keras model to ONNX format
import tensorflow as tf
import tf2onnx

# Define the ONNX input signature (`model` is the trained Keras model
# from CreateModel.py)
spec = (tf.TensorSpec((None, 150, 150, 3), tf.float32, name="input"),)
model_proto, _ = tf2onnx.convert.from_keras(model, input_signature=spec, opset=13)

output_path = "model.onnx"
with open(output_path, "wb") as f:
    f.write(model_proto.SerializeToString())

print(f"Model telah disimpan ke {output_path}")

TestModelONNX.py

Python
Tests ONNX model inference with ONNX Runtime and the Vitis AI Execution Provider for deployment on the Ryzen AI NPU
import os
import numpy as np
import onnxruntime as ort
from pathlib import Path
from tensorflow.keras.preprocessing import image

model = "model.onnx"

path = r'voe-4.0-win_amd64'
providers = ['VitisAIExecutionProvider']
cache_dir = Path(__file__).parent.resolve()
provider_options = [{
    'config_file': os.path.join('..', path, 'vaip_config.json'),
    'cacheDir': str(cache_dir),
    'cacheKey': 'modelcachekey_quick'
}]

try:
    session = ort.InferenceSession(model, providers=providers, provider_options=provider_options)
except Exception as e:
    print("Test Failed:", e)
    exit()

def preprocess_image(img_path):
    img = image.load_img(img_path, target_size=(150, 150))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)  # Add batch dimension
    img_array = img_array / 255.0  # Rescale as done during training
    return img_array.astype(np.float32)

# Test with a random image from your dataset
test_image_path = 'C:/Users/rifqi/Downloads/Dataset Test/R/778_R_18.jpg'
input_data = preprocess_image(test_image_path)

try:
    outputs = session.run(None, {'input': input_data})
    print("Test Passed. Output:", outputs)
except Exception as e:
    print("Test Failed:", e)

Credits

Rifqi Abdillah

11 projects • 10 followers
Lecturer in Informatics & Electronics Engineering. Passionate about Machine Learning, Edge Computing, Image Processing, and IoT.
Lutfi Hidayati

1 project • 0 followers
