Ashwini Kumar Sinha
Created July 30, 2024

AMD AI X-RAY Machine

Transform healthcare with our AI-powered AMD X-Ray machine. Real-time analysis detects fractures and tumors instantly.


Things used in this project

Hardware components

AMD Radeon™ Pro W7900 GPU
×1
Minisforum Venus UM790 Pro with AMD Ryzen™ 9
×1

Software apps and online services

AMD ROCm™ Software

Hand tools and fabrication machines

X-Ray Machine

Story


Code

Training code

Python
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# Check if AMD ROCm is available
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:
    try:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(physical_devices), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        print(e)

# Load CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()

# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0

# One-hot encode the labels
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# Define a simple CNN model
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])

# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(train_images, train_labels, epochs=10, batch_size=64, validation_data=(test_images, test_labels))

# Save the trained model
model.save('keras_model.h5')

print('Finished Training and saved model as keras_model.h5')
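The script above uses CIFAR-10 as a stand-in dataset so the full training loop can be exercised on the Radeon Pro W7900. Below is a minimal sketch of how the same pipeline could instead be pointed at a labelled X-ray image folder; the directory name xray_dataset and its fracture/ and no_fracture/ subfolders are hypothetical, and the sketch assumes a TensorFlow 2.x ROCm build with the standard Keras utilities.

import tensorflow as tf
from tensorflow.keras import layers, models

IMG_SIZE = (128, 128)   # larger than CIFAR-10's 32x32; adjust to the X-ray images
BATCH_SIZE = 32

# Build labelled datasets from the folder names (hypothetical layout:
# xray_dataset/fracture/*.png and xray_dataset/no_fracture/*.png)
train_ds = tf.keras.utils.image_dataset_from_directory(
    'xray_dataset',
    validation_split=0.2,
    subset='training',
    seed=42,
    image_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    label_mode='categorical')

val_ds = tf.keras.utils.image_dataset_from_directory(
    'xray_dataset',
    validation_split=0.2,
    subset='validation',
    seed=42,
    image_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    label_mode='categorical')

# Same CNN as above, with the input shape and class count adjusted
model = models.Sequential([
    layers.Rescaling(1.0 / 255, input_shape=IMG_SIZE + (3,)),  # normalize in-graph
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(2, activation='softmax')   # fracture / no_fracture
])

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_ds, validation_data=val_ds, epochs=10)
model.save('keras_model.h5')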

test_train_model.py

Python
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
import numpy as np

# Check if AMD ROCm is available
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:
    try:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(physical_devices), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        print(e)

# Load CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()

# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0

# One-hot encode the labels
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# Define a simple CNN model
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])

# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(train_images, train_labels, epochs=10, batch_size=64, validation_data=(test_images, test_labels))

# Save the trained model
model.save('keras_model.h5')

print('Finished Training and saved model as keras_model.h5')

# Load the model
loaded_model = models.load_model('keras_model.h5')

# Evaluate the model on test data
test_loss, test_acc = loaded_model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)

# Make predictions on test data
predictions = loaded_model.predict(test_images)

# Print the first 5 predictions and corresponding labels
for i in range(5):
    print(f"Prediction: {np.argmax(predictions[i])}, Actual: {np.argmax(test_labels[i])}")

testing_Xraydetection_model.py

Python
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
import cv2

# Load the trained model
model = load_model('keras_model.h5')

# Function to preprocess the video frame
def preprocess_frame(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV captures BGR; the model was trained on RGB
    frame = cv2.resize(frame, (32, 32))  # Resize to match model's input size
    frame = frame.astype('float32') / 255.0  # Normalize to range [0, 1]
    frame = np.expand_dims(frame, axis=0)  # Add batch dimension
    return frame

# Function to get predictions
def get_prediction(frame):
    processed_frame = preprocess_frame(frame)
    prediction = model.predict(processed_frame)
    return prediction

# Open a connection to the camera
cap = cv2.VideoCapture(0)

if not cap.isOpened():
    print("Error: Could not open camera.")
    exit()

while True:
    ret, frame = cap.read()

    if not ret:
        print("Error: Could not read frame.")
        break

    # Get prediction for the current frame
    prediction = get_prediction(frame)
    predicted_class = np.argmax(prediction)

    # Display the prediction on the frame
    cv2.putText(frame, f'Prediction: {predicted_class}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)

    # Show the frame
    cv2.imshow('Real-Time Prediction', frame)

    # Break the loop on 'q' key press
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close all OpenCV windows
cap.release()
cv2.destroyAllWindows()

testingXraymodel2.py

Python
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
import cv2

# Load the trained model
model = load_model('keras_model.h5')

# Function to preprocess the video frame
def preprocess_frame(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV captures BGR; the model was trained on RGB
    frame = cv2.resize(frame, (32, 32))  # Resize to match model's input size
    frame = frame.astype('float32') / 255.0  # Normalize to range [0, 1]
    frame = np.expand_dims(frame, axis=0)  # Add batch dimension
    return frame

# Function to get predictions
def get_prediction(frame):
    processed_frame = preprocess_frame(frame)
    prediction = model.predict(processed_frame)
    return prediction

# Open a connection to the camera
cap = cv2.VideoCapture(0)

if not cap.isOpened():
    print("Error: Could not open camera.")
    exit()

while True:
    ret, frame = cap.read()

    if not ret:
        print("Error: Could not read frame.")
        break

    # Get prediction for the current frame
    prediction = get_prediction(frame)
    predicted_class = np.argmax(prediction)
    fracture_probability = prediction[0][predicted_class] * 100

    # Display the prediction on the frame
    if predicted_class == 1:
        result_text = f'Fracture: {fracture_probability:.2f}%'
    else:
        result_text = f'No Fracture: {fracture_probability:.2f}%'

    cv2.putText(frame, result_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)

    # Show the frame
    cv2.imshow('Real-Time X-Ray Analysis', frame)

    # Break the loop on 'q' key press
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close all OpenCV windows
cap.release()
cv2.destroyAllWindows()
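An X-ray workstation will often work from saved image files rather than a live camera feed. The sketch below reuses the same preprocessing and prediction steps on a single file; sample_xray.png is a hypothetical file name, and class index 1 meaning "fracture" follows the assumption made in testingXraymodel2.py above.

import cv2
import numpy as np
from tensorflow.keras.models import load_model

model = load_model('keras_model.h5')

# Read the image from disk (hypothetical file name)
image = cv2.imread('sample_xray.png')
if image is None:
    raise FileNotFoundError('sample_xray.png not found')

# Same preprocessing as the real-time scripts, plus BGR -> RGB conversion
frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (32, 32)).astype('float32') / 255.0
frame = np.expand_dims(frame, axis=0)

prediction = model.predict(frame)
predicted_class = int(np.argmax(prediction))
confidence = float(prediction[0][predicted_class]) * 100

# Class index 1 is assumed to mean "fracture", as in testingXraymodel2.py
label = 'Fracture' if predicted_class == 1 else 'No Fracture'
print(f'{label}: {confidence:.2f}%')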

Credits

Ashwini Kumar Sinha
34 projects • 80 followers
Ashwini Kumar Sinha is a robotics lover and electronics hobbyist. He works at EFY-I and is the founder and CTO of Buttonboard LLC.
Thanks to kaggle.com.
