Goran Vuksic
Published © MIT

BD-1: vision AI powered droid

Star Wars BD-1 droid powered by Raspberry Pi and vision AI, assembled from LEGO bricks.

Intermediate · Full instructions provided · Over 3 days · 10,365 views
BD-1: vision AI powered droid

Things used in this project

Hardware components

Raspberry Pi 4 Model B
Raspberry Pi 4 Model B
×1
LEGO Star Wars 75335 BD-1
×2
Max7219 8x32 dot matrix LED display
×1
M5Stack Servo Kit 180° Brick-compatible
×1
Webcam Aukey PC-W1 1080p 30fps
×1

Software apps and online services

Raspbian
Raspberry Pi Raspbian
Microsoft Azure
Microsoft Azure

Story

Read more

Schematics

How to connect Max7219 and M5Stack servo

Code

Max7219 controller

Python
# BD1 LEGO Max7219 controller
# Author: Goran Vuksic

import time
import argparse
import random
import numpy as np

from luma.led_matrix.device import max7219
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas

def bd1(n, block_orientation, rotate, inreverse):
    """Drive cascaded MAX7219 LED matrices with an animated BD-1 light pattern.

    Runs forever: draws the pattern, sleeps 0.2 s, randomizes brightness,
    shifts the pattern, and repeats.  Stop with Ctrl-C (the caller catches
    KeyboardInterrupt).

    Args:
        n: number of cascaded 8x8 MAX7219 blocks.
        block_orientation: 0, 90 or -90; corrects block wiring orientation.
        rotate: display rotation (0=0, 1=90, 2=180, 3=270 degrees).
        inreverse: True if the blocks are wired in reverse order.
    """
    # create matrix device
    serial = spi(port=0, device=0, gpio=noop())
    device = max7219(serial,
                     cascaded=n,
                     block_orientation=block_orientation,
                     rotate=rotate,
                     blocks_arranged_in_reverse_order=inreverse)

    # set initial intensity (max contrast)
    device.contrast(16)

    # initial pattern: 8 rows x 4 blocks x 8 columns of on/off pixels
    matrixbd1 = [
    [[1,0,1,0,1,1,0,1],[1,1,0,1,1,1,0,1],[1,1,1,0,0,1,0,1],[0,1,1,0,1,0,1,1]],
    [[0,1,1,0,1,0,1,1],[0,1,1,1,0,1,1,0],[0,1,1,1,1,1,0,1],[1,1,0,1,1,0,1,1]],
    [[1,1,0,1,1,1,0,1],[1,1,0,1,1,1,0,1],[1,1,0,1,1,0,1,1],[1,1,0,1,0,1,1,0]],
    [[1,1,1,0,1,1,1,0],[1,1,1,0,1,0,1,1],[1,0,1,1,0,1,1,1],[1,1,1,0,1,1,0,1]],
    [[1,0,0,1,1,1,0,1],[1,1,0,1,1,0,1,1],[0,1,1,1,1,0,1,1],[1,1,0,1,1,0,1,1]],
    [[0,1,1,0,1,0,1,1],[1,1,1,1,1,0,1,1],[1,1,1,0,1,1,1,0],[1,0,1,1,1,1,1,1]],
    [[1,1,0,1,1,1,0,1],[1,1,1,0,1,1,0,1],[1,0,1,1,0,1,0,1],[1,1,0,1,1,0,1,0]],
    [[1,1,0,1,0,1,1,1],[1,0,1,1,1,1,0,1],[1,1,0,0,1,1,0,1],[0,1,1,0,0,1,1,1]]
    ]

    # main loop
    while True:
        # turn on lights
        with canvas(device) as draw:
            for i in range(4):          # block index
                for j in range(8):      # row within a block
                    for k in range(8):  # column within a block
                        if matrixbd1[j][i][k] == 1:
                            # blocks stack vertically; the last block is drawn
                            # at the top of the 8x32 display
                            draw.point((k, j + 24 - 8 * i), fill="white")

        # sleep
        time.sleep(0.2)

        # random brightness flicker (even values 2..16)
        device.contrast(random.randint(1, 8) * 2)

        # shift each 8-pixel row horizontally, alternating direction per row
        for a in range(8):
            shift = -1 if a % 2 == 0 else 1
            for b in range(4):
                matrixbd1[a][b] = np.roll(matrixbd1[a][b], shift)

        # rotate the edge pixel of each row vertically through the 4 blocks;
        # even rows cycle the right edge, odd rows cycle the left edge
        for a in range(8):
            if a % 2 == 0:
                templastled = matrixbd1[a][0][7]
                for b in range(3):
                    matrixbd1[a][b][7] = matrixbd1[a][b + 1][7]
                # the original wrote "matrixbd1[a][b + 1][7]" relying on the
                # leaked loop variable b (== 2); index 3 is what that always
                # evaluated to — state it explicitly
                matrixbd1[a][3][7] = templastled
            else:
                templastled = matrixbd1[a][3][0]
                for b in range(3, 0, -1):
                    matrixbd1[a][b][0] = matrixbd1[a][b - 1][0]
                matrixbd1[a][0][0] = templastled
        
               
        
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='bd1_max7219 arguments',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--cascaded', '-n', type=int, default=4, help='Number of cascaded MAX7219 LED matrices')
    parser.add_argument('--block-orientation', type=int, default=0, choices=[0, 90, -90], help='Corrects block orientation when wired vertically')
    parser.add_argument('--rotate', type=int, default=1, choices=[0, 1, 2, 3], help='Rotate display 0=0°, 1=90°, 2=180°, 3=270°')
    parser.add_argument('--reverse-order', type=bool, default=False, help='Set to true if blocks are in reverse order')

    args = parser.parse_args()

    try:
        bd1(args.cascaded, args.block_orientation, args.rotate, args.reverse_order)
    except KeyboardInterrupt:
        pass

Servo controller

Python
# BD1 LEGO servo M5Stack controller
# Author: Goran Vuksic
#
# Sweeps the head servo left, to neutral, then right, then releases the GPIO.

import RPi.GPIO as GPIO
from time import sleep

# physical pin 8 carries the servo control signal
SERVO_PIN = 8

GPIO.setmode(GPIO.BOARD)
GPIO.setup(SERVO_PIN, GPIO.OUT)

# 50 Hz is the standard hobby-servo PWM frame rate
servo = GPIO.PWM(SERVO_PIN, 50)
servo.start(0)
sleep(1)

# duty cycles: 5 = left (-90 deg), 7.5 = neutral, 10 = right (+90 deg);
# pause one second at each stop
for duty_cycle in (5, 7.5, 10):
    servo.ChangeDutyCycle(duty_cycle)
    sleep(1)

# stop the PWM output and release the GPIO pins
servo.stop()
GPIO.cleanup()

Vision controller

Python
# BD1 LEGO Controller
# Author: Goran Vuksic
#
# Captures webcam frames, sends them to an Azure Custom Vision object
# detection model, and turns the head servo toward the detected BD-1.

import cv2
import RPi.GPIO as GPIO
from time import sleep
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials

# credentials for Azure Custom Vision
credentials = ApiKeyCredentials(in_headers={"Prediction-key": "<PREDICTION_KEY>"})
predictor = CustomVisionPredictionClient("<ENDPOINT_URL>", credentials)

GPIO.setmode(GPIO.BOARD)

# servo motor is connected on pin 8
GPIO.setup(8, GPIO.OUT)

# start servo (50 Hz hobby-servo frame rate) and move it to neutral
servo = GPIO.PWM(8, 50)
servo.start(0)
sleep(1)
servo.ChangeDutyCycle(7.5)
sleep(0.5)
# duty 0 stops the pulse train so the servo holds position without jitter
servo.ChangeDutyCycle(0)
sleep(0.5)

# camera
camera = cv2.VideoCapture(0)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# positions: 0 - left, 1 - middle, 2 - right
currentposition = 1

try:
    while True:
        # capture a frame; skip the iteration if the camera returns nothing
        # (the original crashed in imwrite when read() failed)
        ret, image = camera.read()
        if not ret:
            continue
        cv2.imwrite('capture.png', image)

        with open("capture.png", mode="rb") as captured_image:
            results = predictor.detect_image("<PROJECT_ID>", "<ITERATION_NAME>", captured_image)

        for prediction in results.predictions:
            if prediction.probability > 0.5:
                print (prediction.bounding_box)
                # horizontal center of the detection (0.0 = left edge, 1.0 = right)
                locationBD1 = prediction.bounding_box.left + prediction.bounding_box.width / 2
                if locationBD1 < 0.3:
                    if currentposition != 0:
                        servo.ChangeDutyCycle(5)
                        currentposition = 0
                elif locationBD1 > 0.7:
                    if currentposition != 2:
                        servo.ChangeDutyCycle(10)
                        currentposition = 2
                else:
                    # plain else closes the gap the original had at exactly
                    # 0.3 / 0.7, where no branch matched
                    if currentposition != 1:
                        servo.ChangeDutyCycle(7.5)
                        currentposition = 1
                sleep(0.5)
        # stop the pulse train once per frame; the original reset it inside
        # the prediction loop, once per detection
        servo.ChangeDutyCycle(0)
        # enable to store result as image with bounding box 
        #bbox = prediction.bounding_box
        #result_image = cv2.rectangle(image, (int(bbox.left * 640), int(bbox.top * 480)), (int((bbox.left + bbox.width) * 640), int((bbox.top + bbox.height) * 480)), (0, 255, 0), 3)
        #cv2.imwrite('result.png', result_image)

except KeyboardInterrupt:
    # Ctrl-C is the normal way to stop the tracking loop
    pass
finally:
    # release hardware on ANY exit path, not only Ctrl-C (the original leaked
    # GPIO state and the camera handle on unexpected exceptions)
    servo.stop()
    GPIO.cleanup()
    camera.release()

Github

Credits

Goran Vuksic

Goran Vuksic

4 projects • 25 followers
Engineering manager, Microsoft AI MVP, cofounder of syntheticAIdata, father, hitchhiker through the galaxy...

Comments