Joel Laguna
Published © CC0

Raspberry Arcade Light gun tracking for MAME

I am using a Raspberry Pi 4 and a Pi Camera with infrared attachments to play MAME arcade light gun games.

Intermediate · Work in progress · 2 hours

Things used in this project

Hardware components

Raspberry Pi 4 Model B
×1
Raspberry Pi Camera Module
×1

Software apps and online services

Raspberry Pi Raspbian

Hand tools and fabrication machines

Multitool, Screwdriver

Story


Code

lightgun_cam_mouse.py

Python
Light gun camera mouse tracking. Make sure to add a shiny tape spot on the bottom of your mouse so the camera has a bright marker to track. A quick check of a saved calibration is sketched after the script.
#!/usr/bin/env python3
import argparse
import json
import os
import time

import cv2
import numpy as np
from pynput.mouse import Controller as MouseController


def load_calibration(path):
    if not os.path.exists(path):
        return None
    with open(path, "r") as f:
        data = json.load(f)
    H = np.array(data["H"], dtype=np.float64)
    screen_w = int(data["screen_w"])
    screen_h = int(data["screen_h"])
    return H, screen_w, screen_h


def save_calibration(path, H, screen_w, screen_h):
    data = {"H": H.tolist(), "screen_w": int(screen_w), "screen_h": int(screen_h), "saved_at": time.time()}
    with open(path, "w") as f:
        json.dump(data, f, indent=2)


def apply_homography(H, x, y):
    pt = np.array([[[x, y]]], dtype=np.float64)
    mapped = cv2.perspectiveTransform(pt, H)[0][0]
    return float(mapped[0]), float(mapped[1])


def clamp(v, lo, hi):
    return max(lo, min(hi, v))


def find_bright_blob(frame_bgr, thresh=235, min_area=20):
    """
    Bright-spot tracking:
      - grayscale
      - blur
      - threshold
      - morphology open
      - largest contour centroid
    """
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    _, mask = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)

    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)

    cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not cnts:
        return None, None, None, mask

    best = max(cnts, key=cv2.contourArea)
    area = cv2.contourArea(best)
    if area < min_area:
        return None, None, None, mask

    M = cv2.moments(best)
    if M["m00"] == 0:
        return None, None, None, mask

    cx = int(M["m10"] / M["m00"])
    cy = int(M["m01"] / M["m00"])
    return cx, cy, area, mask


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("--cam", type=int, default=0)
    ap.add_argument("--width", type=int, default=640)
    ap.add_argument("--height", type=int, default=480)
    ap.add_argument("--fps", type=int, default=60)

    ap.add_argument("--thresh", type=int, default=235, help="0-255 threshold; lower = easier to detect")
    ap.add_argument("--min-area", type=int, default=20)
    ap.add_argument("--smooth", type=float, default=0.25, help="0..1 smoothing; higher=snappier")

    ap.add_argument("--screen-w", type=int, default=1920)
    ap.add_argument("--screen-h", type=int, default=1080)
    ap.add_argument("--calib", default="calibration.json")

    ap.add_argument("--preview", action="store_true")
    ap.add_argument("--show-mask", action="store_true")
    args = ap.parse_args()

    cap = cv2.VideoCapture(args.cam, cv2.CAP_V4L2)
    if not cap.isOpened():
        raise RuntimeError("Could not open camera. Try --cam 0/1 and ensure /dev/video0 works.")

    cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)
    cap.set(cv2.CAP_PROP_FPS, args.fps)

    mouse = MouseController()

    H = None
    screen_w, screen_h = args.screen_w, args.screen_h
    loaded = load_calibration(args.calib)
    if loaded:
        H, screen_w, screen_h = loaded
        print(f"[OK] Loaded calibration {args.calib} ({screen_w}x{screen_h})")
    else:
        print("[INFO] No calibration loaded. Press 'c' to calibrate.")

    calibrating = False
    cam_points = {}  # 1..4
    smx = smy = None

    print("\nControls:")
    print("  c = start calibration (aim corner, press 1/2/3/4)")
    print("  s = save calibration")
    print("  r = reset calibration")
    print("  q / ESC = quit\n")

    while True:
        ok, frame = cap.read()
        if not ok:
            continue

        cx, cy, area, mask = find_bright_blob(frame, thresh=args.thresh, min_area=args.min_area)

        debug = frame.copy()
        if cx is not None:
            cv2.circle(debug, (cx, cy), 8, (0, 255, 0), 2)
            cv2.putText(debug, f"blob area={int(area)}", (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        if H is not None and cx is not None:
            mx, my = apply_homography(H, cx, cy)
            mx = clamp(mx, 0, screen_w - 1)
            my = clamp(my, 0, screen_h - 1)

            if smx is None:
                smx, smy = mx, my
            else:
                a = args.smooth
                smx = (1 - a) * smx + a * mx
                smy = (1 - a) * smy + a * my

            mouse.position = (int(smx), int(smy))

            cv2.putText(debug, f"screen=({int(mx)},{int(my)})", (10, 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

        if calibrating:
            cv2.putText(debug, "CAL: aim corners TL/TR/BR/BL then press 1/2/3/4", (10, args.height - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 255), 2)
            cv2.putText(debug, f"Captured: {sorted(cam_points.keys())}", (10, args.height - 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 255), 2)

        if args.preview:
            cv2.imshow("lightgun debug", debug)
            if args.show_mask:
                cv2.imshow("mask", mask)

        # NOTE: cv2.waitKey only receives key presses while an OpenCV window has focus,
        # so the c/s/r/q controls require running with --preview.
        k = cv2.waitKey(1) & 0xFF
        if k in (27, ord('q')):
            break
        elif k == ord('c'):
            calibrating = True
            cam_points = {}
            H = None
            print("[CAL] Started. Press 1/2/3/4 for TL/TR/BR/BL.")
        elif k == ord('r'):
            calibrating = False
            cam_points = {}
            H = None
            print("[CAL] Reset.")
        elif k == ord('s'):
            if H is not None:
                save_calibration(args.calib, H, screen_w, screen_h)
                print(f"[OK] Saved {args.calib}")
            else:
                print("[WARN] No calibration to save.")

        if calibrating and cx is not None and k in (ord('1'), ord('2'), ord('3'), ord('4')):
            idx = int(chr(k))
            cam_points[idx] = (cx, cy)
            print(f"[CAL] Point {idx} captured at camera=({cx},{cy})")

            if all(i in cam_points for i in (1, 2, 3, 4)):
                # 1=top-left, 2=top-right, 3=bottom-right, 4=bottom-left
                src = np.array([cam_points[1], cam_points[2], cam_points[3], cam_points[4]], dtype=np.float32)
                dst = np.array([[0, 0],
                                [screen_w - 1, 0],
                                [screen_w - 1, screen_h - 1],
                                [0, screen_h - 1]], dtype=np.float32)
                H, _ = cv2.findHomography(src, dst, method=0)
                calibrating = False
                print("[OK] Calibration complete. Mouse should now follow marker. Press 's' to save.")

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
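
If you want to sanity-check a saved calibration without launching the full tracker, a small script like the sketch below can load calibration.json and map one camera point into screen coordinates. This is only a sketch, assuming the file was written by lightgun_cam_mouse.py above and the camera runs at the default 640x480; check_calibration.py is a hypothetical name, not part of the project files.

#!/usr/bin/env python3
# check_calibration.py -- hypothetical helper for sanity-checking calibration.json
# (the file saved by lightgun_cam_mouse.py). Prints where the centre of a 640x480
# camera frame lands in screen coordinates.
import json

import cv2
import numpy as np

with open("calibration.json", "r") as f:
    data = json.load(f)

H = np.array(data["H"], dtype=np.float64)

# Centre of the default 640x480 camera frame.
cam_point = np.array([[[320.0, 240.0]]], dtype=np.float64)
screen_point = cv2.perspectiveTransform(cam_point, H)[0][0]

print(f"camera (320, 240) -> screen ({screen_point[0]:.0f}, {screen_point[1]:.0f})")
print(f"calibrated for {data['screen_w']}x{data['screen_h']} screen")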

mjpeg_stream.py

Python
Script serves an MJPEG video stream at http://x.x.x.x:8000. A quick client-side check is sketched after the script.
from flask import Flask, Response
from picamera2 import Picamera2
import cv2
import time

app = Flask(__name__)

picam2 = Picamera2()
config = picam2.create_video_configuration(main={"format": "RGB888", "size": (640, 480)})
picam2.configure(config)
picam2.start()
time.sleep(0.5)

def gen():
    while True:
        frame = picam2.capture_array()                       # RGB
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)       # BGR for OpenCV encode
        ok, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
        if not ok:
            continue
        yield (b"--frame\r\n"
               b"Content-Type: image/jpeg\r\n\r\n" + jpg.tobytes() + b"\r\n")

@app.route("/stream.mjpg")
def stream():
    return Response(gen(), mimetype="multipart/x-mixed-replace; boundary=frame")

@app.route("/")
def index():
    return '<html><body><h3>Pi Camera Stream</h3><img src="/stream.mjpg"></body></html>'

if __name__ == "__main__":
    # 0.0.0.0 = listen on all interfaces so other devices can connect
    app.run(host="0.0.0.0", port=8000, threaded=True)
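
To confirm the stream is reachable from another machine, OpenCV can open the MJPEG URL directly. The snippet below is only a quick sketch; 192.168.1.50 is a placeholder for your Pi's actual IP address.

# stream_check.py -- hypothetical quick test, run from another machine on the network.
# Replace 192.168.1.50 with the Pi's actual IP address.
import cv2

cap = cv2.VideoCapture("http://192.168.1.50:8000/stream.mjpg")
ok, frame = cap.read()
print("got a frame:", ok, frame.shape if ok else None)
cap.release()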
    

Credits

Joel Laguna
3 projects • 2 followers
My name is Joel Laguna. I am a game programmer. Game programming has been a long-time passion of mine. I use Python.
