Guillermo Perez Guillen
Created July 29, 2020 © CC BY

TESLA Robot

Cost-effective robotic solution for surface sanitization - Amazon Alexa / Neural Networks / PID Controller / Tesla coil / UV lamp / OpenCV

Difficulty: Expert · Full instructions provided · Time required: 8 days · 237

Things used in this project

Hardware components

Echo Dot
Amazon Alexa Echo Dot
×1
Raspberry Pi 3 Model B
Raspberry Pi 3 Model B
×1
Camera Module V2
Raspberry Pi Camera Module V2
×1
Arduino UNO
Arduino UNO
×1
thingSoC ESP32 WiFi Module
thingSoC ESP32 WiFi Module
×1
Arduino Pro Mini 328 - 5V/16MHz
SparkFun Arduino Pro Mini 328 - 5V/16MHz
×1
Test Cable Assembly, USB to TTL Interface Cable
Test Cable Assembly, USB to TTL Interface Cable
×1
Ultrasonic Sensor - HC-SR04 (Generic)
Ultrasonic Sensor - HC-SR04 (Generic)
×1
Ultrasonic Sensor - SRF05
×1
IR distance sensor - GP2Y0A515K0F
×2
IR distance sensor - GP2Y0A415K0F
×1
SparkFun Full-Bridge Motor Driver Breakout - L298N
SparkFun Full-Bridge Motor Driver Breakout - L298N
×1
4WD Mobile Platform for Robot Car
×1
Grove - Relay
Seeed Studio Grove - Relay
×1
UV lamp 6W
×1
Mini DIY Tesla Coil Kit
×1
Rechargeable Battery, 4.8 V
Rechargeable Battery, 4.8 V
×1
Rechargeable Battery, 7.2 V
Rechargeable Battery, 7.2 V
×1
SG90 Micro-servo motor
SG90 Micro-servo motor
×1

Software apps and online services

Arduino IDE
Arduino IDE
OpenCV
OpenCV
Python 3.7.3
Amazon Alexa App
FreeCAD
Ultimaker Cura

Hand tools and fabrication machines

3D Printer (generic)
3D Printer (generic)
Soldering iron (generic)
Soldering iron (generic)
Smartphone Android 9
Hot glue gun (generic)
Hot glue gun (generic)
Premium Female/Male Extension Jumper Wires, 40 x 6" (150mm)
Premium Female/Male Extension Jumper Wires, 40 x 6" (150mm)
Plastic cable ties
Aluminum

Story

Read more

Custom parts and enclosures

Custom Parts of TESLA Robot

STL files

Schematics

TESLA Robot version 1

Schematic diagram

TESLA Robot version 2

Schematic diagram

UV Meter

You can see the electrical connections of the UV meter in the image below

Code

esp32-wroom-32

Arduino
This code must be uploaded to the ESP32-WROOM-32 board.
SECTION 4. ECHO DOT & ESP32-WROOM-32
# AUTHOR: GUILLERMO PEREZ GUILLEN

#include <Arduino.h>
#include <NewPing.h> // SRF04 ultrasonic library
#define ultrasonic_pin_1 4 // SRF04: obstacle flag output toward the Arduino UNO
#define ultrasonic_pin_2 25 // SRF05: obstacle flag output toward the Arduino UNO

const int UltrasonicPin = 2; // SRF04 trigger/echo (single-pin mode) 
const int MaxDistance = 200; // SRF04 maximum range in cm

const unsigned int TRIG_PIN=27; //SRF05 trigger
const unsigned int ECHO_PIN=26; //SRF05 echo


NewPing sonar(UltrasonicPin, UltrasonicPin, MaxDistance); // SRF04: same pin used for trigger and echo

#ifdef ESP32
  #include <WiFi.h>
  #define RF_RECEIVER 13
  #define RELAY_PIN_1 12
  #define RELAY_PIN_2 14
#else
  #include <ESP8266WiFi.h>
  #define RF_RECEIVER 5
  #define RELAY_PIN_1 4
  #define RELAY_PIN_2 14
#endif
#include "fauxmoESP.h"

#include <RCSwitch.h>

#define SERIAL_BAUDRATE 115200

// Wi-Fi credentials - replace with your own network's SSID and password
#define WIFI_SSID "XXXXXXXXXX"
#define WIFI_PASS "XXXXXXXXXX"

// Friendly device names Alexa responds to ("Alexa, turn lamp one on")
#define LAMP_1 "lamp one"
#define LAMP_2 "lamp two"

fauxmoESP fauxmo; // Alexa (Philips Hue emulation) handler

RCSwitch mySwitch = RCSwitch(); // 433 MHz RF remote receiver

// Wi-Fi Connection
void wifiSetup() {
  // Set WIFI module to STA mode
  WiFi.mode(WIFI_STA);


  // Connected!
  Serial.printf("[WIFI] STATION Mode, SSID: %s, IP address: %s\n", WiFi.SSID().c_str(), WiFi.localIP().toString().c_str());
}

void setup() {
  pinMode(ultrasonic_pin_1, OUTPUT); // SRF04
  digitalWrite(ultrasonic_pin_1, LOW); // SRF04

  pinMode(ultrasonic_pin_2, OUTPUT); // SRF05
  digitalWrite(ultrasonic_pin_2, LOW); // SRF05    
  pinMode(TRIG_PIN, OUTPUT); // SRF05
  pinMode(ECHO_PIN, INPUT); // SRF05
  
  // Init serial port and clean garbage
  Serial.begin(SERIAL_BAUDRATE);
  Serial.println();


  // LED
  pinMode(RELAY_PIN_1, OUTPUT);
  digitalWrite(RELAY_PIN_1, LOW);

  pinMode(RELAY_PIN_2, OUTPUT);
  digitalWrite(RELAY_PIN_2, LOW);
  
  mySwitch.enableReceive(RF_RECEIVER);  // Receiver on interrupt 0 => that is pin #2

  // By default, fauxmoESP creates it's own webserver on the defined port
  // The TCP port must be 80 for gen3 devices (default is 1901)
  // This has to be done before the call to enable()
  fauxmo.createServer(true); // not needed, this is the default value
  fauxmo.setPort(80); // This is required for gen3 devices

  // You have to call enable(true) once you have a WiFi connection
  // You can enable or disable the library at any moment
  // Disabling it will prevent the devices from being discovered and switched
  fauxmo.enable(true);
  // You can use different ways to invoke alexa to modify the devices state:
  // "Alexa, turn lamp two on"

  // Add virtual devices
  fauxmo.addDevice(LAMP_1);
  fauxmo.addDevice(LAMP_2);

  fauxmo.onSetState([](unsigned char device_id, const char * device_name, bool state, unsigned char value) {
    // Callback when a command from Alexa is received. 
    // You can use device_id or device_name to choose the element to perform an action onto (relay, LED,...)
    // State is a boolean (ON/OFF) and value a number from 0 to 255 (if you say "set kitchen light to 50%" you will receive a 128 here).
    // Just remember not to delay too much here, this is a callback, exit as soon as possible.
    // If you have to do something more involved here set a flag and process it in your main loop.
        
    Serial.printf("[MAIN] Device #%d (%s) state: %s value: %d\n", device_id, device_name, state ? "ON" : "OFF", value);
    if ( (strcmp(device_name, LAMP_1) == 0) ) {
      // this just sets a variable that the main loop() does something about
      Serial.println("RELAY 1 switched by Alexa");
      //digitalWrite(RELAY_PIN_1, !digitalRead(RELAY_PIN_1));
      if (state) {
        digitalWrite(RELAY_PIN_1, HIGH);
      } else {
        digitalWrite(RELAY_PIN_1, LOW);
      }
    }
    if ( (strcmp(device_name, LAMP_2) == 0) ) {
      // this just sets a variable that the main loop() does something about
      Serial.println("RELAY 2 switched by Alexa");
      if (state) {
        digitalWrite(RELAY_PIN_2, HIGH);
      } else {
        digitalWrite(RELAY_PIN_2, LOW);
      }
    }
  });

}

void loop() {
  delay(25);
  int rf_sensor_left = sonar.ping_cm(); // SRFO4
  
  if (rf_sensor_left<30){digitalWrite(ultrasonic_pin_1, HIGH);} // SRFO4
  else {digitalWrite(ultrasonic_pin_1, LOW);} // SRFO4 


  const unsigned long duration= pulseIn(ECHO_PIN, HIGH); // SRFO5
  int rf_sensor_right = duration/29/2; // SRFO5

  if (rf_sensor_right<30){digitalWrite(ultrasonic_pin_2, HIGH);} // SRFO5
  else {digitalWrite(ultrasonic_pin_2, LOW);} // SRFO5 

  Serial.print("Distance1: ");
  Serial.println(rf_sensor_left);  
  Serial.print("Distance2: ");
  Serial.println(rf_sensor_right);  
  Serial.println("  ");
  
  // fauxmoESP uses an async TCP server but a sync UDP server
  // Therefore, we have to manually poll for UDP packets
  fauxmo.handle();

  static unsigned long last = millis();
  if (millis() - last > 5000) {
    last = millis();
    Serial.printf("[MAIN] Free heap: %d bytes\n", ESP.getFreeHeap());
  }
    
  if (mySwitch.available()) {    
    if (mySwitch.getReceivedValue()==6819768) {
      digitalWrite(RELAY_PIN_1, !digitalRead(RELAY_PIN_1));
    }
    if (mySwitch.getReceivedValue()==9463928) {
      digitalWrite(RELAY_PIN_2, !digitalRead(RELAY_PIN_2));     
    }
    delay(600);
    mySwitch.resetAvailable();
  }
}

graphic

Python
Code to graph the Cost function.
SECTION 5. NEURAL NETWORKS
import numpy as np

    def __init__(self, layers, activation='tanh'):
        """Build a fully-connected network.

        layers: list of layer sizes, e.g. [6, 3, 4] = 6 inputs, 3 hidden, 4 outputs.
        activation: 'sigmoid' or 'tanh' (default).
        """
        if activation == 'sigmoid':
            self.activation = sigmoid
            self.activation_prime = sigmoid_derivada
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_prime = tanh_derivada

        # BUG FIX: these containers were never initialized in the original,
        # so the .append() calls below raised AttributeError.
        self.weights = []
        self.deltas = []

        # Assign random values (in [-1, 1)) to input layer and hidden layers;
        # the +1 adds the bias unit to each layer.
        for i in range(1, len(layers) - 1):
            r = 2*np.random.random((layers[i-1] + 1, layers[i] + 1)) - 1
            self.weights.append(r)
        # Assign random weights to the output layer
        r = 2*np.random.random((layers[i] + 1, layers[i+1])) - 1
        self.weights.append(r)

    def fit(self, X, y, learning_rate=0.2, epochs=100000):
        """Train with stochastic gradient descent + backpropagation.

        X: 2-D array of inputs (one sample per row).
        y: 2-D array of expected outputs (one row per sample).
        learning_rate: gradient step size.
        epochs: number of single-sample update iterations.
        """
        # Add a column of ones to X: the bias unit of the input layer.
        ones = np.atleast_2d(np.ones(X.shape[0]))
        X = np.concatenate((ones.T, X), axis=1)

        for k in range(epochs):
            # Pick one random training sample per iteration (SGD).
            i = np.random.randint(X.shape[0])
            a = [X[i]]

            # Forward pass: keep every layer's activation.
            for l in range(len(self.weights)):
                dot_value = np.dot(a[l], self.weights[l])
                activation = self.activation(dot_value)
                a.append(activation)

            # Difference between the target and the obtained output.
            error = y[i] - a[-1]
            deltas = [error * self.activation_prime(a[-1])]

            # Backward pass: from the last hidden layer down to the first.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_prime(a[l]))
            self.deltas.append(deltas)

            # BUG FIX: deltas were collected output-first, but the update loop
            # below indexes them layer-first; reverse so deltas[i] lines up
            # with weights[i] (as in the reference implementation).
            deltas.reverse()

            # Weight update: gradient = layer activation (outer product) delta.
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)

            if k % 10000 == 0: print('epochs:', k)

    def predict(self, x):
        """Forward-propagate input vector x and return the output activations."""
        # Prepend the bias unit (a constant 1.0) to the input vector.
        # (Removed an unused 'ones' local that the original computed and discarded.)
        a = np.concatenate((np.ones(1).T, np.array(x)), axis=0)
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a

    def print_weights(self):
        # Dump every weight matrix to stdout, layer by layer.
        print("LIST OF CONNECTION WEIGHTS")
        for i in range(len(self.weights)):
            print(self.weights[i])

    def get_weights(self):
        # Accessor: list of weight matrices, one per layer transition.
        return self.weights
    
    def get_deltas(self):
        # Accessor: per-iteration delta lists recorded during fit().
        return self.deltas

# When creating the network, we can choose between using the sigmoid or tanh function
def sigmoid(x):
    # Logistic activation: maps any real input into the open interval (0, 1).
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)

def sigmoid_derivada(x):
    # Derivative of the logistic function: s(x) * (1 - s(x)),
    # computed from a single evaluation of the sigmoid.
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)


def tanh_derivada(x):
    # Derivative of tanh expressed in terms of the tanh OUTPUT value x:
    # if x = tanh(u), then d/du tanh(u) = 1 - x**2.
    return 1.0 - x * x

########## CAR NETWORK

# 6 binary sensor inputs -> 3 hidden units -> 4 motor outputs.
# NOTE(review): the `class NeuralNetwork:` header is missing from this
# listing (only its methods appear above) - confirm against the original.
nn = NeuralNetwork([6,3,4],activation ='tanh')
# Training inputs: every row is one combination of the robot's 6 sensor bits.
X = np.array([[0,0,0,0,0,0],   
              [0,0,0,0,0,1],   
              [0,0,0,0,1,0],   
              [0,0,0,0,1,1],   
              [0,0,0,1,0,0],   
              [0,0,0,1,0,1],   
              [0,0,0,1,1,0],   
              [0,0,0,1,1,1],   
              [0,0,1,0,0,0],  
              [0,0,1,0,0,1],   
              [0,0,1,0,1,1],   
              [0,0,1,1,0,0],   
              [0,0,1,1,0,1],   
              [0,0,1,1,1,1],   
              [0,1,0,0,0,0],   
              [0,1,0,0,0,1],   
              [0,1,0,0,1,0],   
              [0,1,0,1,0,0],   
              [0,1,0,1,0,1],   
              [0,1,0,1,1,0],   
              [0,1,1,0,0,0],   
              [0,1,1,0,1,0],   
              [0,1,1,1,0,0],   
              [0,1,1,1,1,0],   
              [1,0,0,0,0,0],   
              [1,0,0,0,0,1],   
              [1,0,0,0,1,0],   
              [1,0,0,0,1,1],   
              [1,0,0,1,0,0],   
              [1,0,0,1,0,1],   
              [1,0,0,1,1,0],   
              [1,0,0,1,1,1],   
              [1,0,1,0,0,0],   
              [1,0,1,0,0,1],   
              [1,0,1,0,1,1],   
              [1,0,1,1,0,0],   
              [1,0,1,1,0,1],   
              [1,0,1,1,1,1],   
              [1,1,0,0,0,0],   
              [1,1,0,0,0,1],   
              [1,1,0,0,1,0],   
              [1,1,0,1,0,0],   
              [1,1,0,1,0,1],   
              [1,1,0,1,1,0],   
              [1,1,1,0,0,0],   
              [1,1,1,0,1,0],   
              [1,1,1,1,0,0],   
              [1,1,1,1,1,0],   
             ])
# the outputs correspond to starting (or not) the motors
# NOTE(review): each 4-bit row presumably maps to the L298N driver inputs
# [IN1, IN2, IN3, IN4] - confirm against the Arduino UNO motor sketch.
y = np.array([[0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop              
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop                
              [1,0,1,0], # forward 
              [1,0,1,0], # forward 
              [0,1,1,0], # turn-left
              [0,1,1,0], # turn-left 
              [0,1,0,1], # back
              [0,1,1,0], # turn-left 
              [0,1,1,0], # turn-left 
              [0,1,1,0], # turn-left 
              [1,0,0,1], # turn-right 
              [0,1,1,0], # turn-left
              [0,1,1,0], # turn-left
              [1,0,0,1], # turn-right
              [0,1,1,0], # turn-left              
              [1,0,0,1], # turn-right 
              [1,0,1,0], # forward 
              [1,0,1,0], # forward
              [1,0,0,1], # turn-right 
              [1,0,0,1], # turn-right 
              [0,1,0,1], # back 
              [1,0,0,1], # turn-right 
              [1,0,0,1], # turn-right
              [1,0,0,1], # turn-right
              [1,0,0,1], # turn-right              
              [1,0,0,1], # turn-right            
             ])
# Train: 550001 single-sample SGD iterations with a small learning rate.
nn.fit(X, y, learning_rate=0.03,epochs=550001)
 
def valNN(x):
    # Quantize a raw network output to a non-negative integer (0 or 1 in practice):
    # round to the nearest integer, then drop the sign.
    rounded = round(x)
    return int(abs(rounded))
 
# Sanity check: print expected vs. obtained outputs for every training sample.
index=0
for e in X:
    prediccion = nn.predict(e)
    print("X:",e,"expected:",y[index],"obtained:", valNN(prediccion[0]),valNN(prediccion[1]),valNN(prediccion[2]),valNN(prediccion[3]))
    index=index+1

generate-arduino-code

Python
We generate the Arduino code: Hidden weights and Output weights.
SECTION 5. NEURAL NETWORKS
import numpy as np

    def __init__(self, layers, activation='tanh'):
        """Build a fully-connected network.

        layers: list of layer sizes, e.g. [6, 3, 4] = 6 inputs, 3 hidden, 4 outputs.
        activation: 'sigmoid' or 'tanh' (default).
        """
        if activation == 'sigmoid':
            self.activation = sigmoid
            self.activation_prime = sigmoid_derivada
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_prime = tanh_derivada

        # Initialize the weights
        self.weights = []
        self.deltas = []
        # Assign random values (in [-1, 1)) to input layer and hidden layers;
        # the +1 adds the bias unit to each layer.
        for i in range(1, len(layers) - 1):
            r = 2*np.random.random((layers[i-1] + 1, layers[i] + 1)) - 1
            self.weights.append(r)
        # BUG FIX: the output-layer weight matrix was missing in this listing
        # (present in the companion "graphic" script); without it, fit() and
        # predict() index one more matrix than exists.
        r = 2*np.random.random((layers[i] + 1, layers[i+1])) - 1
        self.weights.append(r)

    def fit(self, X, y, learning_rate=0.2, epochs=100000):
        """Train with stochastic gradient descent + backpropagation.

        X: 2-D array of inputs (one sample per row).
        y: 2-D array of expected outputs (one row per sample).
        learning_rate: gradient step size.
        epochs: number of single-sample update iterations.
        """
        # Add a column of ones to X: the bias unit of the input layer.
        ones = np.atleast_2d(np.ones(X.shape[0]))
        X = np.concatenate((ones.T, X), axis=1)

        # BUG FIX: the epoch loop, random sample selection and initial
        # activation list were missing from this listing (the body below
        # referenced k, i and a that were never defined). Restored to match
        # the companion "graphic" script.
        for k in range(epochs):
            i = np.random.randint(X.shape[0])
            a = [X[i]]

            # Forward pass: keep every layer's activation.
            for l in range(len(self.weights)):
                dot_value = np.dot(a[l], self.weights[l])
                activation = self.activation(dot_value)
                a.append(activation)

            # Difference between the target and the obtained output.
            error = y[i] - a[-1]
            deltas = [error * self.activation_prime(a[-1])]

            # Backward pass: from the last hidden layer down to the first.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_prime(a[l]))
            self.deltas.append(deltas)

            # Reverse so deltas[i] lines up with weights[i] in the update loop.
            deltas.reverse()

            # Weight update: gradient = layer activation (outer product) delta.
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)

            if k % 10000 == 0: print('epochs:', k)

    def predict(self, x):
        """Forward-propagate input vector x and return the output activations."""
        # Prepend the bias unit (a constant 1.0) to the input vector.
        # (Removed an unused 'ones' local that the original computed and discarded.)
        a = np.concatenate((np.ones(1).T, np.array(x)), axis=0)
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a

    def print_weights(self):
        # Dump every weight matrix to stdout, layer by layer.
        print("LIST OF CONNECTION WEIGHTS")
        for i in range(len(self.weights)):
            print(self.weights[i])

    def get_weights(self):
        # Accessor: list of weight matrices, one per layer transition.
        return self.weights
    
# When creating the network, we can choose between using the sigmoid or tanh function
def sigmoid(x):
    # Logistic activation: maps any real input into the open interval (0, 1).
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)

def sigmoid_derivada(x):
    # Derivative of the logistic function: s(x) * (1 - s(x)),
    # computed from a single evaluation of the sigmoid.
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)

def tanh(x):
    # Hyperbolic tangent activation (range (-1, 1)); delegated to NumPy.
    result = np.tanh(x)
    return result

########## CAR NETWORK

# 6 binary sensor inputs -> 3 hidden units -> 4 motor outputs.
# NOTE(review): the `class NeuralNetwork:` header is missing from this
# listing (only its methods appear above) - confirm against the original.
nn = NeuralNetwork([6,3,4],activation ='tanh')
# Training inputs: every row is one combination of the robot's 6 sensor bits.
X = np.array([[0,0,0,0,0,0],   
              [0,0,0,0,0,1],   
              [0,0,0,0,1,0],   
              [0,0,0,0,1,1],   
              [0,0,0,1,0,0],   
              [0,0,0,1,0,1],   
              [0,0,0,1,1,0],   
              [0,0,0,1,1,1],   
              [0,0,1,0,0,0],  
              [0,0,1,0,0,1],   
              [0,0,1,0,1,1],   
              [0,0,1,1,0,0],   
              [0,0,1,1,0,1],   
              [0,0,1,1,1,1],   
              [0,1,0,0,0,0],   
              [0,1,0,0,0,1],   
              [0,1,0,0,1,0],   
              [0,1,0,1,0,0],   
              [0,1,0,1,0,1],   
              [0,1,0,1,1,0],   
              [0,1,1,0,0,0],   
              [0,1,1,0,1,0],   
              [0,1,1,1,0,0],   
              [0,1,1,1,1,0],   
              [1,0,0,0,0,0],   
              [1,0,0,0,0,1],   
              [1,0,0,0,1,0],   
              [1,0,0,0,1,1],   
              [1,0,0,1,0,0],   
              [1,0,0,1,0,1],   
              [1,0,0,1,1,0],   
              [1,0,0,1,1,1],   
              [1,0,1,0,0,0],   
              [1,0,1,0,0,1],   
              [1,0,1,0,1,1],   
              [1,0,1,1,0,0],   
              [1,0,1,1,0,1],   
              [1,0,1,1,1,1],   
              [1,1,0,0,0,0],   
              [1,1,0,0,0,1],   
              [1,1,0,0,1,0],   
              [1,1,0,1,0,0],   
              [1,1,0,1,0,1],   
              [1,1,0,1,1,0],   
              [1,1,1,0,0,0],   
              [1,1,1,0,1,0],   
              [1,1,1,1,0,0],   
              [1,1,1,1,1,0],   
             ])
# the outputs correspond to starting (or not) the motors
# NOTE(review): each 4-bit row presumably maps to the L298N driver inputs
# [IN1, IN2, IN3, IN4] - confirm against the Arduino UNO motor sketch.
y = np.array([[0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop              
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop
              [0,0,0,0], # stop
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop 
              [0,0,0,0], # stop                
              [1,0,1,0], # forward 
              [1,0,1,0], # forward 
              [0,1,1,0], # turn-left
              [0,1,1,0], # turn-left 
              [0,1,0,1], # back
              [0,1,1,0], # turn-left 
              [0,1,1,0], # turn-left 
              [0,1,1,0], # turn-left 
              [1,0,0,1], # turn-right 
              [0,1,1,0], # turn-left
              [0,1,1,0], # turn-left
              [1,0,0,1], # turn-right
              [0,1,1,0], # turn-left              
              [1,0,0,1], # turn-right 
              [1,0,1,0], # forward 
              [1,0,1,0], # forward
              [1,0,0,1], # turn-right 
              [1,0,0,1], # turn-right 
              [0,1,0,1], # back 
              [1,0,0,1], # turn-right 
              [1,0,0,1], # turn-right
              [1,0,0,1], # turn-right
              [1,0,0,1], # turn-right              
              [1,0,0,1], # turn-right            
             ])
# Train: 550001 single-sample SGD iterations with a small learning rate.
nn.fit(X, y, learning_rate=0.03,epochs=550001)
 
# Sanity check: print expected vs. obtained outputs for every training sample.
# NOTE(review): valNN() is not defined in this listing (it appears in the
# companion "graphic" script) - confirm it is in scope before running.
index=0
for e in X:
    prediccion = nn.predict(e)
    print("X:",e,"expected:",y[index],"obtained:", valNN(prediccion[0]),valNN(prediccion[1]),valNN(prediccion[2]),valNN(prediccion[3]))
    index=index+1


########## WE GENERATE THE ARDUINO CODE
def to_str(name, W):
    # Render a 2-D weight matrix as a C/Arduino float array initializer, e.g.
    # "float HiddenWeights[7][4] = {{...}, ...};"
    body = str(W.tolist()).replace('[', '{').replace(']', '}')
    rows = str(W.shape[0])
    cols = str(W.shape[1])
    return 'float ' + name + '[' + rows + '][' + cols + '] = ' + body + ';'

print('// Replace these lines in your arduino code:')
print('// float HiddenWeights ...')
print('// float OutputWeights ...')
print('// With trained weights.')
print('\n')
# BUG FIX: 'pesos' was used below without ever being defined; it must hold
# the trained weight matrices from the network.
pesos = nn.get_weights()
print(to_str('HiddenWeights', pesos[0]))
print(to_str('OutputWeights', pesos[1]))

arduino_pro_mini

Arduino
SECTION 11. OPENCV
#include <Servo.h>
int data_x = 0;
int data_y = 0;
// BUG FIX: was `int data[1]`, but loop() writes data[0] AND data[1] —
// an out-of-bounds write. Two bytes per frame: [0]=pan angle, [1]=tilt angle.
int data[2];
Servo myservo_x; // pan servo on pin 9
Servo myservo_y; // tilt servo on pin 10 - create servo objects to control the servos

void setup() {
  // Serial link from the Raspberry Pi (OpenCV tracking) at 9600 baud
  Serial.begin(9600);
  myservo_x.attach(9);  // attaches the servo on pin 9 to the servo object
  myservo_y.attach(10); // tilt servo on pin 10
  // Center both servos at start-up
  myservo_x.write(90);
  myservo_y.write(90);
}

void loop() {
  // Consume every complete two-byte frame (x angle, y angle) sent over
  // serial from the Raspberry Pi, and steer both servos accordingly.
  while (Serial.available() >= 2) {
    data[0] = Serial.read(); // pan target
    data[1] = Serial.read(); // tilt target

    myservo_x.write(data[0]);
    myservo_y.write(data[1]);
  }
}

Credits

Guillermo Perez Guillen

Guillermo Perez Guillen

43 projects • 49 followers
Electronics and Communications Engineer (ECE): Ten awards in Hackster / Finalist in 2021 Hackaday Prize / Two runner up winner in element14

Comments