Kutluhan Aktar
Published © CC BY

IoT AI-driven Tree Disease Identifier w/ Edge Impulse & MMS

Detect tree diseases and get informed of the results via MMS to prevent them from spreading and harming forests, farms, and arable lands.

Expert · Full instructions provided · 4,658 views

Things used in this project

Hardware components

Seeed Studio SenseCAP K1100 - The Sensor Prototype Kit with LoRa® and AI
×1
Seeed Studio Wio Terminal
×1
Seeed Studio Grove - CO2 & Temperature & Humidity Sensor (SCD30)
×1
LattePanda 3 Delta
×1
DFRobot 7'' HDMI Display with Capacitive Touchscreen
×1
Creality CR-200B 3D Printer
×1
MicroSD Card (FAT32)
×1

Software apps and online services

Edge Impulse Studio
Arduino IDE
Thonny
Autodesk Fusion 360
Ultimaker Cura

Hand tools and fabrication machines

Hot glue gun (generic)

Story


Custom parts and enclosures

tree_disease_identifier_main_case.stl

tree_disease_identifier_back_cover.stl

tree_disease_identifier_camera.stl

Edge Impulse Model (Linux x86_64 Application)

Histogram.cpp

Histogram.h

Schematics

Wio Terminal

Code

tree_disease_detection_wio_controls.ino

Arduino
       /////////////////////////////////////////////
      //       IoT AI-driven Tree Disease        //
     //     Identifier w/ Edge Impulse & MMS    //
    //             ---------------             //
   //              (Wio Terminal)             //
  //             by Kutluhan Aktar           //
 //                                         //
/////////////////////////////////////////////

//
// Detect tree diseases and get informed of the results via MMS to prevent them from spreading and harming forests, farms, and arable lands.
//
// For more information:
// https://www.theamplituhedron.com/projects/IoT_AI_driven_Tree_Disease_Identifier_w_Edge_Impulse_MMS
//
//
// Connections
// Wio Terminal :
//                                Grove - VOC and eCO2 Gas Sensor
// SDA --------------------------- SDA
// SCL --------------------------- SCL
//                                Grove - CO2 & Temperature & Humidity Sensor
// SDA --------------------------- SDA
// SCL --------------------------- SCL
//                                Grove - Soil Moisture Sensor
// A0  --------------------------- SIG


// Include the required libraries.
#include <SPI.h>
#include <Seeed_FS.h>
#include "SD/Seeed_SD.h"
#include "TFT_eSPI.h"
#include "Histogram.h"
#include "RawImage.h"
#include "sensirion_common.h"
#include "sgp30.h"
#include "SCD30.h"
#include "RTC_SAMD51.h"
#include "DateTime.h"

// Define the built-in TFT screen and the histogram settings.
TFT_Histogram histogram=TFT_Histogram();
TFT_eSPI tft = TFT_eSPI();

// Initialize the File class and define the file name: 
File myFile;
const char* data_file = "environmental_factors.csv";
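// CSV row format (see save_data_to_SD_Card): date, CO2, temperature, humidity, moisture, tVOC, CO2eq.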

// Define the environmental factor thresholds to inform the user of potential tree disease risks.
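// Column order (matching check_thresholds): CO2 (ppm), temperature (°C), humidity (%), soil moisture (raw), tVOC (ppb), CO2eq (ppm).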
int thresholds[3][6] = {
                        {800,38,42,435,350,1500},
                        {830,35,45,435,375,1650},
                        {950,42,60,600,485,1735}
                       };

// Define the required settings for the Grove - VOC and eCO2 Gas Sensor.
s16 err;
u32 ah = 0;
u16 scaled_ethanol_signal, scaled_h2_signal, tvoc_ppb, co2_eq_ppm;

// Define the Grove - Soil Moisture Sensor signal pin.
#define moisture_sensor A0

// Define the built-in RTC module.
RTC_SAMD51 rtc;

// Define the data holders.
#define DEBUG 0
float co2_value, temp_value, humd_value;
int tvoc_value, co2_eq_value, moisture_value;
int column_w = 40;
int background_color = TFT_BLACK;
int w = TFT_HEIGHT, h = 60, offset = 5;
long timer = 0;
long model_timer = 0;

void setup(){
  Serial.begin(115200);

  // Configurable Buttons:
  pinMode(WIO_KEY_A, INPUT_PULLUP);
  pinMode(WIO_KEY_B, INPUT_PULLUP);
  pinMode(WIO_KEY_C, INPUT_PULLUP);
  // Buzzer:
  pinMode(WIO_BUZZER, OUTPUT);
  
  // Initialize the built-in RTC module. Then, adjust the date & time to the compile date & time.
  rtc.begin();
  DateTime now = DateTime(F(__DATE__), F(__TIME__));
  rtc.adjust(now);

  // Check the connection status between Wio Terminal and the SD card.
  if(!SD.begin(SDCARD_SS_PIN, SDCARD_SPI)) while (1);

  // Check the SGP probe status.
  while(sgp_probe() != STATUS_OK){
    if(DEBUG) Serial.println("VOC and eCO2 Gas Sensor => SGP probe failed!");
    while (1);
  }
  // Read the H2 and Ethanol signal with the VOC and eCO2 gas sensor.
  err = sgp_measure_signals_blocking_read(&scaled_ethanol_signal, &scaled_h2_signal);
  // Check the VOC and eCO2 gas sensor status after reading the signal.
  if(err == STATUS_OK){ if(DEBUG) Serial.println("VOC and eCO2 Gas Sensor => Signal acquired successfully!"); }
  else{ if(DEBUG) Serial.println("VOC and eCO2 Gas Sensor => Signal reading error!"); }
  // Set the default absolute humidity value - 13.000 g/m^3.
  sgp_set_absolute_humidity(13000);
  // Initiate the VOC and eCO2 gas sensor.
  err = sgp_iaq_init();

  // Initialize the Grove - CO2 & Temperature & Humidity Sensor.
  scd30.initialize();

  // Initiate the built-in TFT screen.
  tft.init();
  tft.setTextColor(TFT_WHITE);
  tft.setTextSize(2);
  // Create the histogram.
  histogram.initHistogram(&tft);
  histogram.formHistogram("a", 1, 10, column_w, TFT_RED);     // Column 1
  histogram.formHistogram("b", 2, 10, column_w, TFT_PINK);    // Column 2
  histogram.formHistogram("c", 3, 10, column_w, TFT_GREEN);   // Column 3
  histogram.formHistogram("d", 4, 10, column_w, TFT_BLUE);    // Column 4
  histogram.formHistogram("e", 5, 10, column_w, TFT_YELLOW);  // Column 5
  histogram.formHistogram("f", 6, 10, column_w, TFT_MAGENTA); // Column 6
  // Hide the histogram axes.
  histogram.notShowAxis();

  // Display the 8-bit background image saved on the SD card:
  drawImage<uint8_t>("forest_disease.bmp", TFT_HEIGHT, 0);
   
}

void loop(){
  get_VOC_and_eCO2();
  get_co2_temp_humd();
  get_moisture();
  check_thresholds(10);

  // Every 1 minute, update the histogram and append the collected environmental factors to the CSV file on the SD card.
  if(millis() - timer > 60*1000 || timer == 0){
    // Display the histogram on the TFT screen.
    update_histogram();
    show_resize_histogram(TFT_WHITE, TFT_BLACK);
    // Save the collected environmental factors to the SD card.
    save_data_to_SD_Card();
    // Every 5 minutes, send the model run command ('B') automatically to LattePanda 3 Delta.
    if(millis() - model_timer > 5*60*1000){
      Serial.println("B"); delay(500);
      tft.fillRect(0, TFT_WIDTH-h, w, h, TFT_WHITE);
      tft.fillRect(offset, TFT_WIDTH-h+offset, w-2*offset, h-2*offset, TFT_BLACK);
      tft.setTextSize(2);
      tft.drawString("Model Running!", (w-14*12)/2, TFT_WIDTH-25-12);
      // Update the model timer.
      model_timer = millis();
    }
    // Update the timer.
    timer = millis();
  }

  // If the configurable button A is pressed, send the capture command ('A') to LattePanda 3 Delta.  
  if(digitalRead(WIO_KEY_A) == LOW){
    Serial.println("A"); delay(500);
    tft.fillRect(0, 0, w, h, TFT_WHITE);
    tft.fillRect(offset, offset, w-2*offset, h-2*offset, TFT_BLACK);
    tft.setTextSize(2);
    tft.drawString("Image Captured!", (w-15*12)/2, 23);
  }

  // If the configurable button B is pressed, send the model run command ('B') manually to LattePanda 3 Delta.
  if(digitalRead(WIO_KEY_B) == LOW){
    Serial.println("B"); delay(500);
    tft.fillRect(0, TFT_WIDTH-h, w, h, TFT_WHITE);
    tft.fillRect(offset, TFT_WIDTH-h+offset, w-2*offset, h-2*offset, TFT_BLACK);
    tft.setTextSize(2);
    tft.drawString("Model Running!", (w-14*12)/2, TFT_WIDTH-25-12);
  }
}

void update_histogram(){
  // Update histogram parameters with the collected data.
  histogram.changeParam(1, "a", co2_value/10, TFT_RED);
  histogram.changeParam(2, "b", temp_value, TFT_PINK);
  histogram.changeParam(3, "c", humd_value, TFT_GREEN);
  histogram.changeParam(4, "d", moisture_value/10, TFT_BLUE);
  histogram.changeParam(5, "e", tvoc_value, TFT_YELLOW);
  histogram.changeParam(6, "f", co2_eq_value/10, TFT_MAGENTA);
}

void show_resize_histogram(int text, int background){
  // Resize, place, and display the histogram on the TFT screen.
  histogram.shrinkShowHistogram(25, 45, 1.4, text, background, background);
  tft.setRotation(3);
  tft.setTextSize(1);
  tft.drawString("a:CO2 b:Temp c:Humd d:Mois e:tVOC f:CO2eq", 30, 5);
  delay(5000);
  // Set the background image.
  drawImage<uint8_t>("forest_disease.bmp", 0, 0);
  delay(2000);
}

void save_data_to_SD_Card(){
  // Open the given CSV file on the SD card in the APPEND file mode.
  // FILE MODES: WRITE, READ, APPEND
  myFile = SD.open(data_file, FILE_APPEND);
  // If the given file is opened successfully:
  if(myFile){
    if(DEBUG){ Serial.print("\nWriting to "); Serial.print(data_file); Serial.println("..."); }
    // Obtain the current date & time.
    DateTime now = rtc.now();
    String _date = String(now.year(), DEC) + "_" + String(now.month(), DEC) + "_" + String(now.day(), DEC) + "_" + String(now.hour(), DEC) + "_" + String(now.minute(), DEC) + "_" + String(now.second(), DEC);
    // Create the data record to be inserted as a new row: 
    String data_record = String(_date)
                         + "," + String(co2_value)
                         + "," + String(temp_value)
                         + "," + String(humd_value)
                         + "," + String(moisture_value)
                         + "," + String(tvoc_value)
                         + "," + String(co2_eq_value)
                       ; 
    // Append the data record:
    myFile.println(data_record);
    // Close the CSV file:
    myFile.close();
    if(DEBUG) Serial.println("Data saved successfully!\n");
    // Notify the user after appending the given data record successfully.
    tft.fillRect(0, 0, w, h, TFT_WHITE);
    tft.fillRect(offset, offset, w-2*offset, h-2*offset, TFT_BLACK);
    tft.setTextSize(2);
    tft.drawString("Data Stored!", (w-12*12)/2, 23);
  }else{
    // If Wio Terminal cannot open the given CSV file successfully:
    if(DEBUG) Serial.println("Wio Terminal cannot open the given CSV file!\n");
    tft.setTextSize(2);
    tft.drawString("Wio Terminal", 35, 10);
    tft.drawString("cannot open the file!", 35, 30);
  }
  // Exit and clear:
  delay(3000);
}

void get_VOC_and_eCO2(){
  // Get the VOC (Volatile Organic Compounds) and CO2eq (Carbon dioxide equivalent) measurements evaluated by the VOC and eCO2 gas sensor.
  s16 err = 0;
  u16 tvoc_ppb, co2_eq_ppm;
  err = sgp_measure_iaq_blocking_read(&tvoc_ppb, &co2_eq_ppm);
  if(err == STATUS_OK){
    tvoc_value = tvoc_ppb;
    co2_eq_value = co2_eq_ppm;
    if(DEBUG){ Serial.print("tVOC (Volatile Organic Compounds): "); Serial.print(tvoc_value); Serial.println(" ppb"); }
    if(DEBUG){ Serial.print("CO2eq (Carbon dioxide equivalent): "); Serial.print(co2_eq_value); Serial.println(" ppm\n"); }
  }else{
    if(DEBUG) Serial.println("VOC and eCO2 Gas Sensor => IAQ values reading error!\n");
  }
  delay(1000);  
}

void get_co2_temp_humd(){
  // Obtain the CO2, temperature, and humidity measurements generated by the CO2 & Temperature & Humidity sensor.
  float result[3] = {0};
  if(scd30.isAvailable()){
    scd30.getCarbonDioxideConcentration(result);
    co2_value = result[0];
    temp_value = result[1];
    humd_value = result[2];
    if(DEBUG){ Serial.print("CO2 (Carbon dioxide): "); Serial.print(co2_value); Serial.println(" ppm"); }
    if(DEBUG){ Serial.print("Temperature: "); Serial.print(temp_value); Serial.println(" "); }
    if(DEBUG){ Serial.print("Humidity: "); Serial.print(result[2]); Serial.println(" %\n"); }
  }
  delay(1000);
} 

void get_moisture(){
  moisture_value = analogRead(moisture_sensor);
  if(DEBUG){ Serial.print("Moisture: "); Serial.print(moisture_value); Serial.println("\n"); }
}

void check_thresholds(int s){
  // If the collected environmental factors exceed the given thresholds, notify the user via the built-in buzzer.
  for(int i=0; i<3; i++){
    if(co2_value >= thresholds[i][0] && temp_value >= thresholds[i][1] && humd_value >= thresholds[i][2] && moisture_value >= thresholds[i][3] && tvoc_value >= thresholds[i][4] && co2_eq_value >= thresholds[i][5]){
      analogWrite(WIO_BUZZER, 128);
      if(DEBUG) Serial.println("\nPotential tree disease risk detected!\n");
      delay(s*1000);
      analogWrite(WIO_BUZZER, 0);
    }
  }
}

main.py

Python
# IoT AI-driven Tree Disease Identifier w/ Edge Impulse & MMS
#
# LattePanda 3 Delta 864
#
# Detect tree diseases and get informed of the results via MMS
# to prevent them from spreading and harming forests, farms, and arable lands.
#
# By Kutluhan Aktar


import serial
import usb1
from PIL import Image
from io import BytesIO
from time import sleep
import cv2
import numpy as np
import datetime
from threading import Thread
import os
from edge_impulse_linux.image import ImageImpulseRunner
import requests
from twilio.rest import Client 

# Define the Grove - Vision AI Module image descriptions.
WEBUSB_JPEG_MAGIC = 0x2B2D2B2D
WEBUSB_TEXT_MAGIC = 0x0F100E12
# Define the Grove - Vision AI Module vendor and product IDs. 
VendorId = 0x2886
ProductId = 0x8060


class tree_disease_detection():
    def __init__(self, show_img, modelfile):
        # Define the required parameters for the Vision AI module.
        self.expect_size = 0
        self.buff = bytearray()
        self.show_img = show_img
        # Get data from the connected USB devices.
        self.context = usb1.USBContext()
        # Find the Vision AI module if connected.
        self.find_vision_ai(False)
        self.vision_ai_disconnect()
        # Initialize serial communication with Wio Terminal to get commands.
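        # Note: the serial port name may differ (e.g., /dev/ttyACM0) depending on the connected devices; adjust it to match Wio Terminal's port.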
        self.wio = serial.Serial("/dev/ttyACM1", 115200, timeout=1000)
        # Define the Edge Impulse model settings.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.modelfile = os.path.join(dir_path, modelfile)
        self.detection_result = ""
        # Define the required Twilio account settings.
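        # Note: replace the placeholders below with the credentials from your Twilio console.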
        self.tw_account_sid = '<account_sid>' 
        self.tw_auth_token = '<auth_token>'
        self.tw_messaging_service_sid = '<messaging_service_sid>'
        self.verified_number = '+<verified_number>'
        # Define the PHP web application (image logger) settings.
        self.server = "www.theamplituhedron.com"
        self.url_path = "https://" + self.server + "/tree_disease_detection_web/"        

    def wio_commands(self):
        # Obtain commands from Wio Terminal via serial communication.
        command = self.wio.read(1).decode("utf-8")
        if(command.find("A") >= 0):
            self.vision_ai_save_sample()
        elif(command.find("B") >= 0):
            self.run_inference()
    
    def run_inference(self):
        # Run inference to detect potential tree diseases to prevent them from spreading.
        with ImageImpulseRunner(self.modelfile) as runner:
            try:
                # Print the information of the Edge Impulse model converted to a Linux x86_64 (64-bit) application (.eim).
                model_info = runner.init()
                print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
                labels = model_info['model_parameters']['labels']
                # Get the currently captured image with the Vision AI module, resize it depending on the given model, and run inference. 
                test_img = Image.open(BytesIO(self.captured_img))
                test_img = np.array(test_img)
                features, cropped = runner.get_features_from_image(test_img)
                res = runner.classify(features)
                # Obtain the prediction (detection) results for each label (class).
                results = "Detections: "
                if "bounding_boxes" in res["result"].keys():
                    print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
                    for bb in res["result"]["bounding_boxes"]:
                        # Get the detected labels:
                        results+="\n"+bb['label']
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                        cropped = cv2.rectangle(cropped, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (255, 0, 0), 1)
                # Save the resized (modified) image to the computer by appending the current date & time to its filename.
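                # Note: the 'detections' folder must already exist in the working directory; cv2.imwrite does not create missing directories.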
                date = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
                filename = 'detections/{}.jpg'.format(date)
                cv2.imwrite(filename, cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
                # Then, send the saved image to the web application.
                self.send_captured_image(filename)
                # After uploading the image to the given server via the web application,
                # send an MMS to the verified phone number via Twilio so as to inform the user of the detection results.
                if not results == "Detections: ":
                    self.detection_result = "\n" + results
                else:
                    self.detection_result = "\nNot Detected!"                
                self.send_MMS_via_Twilio(self.detection_result, filename)
                
            # Stop the running inference.    
            finally:
                if(runner):
                    runner.stop()    
    
    def find_vision_ai(self, _open=True):
        print('*' * 50)
        print('Searching for Vision AI Module...')
        # Search all connected USB devices to find the Vision AI module. 
        for device in self.context.getDeviceIterator(skip_on_error=True):
            product_id = device.getProductID()
            vendor_id = device.getVendorID()
            device_addr = device.getDeviceAddress()
            bus = '->'.join(str(x) for x in ['Bus %03i' % (device.getBusNumber(),)] + device.getPortNumberList())
            # If the device vendor and product IDs correspond to the Vision AI module vendor and product IDs, start communicating with the Vision AI module.
            if vendor_id == VendorId and product_id == ProductId:
                print('\r' + f'\033[4;31mID {vendor_id:04x}:{product_id:04x} {bus} Device {device_addr} \033[0m',
                      end='')
                if _open:
                    return device.open()
                else:
                    device.close()
                    print(
                        '\r' + f'\033[4;31mID {vendor_id:04x}:{product_id:04x} {bus} Device {device_addr} CLOSED\033[0m',
                        flush=True)
            else:
                print(f'ID {vendor_id:04x}:{product_id:04x} {bus} Device {device_addr}')
                
    def vision_ai_connect(self):
        # Connect to the Vision AI module if found successfully.
        self.handle = self.find_vision_ai(True)
        if self.handle is None:
            print('\rPlease plug in the Vision AI Module!')
            return False
        with self.handle.claimInterface(2):
            # Set up the default Vision AI module settings to read data (buffer).
            self.handle.setInterfaceAltSetting(2, 0)
            self.handle.controlRead(0x01 << 5, request=0x22, value=0x01, index=2, length=2048, timeout=1000)
            print('\nVision AI Module is connected!')
        return True

    def vision_ai_disconnect(self):
        # Disconnect from the Vision AI module by resetting the module.
        try:
            print('Resetting device...')
            with usb1.USBContext() as context:
                handle = context.getByVendorIDAndProductID(VendorId, ProductId,
                                                           skip_on_error=False).open()
                handle.controlRead(0x01 << 5, request=0x22, value=0x00, index=2, length=2048, timeout=1000)
                handle.close()
                print('Device has been reset!')
            return True
        except:
            return False               

    def read_vision_ai_data(self):
        # Obtain the transferred data from the Vision AI module.
        with self.handle.claimInterface(2):
            # Utilize endpoints:
            self.handle.setInterfaceAltSetting(2, 0)
            self.handle.controlRead(0x01 << 5, request=0x22, value=0x01, index=2, length=2048, timeout=1000)
            # Save all transferred objects in a list so as to avoid any possible glitch.
            transfer_list = []
            for _ in range(1):
                transfer = self.handle.getTransfer()
                transfer.setBulk(usb1.ENDPOINT_IN | 2, 2048, callback=self.process_vision_ai_data, timeout=1000)
                transfer.submit()
                transfer_list.append(transfer)
            # Wait until one successful transfer.
            while any(x.isSubmitted() for x in transfer_list):
                self.context.handleEvents()

    def process_vision_ai_data(self, transfer):
        # If the Vision AI module transferred an object successfully, process the received data.
        if transfer.getStatus() != usb1.TRANSFER_COMPLETED:
            # transfer.close()
            return
        # Obtain the transferred data.
        data = transfer.getBuffer()[:transfer.getActualLength()]
        # Get the accurate buffer size.
        if len(data) == 8 and int.from_bytes(bytes(data[:4]), 'big') == WEBUSB_JPEG_MAGIC:
            self.expect_size = int.from_bytes(bytes(data[4:]), 'big')
            self.buff = bytearray()
        elif len(data) == 8 and int.from_bytes(bytes(data[:4]), 'big') == WEBUSB_TEXT_MAGIC:
            self.expect_size = int.from_bytes(bytes(data[4:]), 'big')
            self.buff = bytearray()
        else:
            self.buff = self.buff + data
        # If the obtained buffer size is equal to the actual buffer size, show the captured image on the screen.
        if self.expect_size == len(self.buff) and self.show_img:
            try:
                self.captured_img = self.buff
                img = Image.open(BytesIO(self.buff))
                img = np.array(img)
                cv2.imshow('Tree Disease Samples', cv2.cvtColor(img,cv2.COLOR_RGB2BGR))
                cv2.waitKey(1)
            except:
                self.buff = bytearray()
                return
        # Resubmit the transfer object after being processed.
        transfer.submit()
        
    def vision_ai_save_sample(self):    
        date = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = './samples/IMG_{}.jpg'.format(date)
        # If requested, save the recently captured image as a sample.
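        # Note: the './samples' directory must already exist; otherwise, open() raises a FileNotFoundError.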
        with open(filename, 'wb') as f:
            f.write(bytes(self.captured_img))
        print("\nSaved: " + filename)

    def send_captured_image(self, file_path):
        files = {'captured_image': open("./"+file_path, 'rb')}
        # Make an HTTP POST request to the given web application to send the captured image.
        request = requests.post(self.url_path, files=files)
        print("\nRecently Captured Image Transferred!")
        # Print the response from the server.
        print("\nServer: " + request.text + "\n")
        
    def send_MMS_via_Twilio(self, body, file_path):
        # Define the Twilio client object.
        tw_client = Client(self.tw_account_sid, self.tw_auth_token)
        # Send an MMS to the verified phone number via Twilio.
        media_url = self.url_path + file_path
        message = tw_client.messages.create(
                                  messaging_service_sid=self.tw_messaging_service_sid, 
                                  body=body,
                                  media_url=media_url,
                                  to=self.verified_number
                              )
        print("\nTransferred Message ID:" + message.sid)
        print("Transferred Media URL:" + media_url)

# Define the detection object.
detection = tree_disease_detection(True, "model/tree-disease-identifier-linux-x86_64.eim")
detection.vision_ai_connect()

# Define and initialize threads.
def start_data_collection():
    while True:
        detection.read_vision_ai_data()
        
def activate_wio_commands():
    while True:
        detection.wio_commands()
        sleep(1)

Thread(target=start_data_collection).start()
Thread(target=activate_wio_commands).start()

    

index.php

PHP
<?php

// If LattePanda 3 Delta transfers the captured image after running an inference with the Edge Impulse model successfully, save it to the detections folder.
if(!empty($_FILES["captured_image"]['name'])){
	// Image File:
	$captured_image_properties = array(
	    "name" => $_FILES["captured_image"]["name"],
	    "tmp_name" => $_FILES["captured_image"]["tmp_name"],
		"size" => $_FILES["captured_image"]["size"],
		"extension" => pathinfo($_FILES["captured_image"]["name"], PATHINFO_EXTENSION)
	);
	
    // Check whether the uploaded file extension is in the allowed file formats.
	$allowed_formats = array('jpg', 'png');
	if(!in_array($captured_image_properties["extension"], $allowed_formats)){
		echo 'FILE => File Format Not Allowed!';
	}else{
		// Check whether the uploaded file size exceeds the 5MB data limit.
		if($captured_image_properties["size"] > 5000000){
			echo "FILE => File size cannot exceed 5MB!";
		}else{
			// Save the uploaded file (image).
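			// Note: the 'detections' folder must already exist on the server and be writable; move_uploaded_file() does not create it.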
			move_uploaded_file($captured_image_properties["tmp_name"], "./detections/".$captured_image_properties["name"]);
			echo "FILE => Saved Successfully!";
		}
	}
}

?>

Credits

Kutluhan Aktar

AI & Full-Stack Developer | @EdgeImpulse | @Particle | Maker | Independent Researcher