Xiangxin Wan, Lechuan Sun, Zening Li, Chenran Li

Autowheelchair (Self-driving Car)

An autonomous vehicle built on a BeagleBone AI-64 (BBAI-64) that implements lane keeping, stop-box recognition, and drivable-area identification with YOLOP.

Beginner · Full instructions provided · 10 hours · 184

Things used in this project

Hardware components

USB Webcam (Logitech C270 720p Webcam)
×1
BeagleBone AI-64
BeagleBoard.org BeagleBone AI-64
×1
USB WiFi Adapter (AC600 Dual Band 2.4/5 GHz)
×1
Optical speed encoder
×1
Portable charger
×1

Software apps and online services

MobaXterm
OpenCV
YOLOP Machine Learning Platform

Story


Schematics

Circuit diagram and hardware connection

Code

BONE-PWM0.dts

C/C++
Device tree overlay for the BeagleBone AI-64: enables the bone_pwm_0 channel and declares the encoder GPIO
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 BeagleBoard.org - https://beagleboard.org/
 *
 * https://docs.beagleboard.io/latest/boards/capes/cape-interface-spec.html#pwm
 */
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/board/k3-j721e-bone-pins.h>

/dts-v1/;
/plugin/;

/*
 * Helper to show loaded overlays under: /proc/device-tree/chosen/overlays/
 */
&{/chosen} {
	overlays {
		BONE-PWM0.kernel = __TIMESTAMP__;
	};
};

&bone_pwm_0 {
	status = "okay";
};

&{/} {
	gpio-leds {
		compatible = "autowheelchair";
		userbutton-gpios = <gpio_P9_12 GPIO_ACTIVE_HIGH>;
	};
};
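
A quick way to confirm the overlay actually loaded after a reboot is to read back the chosen/overlays helper node the snippet above creates. A minimal Python sketch (run on the board; the procfs path comes from the overlay's own comment):

import os

# List the overlays the kernel applied; BONE-PWM0 should appear if the
# overlay above was picked up at boot.
overlay_dir = "/proc/device-tree/chosen/overlays"
if os.path.isdir(overlay_dir):
    loaded = os.listdir(overlay_dir)
    print("Loaded overlays:", loaded)
    print("BONE-PWM0 applied:", any("BONE-PWM0" in n for n in loaded))
else:
    print("No overlays node found; the overlay may not be applied.")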

gpiod_driver.c

C/C++
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timekeeping.h>
struct gpio_desc *btn;
unsigned int irq_number;

// Counter Value
int state = 0;
// Time Variables
long current_time = 0;
long last_time = 0;
long time_diff = 0;
// Current Speed, readable from user space via the module parameter below
long speed;

module_param(speed, long, S_IRUGO);

// interrupt handler: fires on each falling edge from the optical encoder
static irqreturn_t gpio_irq_handler(int irq, void *dev_id) {

    printk("Detecting\n");
    //current_time = ktime_get_real_ns();
    //time_diff = current_time - last_time;
    //last_time = current_time;
    //speed = time_diff;
    printk("Speed recorded......\n");
    return IRQ_HANDLED;
}


// probe function
static int led_probe(struct platform_device *pdev)
{
    int ret; // return value of request_irq()

    /* "userbutton" matches the userbutton-gpios property in the device tree */
    btn = devm_gpiod_get(&pdev->dev, "userbutton", GPIOD_IN);
    if (IS_ERR(btn))
        return PTR_ERR(btn);

    //gpiod_set_debounce(btn, 1000000);

    /* Set up the interrupt */
    irq_number = gpiod_to_irq(btn);
    ret = request_irq(irq_number, gpio_irq_handler, IRQF_TRIGGER_FALLING, "gpio_irq", NULL);
    if (ret)
    {
        printk("Error! Cannot request interrupt nr.: %d\n", irq_number);
        return ret;
    }
    printk("Button GPIO initialized.......\n");
    return 0;
}
// remove function
static int led_remove(struct platform_device *pdev)
{
    printk("GPIO example exit\n");
    free_irq(irq_number, NULL); /* btn is devm-managed, so no gpiod_put() is needed */
    return 0;
}
static struct of_device_id matchy_match[] = {
    {.compatible = "autowheelchair"},
    {/* end node */}
};
// platform driver object
static struct platform_driver adam_driver = {
    .probe = led_probe,
    .remove = led_remove,
    .driver = {
        .name = "adam_driver",
        .owner = THIS_MODULE,
        .of_match_table = matchy_match,
    },
};
module_platform_driver(adam_driver);
MODULE_DESCRIPTION("424\'s finest");
MODULE_AUTHOR("GOAT");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:adam_driver");
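
Because speed is declared with module_param(speed, long, S_IRUGO), user space can read the most recent encoder interval without any custom interface. A hedged sketch, assuming the module is built and loaded as gpiod_driver.ko (adjust the module name in the sysfs path if yours differs); note that with the timing lines in the handler commented out, this reads 0 until they are re-enabled:

# Read the encoder interval exposed by the kernel module above.
# The module name "gpiod_driver" in the path is an assumption.
def read_encoder_interval_ns(path="/sys/module/gpiod_driver/parameters/speed"):
    with open(path) as f:
        return int(f.read().strip())

if __name__ == "__main__":
    print("ns between encoder pulses:", read_encoder_interval_ns())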

car_team2.py

Python
import cv2
import numpy as np
import time
import math
from collections import Counter
import matplotlib.pyplot as plt

# Code references 1: https://www.instructables.com/Autonomous-Lane-Keeping-Car-Using-Raspberry-Pi-and/
# Code references 2: https://www.hackster.io/covid-debuff/covid-debuff-semi-autonomous-rc-car-platform-75b072


# Car Control
#####################################################################
class Car():
    def __init__(self):
        self.servo_lower = 1200000 # servo's lower bound of duty cycle
        self.servo_upper = 1800000 # servo's upper bound of duty cycle

    def car_init(self):
        if_init = 0 # flag for initialization
        # P9_14 - Speed/ESC
        with open('/dev/bone/pwm/1/a/period', 'r+') as filetowrite:
            if filetowrite.read() != '20000000':
                if_init = 1  # set flag to 1 if initialization performed
                filetowrite.write('20000000')
        with open('/dev/bone/pwm/1/a/duty_cycle', 'r+') as filetowrite:
            if filetowrite.read() != '1500000':
                if_init = 1 # set flag to 1 if initialization performed
                filetowrite.write('1500000')
        with open('/dev/bone/pwm/1/a/enable', 'r+') as filetowrite:
            if filetowrite.read() != '1':
                if_init = 1 # set flag to 1 if initialization performed
                filetowrite.write('1')

        # P9_16 - Steering
        with open('/dev/bone/pwm/1/b/period', 'w') as filetowrite:
            filetowrite.write('20000000')
        with open('/dev/bone/pwm/1/b/duty_cycle', 'w') as filetowrite:
            filetowrite.write('1500000')
        with open('/dev/bone/pwm/1/b/enable', 'w') as filetowrite:
            filetowrite.write('1')

        # if flag is 1, wait...
        if if_init:
            print('Car Initializing......')
            self.wait(2)
        else:
            print("Already Initialized")

    # Set motor speed
    def motor_set(self, percentage):    # percentage 750 ~ 900(7.5%~9%)
        duty_cycle = str(percentage * 2000)
        # P9_14 - Speed/ESC
        with open('/dev/bone/pwm/1/a/period', 'w') as filetowrite:
            filetowrite.write('20000000')
        with open('/dev/bone/pwm/1/a/duty_cycle', 'w') as filetowrite:
            filetowrite.write(duty_cycle)
        with open('/dev/bone/pwm/1/a/enable', 'w') as filetowrite:
            filetowrite.write('1')
        print('Driving forward at speed', percentage, '......')


    # Set servo angle
    # angle is in servo steps: positive steers left, negative steers right (the main loop passes ±750)
    def servo_set(self, angle):
        step = (self.servo_upper - 1500000) // 1000
        duty_cycle = str(1500000 + angle * step)
        if angle == 0:  # reset if angle is 0
            duty_cycle = '1500000'

        # P9_16 - Steering
        with open('/dev/bone/pwm/1/b/period', 'w') as filetowrite:
            filetowrite.write('20000000')
        with open('/dev/bone/pwm/1/b/duty_cycle', 'w') as filetowrite:
            filetowrite.write(duty_cycle)
        with open('/dev/bone/pwm/1/b/enable', 'w') as filetowrite:
            filetowrite.write('1')

        # print which way we are steering
        if angle > 0:
            print('Turning left at angle', angle, '......')
        if angle < 0:
            print('Turning right at angle', angle, '......')

    # wait for certain seconds
    def wait(self, second):
        print('Waiting for ', second, 'second(s).....')
        time.sleep(second)
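
# Example usage of the Car class (hypothetical values; duty-cycle math as above):
#   car = Car()
#   car.car_init()      # 20 ms period, 1.5 ms neutral pulse on both channels
#   car.motor_set(802)  # ~8.02% duty -> slow forward
#   car.servo_set(750)  # steer left; servo_set(-750) steers right
#   car.motor_set(750)  # 7.5% duty -> neutral / stop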

# OpenCV
#####################################################################

# Convert frame to HSV color range
def convert_to_HSV(frame):
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    cv2.imshow("HSV",hsv_frame)
    return hsv_frame

# Detect edges based on the color selected
def detect_edges(frame):
    lower_blue = np.array([70, 90, 0], dtype="uint8")  # lower limit of blue color (HSV)
    upper_blue = np.array([160, 255, 255], dtype="uint8")  # upper limit of blue color (HSV)
    mask = cv2.inRange(frame, lower_blue, upper_blue)  # this mask will filter out everything but blue

    # detect edges
    edges = cv2.Canny(mask, 50, 200)
    cv2.imshow("edges", edges)
    return edges

# Draw the region of interest
def region_of_interest(edges):
    height, width = edges.shape # extract the height and width of the edges frame
    mask = np.zeros_like(edges) # make an empty matrix with same dimensions of the edges frame

    # only focus lower half of the screen
    # specify the coordinates of 4 points (lower left, upper left, upper right, lower right)
    polygon = np.array([[
        (0, height),
        (0,  height/2),
        (width , height/2),
        (width , height),
    ]], np.int32)

    cv2.fillPoly(mask, polygon, 255) # fill the polygon with white (255)
    cropped_edges = cv2.bitwise_and(edges, mask)
    cv2.imshow("roi",cropped_edges)
    return cropped_edges

# Detect line segmentations
def detect_line_segments(cropped_edges):
    rho = 1  # distance resolution in pixels
    theta = np.pi / 180  # angular resolution in radians
    min_threshold = 15 # minimum number of intersections to "detect" a line.

    # detect straight line in image
    line_segments = cv2.HoughLinesP(cropped_edges, rho, theta, min_threshold,
                                    np.array([]), minLineLength=5, maxLineGap=0)
    return line_segments

# calculate the average slope intercept
def average_slope_intercept(frame, line_segments):
    lane_lines = []

    # return if no line detected
    if line_segments is None:
        print("no line segment detected")
        return lane_lines

    # set shape of the frame
    height, width,_ = frame.shape
    left_fit = []
    right_fit = []
    boundary = 1/3

    # set boundary
    left_region_boundary = width * (1 - boundary)
    right_region_boundary = width * boundary

    # loop each line
    for line_segment in line_segments:
        for x1, y1, x2, y2 in line_segment:
            if x1 == x2:
                print("skipping vertical lines (slope = infinity)")
                continue
            # fit the matrix
            fit = np.polyfit((x1, x2), (y1, y2), 1)
            slope = (y2 - y1) / (x2 - x1)
            intercept = y1 - (slope * x1)

            # if slope < 0
            if slope < 0:
                if x1 < left_region_boundary and x2 < left_region_boundary:
                    left_fit.append((slope, intercept))
            # if slope > 0
            else:
                if x1 > right_region_boundary and x2 > right_region_boundary:
                    right_fit.append((slope, intercept))

    left_fit_average = np.average(left_fit, axis=0)
    if len(left_fit) > 0:
        lane_lines.append(make_points(frame, left_fit_average))

    right_fit_average = np.average(right_fit, axis=0)
    if len(right_fit) > 0:
        lane_lines.append(make_points(frame, right_fit_average))

    # lane_lines is a 2-D array containing the coordinates of the right and left lane lines
    # for example: lane_lines = [[x1,y1,x2,y2],[x1,y1,x2,y2]]
    # where the left array is for left lane and the right array is for right lane
    # all coordinate points are in pixels
    return lane_lines

# generate points based on the frame and line
def make_points(frame, line):
    height, width, _ = frame.shape
    slope, intercept = line
    y1 = height  # bottom of the frame
    y2 = int(y1 / 2)  # make points from middle of the frame down

    # manually add slope to horizontal line
    if slope == 0:
        slope = 0.1
    x1 = int((y1 - intercept) / slope) # calculate for x1 coordinate
    x2 = int((y2 - intercept) / slope) # calculate for x2 coordinate

    return [[x1, y1, x2, y2]]

# Display the lane lines on the frame
def display_lines(frame, lines, line_color=(0, 255, 0), line_width=6): # line color (B,G,R)
    line_image = np.zeros_like(frame)
    # draw lines on the frame
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(line_image, (x1, y1), (x2, y2), line_color, line_width)

    line_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1) # weighted sum of two images
    cv2.imshow("Lines",line_image)
    return line_image


# calculate the steering angle to turn
def get_steering_angle(frame, lane_lines):
    height, width, _ = frame.shape

    if len(lane_lines) == 2: # if two lane lines are detected
        _, _, left_x2, _ = lane_lines[0][0] # extract left x2 from lane_lines array
        _, _, right_x2, _ = lane_lines[1][0] # extract right x2 from lane_lines array
        mid = int(width / 2)
        x_offset = (left_x2 + right_x2) / 2 - mid
        y_offset = int(height / 2)

    elif len(lane_lines) == 1: # if only one line is detected
        x1, _, x2, _ = lane_lines[0][0]
        x_offset = x2 - x1
        y_offset = int(height / 2)

    elif len(lane_lines) == 0: # if no line is detected
        x_offset = 0
        y_offset = int(height / 2)

    angle_to_mid_radian = math.atan(x_offset / y_offset)
    angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi)
    steering_angle = angle_to_mid_deg + 90

    return steering_angle

# display the heading line in the frame
def display_heading_line(frame, steering_angle, line_color=(0, 0, 255), line_width=5):
    heading_image = np.zeros_like(frame) # create an empty image
    height, width, _ = frame.shape   #get the shape of image
    steering_angle_radian = steering_angle / 180.0 * math.pi # convert steering angle to radian
    # calculate the start and end points for line
    x1 = int(width / 2)
    y1 = height
    x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
    y2 = int(height / 2)

    cv2.line(heading_image, (x1, y1), (x2, y2), line_color, line_width) # draw the heading line in the frame
    heading_image = cv2.addWeighted(frame, 0.8, heading_image, 1, 1)  # weighted sum of two images
    cv2.imshow("Heading Line",heading_image) # show the frame with heading line
    return heading_image

# filter out every color but red
def check_red_flag(frame):
    lower_red = np.array([150, 30, 30], dtype="uint8")  # lower limit of red color
    upper_red = np.array([180, 255, 255], dtype="uint8")  # upper limit of red color
    mask = cv2.inRange(frame, lower_red, upper_red)  # this mask will filter out everything but red
    cv2.imshow("Red Flag", mask)  # show the red mask
    return mask


##################################################
#####################Main#########################
##################################################

# turn on camera
video = cv2.VideoCapture(2)  # open the camera at device index 2
video.set(cv2.CAP_PROP_FRAME_WIDTH,160) # set the width to 160 pixels
video.set(cv2.CAP_PROP_FRAME_HEIGHT,120) # set the height to 120 pixels

# initialize the car
car = Car()
car.car_init()

speed = 10

#Variables to be updated each loop
lastTime = 0
lastError = 0

# PD constants
kp = 0.4
kd = kp * 0.65
prev_frame_time = 0
new_frame_time = 0

# the red square counter
end_flag = 0

# Status variables
last_error_st = 0
last_error_sp = 0
last_time = 0
last_encoder = 0
# PID parameters
Kp_st = 0.1944
Ki_st = 0.10935
Kd_st = 0.03888*1.5

# Records for graphs
iter_cnt = 0
T = []
steer_error = []
steer_P = []
steer_I = []
steer_D = []
steer_pwm_duty = []
servo_percentage = 7.5

# main loop
while True:
    ret,frame = video.read()
    #frame = cv2.flip(frame,-1)  # flip the image if needed

    if ret: # check if frame captured
        hsv = convert_to_HSV(frame) # convert to hsv


        edges = detect_edges(hsv)  # detect edges
        roi = region_of_interest(edges) # select region of interest
        line_segments = detect_line_segments(roi) # detect line segmentations
        lane_lines = average_slope_intercept(frame,line_segments) # calculate the slope
        lane_lines_image = display_lines(frame,lane_lines) # display the lane lines
        steering_angle = get_steering_angle(frame, lane_lines) # calculate for the angle turned
        heading_image = display_heading_line(lane_lines_image,steering_angle) # display the heading lines
        red_edges = check_red_flag(hsv)  # filter color but red
        red_edges = region_of_interest(red_edges)   # select region of interest

        # decide whether a red stop box has been detected
        red_counts = Counter(red_edges.flatten())
        for key, value in red_counts.items():
            if key == 255 and abs(90 - steering_angle) < 50:  # only stop while the car is going straight
                if value > 100:
                    time.sleep(0.2)
                    end_flag += 1
                    car.motor_set(750)
                    time.sleep(3)
                    print("RED DETECTED")

        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            car.motor_set(750)  # stop the motor before quitting
            break

        now = time.time() # current time variable
        dt = now - lastTime
        deviation = steering_angle - 90 # equivalent to angle_to_mid_deg variable
        error = abs(deviation)
        # Steer PID
        error_st = 90-steering_angle
        error_diff_st = (error_st - last_error_st)/dt
        if iter_cnt != 0:
            error_int_st = error_int_st + error_st*dt  # accumulate the integral of the error
        else:
            error_int_st = 0
        last_error_st = error_st
        servo_percentage = servo_percentage + Kp_st*error_st + Kd_st*error_diff_st + Ki_st*error_int_st
        
        if deviation < 3 and deviation > -3: # do not steer inside the 3-degree deadband
            deviation = 0
            error = 0
            car.servo_set(0)

        elif deviation >= 3: # steer right if the deviation is positive
            car.servo_set(-750)

        elif deviation <= -3: # steer left if deviation is negative
            car.servo_set(750)

        # update for PID
        derivative = kd * (error - lastError) / dt
        proportional = kp * error
        PD = int(speed + derivative + proportional)

        spd = abs(PD)
        if spd > 25:
            spd = 25

        # update for PID
        lastError = error
        lastTime = time.time()

        # stop if the second red square detected
        if(end_flag == 2):
            time.sleep(1)
            car.motor_set(750)
            break
        # Record the variables
        T = np.append(T, [iter_cnt])
        steer_error = np.append(steer_error, [error_st/100])
        steer_P = np.append(steer_P, [Kp_st*error_st])
        steer_I = np.append(steer_I, [Ki_st*error_int_st])
        steer_D = np.append(steer_D, [Kd_st*error_diff_st])
        steer_pwm_duty = np.append(steer_pwm_duty, [servo_percentage])

        iter_cnt = iter_cnt + 1
        # set motor speed
        car.motor_set(802)

video.release()
#cv2.destroyAllWindows()

# Plot the first graph
plt.figure()
plt.plot(T, steer_error, color = 'orange',linewidth = 1.0,linestyle = 'solid', label='Error')
plt.plot(T, steer_P, color = 'red',linewidth = 1.0,linestyle = 'solid', label='Proportional Response')
plt.plot(T, steer_I, color = 'blue',linewidth = 1.0,linestyle = 'solid', label='Integral Response')
plt.plot(T, steer_D, color = 'green',linewidth = 1.0,linestyle = 'solid', label='Derivative Response')
plt.legend(loc = 'upper right')
plt.title('PID Responses vs. Frame Number')
plt.xlabel('Frame')
plt.ylabel('PID Responses')
plt.grid()



# Plot the second graph
plt.figure()
plt.plot(T, steer_error, color = 'orange',linewidth = 1.0,linestyle = 'solid', label='Steer Error')
plt.plot(T, steer_pwm_duty, color = 'green',linewidth = 1.0,linestyle = 'solid', label='Steer PWM')
plt.legend(loc = 'upper right')
plt.title('PWM and Error vs. Frame Number')
plt.xlabel('Frame')
plt.ylabel('Values')
plt.grid()
# Do the plot
plt.show()
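
The steering computation above boils down to a single arctangent. A worked, self-contained check of the get_steering_angle geometry with hypothetical lane endpoints, using the 160x120 frame size configured for the webcam:

import math

# Hypothetical x2 endpoints of the two detected lane lines at mid-frame height.
height, width = 120, 160
left_x2, right_x2 = 40, 130
mid = int(width / 2)
x_offset = (left_x2 + right_x2) / 2 - mid  # +5 px: lane center sits slightly right
y_offset = int(height / 2)
angle_to_mid_deg = int(math.atan(x_offset / y_offset) * 180.0 / math.pi)
steering_angle = angle_to_mid_deg + 90  # 90 means drive straight
print(steering_angle)  # 94 -> deviation of +4, so the main loop steers right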

YOLOP_demo.py

Python
import argparse
import os, sys
import shutil
import time
from pathlib import Path
import imageio

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)

print(sys.path)
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import scipy.special
import numpy as np
import torchvision.transforms as transforms
import PIL.Image as image

from lib.config import cfg
from lib.config import update_config
from lib.utils.utils import create_logger, select_device, time_synchronized
from lib.models import get_net
from lib.dataset import LoadImages, LoadStreams
from lib.core.general import non_max_suppression, scale_coords
from lib.utils import plot_one_box,show_seg_result
from lib.core.function import AverageMeter
from lib.core.postprocess import morphological_process, connect_lane
from tqdm import tqdm
normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )

transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])


def detect(cfg,opt):

    logger, _, _ = create_logger(
        cfg, cfg.LOG_DIR, 'demo')

    device = select_device(logger,opt.device)
    if os.path.exists(opt.save_dir):  # output dir
        shutil.rmtree(opt.save_dir)  # delete dir
    os.makedirs(opt.save_dir)  # make new dir
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = get_net(cfg)
    checkpoint = torch.load(opt.weights, map_location= device)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    if half:
        model.half()  # to FP16

    # Set Dataloader
    if opt.source.isnumeric():
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(opt.source, img_size=opt.img_size)
        bs = len(dataset)  # batch_size
    else:
        dataset = LoadImages(opt.source, img_size=opt.img_size)
        bs = 1  # batch_size


    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]


    # Run inference
    t0 = time.time()

    vid_path, vid_writer = None, None
    img = torch.zeros((1, 3, opt.img_size, opt.img_size), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    model.eval()

    inf_time = AverageMeter()
    nms_time = AverageMeter()
    
    for i, (path, img, img_det, vid_cap,shapes) in tqdm(enumerate(dataset),total = len(dataset)):
        img = transform(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        det_out, da_seg_out,ll_seg_out= model(img)
        t2 = time_synchronized()
        # if i == 0:
        #     print(det_out)
        inf_out, _ = det_out
        inf_time.update(t2-t1,img.size(0))

        # Apply NMS
        t3 = time_synchronized()
        det_pred = non_max_suppression(inf_out, conf_thres=opt.conf_thres, iou_thres=opt.iou_thres, classes=None, agnostic=False)
        t4 = time_synchronized()

        nms_time.update(t4-t3,img.size(0))
        det=det_pred[0]

        save_path = str(opt.save_dir +'/'+ Path(path).name) if dataset.mode != 'stream' else str(opt.save_dir + '/' + "web.mp4")

        _, _, height, width = img.shape
        h,w,_=img_det.shape
        pad_w, pad_h = shapes[1][1]
        pad_w = int(pad_w)
        pad_h = int(pad_h)
        ratio = shapes[1][0][1]

        da_predict = da_seg_out[:, :, pad_h:(height-pad_h),pad_w:(width-pad_w)]
        da_seg_mask = torch.nn.functional.interpolate(da_predict, scale_factor=int(1/ratio), mode='bilinear')
        _, da_seg_mask = torch.max(da_seg_mask, 1)
        da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy()
        # da_seg_mask = morphological_process(da_seg_mask, kernel_size=7)

        
        ll_predict = ll_seg_out[:, :,pad_h:(height-pad_h),pad_w:(width-pad_w)]
        ll_seg_mask = torch.nn.functional.interpolate(ll_predict, scale_factor=int(1/ratio), mode='bilinear')
        _, ll_seg_mask = torch.max(ll_seg_mask, 1)
        ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy()
        # Lane line post-processing
        #ll_seg_mask = morphological_process(ll_seg_mask, kernel_size=7, func_type=cv2.MORPH_OPEN)
        #ll_seg_mask = connect_lane(ll_seg_mask)

        img_det = show_seg_result(img_det, (da_seg_mask, ll_seg_mask), _, _, is_demo=True)

        if len(det):
            det[:,:4] = scale_coords(img.shape[2:],det[:,:4],img_det.shape).round()
            for *xyxy,conf,cls in reversed(det):
                label_det_pred = f'{names[int(cls)]} {conf:.2f}'
                plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=2)
        
        if dataset.mode == 'images':
            cv2.imwrite(save_path,img_det)

        elif dataset.mode == 'video':
            if vid_path != save_path:  # new video
                vid_path = save_path
                if isinstance(vid_writer, cv2.VideoWriter):
                    vid_writer.release()  # release previous video writer

                fourcc = 'mp4v'  # output video codec
                fps = vid_cap.get(cv2.CAP_PROP_FPS)
                h,w,_=img_det.shape
                vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
            vid_writer.write(img_det)
        
        else:
            cv2.imshow('image', img_det)
            cv2.waitKey(1)  # 1 millisecond

    print('Results saved to %s' % Path(opt.save_dir))
    print('Done. (%.3fs)' % (time.time() - t0))
    print('inf : (%.4fs/frame)   nms : (%.4fs/frame)' % (inf_time.avg,nms_time.avg))




if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='weights/End-to-end.pth', help='model.pth path(s)')
    parser.add_argument('--source', type=str, default='inference/videos', help='source')  # file/folder   ex:inference/images
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--save-dir', type=str, default='inference/output', help='directory to save results')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()
    with torch.no_grad():
        detect(cfg,opt)
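
To close the loop between YOLOP and the car, the drivable-area mask can be reduced to one scalar per frame. A hedged sketch (not part of the YOLOP repo): given da_seg_mask as produced above, a 2-D array of 0/1 labels, compute the drivable fraction of the lower half of the frame; the 0.3 threshold is an illustrative assumption, not a tuned value.

import numpy as np

def drivable_fraction(da_seg_mask):
    # Fraction of pixels YOLOP marked drivable in the lower half of the frame.
    lower_half = da_seg_mask[da_seg_mask.shape[0] // 2:, :]
    return float(np.count_nonzero(lower_half)) / lower_half.size

# Synthetic mask standing in for a real YOLOP output:
mask = np.zeros((120, 160), dtype=np.int32)
mask[80:, 40:120] = 1  # pretend YOLOP marked a corridor ahead
print(drivable_fraction(mask))  # 0.333...
if drivable_fraction(mask) < 0.3:
    print("Little drivable area ahead; consider slowing or stopping.")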

Credits

Xiangxin Wan
Lechuan Sun
Zening Li
Chenran Li
