Chuanhong Guo, Yankui Wang
Published under the MIT license

Self-driving Car Based on Learning from Vision Demonstration

A model car driven by a KV260 with a USB camera mounted on the front.

Advanced · Full instructions provided · 20 hours

Things used in this project

Hardware components

AMD Kria KV260 Vision AI Starter Kit ×1
USB Webcam ×1
Ackermann-steering model car with ROS support ×1

Software apps and online services

AMD Vitis Unified Software Platform
ROS (Robot Operating System)
Ubuntu 20.04 with ROS Noetic and Docker installed

Story


Code

extractor.py

Python
Extracts training data from a rosbag recording
#!/usr/bin/env python3
import rosbag
from cv_bridge import CvBridge
import cv2
import csv
import sys

# CvBridge is the utility to convert a ROS image message into an OpenCV matrix.
bridge = CvBridge()

# usage: extractor.py {label.csv} {input.bag} {extracted_image_path}
with rosbag.Bag(sys.argv[2], 'r') as bag, open(sys.argv[1], 'a', newline='') as csvfile:
        speed = 0
        angle = 0
        img_counter = 0
        # use CSV writer to write our CSV file
        writer = csv.writer(csvfile)
        # only read the camera topic and command topic
        for topic, msg, t in bag.read_messages(topics=['/usb_cam/image_raw', '/ackermann_cmd']):
                if topic == '/ackermann_cmd':
                        # record the current speed and steering angle
                        print("command: speed {} angle {}".format(msg.drive.speed,msg.drive.steering_angle))
                        speed = msg.drive.speed
                        angle = msg.drive.steering_angle
                # If the speed is positive (the car is actually driving),
                # save the image and the steering angle as a labeled sample:
                elif speed > 0: # topic == '/usb_cam/image_raw'
                        cv_image = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
                        # This is used for the lane scene. The car should be following drawn lanes,
                        # which only appears on the lower half of the image. Let's crop off the upper
                        # half because we don't need it here.
                        img_roi = cv_image[240:480, 0:640]
                        img_name = '{}/{}.png'.format(sys.argv[3], img_counter)
                        # save the image
                        cv2.imwrite(img_name, img_roi)
                        # save the label into CSV.
                        writer.writerow([img_name, angle])
                        img_counter += 1
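
A quick way to sanity-check the extracted dataset is to read the CSV back and look at the number of samples and the range of recorded steering angles. A minimal sketch, assuming the labels.csv path that was passed to the extractor:

#!/usr/bin/env python3
# Sanity check for the extracted dataset (assumes labels.csv written by extractor.py).
import csv

angles = []
with open('labels.csv', newline='') as csvfile:
    for img_name, angle in csv.reader(csvfile):
        angles.append(float(angle))

print('samples:', len(angles))
print('steering angle min/max:', min(angles), max(angles))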

lane_dl_xlnx.launch

XML
Launch file for the deployment node
<launch>
  <node name="lane_dl_xlnx" pkg="lane_dl_xlnx" type="lane_dl_xlnx_node">
    <param name="cam" value="/dev/video0" />
    <param name="cmd_topic" value="ackermann_cmd" />
    <param name="model" value="$(find lane_dl_xlnx)/model/lane.xmodel" />
    <param name="speed" value="0.2" />
  </node>
</launch>

dataset.py

Python
Dataset
import os
import csv
import torch
from torchvision.io import read_image
from torch.utils.data import Dataset

class LaneDataset(Dataset):
    def __init__(self, label_path, transform=None, target_transform=None):
        self.transform = transform
        self.target_transform = target_transform
        self.labels = []
        with open(label_path, 'r', newline='') as csvfile:
            csv_reader = csv.reader(csvfile)
            for row in csv_reader:
                self.labels.append(row)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        img_path = os.path.join(self.labels[idx][0])
        image = read_image(img_path)
        label = float(self.labels[idx][1])
        label = torch.tensor([label])
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label
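
A minimal usage sketch for the dataset class, assuming labels.csv from the extractor and the same normalization used later in training; each sample should come back as a 3x240x640 float tensor plus a one-element label tensor:

# Usage sketch for LaneDataset (assumes labels.csv produced by extractor.py).
import torch
import torchvision.transforms as transforms
from dataset import LaneDataset

transform = transforms.Compose([
    transforms.ConvertImageDtype(torch.float),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

ds = LaneDataset('labels.csv', transform=transform)
image, label = ds[0]
print(image.shape, label)  # expected: torch.Size([3, 240, 640]) and a 1-element tensor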

model_reg.py

Python
neural network
import torch
from torch import nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(3, 3)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 24 * 69, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 7)
        self.fc4 = nn.Linear(7, 1)

    def forward(self, x):
        #print(x.shape)
        x = self.pool(F.relu(self.conv1(x)))
        #print(x.shape)
        x = self.pool(F.relu(self.conv2(x)))
        #print(x.shape)
        x = torch.flatten(x, 1) # flatten all dimensions except batch
        #print(x.shape)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        x = self.fc4(x)
        return x
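
The in_features of fc1 follows from the input size: a 3x240x640 crop becomes 236x636 after conv1 (5x5, no padding), 78x212 after the 3x3 max pool, 74x208 after conv2, and 24x69 after the second pool, giving 16*24*69 flattened features. A small sketch to confirm the shapes with a dummy input:

# Shape check for Net with a dummy 240x640 RGB crop (matches the extractor's ROI).
import torch
from model_reg import Net

net = Net()
dummy = torch.randn(1, 3, 240, 640)
out = net(dummy)
print(out.shape)  # expected: torch.Size([1, 1]) - a single steering angle per image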

train.py

Python
Training script
#!/usr/bin/env python3
import torch
from torch.utils.data import DataLoader
from torch import nn
import torchvision.transforms as transforms
import torch.optim as optim
from dataset import LaneDataset
from model_reg import Net

transform = transforms.Compose([
    transforms.ConvertImageDtype(torch.float),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

train_dataset = LaneDataset('labels.csv', transform=transform)
dataloader = DataLoader(train_dataset, batch_size=500, shuffle=True, num_workers=6)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Using {device} device')

net = Net()
net.to(device)

criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

for epoch in range(20):  # loop over the dataset multiple times

    running_loss = 0.0
    for i, data in enumerate(dataloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        print('[%d, %d] loss: %.5f' % (epoch, i, running_loss))
        running_loss = 0.0
    # decay the learning rate once per epoch
    scheduler.step()
    torch.save(net.state_dict(), 'state_dict_rgb.pth')

print('Finished Training')
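
After training, the saved weights can be sanity-checked by predicting the steering angle for one of the training images and comparing it against the recorded label. A minimal sketch, assuming state_dict_rgb.pth and labels.csv exist:

# Post-training sanity check (assumes state_dict_rgb.pth and labels.csv).
import torch
import torchvision.transforms as transforms
from dataset import LaneDataset
from model_reg import Net

transform = transforms.Compose([
    transforms.ConvertImageDtype(torch.float),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

net = Net()
net.load_state_dict(torch.load('state_dict_rgb.pth'))
net.eval()

image, label = LaneDataset('labels.csv', transform=transform)[0]
with torch.no_grad():
    pred = net(image.unsqueeze(0))
print('predicted:', pred.item(), 'recorded:', label.item())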

quantize_xlnx.py

Python
Quantization script
import pytorch_nndct

import torch
from torch.utils.data import DataLoader
from torch import nn
import torchvision.transforms as transforms
import torch.optim as optim
from dataset import LaneDataset
from model_reg import Net

from pytorch_nndct.apis import torch_quantizer, dump_xmodel
import sys

quant_mode = sys.argv[1]
if quant_mode == 'calib':
  batch_size = 100
elif quant_mode == 'test':
  batch_size = 1
else:
  sys.exit('usage: quantize_xlnx.py calib|test')

transform = transforms.Compose([
    transforms.ConvertImageDtype(torch.float),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

test_dataset = LaneDataset('dataset.csv', transform=transform)
testloader = DataLoader(test_dataset, batch_size=batch_size)

net = Net()

net.load_state_dict(torch.load('state_dict_rgb.pth'))
#net.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Using {device} device')
device = torch.device(device)

input = torch.randn([batch_size, 3, 240, 640])
quantizer = torch_quantizer(quant_mode, net, (input), device=device)
quant_model = quantizer.quant_model

quant_model.eval()
quant_model.to(device)


def evaluate(model, val_loader, loss_fn):
  correct = 0.0
  total = 0
  for data in val_loader:
      inputs, labels = data
      inputs = inputs.to(device)
      labels = labels.to(device)
      # calculate outputs by running images through the network
      outputs = model(inputs)
      total += labels.size(0)
      correct += (labels - outputs).abs().sum()
      #print("Group ",(labels - outputs))
  print(f'avg err {correct / total}')

loss_fn = nn.MSELoss().to(device)

evaluate(quant_model, testloader, loss_fn)

if quant_mode == 'calib':
  quantizer.fast_finetune(evaluate, (quant_model, testloader, loss_fn))
elif quant_mode == 'test':
  quantizer.load_ft_param()

# export config
if quant_mode == 'calib':
  quantizer.export_quant_config()
if quant_mode == 'test':
  quantizer.export_xmodel()
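
The script is run twice: first in 'calib' mode to collect calibration statistics and fast-finetune the parameters, then in 'test' mode with batch size 1 to export the quantized xmodel. A minimal driver sketch, assuming quantize_xlnx.py sits in the current directory and a Vitis AI PyTorch environment is active:

# Two-pass quantization flow: calibrate, then export the xmodel.
import subprocess

for mode in ('calib', 'test'):
    subprocess.run(['python3', 'quantize_xlnx.py', mode], check=True)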

kv260_vitis_2021.2.patch

Diff
A patch adding Vitis 2021.2 support to the Xilinx-provided Tcl scripts.
--- a/platforms/vivado/kv260_ispMipiRx_vcu_DP/scripts/main.tcl
+++ b/platforms/vivado/kv260_ispMipiRx_vcu_DP/scripts/main.tcl
@@ -3,7 +3,7 @@
 
 set proj_name kv260_ispMipiRx_vcu_DP
 set proj_dir ./project
-set proj_board [get_board_parts "*:kv260:*" -latest_file_version]
+set proj_board [get_board_parts "*:kv260_som:*" -latest_file_version]
 set bd_tcl_dir ./scripts
 set board vision_som
 set device k26
@@ -28,7 +28,7 @@ set_property board_part $proj_board [current_project]
 
 import_files -fileset constrs_1 $xdc_list
 
-set_property board_connections {som240_1_connector xilinx.com:som240:som240_1_connector:1.0}  [current_project]
+set_property board_connections {som240_1_connector xilinx.com:kv260_carrier:som240_1_connector:1.2}  [current_project]
 
 
 set_property ip_repo_paths $ip_repo_path [current_project]

vai20-2uart.dtsi

C/C++
The complete device tree overlay
// SPDX-License-Identifier: GPL-2.0
/*
 * dts file for Xilinx KV260 smartcam
 *
 * (C) Copyright 2020 - 2021, Xilinx, Inc.
 *
 */

/dts-v1/;
/plugin/;
#include <dt-bindings/thermal/thermal.h>

&fpga_full {
	#address-cells = <2>;
	#size-cells = <2>;
	firmware-name = "vai20-2uart.bit.bin";
	resets = <&zynqmp_reset 116>, <&zynqmp_reset 117>, <&zynqmp_reset 118>, <&zynqmp_reset 119>;
};

&uart0 {
	status = "okay";
};

&zynqmp_dpsub {
	status = "okay";
};

&zynqmp_dp_snd_pcm0 {
	status = "okay";
};

&zynqmp_dp_snd_pcm1 {
	status = "okay";
};

&zynqmp_dp_snd_card0 {
	status = "okay";
};

&zynqmp_dp_snd_codec0 {
	status = "okay";
};

&amba {
	afi0: afi0 {
		compatible = "xlnx,afi-fpga";
		config-afi = <0 0>, <1 0>, <2 0>, <3 0>, <4 0>, <5 0>, <6 0>, <7 0>, <8 0>, <9 0>, <10 0>, <11 0>, <12 0>, <13 0>, <14 0x0>, <15 0x000>;
	};

	clocking0: clocking0 {
		#clock-cells = <0>;
		assigned-clock-rates = <99999001>;
		assigned-clocks = <&zynqmp_clk 71>;
		clock-output-names = "fabric_clk";
		clocks = <&zynqmp_clk 71>;
		compatible = "xlnx,fclk";
	};

	clocking1: clocking1 {
		#clock-cells = <0>;
		assigned-clock-rates = <99999001>;
		assigned-clocks = <&zynqmp_clk 72>;
		clock-output-names = "fabric_clk";
		clocks = <&zynqmp_clk 72>;
		compatible = "xlnx,fclk";
	};

	/* fpga clocks */
	misc_clk_0: misc_clk_0 {
		#clock-cells = <0x0>;
		clock-frequency = <99999000>;
		compatible = "fixed-clock";
	};

	misc_clk_1: misc_clk_1 {
		#clock-cells = <0x0>;
		clock-frequency = <199998000>;
		compatible = "fixed-clock";
	};

	misc_clk_2: misc_clk_2 {
		#clock-cells = <0x0>;
		clock-frequency = <299997000>;
		compatible = "fixed-clock";
	};

	misc_clk_5: misc_clk_5 {
		#clock-cells = <0x0>;
		clock-frequency = <49999500>;
		compatible = "fixed-clock";
	};

	misc_clk_6: misc_clk_6 {
		#clock-cells = <0x0>;
		clock-frequency = <18432019>;
		compatible = "fixed-clock";
	};

	/* ar1335 isp mipi rx pipeline */
	ap1302_clk: sensor_clk {
		#clock-cells = <0x0>;
		compatible = "fixed-clock";
		clock-frequency = <0x48000000>;
	};

	ap1302_vdd: fixedregulator@0 {
		compatible = "regulator-fixed";
		regulator-name = "ap1302_vdd";
		regulator-min-microvolt = <2800000>;
		regulator-max-microvolt = <2800000>;
		enable-active-high;
	};

	ap1302_vaa: fixedregulator@1 {
		compatible = "regulator-fixed";
		regulator-name = "ap1302_vaa";
		regulator-min-microvolt = <1800000>;
		regulator-max-microvolt = <1800000>;
	};

	ap1302_vddio: fixedregulator@2 {
		compatible = "regulator-fixed";
		regulator-name = "ap1302_vddio";
		regulator-min-microvolt = <1200000>;
		regulator-max-microvolt = <1200000>;
	};

	serial@80010000 {
		compatible = "xlnx,xps-uartlite-1.00.a";
		reg = <0 0x80010000 0 0x10000>;
		interrupt-parent = <&gic>;
		interrupts = <0 107 4>;
		clock = <100000000>;
	};

	isp_csiss: csiss@80000000 {
		compatible = "xlnx,mipi-csi2-rx-subsystem-5.0";
		reg = <0x0 0x80000000 0x0 0x10000>;
		clock-names = "lite_aclk", "dphy_clk_200M", "video_aclk";
		clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_2>;
		interrupt-parent = <&gic>;
		interrupts = <0 104 4>;
		xlnx,csi-pxl-format = <0x18>;
		xlnx,axis-tdata-width = <32>;
		xlnx,max-lanes = <4>;
		xlnx,en-active-lanes;
		xlnx,vc = <4>;
		xlnx,ppc = <2>;
		xlnx,vfb;

		ports {
			#address-cells = <0x1>;
			#size-cells = <0x0>;

			port@1 {
				reg = <0x1>;
				xlnx,video-format = <0x3>;
				xlnx,video-width = <0x8>;

				isp_csiss_out: endpoint {
					remote-endpoint = <&isp_vcap_csi_in>;
				};
			};
			port@0 {
				reg = <0x0>;
				xlnx,video-format = <0x3>;
				xlnx,video-width = <0x8>;

				isp_csiss_in: endpoint {
					data-lanes = <1 2 3 4>;
					remote-endpoint = <&isp_out>;
				};
			};
		};
	};

	isp_fb_wr_csi: fb_wr@b0010000 {
		compatible = "xlnx,axi-frmbuf-wr-v2.1";
		reg = <0x0 0xb0010000 0x0 0x10000>;
		#dma-cells = <1>;
		interrupt-parent = <&gic>;
		interrupts = <0 105 4>;
		xlnx,vid-formats = "nv12";
		reset-gpios = <&gpio 78 1>;
		xlnx,dma-addr-width = <32>;
		xlnx,pixels-per-clock = <2>;
		xlnx,max-width = <3840>;
		xlnx,max-height = <2160>;
		clocks = <&misc_clk_2>;
		clock-names = "ap_clk";
	};

	isp_vcap_csi {
		compatible = "xlnx,video";
		dmas = <&isp_fb_wr_csi 0>;
		dma-names = "port0";

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			port@0 {
				reg = <0>;
				direction = "input";

				isp_vcap_csi_in: endpoint {
					remote-endpoint = <&isp_csiss_out>;
				};
			};
		};
	};

	/* vcu encode/decode */
	vcu: vcu@80100000 {
		#address-cells = <2>;
		#clock-cells = <1>;
		#size-cells = <2>;
		clock-names = "pll_ref", "aclk", "vcu_core_enc", "vcu_core_dec", "vcu_mcu_enc", "vcu_mcu_dec";
		clocks = <&misc_clk_5>, <&misc_clk_0>, <&vcu 1>, <&vcu 2>, <&vcu 3>, <&vcu 4>;
		compatible = "xlnx,vcu-1.2", "xlnx,vcu";
		interrupt-names = "vcu_host_interrupt";
		interrupt-parent = <&gic>;
		interrupts = <0 106 4>;
		ranges;
		reg = <0x0 0x80140000 0x0 0x1000>, <0x0 0x80141000 0x0 0x1000>;
		reg-names = "vcu_slcr", "logicore";
		reset-gpios = <&gpio 80 0>;
		xlnx,skip-isolation;

		al5e: al5e@80100000 {
			compatible = "al,al5e-1.2", "al,al5e";
			interrupt-parent = <&gic>;
			interrupts = <0 106 4>;
			reg = <0x0 0x80100000 0x0 0x10000>;
		};

		al5d: al5d@80120000 {
			compatible = "al,al5d-1.2", "al,al5d";
			interrupt-parent = <&gic>;
			interrupts = <0 106 4>;
			reg = <0x0 0x80120000 0x0 0x10000>;
		};
	};

	/* zocl */
	zocl: zyxclmm_drm {
		compatible = "xlnx,zocl";
		status = "okay";
		interrupt-parent = <&gic>;
		interrupts = <0 89  4>, <0 90  4>, <0 91  4>, <0 92  4>,
			     <0 93  4>, <0 94  4>, <0 95  4>, <0 96  4>;
	};
};

/*
 * This must come after ap1302_clk!
 * The ap1302 driver is buggy and can't handle probe deferring.
 */
&i2c0 {
	status = "okay";
	i2c_mux: i2c-mux@74 {
		compatible = "nxp,pca9546";
		#address-cells = <1>;
		#size-cells = <0>;
		reg = <0x74>;
		i2c@0 {
			#address-cells = <1>;
			#size-cells = <0>;
			reg = <0>;
			ap1302: isp@3c {
				compatible = "onnn,ap1302";
				reg = <0x3c>;
				#address-cells = <1>;
				#size-cells = <0>;
				reset-gpios = <&gpio 79 1>;
				clocks = <&ap1302_clk>;
				sensors {
					#address-cells = <1>;
					#size-cells = <0>;
					onnn,model = "onnn,ar1335";
					sensor@0 {
						reg = <0>;
						vdd-supply = <&ap1302_vdd>;
						vaa-supply = <&ap1302_vaa>;
						vddio-supply = <&ap1302_vddio>;
					};
				};
				ports {
					#address-cells = <1>;
					#size-cells = <0>;
					port@2 {
						reg = <2>;
						isp_out: endpoint {
							remote-endpoint = <&isp_csiss_in>;
							data-lanes = <1 2 3 4>;
						};
					};
				};
			};
		};
	};
};

lane_dl_xlnx.cpp

C/C++
The model deployment code. Save it as main.cpp.
#include <iostream>
#include <xir/graph/graph.hpp>
#include <vart/runner.hpp>
#include <vart/runner_ext.hpp>
#include <cmath>

#include <opencv2/opencv.hpp>

#include <ros/ros.h>
#include <ackermann_msgs/AckermannDriveStamped.h>

int main(int argc, char *argv[]) {
    ros::init(argc, argv, "lane_dl_xlnx");
    ros::NodeHandle n;
    ros::NodeHandle pn("~");

    std::string cmd_topic, cam_path, model_path;
    // node parameters
    pn.param<std::string>("cmd_topic", cmd_topic, "ackermann_cmd");
    pn.param<std::string>("cam", cam_path, "/dev/video0");
    pn.param<std::string>("model", model_path, "net.xmodel");
    // publisher for the speed command
    ros::Publisher speed_pub = n.advertise<ackermann_msgs::AckermannDriveStamped>(cmd_topic, 1);

    // load the compiled model
    auto graph = xir::Graph::deserialize(model_path);
    auto root = graph->get_root_subgraph();
    xir::Subgraph *subgraph = nullptr;
    // and get the DPU subgraph
    for (auto c: root->children_topological_sort()) {
        CHECK(c->has_attr("device"));
        if (c->get_attr<std::string>("device") == "DPU") {
            subgraph = c;
            break;
        }
    }
    if (!subgraph) {
        ROS_ERROR_STREAM("no dpu subgraph in xmodel.");
        exit(-1);
    }
    auto attrs = xir::Attrs::create();
    std::unique_ptr<vart::RunnerExt> runner = vart::RunnerExt::create_runner(subgraph, attrs.get());

    // get input & output tensor buffers
    auto input_tensor_buffers = runner->get_inputs();
    auto output_tensor_buffers = runner->get_outputs();
    ROS_ERROR_COND(input_tensor_buffers.size() != 1u, "unsupported model");
    ROS_ERROR_COND(output_tensor_buffers.size() != 1u, "unsupported model");

    // get input_scale & output_scale
    auto input_tensor = input_tensor_buffers[0]->get_tensor();
    auto input_fixpos = input_tensor->template get_attr<int>("fix_point");
    //ROS_ERROR_COND(input_fixpos != 7, "hack doesn't work. Write it properly...");
    auto input_scale = std::exp2f(1.0f * (float)input_fixpos);

    auto output_tensor = output_tensor_buffers[0]->get_tensor();
    auto output_fixpos = output_tensor->template get_attr<int>("fix_point");
    auto output_scale = std::exp2f(-1.0f * (float) output_fixpos);

    auto n_batch = input_tensor->get_shape().at(0);
    ROS_ERROR_COND(n_batch != 1, "unsupported batch size");
    auto n_height = input_tensor->get_shape().at(1);
    auto n_width = input_tensor->get_shape().at(2);
    auto n_channel = input_tensor->get_shape().at(3);

    ROS_WARN_STREAM("Model H" << n_height << " W" << n_width);

    // open the camera
    cv::VideoCapture cap;
    cap.open(cam_path.c_str());

    if (!cap.isOpened()) {
        ROS_ERROR_STREAM("cannot open " << cam_path);
        exit(-1);
    }

    cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);

    ackermann_msgs::AckermannDriveStamped pub_msg;
    double drive_speed;
    // read the speed from the node's private namespace, matching the launch file
    pn.param<double>("speed", drive_speed, 0.3);
    pub_msg.drive.speed = drive_speed;

    while(ros::ok()) {
        // get a frame
        cv::Mat frame;
        if (!cap.read(frame)) {
            ROS_ERROR("failed to capture a frame.");
            break;
        }
        // crop off the top half
        cv::Mat cropped_ref(frame, cv::Rect(0, 240, 640, 240));

        // preprocessing
        uint64_t data_in = 0u;
        size_t size_in = 0u;
        // set the input image and preprocessing
        std::tie(data_in, size_in) = input_tensor_buffers[0]->data(std::vector<int>{0, 0, 0, 0});
        CHECK_NE(size_in, 0u);
        auto *data_in_arr = (int8_t *) data_in;
        int parr = 0;
        for (auto row = 0; row < cropped_ref.rows; row++) {
            for (auto col = 0; col < cropped_ref.cols; col++) {
                auto pixel = cropped_ref.at<cv::Vec3b>(row, col);
                // CV image is BGR while our model input is RGB. Flip it.
                for (int pix = 2; pix >= 0; pix--) {
                    auto val = pixel[pix];
                    float val_float = ((((float)val / 255.0f) - 0.5f) / 0.5f) * input_scale;
                    val_float = std::max(std::min(val_float, 127.0f), -128.0f);
                    data_in_arr[parr++] = (int8_t) val_float;
                }
            }
        }
        // sync data for input
        for (auto &input: input_tensor_buffers) {
            input->sync_for_write(0, input->get_tensor()->get_data_size() /
                                     input->get_tensor()->get_shape()[0]);
        }
        // start the dpu
        auto v = runner->execute_async(input_tensor_buffers, output_tensor_buffers);

        // do ROS work while DPU is running
        ros::spinOnce();

        auto status = runner->wait((int) v.first, -1);
        ROS_ERROR_COND(status != 0, "failed to run dpu");
        // sync data for output
        for (auto &output: output_tensor_buffers) {
            output->sync_for_read(0, output->get_tensor()->get_data_size() /
                                     output->get_tensor()->get_shape()[0]);
        }
        uint64_t data_o = 0u;
        size_t size_o = 0u;
        std::tie(data_o, size_o) = output_tensor_buffers[0]->data(std::vector<int>{0, 0});
        // convert the int value back to float
        int8_t val = *(int8_t *) data_o;
        float val_fp = (float) val * output_scale;
        ROS_DEBUG_STREAM("Deg int :" << (int)val << " dec " << val_fp);
        // publish the speed command
        pub_msg.drive.steering_angle = val_fp;
        speed_pub.publish(pub_msg);
    }
    speed_pub.publish(ackermann_msgs::AckermannDriveStamped());
    ros::spinOnce();
    return 0;
}
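
The preprocessing loop above mirrors the training-time transform in fixed point: each channel is normalized to [-1, 1] and then multiplied by input_scale = 2^fix_point before being clamped to the int8 range. A small sketch of the same arithmetic in Python, where the fix_point value is only an example (the real one is read from the input tensor at runtime):

# Fixed-point preprocessing equivalent to the C++ loop above.
import numpy as np

fix_point = 6                        # example value; queried from the xmodel at runtime
input_scale = 2.0 ** fix_point

pixel = 200                          # one 8-bit channel value from the camera frame
val = ((pixel / 255.0) - 0.5) / 0.5  # same normalization as used during training
val = np.clip(val * input_scale, -128, 127)
print(np.int8(val))                  # the int8 value written into the DPU input buffer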

CMakeLists.txt

CMake
CMake script for the ROS package
cmake_minimum_required(VERSION 3.0.2)
project(lane_dl_xlnx)

## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)

## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
  roscpp
  ackermann_msgs
  sensor_msgs
)

## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)

find_package(OpenCV REQUIRED)
find_package(xir REQUIRED)
find_package(unilog REQUIRED)
find_package(vart REQUIRED COMPONENTS util runner dpu-controller xrt-device-handle buffer-object runner-assistant)

catkin_package()

## Specify additional locations of header files
include_directories(
  ${catkin_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
  ${xir_INCLUDE_DIRS}
  ${vart_INCLUDE_DIRS}
)

## Declare a C++ executable
add_executable(${PROJECT_NAME}_node src/main.cpp)

## Add cmake target dependencies of the executable
## same as for the library above
add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})

## Specify libraries to link a library or executable target against
target_link_libraries(${PROJECT_NAME}_node
  ${catkin_LIBRARIES} ${OpenCV_LIBS} vart::runner
)

## Mark executables for installation
install(TARGETS ${PROJECT_NAME}_node
  RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

Credits

Chuanhong Guo
Yankui Wang