Elephant Robotics

Object Tracking on myCobot 280 Jetson Nano: A Case Study

This article walks through the development of a project involving object detection, tracking, and control of a robotic arm using the myCobot 280 Jetson Nano.

Intermediate · Protip · 2 hours · 1,656 views

Things used in this project

Hardware components

NVIDIA Jetson Nano Developer Kit
×1
Elephant Robotics myCobot-6 DOF collaborative robot
×1

Software apps and online services

OpenCV

Hand tools and fabrication machines

Elephant Robotics myCobot 280 Jetson Nano
Elephant Robotics Camera Flange

Story


Code

eye to hand

Python
# Choose the right serial port for your robotic arm before running.
import cv2
import numpy as np
from pymycobot.mycobot import MyCobot
import time
import platform
mc = MyCobot('COM3', 115200)  # serial port name and baud rate
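# NOTE (assumption, not from the original article): 'COM3' is a Windows-style
# port name. On the myCobot 280 Jetson Nano the arm is typically reachable at
# '/dev/ttyTHS1' with a baud rate of 1000000; list candidates with `ls /dev/tty*`.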

def model_track(camera):
    # Convert the marker position from the camera frame into the robot frame
    # and add the camera's fixed position in the robot frame.
    model_pos = np.array([-camera[0], -camera[2], -camera[1]])
    camera_pos = np.array([-37.5, 416.6, 322.9])  # camera position in the robot frame (mm)
    target_pos = model_pos + camera_pos
    return target_pos


def calculate_similarity(camera):
    # Sum of pairwise cosine similarities of the row vectors of `camera`
    # (an n x 3 array), divided by n.
    n = camera.shape[0]
    total_similarity = 0
    for i in range(n):
        for j in range(i + 1, n):
            vector_a = camera[i]
            vector_b = camera[j]
            dot_product = np.dot(vector_a, vector_b)
            norm_a = np.linalg.norm(vector_a)
            norm_b = np.linalg.norm(vector_b)
            similarity = dot_product / (norm_a * norm_b)
            total_similarity += similarity
    return total_similarity / n

def similarity_change_rate(new_similarity):
    # Relative change of the similarity score compared with the previous call.
    global prev_similarity
    if prev_similarity is None:
        prev_similarity = new_similarity
        return 0
    else:
        change_rate = (new_similarity - prev_similarity) / prev_similarity
        prev_similarity = new_similarity
        return change_rate
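# The two similarity helpers above are not called in the tracking loop below.
# One possible use (an assumption, not part of the original flow) is to hold
# off sending a new target until recent marker poses have stabilised:
#   sim = calculate_similarity(np.array(recent_poses))  # recent_poses: n x 3
#   if abs(similarity_change_rate(sim)) < 0.01:
#       ...  # pose is stable; safe to move the arm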


def CvtRotationMatrixToEulerAngle(pdtRotationMatrix):
    # Rotation matrix -> ZYX Euler angles (roll, pitch, yaw), in radians.
    pdtEulerAngle = np.zeros(3)

    pdtEulerAngle[2] = np.arctan2(pdtRotationMatrix[1, 0], pdtRotationMatrix[0, 0])

    fCosRoll = np.cos(pdtEulerAngle[2])
    fSinRoll = np.sin(pdtEulerAngle[2])

    pdtEulerAngle[1] = np.arctan2(-pdtRotationMatrix[2, 0], (fCosRoll * pdtRotationMatrix[0, 0]) + (fSinRoll * pdtRotationMatrix[1, 0]))
    pdtEulerAngle[0] = np.arctan2((fSinRoll * pdtRotationMatrix[0, 2]) - (fCosRoll * pdtRotationMatrix[1, 2]), (-fSinRoll * pdtRotationMatrix[0, 1]) + (fCosRoll * pdtRotationMatrix[1, 1]))

    return pdtEulerAngle

def CvtEulerAngleToRotationMatrix(ptrEulerAngle):
    # ZYX Euler angles (roll, pitch, yaw) in radians -> rotation matrix Rz @ Ry @ Rx.
    ptrSinAngle = np.sin(ptrEulerAngle)
    ptrCosAngle = np.cos(ptrEulerAngle)

    ptrRotationMatrix = np.zeros((3, 3))
    ptrRotationMatrix[0, 0] = ptrCosAngle[2] * ptrCosAngle[1]
    ptrRotationMatrix[0, 1] = ptrCosAngle[2] * ptrSinAngle[1] * ptrSinAngle[0] - ptrSinAngle[2] * ptrCosAngle[0]
    ptrRotationMatrix[0, 2] = ptrCosAngle[2] * ptrSinAngle[1] * ptrCosAngle[0] + ptrSinAngle[2] * ptrSinAngle[0]
    ptrRotationMatrix[1, 0] = ptrSinAngle[2] * ptrCosAngle[1]
    ptrRotationMatrix[1, 1] = ptrSinAngle[2] * ptrSinAngle[1] * ptrSinAngle[0] + ptrCosAngle[2] * ptrCosAngle[0]
    ptrRotationMatrix[1, 2] = ptrSinAngle[2] * ptrSinAngle[1] * ptrCosAngle[0] - ptrCosAngle[2] * ptrSinAngle[0]
    ptrRotationMatrix[2, 0] = -ptrSinAngle[1]
    ptrRotationMatrix[2, 1] = ptrCosAngle[1] * ptrSinAngle[0]
    ptrRotationMatrix[2, 2] = ptrCosAngle[1] * ptrCosAngle[0]

    return ptrRotationMatrix
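# Round-trip sanity check (illustrative comment only, not executed):
#   ang = np.array([0.1, 0.2, 0.3])
#   mat = CvtEulerAngleToRotationMatrix(ang)
#   assert np.allclose(CvtRotationMatrixToEulerAngle(mat), ang)
# This holds whenever the pitch angle stays inside (-pi/2, pi/2).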

def Visual_tracking(coord, camera):
    # Map the marker pose seen by the camera into a target pose for the arm.
    pose_camera = camera[:3]   # marker position in the camera frame (mm)
    angle_camera = camera[3:]  # marker orientation (radians)
    r = CvtEulerAngleToRotationMatrix(angle_camera)
    euler = coord[3:]
    R = CvtEulerAngleToRotationMatrix(euler)
    offset = np.array([0, -33.9, -138])  # fixed offset applied in the camera frame (mm)
    Roff = np.array([[1, 0, 0],
                     [0, -1, 0],
                     [0, 0, -1]])
    vector = pose_camera + offset
    pos = coord[:3] + np.dot(np.dot(R, r), Roff).dot(vector)
    angle = np.array(CvtRotationMatrixToEulerAngle(np.dot(np.dot(R, r), Roff))) * 180/np.pi
    target = np.concatenate((pos, angle))
    return target

def Visual_tracking280(coord, camera):
    # Variant of Visual_tracking for the myCobot 280: the pose angles in
    # `coord` arrive in degrees, and the end orientation is kept fixed.
    pose_camera = camera[:3]
    angle_camera = camera[3:]
    r = CvtEulerAngleToRotationMatrix(angle_camera)
    euler = np.radians(coord[3:])
    R = CvtEulerAngleToRotationMatrix(euler)
    offset = np.array([0, 0, -250])
    Roff = np.array([[1, 0, 0],
                     [0, -1, 0],
                     [0, 0, -1]])
    vector = pose_camera + offset

    move_pos = np.dot(np.dot(R, r), Roff).dot(vector)
    pos = coord[:3] + move_pos
    angle = coord[3:]  # keep the arm's current orientation
    target = np.concatenate((pos, angle))
    return target
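# Example call (hypothetical values, for illustration only):
#   current = np.array(mc.get_coords())    # [x, y, z, rx, ry, rz] in mm / degrees
#   marker = np.concatenate((xyz, rpy))    # pose from estimatePoseSingleMarkers
#   target = Visual_tracking280(current, marker)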

class Detect_marker():
    def __init__(self):

        # Create the camera object (the device index may differ per machine)
        if platform.system() == "Windows":
            cap_num = 1
            self.cap = cv2.VideoCapture(cap_num)
        elif platform.system() == "Linux":
            cap_num = 0
            self.cap = cv2.VideoCapture(cap_num)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)
        # self.cap = cv2.VideoCapture('detect.mp4')

        # placement selector for the cube
        self.color = 0

        # ArUco marker dictionary and detector parameters
        # (legacy cv2.aruco API used by OpenCV versions before 4.7)
        self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
        self.aruco_params = cv2.aruco.DetectorParameters_create()
        # Camera intrinsic matrix (from a prior calibration, specific to the
        # author's camera; see the calibration sketch after this listing)
        self.camera_matrix = np.array([
            [781.33379113, 0., 347.53500524],
            [0., 783.79074192, 246.67627253],
            [0., 0., 1.]])

        # Camera distortion coefficients (from the same calibration)
        self.dist_coeffs = np.array([[3.41360787e-01, -2.52114260e+00, -1.28012469e-03, 6.70503562e-03,
                                      2.57018000e+00]])


    def show_video_v2(self):
        xyz = np.array([0, 0, 0])
        LIST = []
        num_count = 0
        list_len = 5
        # workspace limits (mm) used to clamp the target position
        cmax = [150, -150, 300]
        cmin = [-150, -250, 200]

        while cv2.waitKey(1) < 0:
            success, img = self.cap.read()
            if not success:
                print("Failed to read a frame from the camera.")
                break
            # convert the image to grayscale
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Detect ArUco marker.
            corners, ids, rejectImaPoint = cv2.aruco.detectMarkers(
                gray, self.aruco_dict, parameters=self.aruco_params
            )

            if len(corners) > 0:
                if ids is not None:
                    # Estimate each marker's pose; 0.025 is the marker side length
                    # in metres (https://stackoverflow.com/questions/53303730/what-is-the-value-for-markerlength-in-aruco-estimateposesinglemarkers)
                    ret = cv2.aruco.estimatePoseSingleMarkers(
                        corners, 0.025, self.camera_matrix, self.dist_coeffs
                    )
                    # rvec: rotation vectors, tvec: translation vectors
                    (rvec, tvec) = (ret[0], ret[1])

                    xyz = tvec[0, 0, :] * 1000  # convert metres to millimetres
                    rpy = rvec[0, 0, :]

                    camera = np.array([xyz[0], xyz[1], xyz[2]])

                    if num_count > list_len:
                        target = model_track(camera)
                        print("target", target)

                        # clamp the target position to the workspace limits
                        for i in range(3):
                            if target[i] > cmax[i]:
                                target[i] = cmax[i]
                            if target[i] < cmin[i]:
                                target[i] = cmin[i]

                        # fixed end orientation (rx, ry, rz in degrees)
                        pose = np.array([-103, 8.9, -164])
                        coord = np.concatenate((target.copy(), pose), axis=0)

                        # send_coords expects a plain list of six values
                        mc.send_coords(coord.tolist(), 50, 0)

                        num_count = 1
                    else:
                        num_count = num_count + 1
                    

                    # draw the detected markers on the frame
                    cv2.aruco.drawDetectedMarkers(img, corners)
            cv2.imshow("show_video", img)

        return xyz[0], xyz[1]

if __name__ == "__main__":
    # Enable fresh mode so a newly sent coordinate replaces the queued one
    if mc.get_fresh_mode() == 0:
        mc.set_fresh_mode(1)
    time.sleep(1)
    # Set the tool frame to the camera flange: 130 mm along the tool Z axis
    mc.set_tool_reference([0, 0, 130, 0, 0, 0])
    mc.set_end_type(1)


    prev_similarity = None
    time.sleep(1)
    # Move to the initial observation posture
    mc.send_angles([-77.78, 18.89, -103.62, 70.48, 5.53, 7.73], 50)
    time.sleep(3)

    detect = Detect_marker()
    print("Starting thread")
    detect.show_video_v2()
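
The intrinsic matrix and distortion coefficients hard-coded in the listing are specific to the author's camera. The sketch below (an addition for illustration, not part of the original project) shows one common way to obtain such values with OpenCV's chessboard calibration; the 9×6 inner-corner pattern and the calib/*.jpg image folder are assumptions.

import glob
import cv2
import numpy as np

# Inner-corner count of the printed chessboard (assumed 9x6; adjust to yours)
pattern = (9, 6)
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

obj_points, img_points = [], []
for path in glob.glob("calib/*.jpg"):  # hypothetical folder of calibration shots
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, pattern, None)
    if found:
        obj_points.append(objp)
        img_points.append(corners)

ret, camera_matrix, dist_coeffs, _, _ = cv2.calibrateCamera(
    obj_points, img_points, gray.shape[::-1], None, None)
print("camera_matrix:\n", camera_matrix)
print("dist_coeffs:", dist_coeffs.ravel())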

Credits

Elephant Robotics

An Innovative Robotics Company.