We would like to express our sincere gratitude to Chinmay Amrutkar for his valuable contributions and for making his work publicly available on GitHub: https://github.com/ChinmayAmrutkar. We also wish to acknowledge the Robotics Lab at Arizona State University for their contribution to the development of this project.
Product
The flexible cobot myCobot Pro 630 offers a large workspace and integrates smoothly into production environments, and its anti-collision detection function, based on a precise dynamic model, allows it to work safely alongside humans.
In this project, we control the myCobot Pro 630 robotic arm with digital twin technology, using computer vision for maze navigation. The pipeline captures an image of the maze, processes it, and solves the maze path; the solution is then converted into a trajectory the robotic arm can execute, enabling automated navigation with the collaborative arm.
1. Capture and Crop the Maze Image
First, the 'cv2.VideoCapture' function captures real-time images from the camera. The user presses the 'c' key to capture an image of the maze and then manually selects the region to crop.
import cv2
import numpy as np

def capture_and_crop():
    # Open the camera
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise Exception("Camera not accessible!")
    print("Adjust the camera and press 'c' to capture the image of the maze.")
    while True:
        ret, frame = cap.read()
        if not ret:
            continue
        cv2.imshow("Camera Feed", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('c'):
            captured_image = frame
            break
    cap.release()
    cv2.destroyAllWindows()
    # captured_image = cv2.imread('captured.jpeg')  # offline alternative
    print("Draw a rectangle to crop the maze.")
    r = cv2.selectROI("Select Maze Region", captured_image)
    cropped_maze = captured_image[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
    cv2.destroyAllWindows()
    # Save the cropped image
    output_path = 'cropped.png'
    cv2.imwrite(output_path, cropped_maze)
    print(f"Cropped maze image saved to {output_path}")
    return cropped_maze
2. Solve the Maze
Using computer-vision operations in OpenCV such as thresholding and contour detection, we extract the key geometric features of the maze image. The image is first binarized; one wall contour is then expanded by dilation and shrunk back by erosion, and the difference between the two masks yields the solution corridor, which is marked in green.
def solve_maze(cropped_img):
    # cropped_img is the path of the cropped maze image saved by capture_and_crop()
    orgimg = cv2.imread(cropped_img, 0)
    # orgimg = cropped_img  # alternative: pass the image array directly
    img = orgimg.copy()
    # Binarize with Otsu's threshold (inverted: walls become white)
    ret, binary_image = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # Find the outer wall contours and draw one wall thickly
    contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    path = np.zeros(binary_image.shape, np.uint8)
    cv2.drawContours(path, contours, 0, (255, 255, 255), 10)
    # Expand the white wall region, then shrink it back; the difference
    # between the dilated and eroded masks is the solution corridor
    kernel = np.ones((10, 10), np.uint8)
    dilated = cv2.dilate(path, kernel, iterations=11)
    eroded = cv2.erode(dilated, kernel, iterations=1)
    diff = cv2.absdiff(eroded, dilated)
    # Paint the solution corridor green on the original image
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    img[diff == 255] = (0, 255, 0)
    maze_solution_green = img
    # Save the image
    output_path = 'maze_solution_green.png'
    cv2.imwrite(output_path, maze_solution_green)
    print(f"Maze solution image saved to {output_path}")
    return maze_solution_green
3. Extract Waypoints
To ensure smooth movement of the robotic arm, we extract path points from the maze solution and cap their number based on the maze's dimensions, so the trajectory stays manageable rather than containing thousands of pixel-level points.
def extract_limited_waypoints(img, maze_dimensions, max_waypoints):
    # Mask of pure-green pixels (the solution corridor)
    green_mask = (img[:, :, 0] == 0) & (img[:, :, 1] == 255) & (img[:, :, 2] == 0)
    # Get pixel coordinates of the green path as (row, col) pairs
    path_pixels = np.column_stack(np.where(green_mask))
    # Normalize to the maze dimensions (assuming top-left (0, 0) and bottom-right at maze_dimensions)
    maze_height, maze_width = img.shape[:2]
    normalized_waypoints = [
        (
            (x / maze_width) * maze_dimensions[0],
            (y / maze_height) * maze_dimensions[1]
        )
        for y, x in path_pixels
    ]
    # Downsample so the trajectory contains no more than max_waypoints points
    if len(normalized_waypoints) > max_waypoints:
        step = len(normalized_waypoints) // max_waypoints
        limited_waypoints = normalized_waypoints[::step][:max_waypoints]
    else:
        limited_waypoints = normalized_waypoints
    # Append an exit point directly below the last waypoint, on the maze's bottom edge
    limited_waypoints_last = limited_waypoints[-1]
    print(limited_waypoints_last)
    limited_waypoints_1 = limited_waypoints + [(limited_waypoints_last[0], maze_dimensions[1])]
    return limited_waypoints_1
4. Verify the Waypoints
Before driving the robot, the waypoints are drawn back onto the solution image for a visual check.
def display_waypoints_on_image(img, waypoints, maze_dimensions, point_color=(0, 0, 255), point_radius=5):
    """
    Displays waypoints on the given image.
    - img: The image on which waypoints are to be displayed.
    - waypoints: List of waypoints (x, y) in normalized maze coordinates.
    - maze_dimensions: (width, height) of the maze in normalized units.
    - point_color: Color of the waypoint markers (default: red).
    - point_radius: Radius of each waypoint marker.
    """
    img_copy = img.copy()
    # Convert normalized waypoints back to pixel coordinates
    height, width = img.shape[:2]
    for x, y in waypoints:
        pixel_x = int((x / maze_dimensions[0]) * width)
        pixel_y = int((y / maze_dimensions[1]) * height)
        # Draw a filled circle at each waypoint
        cv2.circle(img_copy, (pixel_x, pixel_y), point_radius, point_color, -1)
    # displayImg is a small image-display helper defined elsewhere in the project
    displayImg([img_copy], ["Waypoints on Image"])
5. Map the Waypoints to Robot Coordinates
The maze waypoints are mapped into the robotic arm's coordinate system by linear interpolation between the calibrated robot coordinates of the maze's corners, allowing the myCobot Pro 630 to follow the calculated trajectory.
def map_coordinates_to_robot(waypoints, corners, maze_dimensions):
    # corners holds the robot coordinates of the maze's four corners, measured beforehand
    top_left, top_right, bottom_left, bottom_right = corners
    maze_width, maze_height = maze_dimensions

    def interpolate(value, src_min, src_max, dst_min, dst_max):
        return dst_min + ((value - src_min) / (src_max - src_min)) * (dst_max - dst_min)

    robot_trajectory = []
    for point in waypoints:
        x_maze, y_maze = point
        x_robot = interpolate(x_maze, 0, maze_width - 1, top_left[0], top_right[0])
        y_robot = interpolate(y_maze, 0, maze_height - 1, top_left[1], bottom_left[1])
        # Keep the end effector at a fixed height of 230 mm above the maze plane
        robot_trajectory.append((x_robot, y_robot, 230))
    return robot_trajectory
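For example, with a maze 16 units wide (so x_maze ranges from 0 to 15) and illustrative calibrated corner coordinates top_left[0] = 250 mm and top_right[0] = 550 mm, a waypoint at x_maze = 7.5 maps to 250 + (7.5 / 15) * 300 = 400 mm.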
6. Save the Trajectory to CSV
The calculated robot trajectory is saved to a CSV file for later import and execution.
import csv

def save_trajectory_to_csv(robot_trajectory, filename='robot_trajectory.csv'):
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        # One row per waypoint: x (mm), y (mm), z (mm)
        for point in robot_trajectory:
            writer.writerow(point)
    print(f"Trajectory saved to {filename}")
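Putting the steps together, a minimal end-to-end driver might look like the sketch below. The maze dimensions, corner coordinates, and waypoint limit are illustrative assumptions, not values from the original project, and must be replaced with values calibrated for your own setup.
# Minimal driver sketch; all numeric values are illustrative assumptions
maze_dimensions = (16, 16)   # normalized maze width and height
max_waypoints = 50           # cap on trajectory length

# Robot (x, y) coordinates in mm of the maze corners, measured beforehand:
# (top_left, top_right, bottom_left, bottom_right)
corners = ((250, -150), (550, -150), (250, 150), (550, 150))

capture_and_crop()                            # interactively captures and saves cropped.png
solution = solve_maze('cropped.png')          # returns the green-marked solution image
waypoints = extract_limited_waypoints(solution, maze_dimensions, max_waypoints)
display_waypoints_on_image(solution, waypoints, maze_dimensions)  # visual check
trajectory = map_coordinates_to_robot(waypoints, corners, maze_dimensions)
save_trajectory_to_csv(trajectory)            # consumed by the MATLAB script below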
7. Run the Trajectory in MATLAB
We use MATLAB to simulate the motion and verify that the computed arm poses are correct and feasible before driving the real robotic arm.
% Import robot model
robot = importrobot("Cobot\myCobot_Pro_600_2.urdf");
robot.DataFormat = 'row';
% show(robot);

% % Run Python script and get coordinates
% robot_coords = pyrunfile("Code.py", "Return_List");
% % Convert Python numpy array to MATLAB array
% robot_coords = double(robot_coords);

robot_coords = readmatrix('robot_trajectory.csv');
disp(robot_coords);

% Get number of waypoints
num_waypoints = size(robot_coords, 1);

% Initialize joint angle arrays (6 joints)
joint_angles = zeros(num_waypoints, 6);
digital_joint_angles = zeros(num_waypoints, 6);

% Set up the inverse kinematics solver
ik = inverseKinematics('RigidBodyTree', robot);
weights = [0.25, 0.25, 0.25, 1, 1, 1];
initialguess = robot.homeConfiguration;

% Process each waypoint
disp("Solving Inverse Kinematics for each waypoint:");
for i = 1:num_waypoints
    % Get current waypoint and convert from mm to m
    waypoint = robot_coords(i, :) / 1000;
    % Create the end-effector transformation matrix
    endEffectorPose = trvec2tform(waypoint);
    % Solve IK
    [configsol, solInfo] = ik('Link_6', endEffectorPose, weights, initialguess);
    if strcmp(solInfo.Status, 'success')
        % Convert to degrees and apply joint-frame offsets
        joint_angles_degrees = rad2deg(configsol);
        digital_joint_angles_deg = rad2deg(configsol);
        joint_angles_degrees(1) = joint_angles_degrees(1) + 180;
        joint_angles_degrees(2) = joint_angles_degrees(2) - 90;
        joint_angles_degrees(4) = joint_angles_degrees(4) - 90;
        joint_angles_degrees(5) = joint_angles_degrees(5) * (-1);
        % Store result
        joint_angles(i, :) = joint_angles_degrees;
        % Use this solution as the initial guess for the next waypoint
        initialguess = configsol;
    else
        warning('No solution found for waypoint %d', i);
        joint_angles(i, :) = nan(1, 6);
    end
end

% Save joint angles
csvwrite('joint_angles.csv', joint_angles);

% Display joint angles
disp('Joint angles for each waypoint (degrees):');
disp(joint_angles);

% Execute additional Python script to drive the real arm
% pyrunfile("TCP_Code.py");
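The commented-out pyrunfile("TCP_Code.py") line hands the computed joint angles to a Python script that drives the real arm over the network. The sketch below shows the general shape such a script might take, assuming the arm exposes a TCP command server; the host, port, and set_angles command string are assumptions and must be checked against Elephant Robotics' TCP/IP protocol documentation.
# Hypothetical sketch of TCP_Code.py: stream joint angles to the arm.
# The IP, port, and command format below are ASSUMPTIONS; consult the
# Elephant Robotics TCP/IP protocol documentation for the real values.
import csv
import socket
import time

ROBOT_IP = '192.168.1.159'   # assumed controller IP
ROBOT_PORT = 5001            # assumed TCP command port

with socket.create_connection((ROBOT_IP, ROBOT_PORT), timeout=5) as sock:
    with open('joint_angles.csv', newline='') as csvfile:
        for row in csv.reader(csvfile):
            angles = [float(a) for a in row]
            if any(a != a for a in angles):   # skip NaN rows (failed IK)
                continue
            # Assumed command format: set_angles(j1,...,j6,speed)
            cmd = 'set_angles({},{},{},{},{},{},500)\n'.format(*angles)
            sock.sendall(cmd.encode('ascii'))
            time.sleep(0.5)   # crude pacing between waypoints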
This case demonstrates how to achieve maze navigation with the myCobot Pro 630 robotic arm and digital twin technology. Further optimizations can be made for specific needs, such as adding real-time robot control and path execution. We're excited to see more users and makers using the myCobot series robots to create innovative projects and take part in our user case collection activity: https://www.elephantrobotics.com/en/call-for-user-cases-en/.