jsol
Created July 31, 2024

Robotic Third Hand

3DoF robot system with object detection capability to identify and intercept moving objects

Robotic Third Hand

Things used in this project

Hand tools and fabrication machines

Soldering iron (generic)
Drill / Driver, Cordless

Story


Code

inference_caps_resnetv2.py

Python
# -*- coding: utf-8 -*-
"""inference_caps_resnetv2.ipynb

Automatically generated by Colab.

# Inference Learning - Part A

[tensorflow - transfer learning](https://www.tensorflow.org/tutorials/images/transfer_learning)
"""

import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf

import zipfile
from google.colab import output
from google.colab import drive
# import os
import cv2 as cv
# import numpy as np
from sklearn.model_selection import train_test_split
print(tf.__version__)

# GET DATASET

# DataSetUrl = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
# PathToZip = tf.keras.utils.get_file('cats_and_dogs.zip', origin = DataSetUrl, extract = True)
# DataSetPath = os.path.join(os.path.dirname(PathToZip), 'cats_and_dogs_filtered')
DataSetPath = "/content/drive/MyDrive/datasets_caps1/"
# TrainDir = os.path.join(DataSetPath, 'train')
# ValDir = os.path.join(DataSetPath, 'validation')

BatchSize = 32
ImgSize = (160, 160)

TrainDS, ValDS = tf.keras.utils.image_dataset_from_directory(DataSetPath,
                                                              shuffle = True,
                                                              seed = 1432,
                                                              validation_split = 0.2,
                                                              subset = "both",
                                                              batch_size = BatchSize,
                                                              image_size = ImgSize)


print("Files are in : {}".format(DataSetPath))

ClassNames = TrainDS.class_names
plt.figure(figsize = (10, 10))
for images, labels in TrainDS.take(1):
  for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    numpyimage = images[i].numpy().astype("uint8")
    print("Image ndim={}; shape={}; size={}; type={}".format(numpyimage.ndim, numpyimage.shape, numpyimage.size, numpyimage.dtype))

    plt.imshow(numpyimage)
    plt.title(ClassNames[labels[i]])
    plt.axis("off")

ValBatches = tf.data.experimental.cardinality(ValDS)
TestDS = ValDS.take(ValBatches // 5)
ValDS = ValDS.skip(ValBatches // 5)

# Prefetch batches so the input pipeline does not block on slow disk reads

# https://www.tensorflow.org/guide/data_performance


AUTOTUNE = tf.data.AUTOTUNE

TrainDS = TrainDS.prefetch(buffer_size = AUTOTUNE)
ValDS = ValDS.prefetch(buffer_size = AUTOTUNE)
TestDS = TestDS.prefetch(buffer_size = AUTOTUNE)

# Randomly flip and rotate pictures

DataAug = tf.keras.Sequential([
    tf.keras.layers.RandomFlip('horizontal'),
    tf.keras.layers.RandomRotation(0.2)
])

### Preview the data augmentation on a single sample image

for image, _ in TrainDS.take(1):
  plt.figure(figsize=(10, 10))
  first_image = image[0]
  for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    augmented_image = DataAug(tf.expand_dims(first_image, 0))
    plt.imshow(augmented_image[0] / 255)
    plt.axis('off')

# Images are 8-bit (0 - 255), but MobileNetV2 expects pixel values in the range [-1, 1]

PreprocessInput = tf.keras.applications.mobilenet_v2.preprocess_input
Rescale = tf.keras.layers.Rescaling(1./127.5, offset = -1) # equivalent manual rescaling layer (not used below; PreprocessInput is applied inside the model)

# Load MobileNetV2 pretrained on ImageNet without its last (top = classification) layer,
# so new layers can be added for inference / feature extraction

ImgShape = ImgSize + (3,)

BaseModel = tf.keras.applications.MobileNetV2(input_shape = ImgShape,
                                              include_top = False,
                                              weights = 'imagenet') # Pretrained on about 1.4 million photos and 1000 classes

# The feature extractor converts each 160x160x3 image into a 5x5x1280 block of features

ImageBatch, LabelBatch = next(iter(TrainDS))
FeatureBatch = BaseModel(ImageBatch)
print(FeatureBatch.shape)
print(LabelBatch)

# Freeze the MobileNetV2 convolutional base (the complete pretrained model); new layers are added on top
# NOTE : Keep tf.keras.layers.BatchNormalization layers in inference mode - do not train them
BaseModel.trainable = False

BaseModel.summary()

# Global average pooling converts the 5x5x1280 feature maps into a single 1280-element vector per image
GAPLayer = tf.keras.layers.GlobalAveragePooling2D()
FeatureBatchAvg = GAPLayer(FeatureBatch)
print(FeatureBatchAvg.shape)

# Convert the 1280-element vector into a single float prediction
PredLayer = tf.keras.layers.Dense(1, activation = 'sigmoid')
PredBatch = PredLayer(FeatureBatchAvg)
print(PredBatch.shape)

# Assemble the full model: augmentation -> preprocessing -> frozen base -> pooling -> dropout -> prediction head

Inputs = tf.keras.Input(shape = (160, 160, 3))
X = DataAug(Inputs)
X = PreprocessInput(X)
X = BaseModel(X, training=False) # Pretrained MobileNet Layer
X = GAPLayer(X)
X = tf.keras.layers.Dropout(0.2)(X)
Outputs = PredLayer(X)
Model = tf.keras.Model(Inputs, Outputs)

Model.summary()

print("Number of trainable variables: {}".format(len(Model.trainable_variables)))



# Compile the model before training it ...

# Save the untrained feature-extraction model for later comparison
BaseLearnRate = 0.0001

Model.compile(
    optimizer = tf.keras.optimizers.Adam(
      learning_rate = BaseLearnRate),
    loss = tf.keras.losses.BinaryCrossentropy(),
    metrics=[tf.keras.metrics.BinaryAccuracy(
        threshold=0.5,
        name = 'accuracy')]
    )
Model.summary()
FilePath = "/root/home/models/model_imagenet_mobilenetv2.h5"
Model.save(FilePath)

# Compile the model before training it ...

# The trained model is saved after fit() further below
BaseLearnRate = 0.0001

Model.compile(
    optimizer = tf.keras.optimizers.Adam(
      learning_rate = BaseLearnRate),
    loss = tf.keras.losses.BinaryCrossentropy(),
    metrics=[tf.keras.metrics.BinaryAccuracy(
        threshold=0.5,
        name = 'accuracy')]
    )
Model.summary()

# Initial accuracy of the model (poor - the new classification head is not trained yet)

Loss0, Accuracy0 = Model.evaluate(ValDS)
print("Initial loss : {:.2f}".format(Loss0))
print("Initial Accuracy : {:.2f}".format(Accuracy0))

# Training the model (about 10 min)

InitialEpochs = 10
history = Model.fit(TrainDS, epochs = InitialEpochs, validation_data = ValDS)

# Training evaluation

# Note: If you are wondering why the validation metrics are clearly better than the training metrics, the main factor is because layers like tf.keras.layers.BatchNormalization and tf.keras.layers.Dropout affect accuracy during training. They are turned off when calculating validation loss.

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()

FilePath = "/root/home/models/model_caps_imagenet_mobilenetv2.h5"
Model.save(FilePath)

LoadedModel = tf.keras.models.load_model(FilePath)
LoadedModel.summary()

# Get a number of images.
for image, _ in TrainDS.take(1):
  plt.figure(figsize=(10, 10))
  first_image = image[0]
  for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    augmented_image = DataAug(tf.expand_dims(first_image, 0))
    plt.imshow(augmented_image[0] / 255)
    plt.axis('off')

### Testing the Trained/Untrained model.
# NOTE : If you already have the Models below, do not retrain everything above...
# NOTE : The untrained model should result in values around 0.5...


OrigModelPath = "/root/home/models/model_imagenet_mobilenetv2.h5"
CapsModelPath = "/root/home/models/model_caps_imagenet_mobilenetv2.h5"
TestTrainedModel = True

if TestTrainedModel == True :
  CheckModel = tf.keras.models.load_model(CapsModelPath)

else :
  CheckModel = tf.keras.models.load_model(OrigModelPath)

BatchIdx = 0  # renamed from 'iter' to avoid shadowing the Python builtin

for images, labels in TrainDS.take(1):
  print(labels)
  image = images[0]
  print("Iteration={}; Image ndim={}; shape={}; dtype={}".format(BatchIdx, image.ndim, image.shape, image.dtype))
  BatchIdx = BatchIdx + 1
  plt.figure(figsize=(10, 10))
  plt.figure(figsize=(10, 10))

  for i in range(16):
    image = images[i]
    img = image[np.newaxis, :, :]
    Prediction = CheckModel.predict(img)

    ax = plt.subplot(4, 4, i+1)
    plt.imshow(image / 255)
    plt.axis('off')
    plt.title("Pred: {}".format(Prediction))
  plt.show()
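
As a quick usage note, the saved caps model can also classify a single image file outside the notebook. The lines below are a minimal sketch assuming the model file saved above; the image path and the two-entry ClassNames list are placeholders, and the raw 0-255 image can be fed in directly because preprocessing is part of the model graph.

import numpy as np
import tensorflow as tf

ModelPath = "/root/home/models/model_caps_imagenet_mobilenetv2.h5"  # saved above
ImagePath = "test_cap.jpg"                                          # placeholder image
ClassNames = ["class0", "class1"]                                   # TrainDS.class_names from training (placeholder here)

LoadedModel = tf.keras.models.load_model(ModelPath)

Img = tf.keras.utils.load_img(ImagePath, target_size = (160, 160))      # resize to the training input size
ImgBatch = np.expand_dims(tf.keras.utils.img_to_array(Img), axis = 0)   # (1, 160, 160, 3), values 0-255

Prob = LoadedModel.predict(ImgBatch)[0][0]                               # sigmoid output in [0, 1]
print("Prediction: {:.3f} -> {}".format(Prob, ClassNames[int(Prob >= 0.5)]))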

quantize_model.py

Python
import tempfile
import os

from PIL import Image

import tensorflow as tf
print("Tensorflow version: {}".format(tf.__version__))
import numpy as np
import datetime

from tensorflow import keras


from tensorflow_model_optimization.quantization.keras import vitis_quantize

# from keras.preprocessing.image import load_img
# from keras.preprocessing.image import img_to_array
# from keras.preprocessing.image import array_to_img

DataSetPath = "/workspace/mymodel/tiny-imagenet-200/test2/images/"
ImgSize = (160, 160)
BatchSize = 100

print("Number of files in path={} := {}".format(DataSetPath, len(os.listdir(DataSetPath))))


#ImgArr = np.empty(( 160, 160, 3), np.int8)
ImgArr = []

if False :  # True: build the calibration set with image_dataset_from_directory; False: load images manually with PIL

    TrainDS, ValDS = tf.keras.utils.image_dataset_from_directory(
        DataSetPath, 
        shuffle = True,
        seed = 1432,
        validation_split  = 0.2,
        subset='both',
        batch_size= BatchSize,
        image_size = ImgSize)

else :

    for path in os.listdir(DataSetPath):
        ImagePath = os.path.join(DataSetPath ,path)
        # Convert image to nparray: asarray
        Img = Image.open(ImagePath)
        Img = Img.resize((160, 160))
        print("Image path={}; format={}; size={}; mode={}".format(ImagePath, Img.format, Img.size, Img.mode))
        if Img.mode == "RGB" : 
            ImgNp = np.asarray(Img)
            # np.append(ImgArr, ImgNp, axis = 0)
            ImgArr.append(ImgNp)
            # print("Add image as np array with shape={}; Array Shape={}".format(ImgNp.shape, ImgArr.shape))
            print("Add image as np array with shape={};".format(ImgNp.shape))
        else:
            print("WARN: Could not append image with mode != RGB!")
        
        # im = np.array(Image.open(path))

ImgArr = np.array(ImgArr, dtype='float32')  # float32 keeps the full 0-255 range (int8 would overflow pixel values above 127)

Img.show()

# exit(0)

# CalibDS = TrainDS.take(1)
print("ImgArr[0] ndim={}; shape={}; size={}; len={}".format(ImgArr[0].ndim, ImgArr[0].shape, ImgArr[0].size, len(ImgArr)))

# exit(0)



kwargs = {}

model = tf.keras.models.load_model('mobilenetv2_float.h5')
quantizer = vitis_quantize.VitisQuantizer(model)


# model.save('./mobilenetv2_int8.h5', overwrite=True, **kwargs)
# exit(0)

# For dataset and complete model example see here : https://github.com/Xilinx/Vitis-AI/blob/v3.5/src/vai_quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/examples/quantization/keras/vitis/mnist_cnn_ptq.py



quantized_model = quantizer.quantize_model(calib_dataset=ImgArr, 
                                           calib_steps=100, 
                                           calib_batch_size=10,
                                           **kwargs)

quantized_model.save('./mobilenetv2_int8.h5', overwrite=True, **kwargs)
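
As a sanity check before deployment, it can help to compare the quantized model's outputs against the float model on a few calibration images. The snippet below is only a sketch: it assumes the quantized model returned by quantize_model() can be used like a regular Keras model via predict(), and it reuses the model, quantized_model, and ImgArr objects from the script above.

# Compare float vs. quantized predictions on a handful of calibration images (illustrative sketch)
SampleBatch = ImgArr[:16].astype('float32')

FloatPreds = model.predict(SampleBatch)
QuantPreds = quantized_model.predict(SampleBatch)

# Report the largest per-image deviation introduced by quantization
MaxDiff = np.max(np.abs(FloatPreds - QuantPreds))
print("Max |float - quantized| prediction difference over 16 images: {:.4f}".format(MaxDiff))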

velpos_object_detect.py

Python
#!/usr/bin/env python3
import sys
import numpy as np
import cv2

# Adapt this for object

hue = 85
FrameTimeMS = 100
draw_min_area = 30000
draw_max_area = 80000

# Adapt this for camera

FrameHalfMaxX = 1280/2
FrameHalfMaxY = 720/2

ObjectParamsOld = { "valid" : False, "x" : 0.0,  "y" : 0.0 }

def read_rgb_image(image_name, show= False):
    rgb_image = cv2.imread(image_name)
    if show: 
        cv2.imshow("RGB Image", rgb_image)
    return rgb_image

def filter_color(rgb_image, lower_bound_color, upper_bound_color, show = False):
    hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2HSV)
    if show:
        cv2.imshow("hsv image", hsv_image)

    mask_binimg = cv2.inRange(hsv_image, lower_bound_color, upper_bound_color)

    if show: 
        cv2.imshow("mask", mask_binimg)

    return mask_binimg

def get_contours(binary_image):
    contours, hierarchy = cv2.findContours(binary_image.copy(), 
            cv2.RETR_EXTERNAL, 
            cv2.CHAIN_APPROX_SIMPLE)
    return contours, hierarchy

def get_contour_center(contour):
    M = cv2.moments(contour)
    cx=-1
    cy=-1
    if (M['m00']!=0):
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
    return cx, cy

def draw_ball_contour(binary_image, rgb_image, contours, MinArea = 100, MaxArea = 10000,  show=False):

    ObjectParams = {"valid" : False, "x" : 0.0, "y": 0.0}  # keys match the valid case and calc_velpos()

    black_image = np.zeros([binary_image.shape[0], binary_image.shape[1], 3], 'uint8')
    ContourParamsList = []
    ContourParams = {}
    biggestArea = 0
    for c in contours:
        contour_area = cv2.contourArea(c)

        ContourParam = {'contour': c, 'area': contour_area}

        if contour_area > biggestArea : 
            ContourParamsList.insert(0, ContourParam)
            biggestArea = contour_area
        else:
            ContourParamsList.append(ContourParam)
        # if (contour_area > area):

    # ObjectParamsNew = { "x" : 0.0,  "y" : 0.0 }



    for CP in ContourParamsList[0:1]:

        area = CP["area"]
        c = CP["contour"]

        if area <= MinArea or area >= MaxArea :
            break

        perimeter = cv2.arcLength(c, True)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        cx, cy = get_contour_center(c)
        cv2.drawContours(rgb_image, [c], -1, (150, 250, 150), 1)
        # cv2.drawContours(black_image, [c], -1, (150, 250, 150), 1)

        cv2.circle(rgb_image, (cx, cy), (int)(radius), (0, 0, 255), 1)
        # cv2.circle(black_image, (cx, cy), (int)(radius), (0, 0, 255), 1)
        # cv2.circle(black_image, (cx, cy), 5, (150, 150, 255), -1)
        # print("Area: {};\n\t Contour-Pos:[{:0.0f},\t{:0.0f}]\n\tCircle-Pos:[{:0.0f},\t{:0.0f}]; Circle-radius:{:0.0f}".format(area, cx, cy, x, y, radius))
        CenteredPosX = FrameHalfMaxX - x
        CenteredPosY = FrameHalfMaxY - y

        ObjectParams = {"valid" : True, "x" : CenteredPosX, "y": CenteredPosY}

    if show:
        print("number of Contours: {}".format(len(contours)))
        cv2.imshow("Black Image Contours", black_image)
    cv2.imshow("RGB Image Contours", rgb_image)

    return ObjectParams




def get_balls(rgb_image, hue, show):
    lowerMult = 0.9
    hue_off = hue - 20
    if hue_off < 0 :
        hue_off = 0
    colorHSVLower = (hue_off, 100, 100)
    colorHSVUpper = (hue, 250, 250)
#    draw_min_area = 70000
#    draw_max_area = 250000
    # rgb_image  = read_rgb_image(image_name, show)
    mask_binimg = filter_color(rgb_image, colorHSVLower, colorHSVUpper, show)
    contours, hierarchy = get_contours(mask_binimg)
    return draw_ball_contour(mask_binimg, rgb_image, contours, draw_min_area, draw_max_area, show)


def calc_velpos(ObjectParams, FrameTimeMS):
    global ObjectParamsOld
    if ObjectParams["valid"] == False:
        ObjectParamsOld["valid"] = False
        return
    CenteredPosX = ObjectParams["x"]
    CenteredPosY = ObjectParams["y"]
    if ObjectParamsOld['valid'] == True:
        VelocityX = (CenteredPosX - ObjectParamsOld["x"])*1000/FrameTimeMS
        VelocityY = (CenteredPosY - ObjectParamsOld["y"])*1000/FrameTimeMS
    else :
        VelocityX = 0.0
        VelocityY = 0.0
    print("Object relative pos: ({:0.0f}, {:0.0f}); vel[pixel/sec]: ({:0.0f}, {:0.0f})".format(CenteredPosX, CenteredPosY, VelocityX, VelocityY))
    ObjectParamsOld = ObjectParams


def main(): 

    video_capture = cv2.VideoCapture(0)
    #  show = 1
    #  if int(sys.argv[2]) == 0:
    #      show = 0
    #  image_name = str(sys.argv[1])
    # rgb_image = read_rgb_image(image_name, show)
    #  get_balls(rgb_image, show)

    # blue cap ~= 110-120?
    # red cap ~= 10-20
    ## redish
    ## blueish
    # hue = 120
    ## greenish?
    # hue = 80
    while(True):
        ret, frame = video_capture.read()
        ObjectParams = get_balls(frame, hue, False)

        calc_velpos(ObjectParams, FrameTimeMS)
        # cv2.imshow("Frame", frame)
        
        # hue = hue + 3
        # if hue >= 250:
        #     hue = 0
        print("Hue: {}".format(hue))
        if cv2.waitKey(FrameTimeMS) & 0xFF == ord('q'):
            break

    cv2.waitKey(0)
    video_capture.release()
    cv2.destroyAllWindows()
if __name__ == "__main__": 
    main()
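
The hue window used by get_balls() (hue - 20 up to hue) usually needs tuning per object and lighting. One way to do that interactively, instead of editing the constant at the top of the file, is an OpenCV trackbar added near the start of main(); the lines below are a rough sketch (the callback name is just an example) and update the same global hue the capture loop already reads each frame.

# Interactive hue tuning sketch (illustrative) - drag the slider while the capture loop runs
def on_hue_change(value):
    global hue
    hue = value

cv2.namedWindow("RGB Image Contours")                                       # same window draw_ball_contour() draws into
cv2.createTrackbar("hue", "RGB Image Contours", hue, 179, on_hue_change)    # OpenCV HSV hue range is 0-179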

guictrl_robot_al_int32.py

Python
import tkinter as ttk  # plain tkinter aliased as ttk (not the themed tkinter.ttk widgets)
import time, threading

SystemActiveState = False

class LoggerType:
    def __init__(self, listbox, scroll):
        self.logtext= ""
        self.listbox = listbox
        self.id = 0
        self.scroll = scroll
        # self.stringvar =
    def log(self, text):
        self.logtext += text + "\n"
        # self.stringvar.set(self.logtext)
        self.listbox.insert(ttk.END, "{}:".format(self.id) + text + "\n")
        self.id += 1
        self.listbox.yview_moveto('1.0')

        

def Button1Hook(*args):
    global SystemActiveState
    SystemActiveState = not SystemActiveState
    SystemText = f"System active: {SystemActiveState}"
    print(SystemText)
    Label1Text.set(SystemText)
    if SystemActiveState == True:
        Label1.configure(bg = "green")
    else :
        Label1.configure(bg = "red")


root = ttk.Tk()
root.title("RTH GUI CTRL")
# root.geometry("600x400")
root.grid_columnconfigure(0, minsize = 200)
root.grid_rowconfigure(0, minsize = 200)

mainframe = ttk.Frame(root, bg = "blue")
LogFrame = ttk.Frame(root, bg= 'grey')

# 
Label1Text = ttk.StringVar()
Label1Text.set("System active: False")
Label1 = ttk.Label(mainframe, textvariable = Label1Text, font=('calibre', 10, 'bold'))
Label1.configure(bg = "red")

Button1 = ttk.Button(mainframe, text="De/Activate", command =Button1Hook)
# 
# Entry1Text = ttk.StringVar()
# Entry1Text.set("Entry1 text...")
# Entry1 = ttk.Entry(mainframe, width = 7, textvariable=Entry1Text, show="*")

Servo0Value = ttk.DoubleVar()
Servo0Label = ttk.Label(mainframe, text = "Servo0", font=('calibre', 10, 'bold') )
Servo0Scale = ttk.Scale(mainframe, variable= Servo0Value, from_=0, to=1000, tickinterval=200, orient=ttk.HORIZONTAL, length = 500)

Servo1Label = ttk.Label(mainframe, text = "Servo1", font=('calibre', 10, 'bold') )
Servo1Scale = ttk.Scale(mainframe, from_=0, to=1000, tickinterval=200, orient=ttk.HORIZONTAL, length = 500)

Rotation0Label = ttk.Label(mainframe, text = "Rotation0(base)", font=('calibre', 10, 'bold') )
Rotation0Scale = ttk.Scale(mainframe, from_=-1, to=1000, tickinterval=200, orient=ttk.HORIZONTAL, length = 500)

Rotation1Label = ttk.Label(mainframe, text = "Rotation1(lower link)", font=('calibre', 10, 'bold') )
Rotation1Scale = ttk.Scale(mainframe, from_=-1, to=1000, tickinterval=200, orient=ttk.HORIZONTAL, length = 500)

Rotation2Label = ttk.Label(mainframe, text = "Rotation2(upper link)", font=('calibre', 10, 'bold') )
Rotation2Scale = ttk.Scale(mainframe, from_=-1, to=1000, tickinterval=200, orient=ttk.HORIZONTAL, length = 500)

Linear0Label = ttk.Label(mainframe, text = "Linear0 (X)", font=('calibre', 10, 'bold') )
Linear0Scale = ttk.Scale(mainframe, from_=-1, to=1000, tickinterval=200, orient=ttk.HORIZONTAL, length = 500)

Linear1Label = ttk.Label(mainframe, text = "Linear1 (Z)", font=('calibre', 10, 'bold') )
Linear1Scale = ttk.Scale(mainframe, from_=-1, to=1000, tickinterval=200, orient=ttk.HORIZONTAL, length = 500)

LogText = ttk.Label(LogFrame, text = "LOGGER : ")
LoggerScroll = ttk.Scrollbar(LogFrame)
LogLabel = ttk.Listbox(LogFrame, yscrollcommand = LoggerScroll.set, width=100, height = 5)
Logger = LoggerType(LogLabel, LoggerScroll)
LoggerScroll.config(command = LogLabel.yview)
Logger.log("This is a log message...")
Logger.log("This is another log message...")

mainframe.grid(column = 0, row=0, sticky = "nsew")
LogFrame.grid(column = 0, row = 1, sticky = "nwse")



Label1.grid(row = 0, column=0, sticky=(ttk.W, ttk.E))
# Entry1.grid(row= 0, column = 1)
Button1.grid(row = 0, column = 2)

Servo1Label.grid(row = 2, column = 0)
Servo1Scale.grid(row = 2, column = 1)

Servo0Label.grid(row = 3, column = 0)
Servo0Scale.grid(row = 3, column = 1)

Rotation0Label.grid(row = 4, column = 0)
Rotation0Scale.grid(row = 4, column = 1)

Rotation1Label.grid(row = 5, column = 0)
Rotation1Scale.grid(row = 5, column = 1)

Rotation2Label.grid(row = 6, column = 0)
Rotation2Scale.grid(row = 6, column = 1)

Linear0Label.grid(row = 7, column = 0)
Linear0Scale.grid(row = 7, column = 1)

Linear1Label.grid(row = 8, column = 0)
Linear1Scale.grid(row = 8, column = 1)


LogText.grid(row=0, column=0)
LogLabel.grid(row=1, column=0, sticky="nwes")
LoggerScroll.grid(row = 1, column = 1)


import rclpy
from rclpy.node import Node
from std_msgs.msg import String, Int32, Int32MultiArray, MultiArrayDimension
# from geometry_msgs.msg import Point32
from robot_msgs.msg import RobotALInt32

class RobotAPublisher(Node):
    def __init__(self, topic):
        super().__init__('RobotNode')
        self.tick_cnt = 20
        self.publisher_ = self.create_publisher(RobotALInt32, topic, 10)
        self.timestamp = 0
        self.servo0 = 0
        self.servo1 = 0
        self.angular0 = 0
        self.angular1 = 0
        self.angular2 = 0
        self.linear0 = 0
        self.linear1 = 0
        self.linear2 = 0
        timer_periode = 0.1 # seconds
        # self.timer = self.create_timer(timer_periode, self.timer_callback)
        self.i = 0
    def send(self):
        # msg = Int32()
        msg = RobotALInt32()
        msg.timestamp = self.timestamp
        msg.servo0 = self.servo0
        msg.servo1 = self.servo1
        msg.angular0 = self.angular0
        msg.angular1 = self.angular1
        msg.angular2 = self.angular2
        msg.axis0 = self.linear0
        msg.axis1 = self.linear1
        msg.axis2 = self.linear2

        # Take look here: https://robotics.stackexchange.com/questions/91999/how-can-we-actually-use-float32multiarray-to-publish-2d-array-using-python

        # msg = Int32MultiArray()
        # msg.layout.data_offset = 0
        # msg.layout.dim.append(MultiArrayDimension())
        # msg.layout.dim[0].label = "test"
        # msg.layout.dim[0].size = 5
        # msg.layout.dim[0].stride = 5
        
        # msg.data = [self.timestamp, self.timestamp, self.timestamp, self.timestamp, self.timestamp]

        self.publisher_.publish(msg)

    # def timer_callback(self):
    #     # msg = String()
    #     msg = Int32()
    #     # msg.data = 'Testing my stuff %d' % self.i 
    #     # self.tick_cnt += 4
    #     # if self.tick_cnt >= 30 :
    #     #     self.tick_cnt = 1
    #     msg.data = self.timestamp
    #     self.publisher_.publish(msg)
    #     self.get_logger().info('Publishing tick_cnt: "%i"' % msg.data)
    #     self.i += 1


for i in range(1, 100):
    Logger.log("Test")

def update(timestamp, Publisher, Value): 
    global Servo0Scale
    global SystemActiveState
    text = "Timestammp: {}; Servo0 : {}".format(timestamp, Servo0Scale.get())

    StepperMult = 20
    Publisher.timestamp += 1
    Publisher.servo0 = 100 * int(Servo0Scale.get())
    Publisher.servo1 = 100 * int(Servo1Scale.get())
    Publisher.angular0 = StepperMult * int(Rotation0Scale.get())
    Publisher.angular1 = StepperMult * int(Rotation1Scale.get())
    Publisher.angular2 = StepperMult * int(Rotation2Scale.get())
    Publisher.linear0  = StepperMult * int(Linear0Scale.get())
    Publisher.linear1  = StepperMult * int(Linear1Scale.get())
    Publisher.linear2  = StepperMult * int(0)
    if SystemActiveState == True:
        print(text)
        Logger.log(text)
        Publisher.send()

    timestamp += 1
    root.after(100, update, timestamp, Publisher, Value)


def main(args=None):
    timestamp = 0
    rclpy.init()
    RobotAPub = RobotAPublisher('Tower2Robot_RobotALInt32')

    # for i in range(1, 3):
    #     RobotAPub.timestamp = timestamp
    #     timestamp += 1
    #     RobotAPub.send()
    #     time.sleep(1)


    root.after(100, update, timestamp, RobotAPub, 0)
    # root.after(100, testupdate)
    

    root.bind('<Return>', Button1Hook)
    root.mainloop()
    RobotAPub.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
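
The GUI publishes a custom robot_msgs/RobotALInt32 message whose definition is not included in this listing. Judging purely from the fields assigned in send(), the interface file presumably looks roughly like the sketch below; the int32 field types are an assumption based on the message name, not the actual project file.

# robot_msgs/msg/RobotALInt32.msg  (reconstructed sketch, field types assumed)
int32 timestamp
int32 servo0
int32 servo1
int32 angular0
int32 angular1
int32 angular2
int32 axis0
int32 axis1
int32 axis2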

Credits

jsol
