Tom Minnich
Published © GPL3+

Terrace Farming Rover

Create a rover purpose built to aid terrace farming, maximizing the yields in existing terrace farms.

Advanced · Full instructions provided · Over 7 days · 114

Things used in this project

Hardware components

NXP NAVQ+
×1
NXP Mobile Robotics Buggy 3 Kit
×1

Software apps and online services

TensorFlow

Story


Schematics

NAVQ+ to RT1010 board serial communication connection

Serial communication between the NAVQ+ board and the RT1010 evaluation board. The RT1010 evaluation board handles various I/O and processing tasks that could not be developed quickly enough on the NAVQ+ board to meet this project's deadline.
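
For reference, a minimal sketch of how the NAVQ+ side of this serial link might be exercised from Python with pyserial is shown below. The UART device path, baud rate, and newline-terminated command framing are assumptions for illustration and are not taken from the project files.

# Minimal sketch of the NAVQ+ side of the NAVQ+ <-> RT1010 serial link.
# Assumptions (not from the project): the UART device path, 115200 baud,
# and a simple newline-terminated ASCII command/response protocol.
import serial

PORT = "/dev/ttymxc2"   # hypothetical UART device node on the NAVQ+
BAUD = 115200           # assumed baud rate


def send_command(cmd, timeout=1.0):
    """Send one ASCII command to the RT1010 and return its reply line."""
    with serial.Serial(PORT, BAUD, timeout=timeout) as link:
        link.write((cmd + "\n").encode("ascii"))
        return link.readline().decode("ascii", errors="replace").strip()


if __name__ == "__main__":
    # Example: request a status report from the RT1010 (hypothetical command).
    print(send_command("STATUS?"))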

Code

Unpickle CIFAR-10 and Make TensorFlow Lite Model

Python
Uses a local copy of the CIFAR-10 data set: unpickles it and builds a CNN TensorFlow Lite model that can be copied to the NAVQ+ board. I will soon replace one category of the training and test data with pictures of leaves; a sketch of that substitution follows the listing.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        t_dict = pickle.load(fo, encoding='bytes')
        return t_dict



# load in data from a local copy of the data set
#file = r'/home/hovergames/Desktop/hovergames3/cifar-10-python/cifar-10-batches-py/data_batch_1'
file = r'e:/cifar-10-python/cifar-10-batches-py/data_batch_1'
data_batch_1 = unpickle(file)
print(data_batch_1[b'data'].shape)
print(data_batch_1.keys())

X_train_batch_1 = data_batch_1[b'data']
X_train_batch_1 = X_train_batch_1.reshape(len(X_train_batch_1), 3, 32, 32).transpose(0, 2, 3, 1)
X_train_batch_1 = X_train_batch_1 / 255.0

y_train_batch_1 = np.array(data_batch_1[b'labels'])

#image = data_batch_1[b'data'][0]
#image = image.reshape(3, 32, 32)
#image = image.transpose(1, 2, 0)
#plt.imshow(image)
#plt.show()

#data_batch2,3,4, & 5 are not in use now but I may use them sometime
#file = r'e:/cifar-10-python/cifar-10-batches-py/data_batch_2'
#data_batch_2 = unpickle(file)
#print(data_batch_2[b'data'].shape)
#print(data_batch_2.keys())

#file = r'e:/cifar-10-python/cifar-10-batches-py/data_batch_3'
#data_batch_3 = unpickle(file)
#print(data_batch_3[b'data'].shape)
#print(data_batch_3.keys())

#file = r'e:/cifar-10-python/cifar-10-batches-py/data_batch_4'
#data_batch_4 = unpickle(file)
#print(data_batch_4[b'data'].shape)
#print(data_batch_4.keys())

#file = r'e:/cifar-10-python/cifar-10-batches-py/data_batch_5'
#data_batch_5 = unpickle(file)
#print(data_batch_5[b'data'].shape)
#print(data_batch_5.keys())

file = r'e:/cifar-10-python/cifar-10-batches-py/test_batch'
test_batch = unpickle(file)
print(test_batch[b'data'].shape)
print(test_batch.keys())

X_test = test_batch[b'data']
X_test = X_test.reshape(len(X_test), 3, 32, 32).transpose(0, 2, 3, 1)
print(X_test.shape)

X_test = X_test / 255.0
y_test = np.array(test_batch[b'labels'])

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

model = tf.keras.models.Sequential()

# Adding the first CNN Layer
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation="relu", input_shape=[32, 32, 3]))

# Adding the second CNN Layer and max pool layer
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation="relu"))
model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))

# Adding the third CNN Layer
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu"))

#Adding the fourth CNN Layer and max pool layer
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu"))
model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))

#Adding the Flatten layer
model.add(tf.keras.layers.Flatten())

#Adding the first Dense layer
model.add(tf.keras.layers.Dense(units=128, activation='relu'))

#Adding the second Dense layer (output layer)
model.add(tf.keras.layers.Dense(units=10, activation='softmax'))

print(model.summary())

model.compile(loss="sparse_categorical_crossentropy",
              optimizer="Adam", metrics=["sparse_categorical_accuracy"])

model.fit(X_train_batch_1, y_train_batch_1, epochs=1)
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print("Test accuracy: {}".format(test_accuracy))
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('tom_cnn5_model.tflite', 'wb') as f:
    f.write(tflite_model)
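
The description above mentions replacing one CIFAR-10 category with pictures of leaves. A minimal sketch of that substitution, building on the arrays from the listing above, is shown below. The leaf-image folder, the choice of which class index to repurpose, and the use of Pillow for resizing are assumptions for illustration, not code from the project.

# Sketch: repurpose one CIFAR-10 class as a 'leaf' class (assumed layout).
# Assumptions: leaf photos sit in a local folder as JPEGs, Pillow is
# available, and class index 2 ('bird') is the one being replaced.
import glob
import numpy as np
from PIL import Image

LEAF_DIR = r'e:/leaf-images'   # hypothetical folder of leaf photos
LEAF_CLASS = 2                 # class index being repurposed as 'leaf'


def load_leaf_images(folder, size=(32, 32)):
    """Load leaf photos, resize to CIFAR-10 resolution, scale to [0, 1]."""
    images = []
    for path in glob.glob(folder + '/*.jpg'):
        img = Image.open(path).convert('RGB').resize(size)
        images.append(np.asarray(img, dtype=np.float32) / 255.0)
    return np.stack(images)


leaf_images = load_leaf_images(LEAF_DIR)
leaf_labels = np.full(len(leaf_images), LEAF_CLASS)

# Drop the original samples of the repurposed class and append the leaf data,
# so X_train_batch_1 / y_train_batch_1 from the listing above can be reused.
keep = y_train_batch_1 != LEAF_CLASS
X_train_batch_1 = np.concatenate([X_train_batch_1[keep], leaf_images])
y_train_batch_1 = np.concatenate([y_train_batch_1[keep], leaf_labels])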

Example Inference with NPU Accelerator

BatchFile
Invokes the example TensorFlow Lite model using NPU acceleration
cd /usr/bin/tensorflow-lite-2.6.0/examples
USE_GPU_INFERENCE=0 ./label_image --external_delegate_path=/lib/libvx_delegate.so -m mobilenet_v1_1.0_224_quant.tflite -i grace_hopper.bmp -l labels.txt
cd /home/user

label_image.py TensorFlow Lite example script

Python
Invokes the TensorFlow Lite interpreter
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""label_image for tflite."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import time

import numpy as np
from PIL import Image
import tflite_runtime.interpreter as tflite


def load_labels(filename):
  with open(filename, 'r') as f:
    return [line.strip() for line in f.readlines()]


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-i',
      '--image',
      default='grace_hopper.bmp',
      help='image to be classified')
  parser.add_argument(
      '-m',
      '--model_file',
      default='mobilenet_v1_1.0_224_quant.tflite',
      help='.tflite model to be executed')
  parser.add_argument(
      '-l',
      '--label_file',
      default='labels.txt',
      help='name of file containing labels')
  parser.add_argument(
      '--input_mean',
      default=127.5, type=float,
      help='input_mean')
  parser.add_argument(
      '--input_std',
      default=127.5, type=float,
      help='input standard deviation')
  parser.add_argument(
      '--num_threads', default=None, type=int, help='number of threads')
  parser.add_argument(
      '-e',
      '--ext_delegate',
      help='external_delegate_library path')
  parser.add_argument(
      '-o',
      '--ext_delegate_options',
      help='external delegate options, format: "option1: value1; option2: value2"')

  args = parser.parse_args()

  ext_delegate = None
  ext_delegate_options = {}

  # parse external delegate options
  if args.ext_delegate_options is not None:
    options = args.ext_delegate_options.split(';')
    for o in options:
      kv = o.split(':')
      if len(kv) == 2:
        ext_delegate_options[kv[0].strip()] = kv[1].strip()

  # load external delegate
  if args.ext_delegate is not None:
    print("Loading external delegate from {} with args: {}".format(args.ext_delegate, ext_delegate_options))
    ext_delegate = [ tflite.load_delegate(args.ext_delegate, ext_delegate_options) ]

  interpreter = tflite.Interpreter(
        model_path=args.model_file, experimental_delegates=ext_delegate, num_threads=args.num_threads)
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
  output_details = interpreter.get_output_details()

  # check the type of the input tensor
  floating_model = input_details[0]['dtype'] == np.float32

  # NxHxWxC, H:1, W:2
  height = input_details[0]['shape'][1]
  width = input_details[0]['shape'][2]
  img = Image.open(args.image).resize((width, height))

  # add N dim
  input_data = np.expand_dims(img, axis=0)

  if floating_model:
    input_data = (np.float32(input_data) - args.input_mean) / args.input_std

  interpreter.set_tensor(input_details[0]['index'], input_data)

  # ignore the 1st invoke
  startTime = time.time()
  interpreter.invoke()
  delta = time.time() - startTime
  print("Warm-up time:", '%.1f' % (delta * 1000), "ms\n")

  startTime = time.time()
  interpreter.invoke()
  delta = time.time() - startTime
  print("Inference time:", '%.1f' % (delta * 1000), "ms\n")

  output_data = interpreter.get_tensor(output_details[0]['index'])
  results = np.squeeze(output_data)

  top_k = results.argsort()[-5:][::-1]
  labels = load_labels(args.label_file)
  for i in top_k:
    if floating_model:
      print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
    else:
      print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))

CNN Make TensorFlow Lite Model

Python
Uses TensorFlow to create a CNN TensorFlow Lite model
import tensorflow as tf
from keras.datasets import cifar10

print(tf.__version__)
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print(X_train.shape)

X_train = X_train / 255.0
X_test = X_test / 255.0

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation="relu", input_shape=[32, 32, 3]))

model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation="relu"))
model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu"))

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu"))
model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))

model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=128, activation='relu'))

model.add(tf.keras.layers.Dense(units=10, activation='softmax'))

print(model.summary())

model.compile(loss="sparse_categorical_crossentropy",
              optimizer="Adam", metrics=["sparse_categorical_accuracy"])

model.fit(X_train, y_train, epochs=50)

test_loss, test_accuracy = model.evaluate(X_test, y_test)
print("Test accuracy: {}".format(test_accuracy))

converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('cnn_model.tflite', 'wb') as f:
    f.write(tflite_model)
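
The NPU example above runs a quantized MobileNet, and the NPU delegate generally expects integer-quantized models. A minimal sketch of full-integer post-training quantization for the CNN trained above is shown below; the representative-dataset size, the output file name, and the int8 input/output types are assumptions for illustration rather than settings taken from the project.

# Sketch: full-integer post-training quantization of the CNN trained above.
# Assumptions: 200 calibration images, int8 input/output, and the file name.
import numpy as np


def representative_dataset():
    # Feed a few hundred training images so the converter can calibrate
    # the activation ranges for integer quantization.
    for image in X_train[:200]:
        yield [np.expand_dims(image.astype(np.float32), axis=0)]


converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8

quant_model = converter.convert()
with open('cnn_model_int8.tflite', 'wb') as f:
    f.write(quant_model)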

Unpickle CIFAR-10 Data Set

Python
Unpacks the CIFAR-10 Data Set
import matplotlib.pyplot as plt


def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        t_dict = pickle.load(fo, encoding='bytes')
        return t_dict



# load in data from a local copy of the data set
file = r'/home/hovergames/Desktop/hovergames3/cifar-10-python/cifar-10-batches-py/data_batch_1'
data_batch_1 = unpickle(file)
print(type(data_batch_1))
print(len(data_batch_1))
print(data_batch_1[b'data'].shape)
print(data_batch_1.keys())

print(data_batch_1[b'labels'][0])
print(data_batch_1[b'batch_label'][0])

image = data_batch_1[b'data'][0]
image = image.reshape(3, 32, 32)

image = image.transpose(1, 2, 0)

plt.imshow(image)
plt.show()

data = data_batch_1[b'data']
data = data.reshape(len(data), 3, 32, 32).transpose(0, 2, 3, 1)
print(data.shape)
plt.imshow(data[0])

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

for i in range(16):
    print(class_names[data_batch_1[b'labels'][i]])
    plt.subplot(4, 4, i + 1)
    plt.imshow(data[i])
plt.show()

Example Inference without NPU Accelerator

BatchFile
Invokes the example TensorFlow Lite model on the NAVQ+ without NPU acceleration
cd /usr/bin/tensorflow-lite-2.6.0/examples
./label_image -m mobilenet_v1_1.0_224_quant.tflite -i grace_hopper.bmp -l labels.txt
cd /home/user

CNN Batch File

BatchFile
A shell script that executes an inference on the CNN TensorFlow Lite model
cd /usr/bin/tensorflow-lite-2.6.0/examples
python3 /home/user/tom_label_image3.py  --ext_delegate=/lib/libvx_delegate.so -m /home/user/cnn_model.tflite -i grace_hopper.bmp -l /home/user/cnn_label.txt
cd /home/user

CNN TensorFlow Lite Inference Code

Python
Executes an inference on the CNN TensorFlow Lite model with one image from the CIFAR-10 data set
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""label_image for tflite."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import time

import numpy as np
from PIL import Image
import tflite_runtime.interpreter as tflite

def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        t_dict = pickle.load(fo, encoding='bytes')
        return t_dict

def load_labels(filename):
  with open(filename, 'r') as f:
    return [line.strip() for line in f.readlines()]


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-i',
      '--image',
      default='grace_hopper.bmp',
      help='image to be classified')
  parser.add_argument(
      '-m',
      '--model_file',
      default='mobilenet_v1_1.0_224_quant.tflite',
      help='.tflite model to be executed')
  parser.add_argument(
      '-l',
      '--label_file',
      default='labels.txt',
      help='name of file containing labels')
  parser.add_argument(
      '--input_mean',
      default=127.5, type=float,
      help='input_mean')
  parser.add_argument(
      '--input_std',
      default=127.5, type=float,
      help='input standard deviation')
  parser.add_argument(
      '--num_threads', default=None, type=int, help='number of threads')
  parser.add_argument(
      '-e',
      '--ext_delegate',
      help='external_delegate_library path')
  parser.add_argument(
      '-o',
      '--ext_delegate_options',
      help='external delegate options, format: "option1: value1; option2: value2"')

  args = parser.parse_args()

  ext_delegate = None
  ext_delegate_options = {}

  # parse external delegate options
  if args.ext_delegate_options is not None:
    options = args.ext_delegate_options.split(';')
    for o in options:
      kv = o.split(':')
      if len(kv) == 2:
        ext_delegate_options[kv[0].strip()] = kv[1].strip()

  # load external delegate
  if args.ext_delegate is not None:
    print("Loading external delegate from {} with args: {}".format(args.ext_delegate, ext_delegate_options))
    ext_delegate = [ tflite.load_delegate(args.ext_delegate, ext_delegate_options) ]

  interpreter = tflite.Interpreter(
        model_path=args.model_file, experimental_delegates=ext_delegate, num_threads=args.num_threads)
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
  print(input_details)

  output_details = interpreter.get_output_details()
  print(output_details)

  # check the type of the input tensor
  floating_model = input_details[0]['dtype'] == np.float32

  # NxHxWxC, H:1, W:2
  height = input_details[0]['shape'][1]
  # width = input_details[0]['shape'][2]

  # This script uses unpickled CIFAR-10 data instead of opening an image file,
  # so the original label_image.py image-loading lines are skipped:
  # img = Image.open(args.image).resize((28, 28))
  # img = img.convert("L")
  # img is initialized from the test batch below instead.
  file2 = r'/home/user/test_batch' # test images from cifar-10 data set
  test_batch = unpickle(file2)
  print(type(test_batch))
  print(len(test_batch))
  print(test_batch[b'data'].shape)
  print(test_batch.keys())

  print(test_batch[b'labels'][0])
  print(test_batch[b'batch_label'][0])

  img = test_batch[b'data'][0]
  img = img.reshape(3, 32, 32)

  img = img.transpose(1, 2, 0)

  #img = np.reshape(img,28*28,order='F')

  img = np.array(img)

  #img = img.flatten()
  print(img)
  print("********************************")

  # add N dim
  input_data = np.expand_dims(img, axis=0)

  if floating_model:
    #input_data = (np.float32(input_data) - args.input_mean) / args.input_std
    input_data =  np.float32(input_data) / 255.0
    print("A FLOATING MODEL IS IN USE!!!")
    print(input_data)

  interpreter.set_tensor(input_details[0]['index'], input_data)

  # ignore the 1st invoke
  startTime = time.time()
  interpreter.invoke()
  delta = time.time() - startTime
  print("Warm-up time:", '%.1f' % (delta * 1000), "ms\n")

  startTime = time.time()
  interpreter.invoke()
  delta = time.time() - startTime
  print("Inference time:", '%.1f' % (delta * 1000), "ms\n")

  output_data = interpreter.get_tensor(output_details[0]['index'])
  results = np.squeeze(output_data)

  top_k = results.argsort()[-5:][::-1]
  labels = load_labels(args.label_file)
  for i in top_k:
    if floating_model:
      print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
    else:
      print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))

Credits

Tom Minnich

19 projects • 80 followers
Embedded software guy for a long time
