Siyang Wang
Created March 12, 2022

Garbage Classification

A garbage classification program designed for the Avnet Ultra96-V2


Things used in this project

Hardware components

Avnet Ultra96-V2
×1

Software apps and online services

AMD Vitis Unified Software Platform
TensorFlow Lite

Story


Code

process.py

Python
preprocess the dataset images (centre-crop and resize to 48x48)
import os
import cv2
import numpy as np

currentFolder = os.getcwd()
opening_folder = os.path.join(currentFolder, 'DATASET2')

# Centre-crop every image in DATASET2/<category>/ to a square, rescale it to
# 48x48 with nearest-neighbour sampling, and overwrite the original file.
for category in os.listdir(opening_folder):
    for imageCode in os.listdir(os.path.join(opening_folder, category)):
        imDir = os.path.join(opening_folder, category, imageCode)
        i = cv2.imread(imDir)
        ratio = min(i.shape[0], i.shape[1]) / 48
        processedImage = np.zeros((48, 48, 3))
        for m in range(48):
            targetM = max(0, min(i.shape[0] - 1, int(0.5 * i.shape[0] + (m - 23) * ratio)))
            for n in range(48):
                targetN = max(0, min(i.shape[1] - 1, int(0.5 * i.shape[1] + (n - 23) * ratio)))
                processedImage[m, n] = i[targetM, targetN]
        cv2.imwrite(imDir, processedImage)
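
The nested loops above perform a nearest-neighbour centre crop and rescale to 48x48, overwriting each image in place. For reference, the same idea can be written more compactly with OpenCV slicing and cv2.resize; this is only a sketch of an equivalent helper, not the exact transform the loops produce:

import cv2

def center_crop_resize(img, size=48):
    # Take the largest centred square, then scale it to size x size
    # with nearest-neighbour interpolation.
    h, w = img.shape[:2]
    side = min(h, w)
    top = (h - side) // 2
    left = (w - side) // 2
    square = img[top:top + side, left:left + side]
    return cv2.resize(square, (size, size), interpolation=cv2.INTER_NEAREST)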

train

Python
train the model
# -*- coding: UTF-8 -*-
import os
import cv2
import numpy as np
import random
import json
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.utils import to_categorical

# get the current working directory
currentFolder = os.getcwd()
imgSize = (48, 48, 3)
classes = 4

def oneHot(total, activated):
    result = np.zeros((total,))
    result[activated] = 1
    return result

def loadDataset(length = -1, offset = 0):
    opening_folder = os.path.join(currentFolder, 'DATASET2')
    with open(os.path.join(currentFolder, 'DATASET2_classes.json'), 'r') as f:
        categoryList = json.loads(f.read())
    imgList = []
    for category in os.listdir(opening_folder):
        for imageCode in os.listdir(os.path.join(opening_folder, category)):
            imgList.append([category, imageCode])
    imgCount = len(imgList)
    imgCount = min(imgCount, length) if length > 0 else imgCount
    result_X = np.zeros((imgCount, imgSize[0], imgSize[1], imgSize[2]))
    result_y = np.zeros((imgCount, classes))
    random.shuffle(imgList)
    currentIndex = 0
    for imgPath in imgList:
        i = cv2.imread(os.path.join(opening_folder, imgPath[0], imgPath[1]))
        result_X[currentIndex] = i
        result_y[currentIndex] = oneHot(classes, categoryList[int(imgPath[0])] - 1)
        currentIndex += 1
        if imgCount <= currentIndex: break
    result_X = result_X / 255
    print(result_y)
    return result_X, result_y

def crop(originalImage):
    ratio = min(originalImage.shape[0], originalImage.shape[1]) / 48
    processedImage = np.zeros((48, 48, originalImage.shape[2]))
    for m in range(48):
        targetM = max(0, min(originalImage.shape[0] - 1, int(0.5 * originalImage.shape[0] + (m - 23) * ratio)))
        for n in range(48):
            targetN = max(0, min(originalImage.shape[1] - 1, int(0.5 * originalImage.shape[1] + (n - 23) * ratio)))
            processedImage[m, n] = originalImage[targetM, targetN]
    return processedImage

def loadTestset():
    opening_folder = os.path.join(currentFolder, 'TESTSET')
    imgList = os.listdir(opening_folder)
    imgCount = len(imgList)
    result_X = np.zeros((imgCount, 48, 48, 3))
    result_y = []
    random.shuffle(imgList)
    for currentIndex in range(imgCount):
        imgPath = imgList[currentIndex]
        i = cv2.imread(os.path.join(opening_folder, imgPath))
        result_X[currentIndex] = crop(i)
        result_y.append(imgPath)
    return result_X, result_y, imgCount

# main program
def main():
    # build the model
    model = Sequential()
    
    model.add(Conv2D(filters = 8, kernel_size = (5, 5), padding = 'Same', activation = 'relu', input_shape = imgSize))
    model.add(MaxPool2D(pool_size = (2, 2)))
    model.add(Dropout(0.4))
    model.add(Conv2D(filters = 16, kernel_size = (3, 3), padding = 'Same', activation = 'relu'))
    model.add(MaxPool2D(pool_size = (2, 2), strides = (2, 2)))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(256, activation = 'relu'))   
    model.add(Dropout(0.4))
    model.add(Dense(classes, activation = 'softmax'))
    model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
    
    learning_rate_reduction = ReduceLROnPlateau(monitor = 'accuracy', patience = 3, verbose = 1, factor = 0.5, min_lr = 0.00001)
    
    # prepare the training and test data
    X_train, Y_train = loadDataset()
    print(X_train.shape)
    print(Y_train.shape)
    
    # train the model
    model.fit(X_train, Y_train, epochs = 200, batch_size = 32, callbacks=[learning_rate_reduction])
    
    # save the trained model to a file
    model.save(os.path.join(currentFolder, 'dustbin.h5'))
    
    # model = load_model(os.path.join(currentFolder, 'dustbin.h5'))
    
    # evaluate the model
    # X_test, Y_test = loadDataset(False)
    # loss, accuracy = model.evaluate(X_test, Y_test, batch_size = 32)
    
    a, b, count = loadTestset()
    print(a.shape)
    
    result = model.predict(a)
    # inspect the classification result for each test image
    for i in range(count):
        print("Test image:", b[i])
        print(result[i])
        print('Predicted class for this image:', np.argmax(result[i]))
 
if __name__ == "__main__":
    main()
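
Note that loadDataset() reads a DATASET2_classes.json file that is not reproduced in this write-up: the parsed JSON is indexed with the integer folder name, and the looked-up value minus one becomes the one-hot class index. A hypothetical example of such a file, shown only to illustrate the expected shape (the author's actual mapping may differ), could be generated like this:

import json

# Hypothetical: one entry per DATASET2 sub-folder (indexed by its integer name),
# each value a 1-based class ID. Judging from the deployment notebook, the four
# classes are hazardous, kitchen waste, recyclable and other waste.
category_map = [1, 2, 3, 4]

with open('DATASET2_classes.json', 'w') as f:
    json.dump(category_map, f)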

load_data

Python
prepare the calibration dataset for quantization (.h5 to .xmodel flow)
import os

# Silence TensorFlow messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
import json
import random
import cv2

batch_size = 16
currentFolder = os.getcwd()
imgSize = (48, 48, 3)
classes = 4

def oneHot(total, activated):
    result = np.zeros((total,))
    result[activated] = 1
    return result

def get_dataset(length = 500, offset = 0):
    opening_folder = os.path.join(currentFolder, 'DATASET2')
    with open(os.path.join(currentFolder, 'DATASET2_classes.json'), 'r') as f:
        categoryList = json.loads(f.read())
    imgList = []
    for category in os.listdir(opening_folder):
        for imageCode in os.listdir(os.path.join(opening_folder, category)):
            imgList.append([category, imageCode])
    imgCount = len(imgList)
    imgCount = min(imgCount, length) if length > 0 else imgCount
    result_X = np.zeros((imgCount, imgSize[0], imgSize[1], imgSize[2]))
    result_y = np.zeros((imgCount, classes))
    random.shuffle(imgList)
    currentIndex = 0
    for imgPath in imgList:
        i = cv2.imread(os.path.join(opening_folder, imgPath[0], imgPath[1]))
        result_X[currentIndex] = i
        result_y[currentIndex] = oneHot(classes, categoryList[int(imgPath[0])] - 1)
        currentIndex += 1
        if imgCount <= currentIndex: break
    # result_X = result_X / 255
    # print(result_y)
    test_dataset = tf.data.Dataset.from_tensor_slices((result_X, result_y)).batch(batch_size)
    return test_dataset

def crop(originalImage):
    ratio = min(originalImage.shape[0], originalImage.shape[1]) / 48
    processedImage = np.zeros((48, 48, originalImage.shape[2]))
    for m in range(48):
        targetM = max(0, min(originalImage.shape[0] - 1, int(0.5 * originalImage.shape[0] + (m - 23) * ratio)))
        for n in range(48):
            targetN = max(0, min(originalImage.shape[1] - 1, int(0.5 * originalImage.shape[1] + (n - 23) * ratio)))
            processedImage[m, n] = originalImage[targetM, targetN]
    return processedImage

# def get_dataset():
#     opening_folder = os.path.join(currentFolder, 'TESTSET')
#     imgList = os.listdir(opening_folder)
#     imgCount = len(imgList)
#     result_X = np.zeros((imgCount, 48, 48, 3))
#     result_y = []
#     random.shuffle(imgList)
#     for currentIndex in range(imgCount):
#         imgPath = imgList[currentIndex]
#         print(imgPath.split('.')[0])
        
#         i = cv2.imread(os.path.join(opening_folder, imgPath))
#         result_X[currentIndex] = crop(i)
#         result_y.append(imgPath)
#     result_y = keras.utils.to_categorical(result_y)
#     test_dataset = tf.data.Dataset.from_tensor_slices((result_X, result_y)).batch(batch_size)
#     return test_dataset

# Note: this call runs at import time, so importing load_data builds the calibration set once.
test_dataset = get_dataset()

quantize

Python
quantize the floating-point .h5 model with the Vitis AI quantizer
import os
 
# Silence TensorFlow messages
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'

import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.models as models
from tensorflow_model_optimization.quantization.keras import vitis_quantize
from load_data import get_dataset

MODEL_DIR = './models'
FLOAT_MODEL = 'float_model.h5'
QUANT_MODEL = 'quantized_model.h5'

# Load the floating point trained model
print('Load float model..')
path = os.path.join(MODEL_DIR, FLOAT_MODEL)
try:
    float_model = models.load_model(path)
except Exception:
    print('\nError: failed to load the float model!')
    raise

# float_model.summary()
# path = os.path.join(MODEL_DIR, QUANT_MODEL)
# float_model.save(path)

# get input dimensions of the floating-point model
height = float_model.input_shape[1]
width = float_model.input_shape[2]
channel = float_model.input_shape[3]

# get dataset
print("\nLoad dataset..")
test_dataset = get_dataset()

# Run quantization
print('\nRun quantization..')
quantizer = vitis_quantize.VitisQuantizer(float_model)
quantized_model = quantizer.quantize_model(calib_dataset=test_dataset)

# Save quantized model
path = os.path.join(MODEL_DIR, QUANT_MODEL)
quantized_model.save(path)
print('\nSaved quantized model as',path)
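
A small bookkeeping note: this script expects the floating-point network at ./models/float_model.h5, while the training script above saves it as dustbin.h5 in the working directory, so the trained model has to be copied or renamed into the models folder before running this step.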

eval_quantized

Python
evaluate the quantized .h5 model
import os
 
# Silence TensorFlow messages
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'

import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.models as models
from tensorflow_model_optimization.quantization.keras import vitis_quantize
from load_data import get_dataset

MODEL_DIR = './models'
QUANT_MODEL = 'quantized_model.h5'

# Load the quantized model
print('\nLoad quantized model..')
path = os.path.join(MODEL_DIR, QUANT_MODEL)
with vitis_quantize.quantize_scope():
    model = models.load_model(path)

model.summary()

# get the calibration dataset
print("\nLoad dataset..")
test_dataset = get_dataset()

# Compile the model
print('\nCompile model..')
model.compile(optimizer="adam", 
        loss="categorical_crossentropy",
        metrics=['accuracy']
        )

# Evaluate model with test data
print("\nEvaluate model on test Dataset")
loss, acc = model.evaluate(test_dataset)  # returns loss and metrics

print("loss: %.3f" % loss)
print("acc: %.3f" % acc)

path = os.path.join(MODEL_DIR, QUANT_MODEL)
model.save(path)
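
Keep in mind that get_dataset() in load_data draws its (default 500-image) batch from DATASET2, i.e. from the training data, so the accuracy printed here reflects the calibration set rather than a held-out test set.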

compile

SH
compile the quantized model into an .xmodel for the DPU
ARCH=./arch/arch.json
OUTDIR=./compiled_model/u96
NET_NAME=customcnn
MODEL=./models/quantized_model.h5

echo "-----------------------------------------"
echo "COMPILING MODEL FOR Ultra96.."
echo "-----------------------------------------"

compile() {
      vai_c_tensorflow2 \
            --model           $MODEL \
            --arch            $ARCH \
            --output_dir      $OUTDIR \
            --net_name        $NET_NAME
}


compile 2>&1 | tee compile.log


echo "-----------------------------------------"
echo "MODEL COMPILED"
echo "-----------------------------------------"

garbage_classification

Python
deploy the model on the Ultra96
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/javascript": [
       "\n",
       "try {\n",
       "require(['notebook/js/codecell'], function(codecell) {\n",
       "  codecell.CodeCell.options_default.highlight_modes[\n",
       "      'magic_text/x-csrc'] = {'reg':[/^%%microblaze/]};\n",
       "  Jupyter.notebook.events.one('kernel_ready.Kernel', function(){\n",
       "      Jupyter.notebook.get_cells().map(function(cell){\n",
       "          if (cell.cell_type == 'code'){ cell.auto_highlight(); } }) ;\n",
       "  });\n",
       "});\n",
       "} catch (e) {};\n"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/javascript": [
       "\n",
       "try {\n",
       "require(['notebook/js/codecell'], function(codecell) {\n",
       "  codecell.CodeCell.options_default.highlight_modes[\n",
       "      'magic_text/x-csrc'] = {'reg':[/^%%pybind11/]};\n",
       "  Jupyter.notebook.events.one('kernel_ready.Kernel', function(){\n",
       "      Jupyter.notebook.get_cells().map(function(cell){\n",
       "          if (cell.cell_type == 'code'){ cell.auto_highlight(); } }) ;\n",
       "  });\n",
       "});\n",
       "} catch (e) {};\n"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "#第一步:准备叠加层\n",
    "#The first step is to prepare the oveerlay.\n",
    "from pynq_dpu import DpuOverlay\n",
    "overlay = DpuOverlay(\"dpu.bit\")\n",
    "overlay.load_model(\"r50.xmodel\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(27, 48, 48, 3)\n",
      "27\n"
     ]
    }
   ],
   "source": [
    "#第二步:准备数据\n",
    "#The second step is to prepare our data to run.\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os\n",
    "import cv2\n",
    "import random\n",
    "\n",
    "currentFolder = '/home/xilinx'\n",
    "\n",
    "def crop(originalImage):\n",
    "    ratio = min(originalImage.shape[0], originalImage.shape[1]) / 48\n",
    "    processedImage = np.zeros((48, 48, originalImage.shape[2]))\n",
    "    for m in range(48):\n",
    "        targetM = max(0, min(originalImage.shape[0] - 1, int(0.5 * originalImage.shape[0] + (m - 23) * ratio)))\n",
    "        for n in range(48):\n",
    "            targetN = max(0, min(originalImage.shape[1] - 1, int(0.5 * originalImage.shape[1] + (n - 23) * ratio)))\n",
    "            processedImage[m, n] = originalImage[targetM, targetN]\n",
    "    return processedImage\n",
    "\n",
    "def loadDataset():\n",
    "    opening_folder = os.path.join(currentFolder, 'test_set')\n",
    "    imgList = os.listdir(opening_folder)\n",
    "    imgCount = len(imgList)\n",
    "    result_X = np.zeros((imgCount, 48, 48, 3))\n",
    "    result_y = []\n",
    "    random.shuffle(imgList)\n",
    "    for currentIndex in range(imgCount):\n",
    "        imgPath = imgList[currentIndex]\n",
    "        i = cv2.imread(os.path.join(opening_folder, imgPath))\n",
    "        result_X[currentIndex] = crop(i)\n",
    "        result_y.append(imgPath)\n",
    "    return result_X, result_y, imgCount\n",
    "\n",
    "def category(oneHot):\n",
    "    categoryList = ['有害垃圾', '厨余垃圾', '可回收垃圾', '其它垃圾']\n",
    "    print(oneHot)\n",
    "    return categoryList[np.argmax(oneHot)]\n",
    "\n",
    "test_data, test_label, num_pics = loadDataset()\n",
    "\n",
    "print(test_data.shape)\n",
    "print(len(test_label))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1, 48, 48, 3)\n",
      "(1, 4)\n",
      "4\n"
     ]
    }
   ],
   "source": [
    "#第三步:使用VART\n",
    "#The third one is using VART to create runner.\n",
    "dpu = overlay.runner\n",
    "\n",
    "inputTensors = dpu.get_input_tensors()\n",
    "outputTensors = dpu.get_output_tensors()\n",
    "\n",
    "shapeIn = tuple(inputTensors[0].dims)\n",
    "shapeOut = tuple(outputTensors[0].dims)\n",
    "outputSize = int(outputTensors[0].get_data_size() / shapeIn[0])\n",
    "\n",
    "print(shapeIn)\n",
    "print(shapeOut)\n",
    "print(outputSize)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[array([[-1248.,   -32.,  -736.,  -704.]], dtype=float32)]\n",
      "[[[-1.569995  -1.5395565 -1.5694376 -1.5693759]]]\n",
      "9.jpg 最有可能是 厨余垃圾\n",
      "[array([[-1408.,    96.,  -832., -1024.]], dtype=float32)]\n",
      "[[[-1.5700861  1.56038   -1.5695944 -1.5698198]]]\n",
      "13.jpg 最有可能是 厨余垃圾\n",
      "[array([[-1312.,   224., -1216., -1856.]], dtype=float32)]\n",
      "[[[-1.5700341  1.5663321 -1.569974  -1.5702575]]]\n",
      "22.jpg 最有可能是 厨余垃圾\n",
      "[array([[-1344.,   -96.,  -960.,  -672.]], dtype=float32)]\n",
      "[[[-1.5700523 -1.56038   -1.5697547 -1.5693083]]]\n",
      "12.jpg 最有可能是 厨余垃圾\n",
      "[array([[-448.,   96., -832., -928.]], dtype=float32)]\n",
      "[[[-1.5685642  1.56038   -1.5695944 -1.5697187]]]\n",
      "25.jpg 最有可能是 厨余垃圾\n",
      "[array([[  32., -800., -448., -512.]], dtype=float32)]\n",
      "[[[ 1.5395565 -1.5695463 -1.5685642 -1.5688432]]]\n",
      "18.jpg 最有可能是 有害垃圾\n",
      "[array([[-128., -960.,  -96., -416.]], dtype=float32)]\n",
      "[[[-1.562984  -1.5697547 -1.56038   -1.5683925]]]\n",
      "5.jpg 最有可能是 可回收垃圾\n",
      "[array([[-640., -640., -864., -864.]], dtype=float32)]\n",
      "[[[-1.5692338 -1.5692338 -1.569639  -1.569639 ]]]\n",
      "4.jpg 最有可能是 有害垃圾\n",
      "[array([[ -480., -1632.,  -256.,  -768.]], dtype=float32)]\n",
      "[[[-1.568713  -1.5701836 -1.5668901 -1.5694942]]]\n",
      "7.jpg 最有可能是 可回收垃圾\n",
      "[array([[    0., -1888.,  -768.,  -544.]], dtype=float32)]\n",
      "[[[ 0.        -1.5702667 -1.5694942 -1.568958 ]]]\n",
      "6.jpg 最有可能是 有害垃圾\n",
      "[array([[ -736., -2144.,  1024.,   288.]], dtype=float32)]\n",
      "[[[-1.5694376 -1.5703299  1.5698198  1.5673242]]]\n",
      "8.jpg 最有可能是 可回收垃圾\n",
      "[array([[-1344.,  -704.,  -736.,  -928.]], dtype=float32)]\n",
      "[[[-1.5700523 -1.5693759 -1.5694376 -1.5697187]]]\n",
      "14.jpg 最有可能是 厨余垃圾\n",
      "[array([[ -800., -1344.,  -928., -1024.]], dtype=float32)]\n",
      "[[[-1.5695463 -1.5700523 -1.5697187 -1.5698198]]]\n",
      "20.jpg 最有可能是 有害垃圾\n",
      "[array([[-352., -480.,  -64., -320.]], dtype=float32)]\n",
      "[[[-1.5679554 -1.568713  -1.5551726 -1.5676713]]]\n",
      "2.jpg 最有可能是 可回收垃圾\n",
      "[array([[ -992., -1920., -1152., -1248.]], dtype=float32)]\n",
      "[[[-1.5697882 -1.5702755 -1.5699283 -1.569995 ]]]\n",
      "3.jpg 最有可能是 有害垃圾\n",
      "[array([[  256., -1440.,  -512., -1088.]], dtype=float32)]\n",
      "[[[ 1.5668901 -1.5701019 -1.5688432 -1.5698773]]]\n",
      "19.jpg 最有可能是 有害垃圾\n",
      "[array([[ 256., -320., -704., -480.]], dtype=float32)]\n",
      "[[[ 1.5668901 -1.5676713 -1.5693759 -1.568713 ]]]\n",
      "17.jpg 最有可能是 有害垃圾\n",
      "[array([[   32., -1664.,    64.,  -448.]], dtype=float32)]\n",
      "[[[ 1.5395565 -1.5701953  1.5551726 -1.5685642]]]\n",
      "11.jpg 最有可能是 可回收垃圾\n",
      "[array([[-896., -416.,   32.,   32.]], dtype=float32)]\n",
      "[[[-1.5696802 -1.5683925  1.5395565  1.5395565]]]\n",
      "27.jpg 最有可能是 可回收垃圾\n",
      "[array([[  448., -1024.,    96.,  -288.]], dtype=float32)]\n",
      "[[[ 1.5685642 -1.5698198  1.56038   -1.5673242]]]\n",
      "10.jpg 最有可能是 有害垃圾\n",
      "[array([[-640.,    0., -832., -896.]], dtype=float32)]\n",
      "[[[-1.5692338  0.        -1.5695944 -1.5696802]]]\n",
      "24.jpg 最有可能是 厨余垃圾\n",
      "[array([[-608., -896.,  160.,  -96.]], dtype=float32)]\n",
      "[[[-1.5691516 -1.5696802  1.5645465 -1.56038  ]]]\n",
      "16.jpg 最有可能是 可回收垃圾\n",
      "[array([[-1376.,  -704.,  -384.,  -576.]], dtype=float32)]\n",
      "[[[-1.5700696 -1.5693759 -1.5681921 -1.5690602]]]\n",
      "1.jpg 最有可能是 可回收垃圾\n",
      "[array([[  288., -1216., -1184.,  -768.]], dtype=float32)]\n",
      "[[[ 1.5673242 -1.569974  -1.5699518 -1.5694942]]]\n",
      "21.jpg 最有可能是 有害垃圾\n",
      "[array([[-1376.,   160., -1152., -1472.]], dtype=float32)]\n",
      "[[[-1.5700696  1.5645465 -1.5699283 -1.570117 ]]]\n",
      "23.jpg 最有可能是 厨余垃圾\n",
      "[array([[ -704., -1376.,  -256.,  -352.]], dtype=float32)]\n",
      "[[[-1.5693759 -1.5700696 -1.5668901 -1.5679554]]]\n",
      "26.jpg 最有可能是 可回收垃圾\n",
      "[array([[-1184.,  -448.,    32.,  -256.]], dtype=float32)]\n",
      "[[[-1.5699518 -1.5685642  1.5395565 -1.5668901]]]\n",
      "15.jpg 最有可能是 可回收垃圾\n"
     ]
    }
   ],
   "source": [
    "#第四步:开跑\n",
    "#Then just run for result.\n",
    "output_data = np.empty(shapeOut, dtype=np.float32, order=\"C\")\n",
    "input_data = np.empty(shapeIn, dtype=np.float32, order=\"C\")\n",
    "softmax = np.empty(outputSize)\n",
    "\n",
    "for i in range(num_pics):\n",
    "    input_data[0] = test_data[i]\n",
    "    job_id = dpu.execute_async(input_data, output_data)\n",
    "    dpu.wait(job_id)\n",
    "    temp = [j.reshape(1, outputSize) for j in output_data]\n",
    "    print(temp)\n",
    "    print(test_label[i], '最有可能是', category(np.arctan(temp)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "#第五步:清除缓存\n",
    "#The final step is clear the cache.\n",
    "del overlay\n",
    "del dpu"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
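
A note on the last cell: the DPU returns raw output values (the arrays printed above), and np.arctan is applied only as a monotonic squashing before np.argmax, so it does not change which class wins and the printed numbers are not probabilities. If normalised scores are wanted, a standard softmax could be applied instead; a minimal sketch, not part of the original notebook:

import numpy as np

def softmax(logits):
    # Shift by the maximum for numerical stability, then normalise to sum to 1.
    e = np.exp(logits - np.max(logits))
    return e / e.sum()

# e.g. softmax(temp[0][0]) would give per-class scores for one image.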

Credits

Siyang Wang
