Sumit Kumar
Created February 4, 2020 © GPL3+

NVive - Nvidia Intelligent Virtual Environmentalism

Using the power of AI and Nvidia hardware to solve the problem of blind spots in vehicles, and an innovative approach to reduce noise pollution on the road.

Advanced · Full instructions provided · 18 hours · 135
NVive - Nvidia Intelligent Virtual Environmentalism

Things used in this project

Story

Read more

Schematics

Working

Code

NVIVE-blind_spotDetection

Python
Preventing accidents due to blind spots in vehicles
""" importing all the required dependencies; jetson packages are essential to use prebuilt models and numpy plays the whole game of AI"""

import jetson.inference 
import jetson.utils
from graphics import *
#from PIL import Image
import cv2
import numpy as np

# Pretrained SSD-MobileNet-v2 detector; detections under 50% confidence are dropped.
net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.gstCamera(480, 360, "0")  # using CSI
display = jetson.utils.glDisplay()
class_id = {1, 2, 3, 4, 6, 8, 18, 21}  # class id for detection to look for
vehicle = {3, 6, 8}                    # these are the class id for bus,truck and cars which will be shown in different colour spots to track their path
living = {1, 18, 21}                   # person, cow ,dog class id which will be shown in different colour spot
# Radar-style plot window (graphics.py) matching the camera resolution;
# autoflush=False so draws are batched until win.update() is called.
win = GraphWin('Graphic', 480, 360, autoflush=False)
win.setCoords(0,0,480,360)
win.setBackground("black")
# Mutable state shared with optic(): fpr = first-frame flag, p = ESC/quit flag,
# prvs = previous grayscale frame, hsv = reusable flow-visualization canvas.
fpr = None
p = None
prvs = None
hsv = None

def optic(frame1):
	"""Render dense optical flow for *frame1* in an OpenCV window.

	On the first call this only caches the grayscale frame and an HSV
	canvas; on later calls it computes Farneback optical flow between
	the previous and current frames, maps flow angle to hue and flow
	magnitude to brightness, and shows the result in the 'frame2'
	window. Sets the module-level flag ``p`` to 1 when ESC is pressed
	(the flag is not read in this file — presumably a quit signal for
	the caller; TODO confirm).
	"""
	global fpr, prvs, hsv, p
	if fpr is None:  # FIX: compare with `is None`, not `== None`
		# First frame: initialize state, nothing to diff against yet.
		prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
		hsv = np.zeros_like(frame1)
		hsv[..., 1] = 255  # full saturation so hue/value encode the flow
		fpr = 1
	else:
		fil = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
		flow = cv2.calcOpticalFlowFarneback(prvs, fil, None, 0.5, 3, 15, 3, 5, 1.2, 0)
		# Polar decomposition of the flow field: angle -> hue, magnitude -> value.
		mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
		hsv[..., 0] = ang * 180 / np.pi / 2
		hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
		rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
		cv2.imshow('frame2', rgb)
		#cv2.imwrite('frame2.png',rgb)   to save frame as picture
		k = cv2.waitKey(30) & 0xff
		if k == 27:  # ESC
			p = 1
		elif k == ord('s'):
			print("updated")
		prvs = fil
	



# Main loop: capture frames, run detection, plot each detected object as a
# colored dot on the graphics window, and feed frames to the optical-flow view.
while display.IsOpen():
	# Two RGBA captures: `img` is consumed (and annotated) by the detector,
	# `img1` stays clean for the optical-flow conversion below.
	# zeroCopy=1 -> frame stays in CUDA mapped memory, hence the syncs.
	img, width, height = camera.CaptureRGBA(zeroCopy=1)
	jetson.utils.cudaDeviceSynchronize ()
	img1, width1, height1 = camera.CaptureRGBA(zeroCopy=1)
	jetson.utils.cudaDeviceSynchronize ()
	detections = net.Detect(img, width, height)
	# Erase last frame's dots (iterate a copy: undraw mutates win.items).
	for item in win.items[:]:
		item.undraw()
	win.update()
	# Convert the clean CUDA frame to a BGR numpy image for OpenCV.
	array = jetson.utils.cudaToNumpy(img1, width1, height1, 4)
	dt = array.dtype  # NOTE(review): `dt` is unused
	sh = array.shape
	array2 = np.ones(sh, np.float32)
	arr = array + array2
	aimg = cv2.cvtColor(arr.astype (np.uint8), cv2.COLOR_RGBA2BGR)
	for detection in detections :
		identity = detection.ClassID
		if identity in class_id:
			# Tracked class: render the annotated frame and plot a dot at the
			# detection's center — blue for vehicles, green for living things,
			# red for anything else in class_id.
			x, y = detection.Center
			display.RenderOnce(img, width, height)
			display.SetTitle("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
			if identity in vehicle:
				head = Circle(Point(x,y), 10)
				head.setFill('blue')
				head.draw(win)
				win.update()
			elif identity in living:
				head = Circle(Point(x,y), 10)
				head.setFill('green')
				head.draw(win)
				win.update()
			else:
				head = Circle(Point(x,y), 10)
				head.setFill('red')
				head.draw(win)
				win.update()		
										
		else:
			# Untracked class: show the clean frame instead.
			display.RenderOnce(img1, width1, height1)
			display.SetTitle("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
			
	""" To store the graph results in .png format
        win.postscript(file="image.eps", colormode='color')
	imag = Image.open("image.eps")
	fig = imag.convert('RGBA')
	fig.save('image.png','png')
	img = cv2.imread('image.png')
	img = cv2.resize(img, (1280,720))"""
	
	# NOTE(review): optic() sets the global `p` on ESC, but this loop never
	# checks it — confirm whether the loop should break when p == 1.
	optic(aimg)
	
	

NVIVE-vehicleHornInnovation

Python
Using a new concept to reduce vehicular noise pollution on road
import RPi.GPIO as GPIO
import cv2
import imutils
import numpy as np
import pytesseract           # used to read text from image 
#from PIL import Image
import jetson.inference
import jetson.utils
#import math
import requests              # used for REST API

ext_horn = 12     # BOARD pin driving the external horn
horn_button = 18  # BOARD pin reading the driver's horn button

# Pretrained SSD-MobileNet-v2 detector; detections under 50% confidence are dropped.
net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
camera = jetson.utils.gstCamera(1280, 720, "0")  # using CSI
display = jetson.utils.glDisplay()
identity = {1, 2, 3, 4, 6, 8, 18, 21}   # class id for car,bus,motorcycle,person,dog,truck,bus,bicycle
vehicle = {3, 6, 8}
ids = []  # license plates read in the current frame

GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(ext_horn, GPIO.OUT)
GPIO.setup(horn_button, GPIO.IN)

# NOTE(review): credentials should not be hard-coded; load from env/config.
access_token = '4da7a7a16973cf8da612a3b47a658b1810b9b290'
command1 = 'D7 LOW'
command2 = 'D7 HIGH'
# Particle device id -> license plate of the vehicle it is installed in.
dictionary = {'4da7a7a16973cf8da612a3b47a658b1810b9b290' : 'HR26DA2330', 'e00fce681ecfedb1b1d5b74a' : 'KA03AB3289'} #device id associated with license number
# BUG FIX: the original built `address` from `example_device_id`, a name that
# was never defined at module level (it only existed as a loop variable in
# message()), so the script crashed with NameError on import. Default to a
# known device id from the mapping above — TODO confirm the intended target.
example_device_id = 'e00fce681ecfedb1b1d5b74a'
address = 'https://api.particle.io/v1/devices/{0}'.format(example_device_id)+'/'+'digitalwrite'
                             
def license_as_id(frame=None):
	"""Locate a license plate in a BGR frame and OCR its text.

	Finds the largest 4-sided contour (assumed to be the plate), crops
	it, and runs Tesseract on the crop. Shows the crop in a 'Cropped'
	debug window when a plate is found.

	frame -- BGR image to scan; defaults to the module-level `aimg`
	         (backward compatible with the original zero-arg call).
	Returns the OCR'd text, or "" when no 4-sided contour is found.
	"""
	if frame is None:
		frame = aimg  # original behavior: read the frame set by the main loop
	image = cv2.resize(frame, (620, 480))
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	gray = cv2.bilateralFilter(gray, 11, 17, 17)  # denoise while keeping edges
	edged = cv2.Canny(gray, 30, 200)
	cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]  # 10 largest contours

	# First contour that approximates to 4 vertices is taken as the plate.
	screenCnt = None
	text = ""
	for c in cnts:
		peri = cv2.arcLength(c, True)
		approx = cv2.approxPolyDP(c, 0.018 * peri, True)
		if len(approx) == 4:
			screenCnt = approx
			break

	if screenCnt is None:
		print("No contour detected")
	else:
		cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 3)
		# Mask everything outside the plate, then crop its bounding box.
		mask = np.zeros(gray.shape, np.uint8)
		cv2.drawContours(mask, [screenCnt], 0, 255, -1)
		(x, y) = np.where(mask == 255)
		(topx, topy) = (np.min(x), np.min(y))
		(bottomx, bottomy) = (np.max(x), np.max(y))
		Cropped = gray[topx:bottomx+1, topy:bottomy+1]
		# --psm 11: sparse text, no assumed layout.
		text = pytesseract.image_to_string(Cropped, config='--psm 11')
		print("Detected Number is:",text)
		cv2.imshow('Cropped',Cropped)  # debug view to check whether the plate is recognized
	return (text)
	
def message():
	"""Trigger the internal horn of every recognized vehicle.

	For each plate collected in `ids`, look up the Particle device id
	whose registered plate matches and POST a digitalwrite (`command2`,
	i.e. 'D7 HIGH') to that device via the Particle Cloud API.
	"""
	for plate in ids:
		for device_id, ref_plate in dictionary.items():
			if plate == ref_plate:
				# BUG FIX: the original posted to the module-level `address`,
				# which never pointed at the matched device. Build the
				# endpoint for the device whose plate actually matched.
				url = 'https://api.particle.io/v1/devices/{0}/digitalwrite'.format(device_id)
				data = {'access_token': access_token, 'args': command2}  # activate the internal horn of the targeted vehicle
				r = requests.post(url, data=data)
				print(r.text)

	
prev_value = None  # last sampled horn-button state, for edge detection
# Main loop: detect objects, OCR plates of nearby vehicles, and route horn
# presses either to the real external horn (people/animals around) or to
# targeted in-car alerts (only vehicles around).
while display.IsOpen():
	# Two RGBA captures: `img` is consumed by the detector, `img1` stays
	# clean for the OpenCV conversion. zeroCopy=1 -> CUDA mapped memory.
	img, width, height = camera.CaptureRGBA(zeroCopy=1)
	jetson.utils.cudaDeviceSynchronize ()
	img1, width1, height1 = camera.CaptureRGBA(zeroCopy=1)
	jetson.utils.cudaDeviceSynchronize ()
	detections = net.Detect(img, width, height)
	l = 0  # vehicles seen this frame
	p = 0  # tracked non-vehicles (people/animals) seen this frame
	for detection in detections :
		class_id = detection.ClassID
		if class_id in identity:
			display.RenderOnce(img, width, height)
			display.SetTitle("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
			if class_id in vehicle:
				l = l+1
				# Convert the clean CUDA frame to a BGR numpy image for OpenCV.
				array = jetson.utils.cudaToNumpy(img1, width1, height1, 4)
				array2 = np.ones(array.shape, np.float32)
				arr = array + array2
				aimg = cv2.cvtColor(arr.astype (np.uint8), cv2.COLOR_RGBA2BGR)
				device_id = license_as_id() # read the license number from the frame
				print (device_id)
				# BUG FIX: str.replace returns a new string; the original
				# discarded the result, so plates kept their spaces and
				# could never match the dictionary values.
				device_id = device_id.replace(" ", "")
				ids.append(device_id)
			else:
				p = p+1
		else:
			display.RenderOnce(img1, width1, height1)
			display.SetTitle("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))

	s = GPIO.input(horn_button)
	# BUG FIX: `prev_value` was reset to None every frame, so this edge check
	# fired on every iteration; it is now hoisted above the loop and updated.
	if s != prev_value:
		if p >= 1:
			# Someone living is around: sound the real external horn.
			GPIO.output(ext_horn, GPIO.HIGH)
		else:
			if l >= 1:
				message()
		# NOTE(review): the horn pin goes LOW immediately after HIGH with no
		# delay — presumably a pulse; confirm whether a hold time is needed.
		GPIO.output(ext_horn, GPIO.LOW)
	prev_value = s

	# BUG FIX: the original removed items from `ids` while iterating it,
	# which skips elements; clear the list atomically instead.
	ids.clear()  # start the next frame's detection fresh
	

Credits

Sumit Kumar

Sumit Kumar

32 projects • 94 followers
19 y/o. My daily routine involves dealing with electronics, code, distributed storage and cloud APIs.

Comments