JeVoisBase  1.21
JeVois Smart Embedded Machine Vision Toolkit Base Modules
PyCoralSegment.py
import pyjevois
if pyjevois.pro: import libjevoispro as jevois
else: import libjevois as jevois
import cv2 as cv
import numpy as np
from PIL import Image
from pycoral.utils import edgetpu
from pycoral.adapters import common
from pycoral.adapters import segment
import time

12## Semantic segmentation using Coral Edge TPU
13#
14# More pre-trained models are available at https://coral.ai/models/
15#
16#
17# @author Laurent Itti
18#
19# @videomapping YUYV 320 264 30.0 YUYV 320 240 30.0 JeVois PyCoralSegment
20# @videomapping JVUI 0 0 30.0 CropScale=RGB24@512x288:YUYV 1920 1080 30.0 JeVois PyCoralSegment
21# @email itti@usc.edu
22# @address 880 W 1st St Suite 807, Los Angeles CA 90012, USA
23# @copyright Copyright (C) 2020 by Laurent Itti
24# @mainurl http://jevois.org
25# @supporturl http://jevois.org
26# @otherurl http://jevois.org
27# @license GPL v3
28# @distribution Unrestricted
29# @restrictions None
30# @ingroup modules
class PyCoralSegment:
    # ####################################################################################################
    ## Constructor
    def __init__(self):
        if jevois.getNumInstalledTPUs() == 0:
            jevois.LFATAL("A Google Coral EdgeTPU is required for this module (PCIe M.2 2230 A+E or USB)")

        self.rgb = True        # True if model expects RGB inputs, otherwise it expects BGR
        self.keepaspect = True # Keep aspect ratio using zero padding
        alpha = 128            # Transparency alpha values for processGUI, higher is less transparent
        tidx = 0               # Class index of transparent background

        # Select one of the models:
        self.model = 'UNet128'               # expects 128x128
        #self.model = 'MobileNetV2DeepLabV3' # expects 513x513

        # You should not have to edit anything beyond this point.
        if (self.model == 'MobileNetV2DeepLabV3'):
            modelname = 'deeplabv3_mnv2_dm05_pascal_quant_edgetpu.tflite'
        elif (self.model == 'UNet128'):
            modelname = 'keras_post_training_unet_mv2_128_quant_edgetpu.tflite'
            tidx = 1

        # Load network:
        sdir = pyjevois.share + '/coral/segmentation/'
        self.interpreter = edgetpu.make_interpreter(sdir + modelname)
        #self.interpreter = edgetpu.make_interpreter(*modelname.split('@'))
        self.interpreter.allocate_tensors()
        self.timer = jevois.Timer('Coral segmentation', 10, jevois.LOG_DEBUG)
        self.cmapRGB = self.create_pascal_label_colormap()
        self.cmapRGBA = self.create_pascal_label_colormapRGBA(alpha, tidx)

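    # Illustrative sketch (hypothetical, not part of the original module): after allocate_tensors(),
    # the input resolution expected by the selected model could be checked with the same pycoral
    # helper that process() uses, e.g.:
    #   w, h = common.input_size(self.interpreter)
    #   jevois.LINFO("Model %s expects %dx%d inputs" % (self.model, w, h))
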
    # ####################################################################################################
    def create_pascal_label_colormap(self):
        """Creates a label colormap used in PASCAL VOC segmentation benchmark.
        Returns:
          A Colormap for visualizing segmentation results.
        """
        colormap = np.zeros((256, 3), dtype=int)
        indices = np.arange(256, dtype=int)

        for shift in reversed(range(8)):
            for channel in range(3):
                colormap[:, channel] |= ((indices >> channel) & 1) << shift
            indices >>= 3

        return colormap.astype(np.uint8)

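    # Note on create_pascal_label_colormap(): the loop spreads the bits of each class index across the
    # high-order bits of the R, G and B channels, 3 bits at a time, so that consecutive PASCAL VOC class
    # indices map to clearly distinct colors: index 1 -> (128, 0, 0), index 2 -> (0, 128, 0),
    # index 3 -> (128, 128, 0), and so on.
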
    # ####################################################################################################
    def create_pascal_label_colormapRGBA(self, alpha, tidx):
        """Creates a label colormap used in PASCAL VOC segmentation benchmark.
        Returns:
          A Colormap for visualizing segmentation results.
        """
        colormap = np.zeros((256, 4), dtype=int)
        indices = np.arange(256, dtype=int)

        for shift in reversed(range(8)):
            for channel in range(3):
                colormap[:, channel] |= ((indices >> channel) & 1) << shift
            indices >>= 3

        colormap[:, 3] = alpha
        colormap[tidx, 3] = 0 # force fully transparent for entry tidx
        return colormap.astype(np.uint8)

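    # Note on create_pascal_label_colormapRGBA(): the RGB channels are built exactly as above, then the
    # alpha channel is set to the constructor's 'alpha' value for every class except 'tidx' (the
    # transparent background class), which gets alpha 0. Indexing this (256, 4) colormap with a (H, W)
    # array of class indices, as done in processGUI(), yields a (H, W, 4) RGBA overlay whose background
    # is fully transparent.
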
    # ####################################################################################################
    ## JeVois main processing function
    def process(self, inframe, outframe):
        frame = inframe.getCvRGB() if self.rgb else inframe.getCvBGR()
        self.timer.start()

        h = frame.shape[0]
        w = frame.shape[1]

        # Set the input:
        width, height = common.input_size(self.interpreter)
        img = Image.fromarray(frame)
        if self.keepaspect:
            resized_img, _ = common.set_resized_input(self.interpreter, img.size,
                                                      lambda size: img.resize(size, Image.LANCZOS))
        else:
            resized_img = img.resize((width, height), Image.LANCZOS)
            common.set_input(self.interpreter, resized_img)

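        # When keepaspect is True, set_resized_input() scales the image to fit the model input while
        # preserving aspect ratio and zero-pads the remainder of the tensor; only the top-left
        # resized_img.size region of the output is then meaningful, which is why the padding area is
        # cropped from 'result' below.
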
        # Run the model:
        start = time.perf_counter()
        self.interpreter.invoke()
        inference_time = time.perf_counter() - start

        # Draw segmentation results:
        result = segment.get_output(self.interpreter)
        if len(result.shape) == 3: result = np.argmax(result, axis=-1)

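        # Illustrative sketch (not part of the original module): 'result' is now a (height, width) array
        # of class indices, so one could for instance check which classes appear in this frame:
        #   present = np.unique(result)   # indices of the classes detected in the current image
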
        # If keep_aspect_ratio, we need to remove the padding area.
        new_width, new_height = resized_img.size
        result = result[:new_height, :new_width]
        mask_img = Image.fromarray(self.cmapRGB[result])

        # Concat resized input image and processed segmentation results.
        output_img = Image.new('RGB', (2 * img.width, img.height))
        output_img.paste(img, (0, 0))
        output_img.paste(mask_img.resize(img.size), (img.width, 0))

        # Back to opencv:
        outcv = np.array(output_img)

        # Put efficiency information.
        cv.putText(outcv, 'JeVois Coral Segmentation - ' + self.model, (3, 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv.LINE_AA)

        fps = self.timer.stop()
        label = fps + ', %dms' % (inference_time * 1000.0)
        cv.putText(outcv, label, (3, h-5), cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv.LINE_AA)

        # Send output frame to host:
        if self.rgb: outframe.sendCvRGB(outcv)
        else: outframe.sendCv(outcv)

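    # Note on process(): the output image is twice as wide as the camera image, with the raw camera
    # frame pasted on the left and the colorized segmentation mask (resized back to camera resolution)
    # on the right, plus two overlaid text lines showing the model name and the frame/inference speed.
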
    # ###################################################################################################
    ## Process function with GUI output
    def processGUI(self, inframe, helper):
        # Start a new display frame, gets its size and also whether mouse/keyboard are idle:
        idle, winw, winh = helper.startFrame()

        # Draw full-resolution input frame from camera:
        x, y, w, h = helper.drawInputFrame("c", inframe, False, False)

        # Get the next camera image at processing resolution (may block until it is captured):
        frame = inframe.getCvRGBp() if self.rgb else inframe.getCvBGRp()
        iw, ih = frame.shape[1], frame.shape[0]

        # Start measuring image processing time:
        self.timer.start()

        # Set the input:
        width, height = common.input_size(self.interpreter)
        img = Image.fromarray(frame)
        if self.keepaspect:
            resized_img, _ = common.set_resized_input(self.interpreter, img.size,
                                                      lambda size: img.resize(size, Image.LANCZOS))
        else:
            resized_img = img.resize((width, height), Image.LANCZOS)
            common.set_input(self.interpreter, resized_img)

        # Run the model:
        start = time.perf_counter()
        self.interpreter.invoke()
        inference_time = time.perf_counter() - start

        # Draw segmentation results:
        result = segment.get_output(self.interpreter)
        if len(result.shape) == 3: result = np.argmax(result, axis=-1)

        # If keep_aspect_ratio, we need to remove the padding area:
        new_width, new_height = resized_img.size
        result = result[:new_height, :new_width]
        mask = self.cmapRGBA[result]

        # Draw the mask on top of our image, OpenGL will do the alpha blending:
        helper.drawImage("m", mask, self.rgb, False, True)

        # Put efficiency information:
        helper.itext('JeVois-Pro Python Coral Segmentation - %s - %dms/inference' %
                     (self.model, inference_time * 1000.0))

        # Write frames/s info from our timer:
        fps = self.timer.stop()
        helper.iinfo(inframe, fps, winw, winh)

        # End of frame:
        helper.endFrame()