Commit 93ab0535 authored by raoul.dupuis

add modules source code

parent d2a83079
env 0 → 100644
# Rename this file to ".env"
# Replace the value of these variables with your own container registry
# ACR
# CONTAINER_REGISTRY_ADDRESS="<username>.azurecr.io"
# CONTAINER_REGISTRY_USERNAME="<username>"
# CONTAINER_REGISTRY_PASSWORD="KpJ1e****************"
# Docker Hub
CONTAINER_REGISTRY_ADDRESS="<Docker ID>/<registry name>"
CONTAINER_REGISTRY_ADDRESS_EDGE="https://index.docker.io/v1/"
CONTAINER_REGISTRY_USERNAME="<Docker ID>"
CONTAINER_REGISTRY_PASSWORD="3f123***************"
# To make python 2 and python 3 compatible code
from __future__ import absolute_import
# Returns rectangle boundaries in the CV2 format (topLeftX, topLeftY, bottomRightX, bottomRightY) given by a processing service
class AnnotationParser:
def getCV2RectanglesFromProcessingService1(self, response):
try:
listOfCV2Rectangles = []
for item in response["regions"]:
for decoration in item:
if "box" in decoration.lower():
rectList = item[decoration].split(",")
top = int(rectList[0])
left = int(rectList[1])
width = int(rectList[2])
height = int(rectList[3])
for decorationProperty in item[decoration]:
if "top" in decorationProperty.lower():
top = int(item[decoration][decorationProperty])
if "left" in decorationProperty.lower():
left = int(item[decoration]
[decorationProperty])
if "width" in decorationProperty.lower():
width = int(item[decoration]
[decorationProperty])
if "height" in decorationProperty.lower():
height = int(item[decoration]
[decorationProperty])
if top is not None and left is not None and width is not None and height is not None:
topLeftX = left
topLeftY = top
bottomRightX = left + width
bottomRightY = top + height
listOfCV2Rectangles.append(
[topLeftX, topLeftY, bottomRightX, bottomRightY])
return listOfCV2Rectangles
except:
# Ignoring exceptions for now so that video can be read and analyzed without post-processing in case of errors
pass
def getCV2RectanglesFromProcessingService2(self, response):
try:
listOfCV2Rectangles = []
for item in response:
for decoration in item:
if "rect" in decoration.lower():
for decorationProperty in item[decoration]:
if "top" in decorationProperty.lower():
top = int(item[decoration][decorationProperty])
if "left" in decorationProperty.lower():
left = int(item[decoration]
[decorationProperty])
if "width" in decorationProperty.lower():
width = int(item[decoration]
[decorationProperty])
if "height" in decorationProperty.lower():
height = int(item[decoration]
[decorationProperty])
if top is not None and left is not None and width is not None and height is not None:
topLeftX = left
topLeftY = top
bottomRightX = left + width
bottomRightY = top + height
listOfCV2Rectangles.append(
[topLeftX, topLeftY, bottomRightX, bottomRightY])
return listOfCV2Rectangles
except:
# Ignoring exceptions for now so that video can be read and analyzed without post-processing in case of errors
pass
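# Minimal usage sketch: a hypothetical payload illustrating the shape that
# getCV2RectanglesFromProcessingService2 expects - a list of items whose
# "rect"-named key carries top/left/width/height properties.
if __name__ == "__main__":
    sample_response = [
        {"rect": {"top": 10, "left": 20, "width": 100, "height": 50}}
    ]
    parser = AnnotationParser()
    # Expected output: [[20, 10, 120, 60]] as (topLeftX, topLeftY, bottomRightX, bottomRightY)
    print(parser.getCV2RectanglesFromProcessingService2(sample_response))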
#To make python 2 and python 3 compatible code
from __future__ import division
from __future__ import absolute_import
#Imports
import sys
if sys.version_info[0] < 3:  # e.g. Python version < 3
import cv2
else:
import cv2
from cv2 import cv2
# pylint: disable=E1101
# pylint: disable=E0401
# Disabling linting that is not supported by Pylint for C extensions such as OpenCV. See issue https://github.com/PyCQA/pylint/issues/1955
import numpy
import requests
import json
import time
import os
import VideoStream
from VideoStream import VideoStream
import AnnotationParser
from AnnotationParser import AnnotationParser
import ImageServer
from ImageServer import ImageServer
class CameraCapture(object):
def __IsInt(self,string):
try:
int(string)
return True
except ValueError:
return False
def __init__(
self,
videoPath,
imageProcessingEndpoint = "",
imageUploadingEndpoint = "",
imageProcessingParams = "",
showVideo = False,
verbose = False,
loopVideo = True,
convertToGray = False,
resizeWidth = 0,
resizeHeight = 0,
annotate = False,
sendToHubCallback = None):
self.videoPath = videoPath
if self.__IsInt(videoPath):
#case of a usb camera (usually mounted at /dev/video* where * is an int)
self.isWebcam = True
else:
#case of a video file
self.isWebcam = False
self.imageProcessingEndpoint = imageProcessingEndpoint
self.imageUploadingEndpoint = imageUploadingEndpoint
if imageProcessingParams == "":
self.imageProcessingParams = ""
else:
self.imageProcessingParams = json.loads(imageProcessingParams)
self.showVideo = showVideo
self.verbose = verbose
self.loopVideo = loopVideo
self.convertToGray = convertToGray
self.resizeWidth = resizeWidth
self.resizeHeight = resizeHeight
self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo and annotate
self.nbOfPreprocessingSteps = 0
self.autoRotate = False
self.sendToHubCallback = sendToHubCallback
self.vs = None
if self.convertToGray:
self.nbOfPreprocessingSteps +=1
if self.resizeWidth != 0 or self.resizeHeight != 0:
self.nbOfPreprocessingSteps +=1
if self.verbose:
print("Initialising the camera capture with the following parameters: ")
print(" - Video path: " + self.videoPath)
print(" - Image processing endpoint: " + self.imageProcessingEndpoint)
print(" - Image processing params: " + json.dumps(self.imageProcessingParams))
print(" - Show video: " + str(self.showVideo))
print(" - Loop video: " + str(self.loopVideo))
print(" - Convert to gray: " + str(self.convertToGray))
print(" - Resize width: " + str(self.resizeWidth))
print(" - Resize height: " + str(self.resizeHeight))
print(" - Annotate: " + str(self.annotate))
print(" - Send processing results to hub: " + str(self.sendToHubCallback is not None))
print()
self.displayFrame = None
if self.showVideo:
self.imageServer = ImageServer(5012, self)
self.imageServer.start()
def __annotate(self, frame, response):
AnnotationParserInstance = AnnotationParser()
#TODO: Make the choice of the service configurable
listOfRectanglesToDisplay = AnnotationParserInstance.getCV2RectanglesFromProcessingService1(response)
for rectangle in listOfRectanglesToDisplay:
cv2.rectangle(frame, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]), (0, 0, 255), 4)
return
def __sendFrameForProcessing(self, frame):
headers = {'Content-Type': 'application/octet-stream'}
try:
response = requests.post(self.imageProcessingEndpoint, headers = headers, params = self.imageProcessingParams, data = frame)
except Exception as e:
print('__sendFrameForProcessing Exception - ' + str(e))
return "[]"
if self.verbose:
try:
print("Response from external processing service: (" + str(response.status_code) + ") " + json.dumps(response.json()))
except Exception:
print("Response from external processing service (status code): " + str(response.status_code))
return json.dumps(response.json())
def __sendFrameForUploading(self, frame, uri):
headers = {'Content-Type': 'application/octet-stream'}
endpoint = self.imageUploadingEndpoint + uri
try:
response = requests.post(endpoint, headers = headers, params = self.imageProcessingParams, data = frame)
except Exception as e:
print('__sendFrameForUploading Exception - ' + str(e))
return "[]"
if self.verbose:
try:
print("Response from external Uploading service: (" + str(response.status_code) + ") " + json.dumps(response.json()))
except Exception:
print("Response from external Uploading service (status code): " + str(response.status_code))
return response #json.dumps(response.json())
def __displayTimeDifferenceInMs(self, endTime, startTime):
return str(int((endTime-startTime) * 1000)) + " ms"
def __enter__(self):
if self.isWebcam:
#The VideoStream class always gives us the latest frame from the webcam. It uses another thread to read the frames.
self.vs = VideoStream(int(self.videoPath)).start()
time.sleep(1.0)#needed to load at least one frame into the VideoStream class
#self.capture = cv2.VideoCapture(int(self.videoPath))
else:
#In the case of a video file, we want to analyze all the frames of the video thus are not using VideoStream class
self.capture = cv2.VideoCapture(self.videoPath)
return self
def get_display_frame(self):
return self.displayFrame
def start(self):
frameCounter = 0
perfForOneFrameInMs = None
cpt_img = 0
while True:
if self.showVideo or self.verbose:
startOverall = time.time()
if self.verbose:
startCapture = time.time()
frameCounter +=1
if self.isWebcam:
frame = self.vs.read()
else:
frame = self.capture.read()[1]
if frameCounter == 1:
if self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) < self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT):
self.autoRotate = True
if self.autoRotate:
frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE) # The counterclockwise choice is arbitrary; it could well be clockwise. Is there a way to auto-detect it?
if self.verbose:
if frameCounter == 1:
if not self.isWebcam:
print("Original frame size: " + str(int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))) + "x" + str(int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))))
print("Frame rate (FPS): " + str(int(self.capture.get(cv2.CAP_PROP_FPS))))
print("Frame number: " + str(frameCounter))
print("Time to capture (+ straighten up) a frame: " + self.__displayTimeDifferenceInMs(time.time(), startCapture))
startPreProcessing = time.time()
#Loop video
if not self.isWebcam:
if frameCounter == self.capture.get(cv2.CAP_PROP_FRAME_COUNT):
if self.loopVideo:
frameCounter = 0
self.capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
else:
break
#Pre-process locally
if self.nbOfPreprocessingSteps == 1 and self.convertToGray:
preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if self.nbOfPreprocessingSteps == 1 and (self.resizeWidth != 0 or self.resizeHeight != 0):
preprocessedFrame = cv2.resize(frame, (self.resizeWidth, self.resizeHeight))
if self.nbOfPreprocessingSteps > 1:
preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
preprocessedFrame = cv2.resize(preprocessedFrame, (self.resizeWidth,self.resizeHeight))
if self.verbose:
print("Time to pre-process a frame: " + self.__displayTimeDifferenceInMs(time.time(), startPreProcessing))
startEncodingForProcessing = time.time()
color_green = True
#Process externally
if self.imageProcessingEndpoint != "":
#Encode frame to send over HTTP
if self.nbOfPreprocessingSteps == 0:
encodedFrame = cv2.imencode(".jpg", frame)[1].tobytes()
else:
encodedFrame = cv2.imencode(".jpg", preprocessedFrame)[1].tobytes()
if self.verbose:
print("Time to encode a frame for processing: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
startProcessingExternally = time.time()
#Send over HTTP for processing
response = self.__sendFrameForProcessing(encodedFrame)
if self.verbose:
print("Time to process frame externally: " + self.__displayTimeDifferenceInMs(time.time(), startProcessingExternally))
startSendingToEdgeHub = time.time()
#forwarding outcome of external processing to the EdgeHub
if response != "[]" and self.sendToHubCallback is not None:
# self.sendToHubCallback(response)
print("abc {}".format(response))
#accuracy = float(json.loads(response)['accuracy'])
accuracy = float(json.loads(response)['predictions'][0]['probability'])
acc_threshold = int(os.environ['THRESHOLD'])/100
if accuracy < acc_threshold :
color_green = False
if self.verbose:
print("abcd {}".format(accuracy))
cpt_img +=1
if(cpt_img > 4):
cpt_img = 0
# send image to storage blob
response1 = self.__sendFrameForUploading(encodedFrame, "/data?ext=jpg")
response2 = self.__sendFrameForUploading(response, "/data?ext=txt")
if self.verbose and response1 != "[]" :
print("Upload file : {}".format(response1.text))
if self.verbose:
print("Time to message from processing service to edgeHub: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
startDisplaying = time.time()
#Display frames
if self.showVideo:
try:
if self.nbOfPreprocessingSteps == 0:
if self.verbose and (perfForOneFrameInMs is not None):
cv2.putText(frame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),(10, 35),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
if response != "[]":
if color_green:
cv2.putText(frame, "label " + str(json.loads(response)['predictions'][0]['tagName']),(10, 70),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
cv2.putText(frame, "proba " + str(round(float(json.loads(response)['predictions'][0]['probability'])*100,1)) + "%",(10, 105),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,255,0), 2)
else:
cv2.putText(frame, "label " + str(json.loads(response)['predictions'][0]['tagName']),(10, 70),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
cv2.putText(frame, "proba " + str(round(float(json.loads(response)['predictions'][0]['probability'])*100,1)) + "%",(10, 105),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
if self.annotate:
#TODO: fix bug with annotate function
self.__annotate(frame, response)
self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()
else:
if self.verbose and (perfForOneFrameInMs is not None):
cv2.putText(preprocessedFrame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),(10, 35),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
if self.annotate:
#TODO: fix bug with annotate function
self.__annotate(preprocessedFrame, response)
self.displayFrame = cv2.imencode('.jpg', preprocessedFrame)[1].tobytes()
except Exception as e:
print("Could not display the video to a web browser.")
print('Exception - ' + str(e))
if self.verbose:
if 'startDisplaying' in locals():
print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startDisplaying))
elif 'startSendingToEdgeHub' in locals():
print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
else:
print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
if not self.isWebcam:
waitTimeBetweenFrames = max(int(1000 / self.capture.get(cv2.CAP_PROP_FPS))-perfForOneFrameInMs, 1)
print("Wait time between frames :" + str(waitTimeBetweenFrames))
if cv2.waitKey(waitTimeBetweenFrames) & 0xFF == ord('q'):
break
if self.verbose:
perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
print("Total time for one frame: " + self.__displayTimeDifferenceInMs(time.time(), startOverall))
def __exit__(self, exception_type, exception_value, traceback):
if not self.isWebcam:
self.capture.release()
if self.showVideo:
self.imageServer.close()
cv2.destroyAllWindows()
import os
import asyncio
from azure.iot.device.aio import IoTHubDeviceClient
from azure.core.exceptions import AzureError
from azure.storage.blob import BlobClient
import sys
#CONNECTION_STRING = "[Device Connection String]"
#PATH_TO_FILE = r"[Full path to local file]"
CONNECTION_STRING = "HostName=Mon-hub-IoT.azure-devices.net;DeviceId=raspberry1;SharedAccessKey=Y8xVIGo6dDQZoMFBwImvzhvX9r2jVIUp/jyShRGgOqA="
PATH_TO_FILE = r"./templates/index.html"
#/home/harrond/Documents/azure/MNIST/modules/CameraCapture/app/templates/index.html
class FileUpload(object):
def __init__(self):
print ( "IoT Hub file upload init")
conn_str = CONNECTION_STRING
device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)
# Connect the client
#await device_client.connect()
async def store_blob(blob_info, file_name):
try:
sas_url = "https://{}/{}/{}{}".format(
blob_info["hostName"],
blob_info["containerName"],
blob_info["blobName"],
blob_info["sasToken"]
)
print("\nUploading file: {} to Azure Storage as blob: {} in container {}\n".format(file_name, blob_info["blobName"], blob_info["containerName"]))
# Upload the specified file
with BlobClient.from_blob_url(sas_url) as blob_client:
with open(file_name, "rb") as f:
result = blob_client.upload_blob(f, overwrite=True)
return (True, result)
except FileNotFoundError as ex:
# catch file not found and add an HTTP status code to return in notification to IoT Hub
ex.status_code = 404
return (False, ex)
except AzureError as ex:
# catch Azure errors that might result from the upload operation
return (False, ex)
async def upload(path_to_file):
try:
file_name = path_to_file
blob_name = os.path.basename(file_name)
# Get the storage info for the blob
storage_info = await device_client.get_storage_info_for_blob(blob_name)
# Upload to blob
success, result = await store_blob(storage_info, file_name)
if success == True:
print("Upload succeeded. Result is: \n")
print(result)
print()
await device_client.notify_blob_upload_status(
storage_info["correlationId"], True, 200, "OK: {}".format(file_name)
)
else :
# If the upload was not successful, the result is the exception object
print("Upload failed. Exception is: \n")
print(result)
print()
await device_client.notify_blob_upload_status(
storage_info["correlationId"], False, result.status_code, str(result)
)
except Exception as ex:
print("\nException:")
print(ex)
finally:
# Finally, disconnect the client
await device_client.disconnect()
async def store_blob(blob_info, file_name):
try:
sas_url = "https://{}/{}/{}{}".format(
blob_info["hostName"],
blob_info["containerName"],
blob_info["blobName"],
blob_info["sasToken"]
)
print("\nUploading file: {} to Azure Storage as blob: {} in container {}\n".format(file_name, blob_info["blobName"], blob_info["containerName"]))
# Upload the specified file
with BlobClient.from_blob_url(sas_url) as blob_client:
with open(file_name, "rb") as f:
result = blob_client.upload_blob(f, overwrite=True)
return (True, result)
except FileNotFoundError as ex:
# catch file not found and add an HTTP status code to return in notification to IoT Hub
ex.status_code = 404
return (False, ex)
except AzureError as ex:
# catch Azure errors that might result from the upload operation
return (False, ex)
async def main():
try:
print ( "IoT Hub file upload sample, press Ctrl-C to exit" )
conn_str = CONNECTION_STRING
file_name = PATH_TO_FILE
blob_name = os.path.basename(file_name)
device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)
# Connect the client
await device_client.connect()
# Get the storage info for the blob
storage_info = await device_client.get_storage_info_for_blob(blob_name)
# Upload to blob
success, result = await store_blob(storage_info, file_name)
if success == True:
print("Upload succeeded. Result is: \n")
print(result)
print()
await device_client.notify_blob_upload_status(
storage_info["correlationId"], True, 200, "OK: {}".format(file_name)
)
else :
# If the upload was not successful, the result is the exception object
print("Upload failed. Exception is: \n")
print(result)
print()
await device_client.notify_blob_upload_status(
storage_info["correlationId"], False, result.status_code, str(result)
)
except Exception as ex:
print("\nException:")
print(ex)
except KeyboardInterrupt:
print ( "\nIoTHubDeviceClient sample stopped" )
finally:
# Finally, disconnect the client
await device_client.disconnect()
if __name__ == "__main__":
#asyncio.run(main())
print("Python version")
print (sys.version)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
#blob = BlobClient.from_connection_string(conn_str="DefaultEndpointsProtocol=https;AccountName=mnisthepia;AccountKey=tIkw3moqsSMQQ3t4ZMMjkAUO1gi088jPNvmR08vtIzmEVWdgcWmXeDa+uoSJOc5B/f/oWgZWWKlSTqP9294LvQ==;EndpointSuffix=core.windows.net", container_name="imagesfail", blob_name="my_blob")
#with open("./templates/index.html", "rb") as data:
# blob.upload_blob(data)
#print("Python version")
#print (sys.version)
#print("Version info.")
#print (sys.version_info)
# Based on work from https://github.com/Bronkoknorb/PyImageStream
#import trollius as asyncio
import asyncio
import tornado.ioloop
import tornado.web
import tornado.websocket
import threading
import base64
import os
class ImageStreamHandler(tornado.websocket.WebSocketHandler):
def initialize(self, camera):
self.clients = []
self.camera = camera
def check_origin(self, origin):
return True
def open(self):
self.clients.append(self)
print("Image Server Connection::opened")
def on_message(self, msg):
if msg == 'next':
frame = self.camera.get_display_frame()
if frame is not None:
encoded = base64.b64encode(frame)
self.write_message(encoded, binary=False)
def on_close(self):
self.clients.remove(self)
print("Image Server Connection::closed")
class ImageServer(threading.Thread):
def __init__(self, port, cameraObj):
threading.Thread.__init__(self)
self.daemon = True
self.port = port
self.camera = cameraObj
def run(self):
try:
asyncio.set_event_loop(asyncio.new_event_loop())
indexPath = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'templates')
app = tornado.web.Application([
(r"/stream", ImageStreamHandler, {'camera': self.camera}),
(r"/(.*)", tornado.web.StaticFileHandler,
{'path': indexPath, 'default_filename': 'index.html'})
])
app.listen(self.port)
print('ImageServer::Started.')
tornado.ioloop.IOLoop.current().start()
except Exception as e:
print('ImageServer::exited run loop. Exception - ' + str(e))
def close(self):
print('ImageServer::Closed.')
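# Minimal usage sketch with a hypothetical stand-in for CameraCapture: the
# server only needs an object exposing get_display_frame(), returning JPEG
# bytes (or None), which is exactly what CameraCapture provides.
if __name__ == "__main__":
    import time

    class DummyCamera(object):
        def get_display_frame(self):
            # A real camera object returns cv2.imencode('.jpg', frame)[1].tobytes()
            return None

    server = ImageServer(5012, DummyCamera())
    server.start()  # serves ./templates/index.html and the /stream websocket
    while True:
        time.sleep(1)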
# To make python 2 and python 3 compatible code
from __future__ import absolute_import
from threading import Thread
import sys
if sys.version_info[0] < 3:  # e.g. Python version < 3
import cv2
else:
import cv2
from cv2 import cv2
# pylint: disable=E1101
# pylint: disable=E0401
# Disabling linting that is not supported by Pylint for C extensions such as OpenCV. See issue https://github.com/PyCQA/pylint/issues/1955
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue
# This class reads the video frames in a separate thread and always keeps only the latest frame in its queue, to be grabbed by another thread
class VideoStream(object):
def __init__(self, path, queueSize=3):
self.stream = cv2.VideoCapture(path)
self.stopped = False
self.Q = Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
try:
while True:
if self.stopped:
return
if not self.Q.full():
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
self.Q.put(frame)
# Clean the queue to keep only the latest frame
while self.Q.qsize() > 1:
self.Q.get()
except Exception as e:
print("got error: "+str(e))
def read(self):
return self.Q.get()
def more(self):
return self.Q.qsize() > 0
def stop(self):
self.stopped = True
def __exit__(self, exception_type, exception_value, traceback):
self.stream.release()
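# Minimal usage sketch, assuming a webcam at device index 0: grab the most
# recent frame the background thread has queued.
if __name__ == "__main__":
    import time
    vs = VideoStream(0).start()
    time.sleep(1.0)  # give the reader thread time to queue a first frame
    frame = vs.read()
    print("Got a frame of shape: {}".format(frame.shape))
    vs.stop()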
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import os
import random
import sys
import time
#import iothub_client
# pylint: disable=E0611
# Disabling linting that is not supported by Pylint for C extensions such as iothub_client. See issue https://github.com/PyCQA/pylint/issues/1955
#from iothub_client import (IoTHubModuleClient, IoTHubClientError, IoTHubError,
# IoTHubMessage, IoTHubMessageDispositionResult,
# IoTHubTransportProvider)
from azure.iot.device import IoTHubModuleClient
import CameraCapture
from CameraCapture import CameraCapture
# global counters
SEND_CALLBACKS = 0
def send_to_Hub_callback(strMessage):
#message = IoTHubMessage(bytearray(strMessage, 'utf8'))
#hubManager.send_event_to_output("output1", message, 0)
pass
# Callback received when the message that we're forwarding is processed.
def send_confirmation_callback(message, result, user_context):
global SEND_CALLBACKS
SEND_CALLBACKS += 1
class HubManager(object):
def __init__(
self,
messageTimeout,
protocol,
verbose):
'''
Communicate with the Edge Hub
:param int messageTimeout: the maximum time in milliseconds until a message times out. The timeout period starts at IoTHubClient.send_event_async. By default, messages do not expire.
:param IoTHubTransportProvider protocol: Choose HTTP, AMQP or MQTT as transport protocol. Currently only MQTT is supported.
:param bool verbose: set to true to get detailed logs on messages
'''
self.messageTimeout = messageTimeout
self.client_protocol = protocol
self.client = IoTHubModuleClient()
self.client.create_from_environment(protocol)
self.client.set_option("messageTimeout", self.messageTimeout)
self.client.set_option("product_info", "edge-camera-capture")
if verbose:
self.client.set_option("logtrace", 1) # enables MQTT logging
def send_event_to_output(self, outputQueueName, event, send_context):
if VERBOSE:
print("send message")
self.client.send_event_async(
outputQueueName, event, send_confirmation_callback, send_context)
def main(
videoPath,
imageProcessingEndpoint="",
imageUploadingEndpoint="",
imageProcessingParams="",
showVideo=False,
verbose=False,
loopVideo=True,
convertToGray=False,
resizeWidth=0,
resizeHeight=0,
annotate=False
):
'''
Capture a camera feed, send it to processing and forward outputs to EdgeHub
:param int videoPath: camera device path such as /dev/video0 or a test video file such as /TestAssets/myvideo.avi. Mandatory.
:param str imageProcessingEndpoint: service endpoint to send the frames to for processing. Example: "http://face-detect-service:8080". Leave empty when no external processing is needed (Default). Optional.
:param str imageUploadingEndpoint: service endpoint to upload selected frames and processing results to (for example a blob-upload module). Leave empty when no uploading is needed (Default). Optional.
:param str imageProcessingParams: query parameters to send to the processing service. Example: "'returnLabels': 'true'". Empty by default. Optional.
:param bool showVideo: show the video in a window. False by default. Optional.
:param bool verbose: show detailed logs and perf timers. False by default. Optional.
:param bool loopVideo: when reading from a video file, it will loop this video. True by default. Optional.
:param bool convertToGray: convert to gray before sending to external service for processing. False by default. Optional.
:param int resizeWidth: resize frame width before sending to external service for processing. Does not resize by default (0). Optional.
:param int resizeHeight: resize frame height before sending to external service for processing. Does not resize by default (0). Optional.
:param bool annotate: when showing the video in a window, it will annotate the frames with rectangles given by the image processing service. False by default. Optional. Rectangles should be passed in a json blob with a key containing the string rectangle, and a top left corner + bottom right corner or top left corner with width and height.
'''
try:
print("\nPython %s\n" % sys.version)
print("Camera Capture Azure IoT Edge Module. Press Ctrl-C to exit.")
try:
global hubManager
#hubManager = HubManager(
# 10000, IoTHubTransportProvider.MQTT, verbose)
except Exception as iothub_error:
print("Unexpected error %s from IoTHub" % iothub_error)
return
with CameraCapture(videoPath, imageProcessingEndpoint, imageUploadingEndpoint, imageProcessingParams, showVideo, verbose, loopVideo, convertToGray, resizeWidth, resizeHeight, annotate, send_to_Hub_callback) as cameraCapture:
cameraCapture.start()
except KeyboardInterrupt:
print("Camera capture module stopped")
def __convertStringToBool(env):
if env in ['True', 'TRUE', '1', 'y', 'YES', 'Y', 'Yes']:
return True
elif env in ['False', 'FALSE', '0', 'n', 'NO', 'N', 'No']:
return False
else:
raise ValueError('Could not convert string to bool.')
if __name__ == '__main__':
try:
VIDEO_PATH = os.environ['VIDEO_PATH']
IMAGE_PROCESSING_ENDPOINT = os.getenv('IMAGE_PROCESSING_ENDPOINT', "")
IMAGE_UPLOADING_ENDPOINT = os.getenv('IMAGE_UPLOADING_ENDPOINT', "")
IMAGE_PROCESSING_PARAMS = os.getenv('IMAGE_PROCESSING_PARAMS', "")
SHOW_VIDEO = __convertStringToBool(os.getenv('SHOW_VIDEO', 'False'))
VERBOSE = __convertStringToBool(os.getenv('VERBOSE', 'False'))
LOOP_VIDEO = __convertStringToBool(os.getenv('LOOP_VIDEO', 'True'))
CONVERT_TO_GRAY = __convertStringToBool(
os.getenv('CONVERT_TO_GRAY', 'False'))
RESIZE_WIDTH = int(os.getenv('RESIZE_WIDTH', 0))
RESIZE_HEIGHT = int(os.getenv('RESIZE_HEIGHT', 0))
ANNOTATE = __convertStringToBool(os.getenv('ANNOTATE', 'False'))
except ValueError as error:
print(error)
sys.exit(1)
main(VIDEO_PATH, IMAGE_PROCESSING_ENDPOINT, IMAGE_UPLOADING_ENDPOINT, IMAGE_PROCESSING_PARAMS, SHOW_VIDEO,
VERBOSE, LOOP_VIDEO, CONVERT_TO_GRAY, RESIZE_WIDTH, RESIZE_HEIGHT, ANNOTATE)
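# Minimal local-run sketch (hypothetical placeholder values): main() can also be
# called directly instead of going through the environment-variable parsing
# above. THRESHOLD still has to be set in the environment, because CameraCapture
# reads it whenever the processing endpoint returns predictions.
#
#   import os
#   from main import main
#
#   os.environ['THRESHOLD'] = '80'                             # accuracy threshold, in percent
#   main("0",                                                   # webcam index or a video file path
#        imageProcessingEndpoint="http://127.0.0.1:80/image",   # hypothetical classifier endpoint
#        showVideo=True,
#        verbose=True)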
<html>
<head>
<title>Video Stream</title>
</head>
<body style="background-color:#222;">
<h1>Video Stream</h1>
<img id="currentImage" style="border:2px solid teal;height:700px;">
<script>
var img = document.getElementById("currentImage");
var ws = new WebSocket("ws://" + location.host + "/stream");
ws.onopen = function() {
console.log("connection was established");
ws.send("next");
};
ws.onmessage = function(msg) {
img.src = 'data:image/png;base64, ' + msg.data;
};
img.onload = function() {
ws.send("next");
}
</script>
</body>
</html>
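# Headless-client sketch for the same websocket protocol the page above uses
# (assumes the third-party "websocket-client" package and the default
# ImageServer port 5012): send "next", receive a base64-encoded JPEG, repeat.
#
#   import base64
#   import websocket  # pip install websocket-client
#
#   ws = websocket.create_connection("ws://localhost:5012/stream")
#   ws.send("next")
#   with open("frame.jpg", "wb") as f:
#       f.write(base64.b64decode(ws.recv()))
#   ws.close()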
FROM balenalib/raspberrypi4-64-python:3.9
# The balena base image for building apps on Raspberry Pi 4 (64-bit).
# Enforces cross-compilation through QEMU
RUN [ "cross-build-start" ]
RUN echo "BUILD MODULE: CameraCapture"
# Update package index and install dependencies
#RUN install_packages \
# python3 \
# python3-pip \
# python3-dev \
# build-essential \
RUN apt-get update && apt-get upgrade -y
# Required for OpenCV
RUN install_packages \
# Hierarchical Data Format
libhdf5-dev libhdf5-serial-dev \
# for image files
#libjpeg-dev libtiff5-dev libjasper-dev libpng-dev \
# for video files
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev
# for gui
#libqt4-test libqtgui4 libqtwebkit4 libgtk2.0-dev \
# high def image processing
#libilmbase-dev libopenexr-dev
#RUN apt install libgl1
#RUN apt-get install libilmbase-dev libopenexr-dev
RUN install_packages libgl-dev
# Install Python packages
RUN pip install --upgrade pip
RUN pip install --upgrade wheel
RUN pip install --upgrade setuptools
RUN pip install --upgrade requests
#RUN install_packages python-requests
COPY /build/arm32v7-requirements.txt ./
#RUN pip3 install --upgrade setuptools
#RUN pip install --index-url=https://www.piwheels.org/simple -r arm32v7-requirements.txt
RUN pip install -r arm32v7-requirements.txt
#RUN pip install iothub-client
# Cleanup
RUN rm -rf /var/lib/apt/lists/* \
&& apt-get -y autoremove
RUN [ "cross-build-end" ]
ADD /app/ .
# Expose the port
EXPOSE 5012
ENTRYPOINT [ "python3", "-u", "./main.py" ]
FROM balenalib/raspberrypi3:stretch
# The balena base image for building apps on Raspberry Pi 3.
# Raspbian Stretch required for piwheels support. https://downloads.raspberrypi.org/raspbian/images/raspbian-2019-04-09/
# Enforces cross-compilation through QEMU
RUN [ "cross-build-start" ]
RUN echo "BUILD MODULE: CameraCapture"
# Update package index and install dependencies
RUN install_packages \
python3 \
python3-pip \
python3-dev \
build-essential \
libopenjp2-7-dev \
zlib1g-dev \
libatlas-base-dev \
wget \
libboost-python1.62.0 \
curl \
libcurl4-openssl-dev
# Required for OpenCV
RUN install_packages \
# Hierarchical Data Format
libhdf5-dev libhdf5-serial-dev \
# for image files
libjpeg-dev libtiff5-dev libjasper-dev libpng-dev \
# for video files
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
# for gui
libqt4-test libqtgui4 libqtwebkit4 libgtk2.0-dev \
# high def image processing
libilmbase-dev libopenexr-dev
# Install Python packages
COPY /build/arm32v7-requirements.txt ./
RUN pip3 install --upgrade pip
RUN pip3 install --upgrade setuptools
RUN pip3 install --index-url=https://www.piwheels.org/simple -r arm32v7-requirements.txt
# Cleanup
RUN rm -rf /var/lib/apt/lists/* \
&& apt-get -y autoremove
RUN [ "cross-build-end" ]
ADD /app/ .
# Expose the port
EXPOSE 5012
ENTRYPOINT [ "python3", "-u", "./main.py" ]
azure-iothub-device-client
numpy
opencv-contrib-python
requests
trollius
tornado==4.5.3
#azure-iot-deviceupdate
#azure-iothub-device-client
numpy
opencv-contrib-python==4.5.5.64
requests
trollius
tornado==4.5.3
azure-iot-device
azure.storage.blob
asyncio
{
"$schema-version": "0.0.1",
"description": "",
"image": {
"repository": "$CONTAINER_REGISTRY_ADDRESS",
"tag": {
"version": "cameracapture_2022.2.10",
"platforms": {
"arm32v7": "./arm32v7.Dockerfile"
}
},
"buildOptions": []
},
"language": "python"
}
import json
import os
import io
# Imports for the REST API
from flask import Flask, request, jsonify
# Imports for image processing
from PIL import Image
# Imports for prediction
from predict import initialize, predict_image, predict_url
app = Flask(__name__)
# 4MB Max image size limit
app.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024
# Default route just shows simple text
@app.route('/')
def index():
return 'CustomVision.ai model host harness'
# Like the CustomVision.ai Prediction service, the /image route handles either:
# - an octet-stream image file
# - a multipart/form-data request with files in the imageData parameter
@app.route('/image', methods=['POST'])
@app.route('/<project>/image', methods=['POST'])
@app.route('/<project>/image/nostore', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/image', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/image/nostore', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/image', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/image/nostore', methods=['POST'])
def predict_image_handler(project=None, publishedName=None):
try:
imageData = None
if ('imageData' in request.files):
imageData = request.files['imageData']
elif ('imageData' in request.form):
imageData = request.form['imageData']
else:
imageData = io.BytesIO(request.get_data())
img = Image.open(imageData)
results = predict_image(img)
return jsonify(results)
except Exception as e:
print('EXCEPTION:', str(e))
return 'Error processing image', 500
# Like the CustomVision.ai Prediction service, the /url route handles URLs
# in the body of the request of the form:
# { 'url': '<http url>' }
@app.route('/url', methods=['POST'])
@app.route('/<project>/url', methods=['POST'])
@app.route('/<project>/url/nostore', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/url', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/url/nostore', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/url', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/url/nostore', methods=['POST'])
def predict_url_handler(project=None, publishedName=None):
try:
image_url = json.loads(request.get_data().decode('utf-8'))['url']
results = predict_url(image_url)
return jsonify(results)
except Exception as e:
print('EXCEPTION:', str(e))
return 'Error processing image', 500
if __name__ == '__main__':
# Load and initialize the model
if os.environ['ML_MODEL'] == "0":
mnist_model = True
else:
mnist_model = False
initialize(mnist_model)
# Run the server
app.run(host='0.0.0.0', port=80)
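# Minimal client sketch (hypothetical host and file name; the server above
# listens on port 80): post a raw JPEG as application/octet-stream to /image,
# exactly as the CameraCapture module does.
#
#   import requests
#   with open("digit.jpg", "rb") as f:
#       r = requests.post("http://localhost:80/image",
#                         headers={"Content-Type": "application/octet-stream"},
#                         data=f.read())
#   print(r.json())  # {'predictions': [{'tagName': ..., 'probability': ...}, ...], ...}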
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities."""
import numpy as np
import tflite_runtime.interpreter as tflite
EDGETPU_SHARED_LIB = 'libedgetpu.so.1'
#EDGETPU_SHARED_LIB = 'delegate.so'
def make_interpreter(model_file):
model_file, *device = model_file.split('@')
return tflite.Interpreter(
model_path=model_file,
experimental_delegates=[
tflite.load_delegate(EDGETPU_SHARED_LIB,
{'device': device[0]} if device else {})
])
def input_image_size(interpreter):
"""Returns input image size as (width, height, channels) tuple."""
_, height, width, channels = interpreter.get_input_details()[0]['shape']
return width, height, channels
def input_tensor(interpreter):
"""Returns input tensor view as numpy array of shape (height, width, 3)."""
tensor_index = interpreter.get_input_details()[0]['index']
return interpreter.tensor(tensor_index)()[0]
def output_tensor(interpreter, i):
"""Returns dequantized output tensor if quantized before."""
output_details = interpreter.get_output_details()[i]
output_data = np.squeeze(interpreter.tensor(output_details['index'])())
if 'quantization' not in output_details:
return output_data
scale, zero_point = output_details['quantization']
if scale == 0:
return output_data - zero_point
return scale * (output_data - zero_point)
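# Worked example of the dequantization above (illustrative numbers): for a
# quantized output with scale = 1/256 and zero_point = 0, a raw value of 128
# dequantizes to (1/256) * (128 - 0) = 0.5.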
eight
five
four
nine
one
seven
six
three
two
zero
# from edgetpu.classification.engine import ClassificationEngine
#from edgetpu.utils import dataset_utils
from pycoral.utils.dataset import read_label_file
from PIL import Image
import collections
from collections import deque
import common
import io
import numpy as np
import operator
import tflite_runtime.interpreter as tflite
import time
from urllib.request import urlopen
from datetime import datetime
import sys
# global variables
global labels
global interpreter
global new_model
# OpenCV (cv2) is not imported in this module, so keep the Pillow code paths
use_opencv = False
Category = collections.namedtuple('Category', ['id', 'score'])
def input_tensor(interpreter):
"""Returns input tensor view as numpy array of shape (height, width, 3)."""
tensor_index = interpreter.get_input_details()[0]['index']
return interpreter.tensor(tensor_index)()[0]
def get_output(interpreter, top_k, score_threshold):
"""Returns no more than top_k categories with score >= score_threshold."""
scores = common.output_tensor(interpreter, 0)
categories = [
Category(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(categories, key=operator.itemgetter(1), reverse=True)
def initialize(mnist = True):
print('Loading model...')
global labels
#mnist = True
if mnist:
label_filename = 'mnist_labels.txt'
model_filename = 'mnist_model.tflite'
else:
label_filename = 'labels.txt'
model_filename = 'model.tflite'
labels = read_label_file(label_filename)
global interpreter
interpreter = common.make_interpreter(model_filename)
interpreter.allocate_tensors()
def log_msg(msg):
print("{}: {}".format(datetime.now(),msg))
def extract_and_resize_to_256_square(image):
h, w = image.shape[:2]
log_msg("crop_center: " + str(w) + "x" + str(h) +" and resize to " + str(256) + "x" + str(256))
if use_opencv:
return cv2.resize(image, (256, 256), interpolation = cv2.INTER_LINEAR)
else:
return extract_and_resize(image, (256, 256))
def crop_center(img,cropx,cropy):
h, w = img.shape[:2]
startx = max(0, w//2-(cropx//2))
starty = max(0, h//2-(cropy//2))
log_msg("crop_center: " + str(w) + "x" + str(h) +" to " + str(cropx) + "x" + str(cropy))
return img[starty:starty+cropy, startx:startx+cropx]
def resize_down_to_1600_max_dim(image):
w,h = image.size
if h < 1600 and w < 1600:
return image
new_size = (1600 * w // h, 1600) if (h > w) else (1600, 1600 * h // w)
log_msg("resize: " + str(w) + "x" + str(h) + " to " + str(new_size[0]) + "x" + str(new_size[1]))
if use_opencv:
# Convert image to numpy array
image = convert_to_nparray(image)
return cv2.resize(image, new_size, interpolation = cv2.INTER_LINEAR)
else:
if max(new_size) / max(image.size) >= 0.5:
method = Image.BILINEAR
else:
method = Image.BICUBIC
image = image.resize(new_size, method)
return image
def convert_to_nparray(image):
# RGB -> BGR
log_msg("Convert to numpy array")
image = np.array(image)
return image[:, :, (2,1,0)]
def update_orientation(image):
exif_orientation_tag = 0x0112
if hasattr(image, '_getexif'):
exif = image._getexif()
if exif != None and exif_orientation_tag in exif:
orientation = exif.get(exif_orientation_tag, 1)
log_msg('Image has EXIF Orientation: ' + str(orientation))
# orientation is 1 based, shift to zero based and flip/transpose based on 0-based values
orientation -= 1
if orientation >= 4:
image = image.transpose(Image.TRANSPOSE)
if orientation == 2 or orientation == 3 or orientation == 6 or orientation == 7:
image = image.transpose(Image.FLIP_TOP_BOTTOM)
if orientation == 1 or orientation == 2 or orientation == 5 or orientation == 6:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
return image
def predict_url(imageUrl):
log_msg("Predicting from url: " +imageUrl)
with urlopen(imageUrl) as testImage:
image = Image.open(testImage)
return predict_image(image)
def predict_image(image):
global interpreter
global labels
log_msg('Predicting image')
w,h = image.size
log_msg("Image size: " + str(w) + "x" + str(h))
width, height, channels = common.input_image_size(interpreter)
# print(width, height, channels)
# Update orientation based on EXIF tags
image = update_orientation(image)
# If the image has either w or h greater than 1600 we resize it down respecting
# aspect ratio such that the largest dimension is 1600
image = resize_down_to_1600_max_dim(image)
# Convert image to numpy array
image = convert_to_nparray(image)
# Crop the center square and resize that square down to 256x256
resized_image = image # extract_and_resize_to_256_square(image)
# Crop the center for the specified network_input_Size
cropped_image = crop_center(resized_image, width, height)
common.input_tensor(interpreter)[:,:] = np.reshape(cropped_image, (common.input_image_size(interpreter)))
interpreter.invoke()
results = get_output(interpreter, top_k=3, score_threshold=0)
annotate_text = ''
result_rep = []
for result in results:
annotate_text += '\n{:.0f}% {}'.format(100*result[1], labels[result[0]])
result_rep.append({
'tagName': "{}".format(labels[result[0]]),
'probability': "{}".format(result[1]),
'tagId': '',
'boundingBox': None })
print(annotate_text)
#result = []
#for p, label in zip(predictions, labels):
# truncated_probablity = np.float64(round(p,8))
# if truncated_probablity > 1e-8:
# result.append({
# 'tagName': label,
# 'probability': truncated_probablity,
# 'tagId': '',
# 'boundingBox': None })
response = {
'id': '',
'project': '',
'iteration': '',
'created': datetime.utcnow().isoformat(),
'predictions': result_rep
#'accuracy' : "{}".format(results[0][1])
}
log_msg("Results: " + str(response))
return response
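# Minimal usage sketch (hypothetical image path): load the MNIST model and run
# a single prediction outside the Flask harness in app.py.
if __name__ == "__main__":
    initialize(mnist=True)              # loads mnist_labels.txt / mnist_model.tflite
    sample = Image.open("digit.png")    # placeholder path to a test image
    print(predict_image(sample)["predictions"])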