diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..bb3a1c2
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,133 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(openvino_object_detection)
+
+find_package(InferenceEngine 1.5 REQUIRED)
+
+find_package(catkin REQUIRED COMPONENTS
+  message_generation
+  roscpp
+  InferenceEngine
+  sensor_msgs
+  cv_bridge
+)
+
+if(UNIX OR APPLE)
+ # Linker flags.
+ if( ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" OR ${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel")
+ # GCC specific flags. ICC is compatible with them.
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -z noexecstack -z relro -z now")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -z noexecstack -z relro -z now")
+ elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
+    # Clang does not accept bare -z flags; they need to be passed to the linker via -Wl.
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now")
+ endif()
+
+ # Compiler flags.
+ if( ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
+ # GCC specific flags.
+ if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector-strong")
+ else()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector")
+ endif()
+ elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
+    # Clang is compatible with some of the flags.
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector")
+ elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel" )
+    # Same as above, with the exception that ICC compilation crashes with the -fPIE option, even
+    # though it uses the -pie linker option, which requires -fPIE during compilation. checksec
+    # shows that it generates a correct PIE anyway if only -pie is provided.
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector")
+ endif()
+
+ # Generic flags.
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
+ # Add OpenMP support
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
+endif()
+
+# Add x86 intrinsic compiler support
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
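+# The two blocks below probe the build host's CPU with lscpu and only enable the
+# F16C and SSE4.1 intrinsics when the corresponding flags are reported, so the
+# resulting binary is tuned to the machine it is compiled on.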
+execute_process(
+  COMMAND bash -c "lscpu | grep -i flags | grep -qi f16c"
+ RESULT_VARIABLE SUPPORT_F16C)
+if (SUPPORT_F16C EQUAL 0)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mf16c")
+ add_definitions(-DSUPPORT_MF16C)
+endif()
+
+execute_process(
+  COMMAND bash -c "lscpu | grep -i flags | grep -qi sse4_1"
+ RESULT_VARIABLE SUPPORT_SSE41)
+if (SUPPORT_SSE41 EQUAL 0)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1")
+endif()
+
+## Generate messages in the 'msg' folder
+add_message_files(FILES
+ Object.msg
+)
+
+add_service_files(FILES
+ Objects.srv
+)
+
+generate_messages(DEPENDENCIES
+ std_msgs
+ sensor_msgs
+)
+
+
+catkin_package(
+ CATKIN_DEPENDS
+ message_runtime
+ std_msgs
+ sensor_msgs
+)
+
+find_package(OpenCV REQUIRED)
+
+message(STATUS "InferenceEngine libraries: ${InferenceEngine_LIBRARIES}")
+
+include_directories(
+ include ${catkin_INCLUDE_DIRS}
+ ${InferenceEngine_INCLUDE_DIRS}
+ ${InferenceEngine_INCLUDE_DIRS}/../samples
+ ${InferenceEngine_INCLUDE_DIRS}/../samples/common
+ ${InferenceEngine_DIR}/../src
+ ${InferenceEngine_DIR}/../src/extension
+ ${OpenCV_INCLUDE_DIRS}
+
+)
+
+
+
+add_executable(object_detection
+ src/main.cpp
+)
+
+add_executable(object_detection_test
+ src/test.cpp
+)
+
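+# Make sure the generated Object.msg / Objects.srv headers exist before these targets are compiled.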
+add_dependencies(object_detection ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+add_dependencies(object_detection_test ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+set_target_properties(object_detection PROPERTIES COMPILE_FLAGS "-fPIE"
+                      COMPILE_PDB_NAME object_detection)
+
+target_link_libraries(object_detection
+ IE::ie_cpu_extension
+ gflags
+ dl
+ pthread
+ ${catkin_LIBRARIES}
+ ${InferenceEngine_LIBRARIES}
+ ${OpenCV_LIBRARIES}
+)
+
+target_link_libraries(object_detection_test
+ ${catkin_LIBRARIES}
+
+ ${OpenCV_LIBRARIES}
+)
diff --git a/irmodels/tiny-YoloV3/FP16/.gitkeep b/irmodels/tiny-YoloV3/FP16/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.bin b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.bin
new file mode 100644
index 0000000..197f9a0
Binary files /dev/null and b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.bin differ
diff --git a/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.labels b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.labels
new file mode 100644
index 0000000..ca76c80
--- /dev/null
+++ b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.labels
@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorbike
+aeroplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+sofa
+pottedplant
+bed
+diningtable
+toilet
+tvmonitor
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
diff --git a/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.mapping b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.mapping
new file mode 100644
index 0000000..c1e3e80
--- /dev/null
+++ b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.mapping
@@ -0,0 +1,139 @@
+<!-- Model Optimizer framework-to-IR layer mapping (generated file, contents omitted) -->
diff --git a/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.xml b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.xml
new file mode 100644
index 0000000..b65ecb6
--- /dev/null
+++ b/irmodels/tiny-YoloV3/FP16/frozen_yolov3-tiny-mine.xml
@@ -0,0 +1,783 @@
+<!-- OpenVINO IR (FP16) for tiny YOLOv3 generated by the Model Optimizer; full XML omitted. -->
+<!-- Recoverable topology from the layer shapes: input 1x3x416x416; conv/pool stages of 16, 32, 64, 128, 256, 512 -->
+<!-- and 1024 channels with spatial size shrinking 416 -> 208 -> 104 -> 52 -> 26 -> 13; a 128-channel branch upsampled -->
+<!-- from 13x13 to 26x26 and concatenated with the 256x26x26 feature map (384 channels); detection outputs of shape -->
+<!-- 1x255x26x26 and 1x255x13x13. -->
diff --git a/irmodels/tiny-YoloV3/FP32/.gitkeep b/irmodels/tiny-YoloV3/FP32/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.bin b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.bin
new file mode 100644
index 0000000..8ec8d82
Binary files /dev/null and b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.bin differ
diff --git a/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.labels b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.labels
new file mode 100644
index 0000000..ca76c80
--- /dev/null
+++ b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.labels
@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorbike
+aeroplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+sofa
+pottedplant
+bed
+diningtable
+toilet
+tvmonitor
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
diff --git a/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.mapping b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.mapping
new file mode 100644
index 0000000..eabe1cc
--- /dev/null
+++ b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.mapping
@@ -0,0 +1,139 @@
+<!-- Model Optimizer framework-to-IR layer mapping (generated file, contents omitted) -->
diff --git a/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.xml b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.xml
new file mode 100644
index 0000000..5c49dd4
--- /dev/null
+++ b/irmodels/tiny-YoloV3/FP32/frozen_yolov3-tiny-mine.xml
@@ -0,0 +1,783 @@
+<!-- OpenVINO IR (FP32) for tiny YOLOv3 generated by the Model Optimizer; full XML omitted. -->
+<!-- Recoverable topology from the layer shapes: input 1x3x416x416; conv/pool stages of 16, 32, 64, 128, 256, 512 -->
+<!-- and 1024 channels with spatial size shrinking 416 -> 208 -> 104 -> 52 -> 26 -> 13; a 128-channel branch upsampled -->
+<!-- from 13x13 to 26x26 and concatenated with the 256x26x26 feature map (384 channels); detection outputs of shape -->
+<!-- 1x255x26x26 and 1x255x13x13. -->
diff --git a/launch/tiny_yolov3.launch b/launch/tiny_yolov3.launch
new file mode 100644
index 0000000..0faeaef
--- /dev/null
+++ b/launch/tiny_yolov3.launch
@@ -0,0 +1,9 @@
+<launch>
+  <!-- node and parameter configuration for the object_detection node; XML contents omitted -->
+</launch>
\ No newline at end of file
diff --git a/msg/Object.msg b/msg/Object.msg
new file mode 100644
index 0000000..6bb7bc7
--- /dev/null
+++ b/msg/Object.msg
@@ -0,0 +1,16 @@
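+#label of the detected object class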
+string label
+
+#confidence of result
+float32 confidence
+
+#normalized value of box center point coordinate x
+float32 x
+
+#normalized value of box center point coordinate y
+float32 y
+
+#normalized value of box height
+float32 h
+
+#normalized value of box width
+float32 w
diff --git a/package.xml b/package.xml
new file mode 100644
index 0000000..6bc9c1a
--- /dev/null
+++ b/package.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>openvino_object_detection</name>
+  <version>1.0.1</version>
+  <description>The openvino package for tiny yolov3 object detection</description>
+
+  <maintainer>Rishabh Kundu</maintainer>
+
+  <license>MIT</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>genmsg</build_depend>
+
+  <build_depend>roscpp</build_depend>
+  <build_depend>OpenCV</build_depend>
+  <build_depend>message_generation</build_depend>
+  <build_depend>InferenceEngine</build_depend>
+  <build_depend>sensor_msgs</build_depend>
+
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>OpenCV</build_export_depend>
+  <build_export_depend>InferenceEngine</build_export_depend>
+  <build_export_depend>sensor_msgs</build_export_depend>
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>OpenCV</exec_depend>
+  <exec_depend>InferenceEngine</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+
+  <build_depend>message_generation</build_depend>
+  <exec_depend>message_runtime</exec_depend>
+
+</package>
diff --git a/src/main.cpp b/src/main.cpp
new file mode 100644
index 0000000..3914b01
--- /dev/null
+++ b/src/main.cpp
@@ -0,0 +1,250 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * \brief The entry point for the Inference Engine object_detection demo application
+ * \file object_detection_demo_yolov3_async/main.cpp
+ * \example object_detection_demo_yolov3_async/main.cpp
+ */
+
+#include "object_detection_demo_yolov3_async.hpp"
+
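+// State shared between main(), which loads and configures the network, and the
+// getObjects() service callback, which runs inference on each request.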
+InferRequest::Ptr async_infer_request_curr;
+std::string inputName;
+CNNNetReader netReader;
+OutputsDataMap outputInf;
+bool auto_resize;
+InputsDataMap inputInf;
+int width;
+int height;
+std::vector<std::string> labels;
+typedef std::chrono::duration<double, std::ratio<1, 1000>> ms;
+
+bool getObjects(openvino_object_detection::Objects::Request &req,
+ openvino_object_detection::Objects::Response &res) {
+ auto t = std::chrono::high_resolution_clock::now();
+ cv::Mat frame = cv_bridge::toCvCopy(req.img, "bgr8")->image;
+
+ FrameToBlob(frame, async_infer_request_curr, inputName, auto_resize);
+
+ async_infer_request_curr->StartAsync();
+
+ if (OK ==
+ async_infer_request_curr->Wait(IInferRequest::WaitMode::RESULT_READY)) {
+
+ // ---------------------------Processing output
+ // blobs-------------------------------------------------- Processing
+ // results of the CURRENT request
+ unsigned long resized_im_h = inputInf.begin()->second.get()->getDims()[0];
+ unsigned long resized_im_w = inputInf.begin()->second.get()->getDims()[1];
+    std::vector<DetectionObject> objects;
+ // Parsing outputs
+ for (auto &output : outputInf) {
+ auto output_name = output.first;
+ // slog::info << "output_name = " + output_name << slog::endl;
+
+ CNNLayerPtr layer =
+ netReader.getNetwork().getLayerByName(output_name.c_str());
+ Blob::Ptr blob = async_infer_request_curr->GetBlob(output_name);
+ ParseYOLOV3Output(layer, blob, resized_im_h, resized_im_w, height, width,
+ req.t, objects);
+ }
+ // Filtering overlapping boxes
+ std::sort(objects.begin(), objects.end());
+ for (int i = 0; i < objects.size(); ++i) {
+ if (objects[i].confidence == 0)
+ continue;
+ for (int j = i + 1; j < objects.size(); ++j) {
+ if (IntersectionOverUnion(objects[i], objects[j]) >= req.iou_t) {
+ objects[j].confidence = 0;
+ }
+ // if (objects[j].confidence == 1) {
+ // objects[j].confidence = 0;
+ //}
+ }
+ }
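+    // Convert the surviving detections (confidence >= req.t) into Object messages:
+    // centre point (x, y) plus width and height.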
+ res.len = 0;
+ for (auto &object : objects) {
+ if (object.confidence < req.t)
+ continue;
+ openvino_object_detection::Object tmp;
+ tmp.confidence = object.confidence;
+ tmp.label = labels[object.class_id];
+ tmp.x = (object.xmin + object.xmax) / 2.0;
+ tmp.y = (object.ymin + object.ymax) / 2.0;
+ tmp.w = object.xmax - object.xmin;
+ tmp.h = object.ymax - object.ymin;
+ res.objects.push_back(tmp);
+ res.len++;
+ }
+ }
+  res.ms = std::chrono::duration_cast<ms>(
+ std::chrono::high_resolution_clock::now() - t)
+ .count();
+ return true;
+}
+
+int main(int argc, char *argv[]) {
+
+ ros::init(argc, argv, "object_detection");
+ ros::NodeHandle n;
+ ros::ServiceServer conf_service =
+ n.advertiseService("tiny_yolov3", getObjects);
+ std::string dev;
+ std::string def = "CPU";
+ n.param("/object_detection/target", dev, def);
+ n.param("/object_detection/width", width, 648);
+ n.param("/object_detection/height", height, 488);
+ auto_resize = n.hasParam("/object_detection/auto_resize");
+ std::cout << dev.c_str() << std::endl;
+ try {
+ /** This demo covers a certain topology and cannot be generalized for any
+ * object detection **/
+ std::cout << "InferenceEngine: " << GetInferenceEngineVersion()
+ << std::endl;
+
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 1. Load Plugin for inference engine
+ // -------------------------------------
+ slog::info << "Loading plugin" << slog::endl;
+ InferencePlugin plugin =
+ PluginDispatcher({"../lib", ""}).getPluginByDevice(dev.c_str());
+ printPluginVersion(plugin, std::cout);
+
+ /**Loading extensions to the plugin **/
+
+ /** Loading default extensions **/
+ if (dev == "CPU") {
+ /**
+ * cpu_extensions library is compiled from the "extension" folder
+ *containing custom CPU layer implementations.
+ **/
+      plugin.AddExtension(std::make_shared<Extensions::Cpu::CpuExtensions>());
+ }
+
+ if (n.hasParam("/object_detection/l")) {
+ std::string l_flags;
+ n.getParam("/object_detection/l", l_flags);
+ // CPU extensions are loaded as a shared library and passed as a pointer
+ // to the base extension
+      IExtensionPtr extension_ptr =
+          make_so_pointer<IExtension>(l_flags.c_str());
+ plugin.AddExtension(extension_ptr);
+ }
+ if (n.hasParam("/object_detection/c")) {
+ std::string c_flags;
+ n.getParam("/object_detection/c", c_flags);
+ // GPU extensions are loaded from an .xml description and OpenCL kernel
+ // files
+ plugin.SetConfig(
+ {{PluginConfigParams::KEY_CONFIG_FILE, c_flags.c_str()}});
+ }
+
+ /** Per-layer metrics **/
+ if (n.hasParam("/object_detection/pc")) {
+ plugin.SetConfig(
+ {{PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES}});
+ }
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------- 2. Reading the IR generated by the Model Optimizer
+ // (.xml and .bin files) ------------
+ slog::info << "Loading network files" << slog::endl;
+ /** Reading network model **/
+ std::string model;
+ if (!n.getParam("/object_detection/model", model)) {
+ std::cout << "Model not found";
+ return 0;
+ }
+ netReader.ReadNetwork(model);
+ /** Setting batch size to 1 **/
+ slog::info << "Batch size is forced to 1." << slog::endl;
+ netReader.getNetwork().setBatchSize(1);
+ /** Extracting the model name and loading its weights **/
+ std::string binFileName = fileNameNoExt(model) + ".bin";
+ netReader.ReadWeights(binFileName);
+ /** Reading labels (if specified) **/
+ std::string labelFileName = fileNameNoExt(model) + ".labels";
+    std::vector<std::string> label_list;
+    std::ifstream inputFile(labelFileName);
+    // NOTE: istream_iterator splits on whitespace, so multi-word labels such as
+    // "traffic light" end up as two separate entries.
+    std::copy(std::istream_iterator<std::string>(inputFile),
+              std::istream_iterator<std::string>(),
+              std::back_inserter(label_list));
+ labels = label_list;
+ // -----------------------------------------------------------------------------------------------------
+
+    /** A YoloV3-based network has one input and three outputs (two for tiny YoloV3) **/
+ // --------------------------- 3. Configuring input and output
+ // -----------------------------------------
+ // --------------------------------- Preparing input blobs
+ // ---------------------------------------------
+ slog::info << "Checking that the inputs are as the demo expects"
+ << slog::endl;
+ InputsDataMap inputInfo(netReader.getNetwork().getInputsInfo());
+ inputInf = inputInfo;
+ if (inputInfo.size() != 1) {
+ throw std::logic_error(
+ "This demo accepts networks that have only one input");
+ }
+ InputInfo::Ptr &input = inputInfo.begin()->second;
+ inputName = inputInfo.begin()->first;
+ input->setPrecision(Precision::U8);
+ if (n.hasParam("auto_resize")) {
+ input->getPreProcess().setResizeAlgorithm(
+ ResizeAlgorithm::RESIZE_BILINEAR);
+ input->getInputData()->setLayout(Layout::NHWC);
+ } else {
+ input->getInputData()->setLayout(Layout::NCHW);
+ }
+ // --------------------------------- Preparing output blobs
+ // -------------------------------------------
+ slog::info << "Checking that the outputs are as the demo expects"
+ << slog::endl;
+ OutputsDataMap outputInfo(netReader.getNetwork().getOutputsInfo());
+ outputInf = outputInfo;
+ // if (outputInfo.size() != 3) {
+ // throw std::logic_error("This demo only accepts networks with three
+ // layers");
+ //}
+ for (auto &output : outputInfo) {
+ output.second->setPrecision(Precision::FP32);
+ output.second->setLayout(Layout::NCHW);
+ }
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 4. Loading model to the plugin
+ // ------------------------------------------
+ slog::info << "Loading model to the plugin" << slog::endl;
+ ExecutableNetwork network = plugin.LoadNetwork(netReader.getNetwork(), {});
+
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 5. Creating infer request
+ // -----------------------------------------------
+
+ async_infer_request_curr = network.CreateInferRequestPtr();
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 6. Doing inference
+ // ------------------------------------------------------
+ slog::info << "Start inference " << slog::endl;
+
+ ros::spin();
+
+    /** Showing performance results **/
+ if (n.hasParam("/object_detection/pc")) {
+ printPerformanceCounts(*async_infer_request_curr, std::cout);
+ }
+ } catch (const std::exception &error) {
+ std::cerr << "[ ERROR ] " << error.what() << std::endl;
+ return 1;
+ } catch (...) {
+ std::cerr << "[ ERROR ] Unknown/internal exception happened." << std::endl;
+ return 1;
+ }
+
+ slog::info << "Execution successful" << slog::endl;
+ return 0;
+}
diff --git a/src/object_detection_demo_yolov3_async.hpp b/src/object_detection_demo_yolov3_async.hpp
new file mode 100644
index 0000000..2a0deae
--- /dev/null
+++ b/src/object_detection_demo_yolov3_async.hpp
@@ -0,0 +1,220 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#pragma once
+
+#include <algorithm>
+#include <chrono>
+#include <fstream>
+#include <iostream>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <inference_engine.hpp>
+#include <ext_list.hpp>
+
+#include <samples/common.hpp>
+#include <samples/ocv_common.hpp>
+#include <samples/slog.hpp>
+
+#include <opencv2/opencv.hpp>
+
+// ROS packages
+#include <ros/ros.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/Image.h>
+
+#include <openvino_object_detection/Object.h>
+#include <openvino_object_detection/Objects.h>
+
+using namespace InferenceEngine;
+
+#define yolo_scale_13 13
+#define yolo_scale_26 26
+#define yolo_scale_52 52
+
+void FrameToBlob(const cv::Mat &frame, InferRequest::Ptr &inferRequest,
+ const std::string &inputName, bool auto_resize) {
+ if (auto_resize) {
+ /* Just set input blob containing read image. Resize and layout conversion
+ * will be done automatically */
+ inferRequest->SetBlob(inputName, wrapMat2Blob(frame));
+ } else {
+ /* Resize and copy data from the image to the input blob */
+ Blob::Ptr frameBlob = inferRequest->GetBlob(inputName);
+    matU8ToBlob<uint8_t>(frame, frameBlob);
+ }
+}
+
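+// Computes the flat index into a RegionYolo output blob laid out per anchor as
+// (coords + classes + 1) channels of side*side cells: 'location' encodes the anchor
+// and the cell, 'entry' selects x, y, w, h, objectness or one of the class scores.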
+static int EntryIndex(int side, int lcoords, int lclasses, int location,
+ int entry) {
+ int n = location / (side * side);
+ int loc = location % (side * side);
+ return n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc;
+}
+
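+// A decoded detection: corner coordinates scaled back to the original image size,
+// plus the class id and confidence. operator< orders detections by confidence so
+// the boxes can be sorted before overlap filtering.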
+struct DetectionObject {
+ int xmin, ymin, xmax, ymax, class_id;
+ float confidence;
+
+ DetectionObject(double x, double y, double h, double w, int class_id,
+ float confidence, float h_scale, float w_scale) {
+    this->xmin = static_cast<int>((x - w / 2) * w_scale);
+    this->ymin = static_cast<int>((y - h / 2) * h_scale);
+    this->xmax = static_cast<int>(this->xmin + w * w_scale);
+    this->ymax = static_cast<int>(this->ymin + h * h_scale);
+ this->class_id = class_id;
+ this->confidence = confidence;
+ }
+
+ bool operator<(const DetectionObject &s2) const {
+ return this->confidence < s2.confidence;
+ }
+};
+
+double IntersectionOverUnion(const DetectionObject &box_1,
+ const DetectionObject &box_2) {
+ double width_of_overlap_area =
+ fmin(box_1.xmax, box_2.xmax) - fmax(box_1.xmin, box_2.xmin);
+ double height_of_overlap_area =
+ fmin(box_1.ymax, box_2.ymax) - fmax(box_1.ymin, box_2.ymin);
+ double area_of_overlap;
+ if (width_of_overlap_area < 0 || height_of_overlap_area < 0)
+ area_of_overlap = 0;
+ else
+ area_of_overlap = width_of_overlap_area * height_of_overlap_area;
+ double box_1_area = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin);
+ double box_2_area = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin);
+ double area_of_union = box_1_area + box_2_area - area_of_overlap;
+ return area_of_overlap / area_of_union;
+}
+
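+// Decodes one RegionYolo output blob: for every grid cell and anchor, the box centre
+// is (cell + predicted offset) / side, the width/height are exp(tw), exp(th) scaled by
+// the anchor priors, and the reported confidence is objectness * class probability.
+// Candidates below 'threshold' are skipped.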
+void ParseYOLOV3Output(const CNNLayerPtr &layer, const Blob::Ptr &blob,
+ const unsigned long resized_im_h,
+ const unsigned long resized_im_w,
+ const unsigned long original_im_h,
+ const unsigned long original_im_w,
+ const double threshold,
+                       std::vector<DetectionObject> &objects) {
+ // --------------------------- Validating output parameters
+ // -------------------------------------
+ if (layer->type != "RegionYolo")
+ throw std::runtime_error("Invalid output type: " + layer->type +
+ ". RegionYolo expected");
+  const int out_blob_h = static_cast<int>(blob->getTensorDesc().getDims()[2]);
+  const int out_blob_w = static_cast<int>(blob->getTensorDesc().getDims()[3]);
+ if (out_blob_h != out_blob_w)
+ throw std::runtime_error("Invalid size of output " + layer->name +
+ " It should be in NCHW layout and H should be "
+ "equal to W. Current H = " +
+                             std::to_string(out_blob_h) +
+                             ", current W = " + std::to_string(out_blob_w));
+ // --------------------------- Extracting layer parameters
+ // -------------------------------------
+ auto num = layer->GetParamAsInt("num");
+ try {
+ num = layer->GetParamAsInts("mask").size();
+ } catch (...) {
+ }
+ auto coords = layer->GetParamAsInt("coords");
+ auto classes = layer->GetParamAsInt("classes");
+  std::vector<float> anchors = {10.0, 13.0, 16.0, 30.0, 33.0, 23.0,
+ 30.0, 61.0, 62.0, 45.0, 59.0, 119.0,
+ 116.0, 90.0, 156.0, 198.0, 373.0, 326.0};
+ try {
+ anchors = layer->GetParamAsFloats("anchors");
+ } catch (...) {
+ }
+ auto side = out_blob_h;
+ int anchor_offset = 0;
+
+ // throw std::runtime_error("anchors.size() ==" +
+ // std::to_string(anchors.size()));
+
+ if (anchors.size() == 18) { // YoloV3
+ switch (side) {
+ case yolo_scale_13:
+ anchor_offset = 2 * 6;
+ break;
+ case yolo_scale_26:
+ anchor_offset = 2 * 3;
+ break;
+ case yolo_scale_52:
+ anchor_offset = 2 * 0;
+ break;
+ default:
+ throw std::runtime_error("Invalid output size");
+ }
+ } else if (anchors.size() == 12) { // tiny-YoloV3
+ switch (side) {
+ case yolo_scale_13:
+ anchor_offset = 2 * 3;
+ break;
+ case yolo_scale_26:
+ anchor_offset = 2 * 0;
+ break;
+ default:
+ throw std::runtime_error("Invalid output size");
+ }
+ } else { // ???
+ switch (side) {
+ case yolo_scale_13:
+ anchor_offset = 2 * 6;
+ break;
+ case yolo_scale_26:
+ anchor_offset = 2 * 3;
+ break;
+ case yolo_scale_52:
+ anchor_offset = 2 * 0;
+ break;
+ default:
+ throw std::runtime_error("Invalid output size");
+ }
+ }
+ auto side_square = side * side;
+  const float *output_blob =
+      blob->buffer().as<PrecisionTrait<Precision::FP32>::value_type *>();
+ // --------------------------- Parsing YOLO Region output
+ // -------------------------------------
+ for (int i = 0; i < side_square; ++i) {
+ int row = i / side;
+ int col = i % side;
+ for (int n = 0; n < num; ++n) {
+ int obj_index =
+ EntryIndex(side, coords, classes, n * side * side + i, coords);
+ int box_index = EntryIndex(side, coords, classes, n * side * side + i, 0);
+ float scale = output_blob[obj_index];
+ if (scale < threshold)
+ continue;
+ double x = (col + output_blob[box_index + 0 * side_square]) / side *
+ resized_im_w;
+ double y = (row + output_blob[box_index + 1 * side_square]) / side *
+ resized_im_h;
+ double height = std::exp(output_blob[box_index + 3 * side_square]) *
+ anchors[anchor_offset + 2 * n + 1];
+ double width = std::exp(output_blob[box_index + 2 * side_square]) *
+ anchors[anchor_offset + 2 * n];
+ for (int j = 0; j < classes; ++j) {
+ int class_index = EntryIndex(side, coords, classes, n * side_square + i,
+ coords + 1 + j);
+ float prob = scale * output_blob[class_index];
+ if (prob < threshold)
+ continue;
+ DetectionObject obj(x, y, height, width, j, prob,
+                            static_cast<float>(original_im_h) /
+                                static_cast<float>(resized_im_h),
+                            static_cast<float>(original_im_w) /
+                                static_cast<float>(resized_im_w));
+ objects.push_back(obj);
+ }
+ }
+ }
+}
diff --git a/src/test.cpp b/src/test.cpp
new file mode 100644
index 0000000..44f1006
--- /dev/null
+++ b/src/test.cpp
@@ -0,0 +1,47 @@
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+
+#include <cv_bridge/cv_bridge.h>
+#include <opencv2/opencv.hpp>
+#include <ros/ros.h>
+#include <sensor_msgs/Image.h>
+
+#include <openvino_object_detection/Object.h>
+#include <openvino_object_detection/Objects.h>
+using namespace cv;
+
+int main(int argc, char **argv) {
+ ros::init(argc, argv, "object_detection_test");
+ // Handle creation
+ ros::NodeHandle n;
+ Mat frame;
+ VideoCapture cap("/home/yikes/vid/2.avi");
+ ros::ServiceClient client =
+      n.serviceClient<openvino_object_detection::Objects>("/tiny_yolov3");
+ openvino_object_detection::Objects t;
+ sensor_msgs::Image output_image_msg;
+ cv::namedWindow("view");
+
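+  // Read frames from the video file, call the /tiny_yolov3 service with fixed
+  // confidence (0.5) and IoU (0.4) thresholds, and draw the returned boxes.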
+ while (1) {
+    cap >> frame;
+    if (frame.empty())  // stop when the video file runs out of frames
+      break;
+ t.request.t = 0.5;
+ t.request.iou_t = 0.4;
+ t.request.img =
+ *cv_bridge::CvImage(std_msgs::Header(), "bgr8", frame).toImageMsg();
+ if (client.call(t)) {
+ std::cout << t.response.ms << "\n";
+
+ for (openvino_object_detection::Object obj : t.response.objects) {
+ std::ostringstream conf;
+ conf << ":" << std::fixed << std::setprecision(3) << obj.confidence;
+ cv::putText(frame, (std::string)obj.label + conf.str(),
+ cv::Point2f(obj.x - obj.w / 2, obj.y - obj.h / 2 - 5),
+ cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(0, 0, 255), 1,
+ cv::LINE_AA);
+ cv::rectangle(frame, cv::Point2f(obj.x - obj.w / 2, obj.y - obj.h / 2),
+ cv::Point2f(obj.x + obj.w / 2, obj.y + obj.h / 2),
+ cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
+ }
+
+ cv::imshow("view", frame);
+ cv::waitKey(300);
+ }
+ ros::spinOnce();
+ }
+}
diff --git a/srv/Objects.srv b/srv/Objects.srv
new file mode 100644
index 0000000..2e1d37c
--- /dev/null
+++ b/srv/Objects.srv
@@ -0,0 +1,7 @@
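+# Request: image to run detection on, confidence threshold t and IoU threshold iou_t.
+# Response: number of detections, the detections themselves and the inference time in ms.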
+sensor_msgs/Image img
+float32 t
+float32 iou_t
+---
+int16 len
+Object[] objects
+int32 ms