Mirror of https://gitee.com/bianbu-linux/ai-support (synced 2025-04-24 22:27:13 -04:00)

Commit 0aee1c36b1 "Update for v1.0beta3.1" (parent 614ab9dba0)
58 changed files with 1447 additions and 1293 deletions
@@ -64,7 +64,7 @@ build-job-x86_64: # This job runs in the build stage, which runs first.
    - |
      mkdir ${BUILD_DIR}
      pushd ${BUILD_DIR}
-     cmake .. -DORT_HOME=${ORT_HOME} -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DDEMO=ON
+     cmake .. -DORT_HOME=${ORT_HOME} -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DDEMO=ON -DXDG=OFF
      make install -j`nproc` VERBOSE=1
      popd
      # copy demo and data
@@ -100,7 +100,7 @@ build-job-riscv64: # This job runs in the build stage, which runs second.
    #- echo "CI_BUILDS_DIR ${CI_BUILDS_DIR}"
    #- echo "CI_PROJECT_DIR ${CI_PROJECT_DIR}"
    - echo "Downloading latest spacemit-ort ..."
-   - wget $(curl -X GET ${CI_NEXUS_URL} | grep -oP 'https:[^>]*.rv64.v[\d\.]*tar.gz' | tail -n 1) -O spacemit-ort.latest.tar.gz --no-check-certificate
+   - wget $(curl -X GET ${CI_NEXUS_URL} | grep -oP 'https:[^>]*.riscv64.[\d\.]*tar.gz' | sort -V | tail -n 1) -O spacemit-ort.latest.tar.gz --no-check-certificate
    - |
      mkdir spacemit-ort
      tar xzf spacemit-ort.latest.tar.gz -C spacemit-ort --strip-components 1
@@ -111,7 +111,7 @@ build-job-riscv64: # This job runs in the build stage, which runs second.
    - |
      mkdir ${BUILD_DIR}
      pushd ${BUILD_DIR}
-     cmake .. -DORT_HOME=${ORT_HOME} -DOpenCV_DIR=${OPENCV_STATIC_DIR}/lib/cmake/opencv4 -DCMAKE_C_COMPILER=${CROSS_TOOL}gcc -DCMAKE_CXX_COMPILER=${CROSS_TOOL}g++ -DCMAKE_SYSROOT=${SYSROOT} -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DDEMO=ON
+     cmake .. -DORT_HOME=${ORT_HOME} -DOpenCV_DIR=${OPENCV_STATIC_DIR}/lib/cmake/opencv4 -DCMAKE_C_COMPILER=${CROSS_TOOL}gcc -DCMAKE_CXX_COMPILER=${CROSS_TOOL}g++ -DCMAKE_SYSROOT=${SYSROOT} -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DDEMO=ON -DXDG=OFF
      make install -j`nproc` VERBOSE=1
      popd
      # copy demo and data
@@ -164,9 +164,17 @@ x86_64-test-job: # This job runs in the test stage.
  script:
    - echo "Running unit tests ... (This will take several seconds.)"
    - export LD_LIBRARY_PATH=${ORT_HOME_X86}/lib:$LD_LIBRARY_PATH
-   - demo/build.x86_64/classification_demo $DATA/models/squeezenet1.1-7.onnx $DATA/labels/synset.txt $DATA/imgs/dog.jpg
+   - result=`demo/build.x86_64/classification_demo $DATA/models/squeezenet1.1-7.onnx $DATA/labels/synset.txt $DATA/imgs/dog.jpg`
+   - $([[ ${result:0-30:8} == "Pembroke" ]])
    - demo/build.x86_64/detection_demo $DATA/models/nanodet-plus-m_320.onnx $DATA/labels/coco.txt $DATA/imgs/person0.jpg result0.jpg
    - $([[ "3aeb3a152ce55a94c0c7bac303534b8d" == "$(md5sum result0.jpg | awk '{print $1}')" ]])
    - demo/build.x86_64/detection_video_demo rootfs/usr/share/ai-support/models/yolov6p5_n.q.onnx $DATA/labels/coco.txt rootfs/usr/share/ai-support/videos/test.mp4 test.avi
    - $([[ "c427e3575f4369522dabc7af46ecf0d2" == "$(md5sum test.avi | awk '{print $1}')" ]])
    - export SUPPORT_SHOW=-1
    - demo/build.x86_64/detection_stream_demo rootfs/usr/share/ai-support/models/yolov6p5_n.q.onnx $DATA/labels/coco.txt rootfs/usr/share/ai-support/videos/test.mp4 video
    - demo/build.x86_64/estimation_demo rootfs/usr/share/ai-support/models/yolov6p5_n.q.onnx $DATA/labels/coco.txt rootfs/usr/share/ai-support/models/rtmpose-t.q.onnx $DATA/imgs/person0.jpg result1.jpg
    - $([[ "dc531ec076418357e8478a0a2823285d" == "$(md5sum result1.jpg | awk '{print $1}')" ]])
    - demo/build.x86_64/tracker_stream_demo rootfs/usr/share/ai-support/models/yolov6p5_n.q.onnx $DATA/labels/coco.txt rootfs/usr/share/ai-support/models/rtmpose-t.q.onnx rootfs/usr/share/ai-support/videos/test.mp4 video
    - echo "Running x86_64 tests done!"
    <<: *only-defaults
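The updated test jobs no longer just run each demo; they capture the classifier's stdout and compare generated images against expected MD5 sums. A minimal local sketch of the same checking pattern, assuming the demos were built under demo/build.x86_64 and $DATA points at the same test data the CI uses:

```bash
#!/usr/bin/env bash
set -e

# Classification: keep stdout and look for the expected class name near the end.
result=$(demo/build.x86_64/classification_demo \
  "$DATA/models/squeezenet1.1-7.onnx" "$DATA/labels/synset.txt" "$DATA/imgs/dog.jpg")
[[ ${result:0-30:8} == "Pembroke" ]] || { echo "classification check failed"; exit 1; }

# Detection: render boxes into result0.jpg and compare against a known checksum.
demo/build.x86_64/detection_demo \
  "$DATA/models/nanodet-plus-m_320.onnx" "$DATA/labels/coco.txt" \
  "$DATA/imgs/person0.jpg" result0.jpg
[[ "3aeb3a152ce55a94c0c7bac303534b8d" == "$(md5sum result0.jpg | awk '{print $1}')" ]] \
  || { echo "detection checksum mismatch"; exit 1; }
```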
@@ -176,9 +184,12 @@ riscv64-test-job: # This job runs in the test stage.
    - build-job-riscv64
  script:
    - echo "Running unit tests ... (This will take several seconds.)"
-   - ${QEMU_CMD} demo/build.riscv64/classification_demo $DATA/models/squeezenet1.1-7.onnx $DATA/labels/synset.txt $DATA/imgs/dog.jpg
+   - result=`${QEMU_CMD} demo/build.riscv64/classification_demo $DATA/models/squeezenet1.1-7.onnx $DATA/labels/synset.txt $DATA/imgs/dog.jpg`
+   - $([[ ${result:0-30:8} == "Pembroke" ]])
    - ${QEMU_CMD} demo/build.riscv64/detection_demo $DATA/models/nanodet-plus-m_320.onnx $DATA/labels/coco.txt $DATA/imgs/person0.jpg result0.jpg
+   - $([[ "3aeb3a152ce55a94c0c7bac303534b8d" == "$(md5sum result0.jpg | awk '{print $1}')" ]])
    - ${QEMU_CMD} demo/build.riscv64/estimation_demo rootfs/usr/share/ai-support/models/yolov6p5_n.q.onnx $DATA/labels/coco.txt rootfs/usr/share/ai-support/models/rtmpose-t.q.onnx $DATA/imgs/person0.jpg result1.jpg
+   - $([[ "e2cb2b8d9e216d68ff653415f92be86c" == "$(md5sum result1.jpg | awk '{print $1}')" ]])
    - echo "Running riscv64 tests done!"
    <<: *only-defaults
@@ -186,7 +197,7 @@ package-daily-dpkg:
  stage: package
  variables:
    CI_PKG_OPTION: "--skip-py"  # "--skip-ort --skip-py"
-   CI_PKG_VERSION: "1.0.7"
+   CI_PKG_VERSION: "latest"
  dependencies:
    - build-job-x86_64
    - build-job-riscv64
@@ -239,12 +250,14 @@ upload-archive-nexus:
    - |
      pushd build.x86_64
      tar czf bianbu-ai-support.x86_64.${tag}.tar.gz bianbu-ai-support.x86_64
-     curl -k -u $NEXUS_USERNAME:$NEXUS_PASSWORD --upload-file bianbu-ai-support.x86_64.${tag}.tar.gz https://nexus.bianbu.xyz/repository/bianbu-ai/support-library/bianbu-ai-support.x86_64.${tag}.tar.gz
+     http_code=$(curl -k -u $NEXUS_USERNAME:$NEXUS_PASSWORD --upload-file bianbu-ai-support.x86_64.${tag}.tar.gz https://nexus.bianbu.xyz/repository/bianbu-ai/support-library/bianbu-ai-support.x86_64.${tag}.tar.gz -w %{http_code})
+     [[ ${http_code} == "201" ]]
      popd
    - |
      pushd build.riscv64
      tar czf bianbu-ai-support.riscv64.${tag}.tar.gz bianbu-ai-support.riscv64
-     curl -k -u $NEXUS_USERNAME:$NEXUS_PASSWORD --upload-file bianbu-ai-support.riscv64.${tag}.tar.gz https://nexus.bianbu.xyz/repository/bianbu-ai/support-library/bianbu-ai-support.riscv64.${tag}.tar.gz
+     http_code=$(curl -k -u $NEXUS_USERNAME:$NEXUS_PASSWORD --upload-file bianbu-ai-support.riscv64.${tag}.tar.gz https://nexus.bianbu.xyz/repository/bianbu-ai/support-library/bianbu-ai-support.riscv64.${tag}.tar.gz -w %{http_code})
+     [[ ${http_code} == "201" ]]
      popd
    <<: *rule-upload
@@ -4,18 +4,31 @@ project(bianbuai)
add_subdirectory(${CMAKE_SOURCE_DIR}/src)

option(DEMO "option for Demo" OFF)
option(XDG "option for XDG autostart support" ON)

if (DEMO)
  set(BIANBUAI_HOME ${CMAKE_SOURCE_DIR}) # useless but necessary
  add_subdirectory(${CMAKE_SOURCE_DIR}/demo)
  install(DIRECTORY ${CMAKE_SOURCE_DIR}/rootfs/usr/bin DESTINATION .)
  install(DIRECTORY ${CMAKE_SOURCE_DIR}/rootfs/usr/share DESTINATION .)
endif()

if (DEMO OR XDG)
  # add resources for smoke test
  install(DIRECTORY ${CMAKE_SOURCE_DIR}/rootfs/usr/share/ai-support DESTINATION share)
endif()

if (XDG)
  # add resources for xgd autostart
  install(PROGRAMS ${CMAKE_SOURCE_DIR}/rootfs/usr/bin/bianbu-ai-autotools DESTINATION bin)
  install(DIRECTORY ${CMAKE_SOURCE_DIR}/rootfs/usr/share/applications DESTINATION share)
  install(DIRECTORY ${CMAKE_SOURCE_DIR}/rootfs/usr/share/icons DESTINATION share)
  install(DIRECTORY ${CMAKE_SOURCE_DIR}/rootfs/etc DESTINATION ..)
endif()

# always install demo project with test data
#install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo DESTINATION .)
#install(DIRECTORY ${CMAKE_SOURCE_DIR}/data DESTINATION demo)

option(TEST "option for Test" ON)
if (TEST)
-  add_subdirectory(${CMAKE_SOURCE_DIR}/src/tests)
+  add_subdirectory(${CMAKE_SOURCE_DIR}/tests)
endif()
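The top-level build now exposes three switches (DEMO, XDG, TEST). A minimal configure-and-build sketch using them; the install prefix below is a placeholder, and /usr for ORT_HOME mirrors what the debian packaging passes, not a required value:

```bash
mkdir -p build && cd build
# ORT_HOME must point at an onnxruntime installation.
cmake .. -DORT_HOME=/usr -DDEMO=ON -DXDG=OFF -DTEST=ON \
      -DCMAKE_INSTALL_PREFIX="$PWD/install"
make install -j"$(nproc)"
```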
README.md (42 changed lines)
@@ -40,12 +40,40 @@ cmake .. -DORT_HOME=${ORT_HOME} -DOpenCV_DIR=${OPENCV_DIR} \
### run demo

```bash
-./classification_demo <modelFilepath> <labelFilepath> <imageFilepath>
-./detection_stream_demo <configFilepath> <input> <inputType>
-./detection_demo <modelFilepath> <labelFilepath> <imageFilepath> <saveImgpath>
-./detection_demo <configFilepath> <imageFilepath> <saveImgpath>
-./detection_video_demo <configFilepath> <videoFilepath> <saveFilepath>
-./estimation_demo <detConfigFilepath> <poseConfigFilepath> <imageFilepath> <saveImgpath>
-./tracker_stream_demo <detConfigFilepath> <poseConfigFilepath> <input> <inputType>
+./classification_demo <model_file_path> <label_file_path> <image_file_path>
+or
+./classification_demo <config_file_path> <image_file_path>
+./detection_demo <model_file_path> <label_file_path> <image_file_path> <save_img_path>
+or
+./detection_demo <config_file_path> <image_file_path> <save_img_path>
+./detection_stream_demo <model_file_path> <label_file_path> <input> <input_type> (video or camera_id) option(-h <resize_height>) option(-w <resize_width>)
+or
+./detection_stream_demo <config_file_path> <input> <input_type> (video or camera_id) option(-h <resize_height>) option(-w <resize_width>)
+./detection_video_demo <model_file_path> <label_file_path> <video_file_path> <dst_file_path> (end with .avi)
+or
+./detection_video_demo <config_file_path> <video_file_path> <dst_file_path> (end with .avi)
+./estimation_demo <det_model_file_path> <det_label_file_path> <pose_model_file_path> <image_file_path> <save_img_path>
+or
+./estimation_demo <det_config_file_path> <pose_config_file_path> <image_file_path> <save_img_path>
+./tracker_stream_demo <det_model_file_path> <det_label_file_path> <pose_model_file_path> <input> <input_type> (video or cameraId) option(-h <resize_height>) option(-w <resize_width>)
+or
+./tracker_stream_demo <det_config_file_path> <pose_config_file_path> <input> <input_type> (video or cameraId) option(-h <resize_height>) option(-w <resize_width>)
```
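As a concrete illustration of the two invocation styles documented above, here is a hedged example for the detection demo; the file names are placeholders (yolov6.json is the config file referenced at the end of this README), so adjust paths to your installation:

```bash
# Style 1: explicit model and label files
./detection_demo yolov6p5_n.q.onnx coco.txt person0.jpg result.jpg

# Style 2: a single JSON config that takes the place of the model and label arguments
./detection_demo yolov6.json person0.jpg result.jpg
```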
### Using environment variables to control runtime behavior

| Environment variable name        | Remarks                                                                                           |
|----------------------------------|:-------------------------------------------------------------------------------------------------:|
| SUPPORT_SHOW (stream demo)       | -1 means do not display the preview window                                                         |
| SUPPORT_SHOWFPS (stream demo)    | if set to any non-empty value, FPS is displayed                                                     |
| SUPPORT_PROFILING_PROJECTS       | path of the generated profiling file                                                                |
| SUPPORT_LOG_LEVEL                | log level, range 0-4                                                                                |
| SUPPORT_GRAPH_OPTIMIZATION_LEVEL | graph optimization level (ort_disable_all, ort_enable_basic, ort_enable_extended, ort_enable_all)   |
| SUPPORT_OPT_MODEL_PATH           | path of the optimized model                                                                         |
| SUPPORT_DISABLE_SPACEMIT_EP      | 1 means disable the spacemit-ep execution provider                                                  |
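A short sketch of driving the stream demo headlessly with these variables; the model, label, and video file names are placeholders:

```bash
export SUPPORT_SHOW=-1                                   # do not open a preview window
export SUPPORT_LOG_LEVEL=2                               # within the documented 0-4 range
export SUPPORT_GRAPH_OPTIMIZATION_LEVEL=ort_enable_all   # let onnxruntime apply all graph optimizations
./detection_stream_demo yolov6p5_n.q.onnx coco.txt test.mp4 video
```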
### Formats of model, label, and configuration files

Model file format: [ONNX (Open Neural Network Exchange)](https://github.com/onnx/onnx)

Label file format: plain text; [here](https://github.com/microsoft/onnxruntime-inference-examples/blob/main/c_cxx/OpenVINO_EP/Linux/squeezenet_classification/synset.txt) is a recommended example

Configuration file format: [JSON](https://github.com/nlohmann/json); a recommended configuration file is [here](https://gitlab.dc.com:8443/bianbu/ai/support/-/blob/main/rootfs/usr/share/ai-support/models/yolov6.json)
@@ -1 +1 @@
-1.0.8
+1.0.12
@@ -65,7 +65,9 @@ EOF
  cp -rd ${INSTALL_LOCAL}/lib/3rdparty/onnxruntime/* ${PKG_DIR}/usr/
fi
cp -rdf ${INSTALL_LOCAL}/* ${PKG_DIR}/usr/
if [[ -d ${INSTALL_LOCAL}/../etc ]]; then
  cp -rdf ${INSTALL_LOCAL}/../etc ${PKG_DIR}/
fi
# post process
rm -rdf ${PKG_DIR}/usr/lib/3rdparty ${PKG_DIR}/usr/demo
debian/README.Debian → debian/README (2 changed lines)
@@ -5,4 +5,4 @@ this bianbu-ai-support Debian package.
 (Automatically generated by debmake Version 4.3.2)

- -- root <> Thu, 04 Jan 2024 20:25:23 +0800
+ -- Hongjie Qin <hongjie.qin@spacemit.com> Thu, 04 Jan 2024 20:25:23 +0800
debian/changelog (30 changed lines)
@@ -1,3 +1,31 @@
bianbu-ai-support (1.0.12) mantic-spacemit; urgency=medium

  [ bianbu-ci ]
  * Sync change from bianbu-23.10/1.0.12

 -- qinhongjie <hongjie.qin@spacemit.com> Thu, 11 Apr 2024 19:01:48 +0800

bianbu-ai-support (1.0.11) mantic-spacemit; urgency=medium

  [ bianbu-ci ]
  * Sync change from bianbu-23.10/1.0.11

 -- qinhongjie <hongjie.qin@spacemit.com> Tue, 19 Mar 2024 17:13:48 +0800

bianbu-ai-support (1.0.10) mantic-spacemit; urgency=medium

  [ bianbu-ci ]
  * Sync change from bianbu-23.10/1.0.10

 -- qinhongjie <hongjie.qin@spacemit.com> Mon, 11 Mar 2024 20:59:48 +0800

bianbu-ai-support (1.0.9) mantic-spacemit; urgency=medium

  [ bianbu-ci ]
  * Sync change from bianbu-23.10/1.0.9

 -- qinhongjie <hongjie.qin@spacemit.com> Fri, 8 Mar 2024 19:59:48 +0800

bianbu-ai-support (1.0.8) mantic-spacemit; urgency=medium

  [ bianbu-ci ]

@@ -51,5 +79,5 @@ bianbu-ai-support (1.0.1) mantic-spacemit; urgency=medium

  * Initial for bianbu-23.10

- -- root <root@SW-Station> Thu, 04 Jan 2024 20:25:23 +0800
+ -- qinhongjie <hongjie.qin@spacemit.com> Thu, 04 Jan 2024 20:25:23 +0800
debian/control (3 changed lines)
@@ -1,5 +1,5 @@
 Source: bianbu-ai-support
-Section: Utils
+Section: utils
 Priority: optional
 Maintainer: bianbu-ai-support <bianbu-ai-support@spacemit.com>
 Build-Depends: cmake, debhelper-compat (= 12), onnxruntime, libopencv-dev
@@ -8,7 +8,6 @@ Homepage: https://gitlab.dc.com:8443/bianbu/ai/support

 Package: bianbu-ai-support
 Architecture: any
-Multi-Arch: foreign
 Depends: ${misc:Depends}, ${shlibs:Depends}, onnxruntime, libopencv-dev
 Description: auto-generated package by debmake
  This Debian binary package was auto-generated by the
debian/postinst (162 changed lines)
@ -3,62 +3,142 @@
|
|||
set -e
|
||||
#set -x
|
||||
|
||||
export XDG_DATA_HOME=${XDG_DATA_HOME:-/usr/share/}
|
||||
|
||||
function copy_install_to_desktop() {
|
||||
if [ $# -ne 3 ]; then
|
||||
return;
|
||||
fi
|
||||
local curFileName=$1
|
||||
local aiDesktopName=$2
|
||||
local curUserName=$3
|
||||
if [ -e "/usr/share/applications/${aiDesktopName}" ]; then
|
||||
cp -f /usr/share/applications/${aiDesktopName} "${curFileName}/${gDesktopName}/${wpsDesktopName}"
|
||||
if test $? -eq 0; then
|
||||
chmod +x "${curFileName}/${gDesktopName}/${aiDesktopName}"
|
||||
chown ${curUserName} "${curFileName}/${gDesktopName}/${aiDesktopName}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
aiDesktop=(
|
||||
"object-detection.desktop"
|
||||
#"hand-tracker.desktop"
|
||||
"pose-tracker.desktop"
|
||||
)
|
||||
function ai_config_desktop() {
|
||||
|
||||
function update_desktop_name() {
|
||||
if [ $# -ne 1 ]; then
|
||||
return;
|
||||
fi
|
||||
local curFileName=$1
|
||||
|
||||
if [ -f "${curFileName}/.config/user-dirs.dirs" ]; then
|
||||
if [ ! $HOME ]; then HOME=${curFileName}; fi
|
||||
# Import user dir config
|
||||
source "${curFileName}/.config/user-dirs.dirs"
|
||||
if [ ! -d "${XDG_DESKTOP_DIR}" ]; then
|
||||
mkdir -p "${XDG_DESKTOP_DIR}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
# Replace(Remove) "${HOME}/" by "" in ${XDG_DESKTOP_DIR} with greedy mode
|
||||
gDesktopName="${XDG_DESKTOP_DIR//${HOME}\//}"
|
||||
else
|
||||
if [ -d "${curFileName}/桌面" ]; then
|
||||
gDesktopName="桌面"
|
||||
elif [ -d "${curFileName}/Desktop" ]; then
|
||||
gDesktopName="Desktop"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function copy_install_to_desktop() {
|
||||
if [ $# -ne 5 ]; then
|
||||
return;
|
||||
fi
|
||||
local curFileName=$1
|
||||
local aiDesktopName=$2
|
||||
local curUserName=$3
|
||||
local initSetup=$4
|
||||
local action=$5
|
||||
if [ -e "${APP_DATA}/${aiDesktopName}" ]; then
|
||||
cp -f ${APP_DATA}/${aiDesktopName} "${curFileName}/${gDesktopName}/"
|
||||
if test $? -eq 0; then
|
||||
chmod +x "${curFileName}/${gDesktopName}/${aiDesktopName}"
|
||||
chown ${curUserName} "${curFileName}/${gDesktopName}/${aiDesktopName}"
|
||||
# update init setup info
|
||||
echo ${curFileName}/${gDesktopName}/${aiDesktopName} "postinst ${action}" $(date) >> ${initSetup}
|
||||
chown ${curUserName} ${initSetup}
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function config_desktop_shortcut() {
|
||||
if [ $# -ne 2 ]; then
|
||||
return;
|
||||
fi
|
||||
local curFileName=$1
|
||||
local curUserName=$2
|
||||
local action="copy" # "update"
|
||||
# prepare config dir for application
|
||||
local cfgAppDir=${curFileName}/.config/${APP_NAME}/applications
|
||||
mkdir -p ${cfgAppDir}
|
||||
chown -R ${curUserName} $(dirname ${cfgAppDir})
|
||||
# config desktop shortcut
|
||||
for desktop in "${aiDesktop[@]}"; do
|
||||
local initSetup="${curFileName}/.config/${APP_NAME}/applications/${desktop%.desktop}-initial-setup-done"
|
||||
if [ -f ${initSetup} ] && [ "$(cat ${initSetup} | grep ${gDesktopName})" ]; then
|
||||
# i.e. desktop is already configured(initial-setup-done)
|
||||
#continue
|
||||
if [ -e ${curFileName}/${gDesktopName}/${desktop} ]; then
|
||||
if cmp -s "${APP_DATA}/${desktop}" "${curFileName}/${gDesktopName}/${desktop}"; then
|
||||
# desktop exist and exactly same
|
||||
continue
|
||||
fi
|
||||
# i.e. desktop exist but need to be updated
|
||||
action="update"
|
||||
else
|
||||
# TODO: distinguish shortcut removed by user or `postrm remove`
|
||||
: #continue
|
||||
fi
|
||||
fi
|
||||
copy_install_to_desktop ${curFileName} ${desktop} ${curUserName} ${initSetup} ${action}
|
||||
done
|
||||
}
|
||||
|
||||
function config_desktop() {
|
||||
gDesktopName="桌面"
|
||||
if [ -d "/root/桌面" ]; then
|
||||
gDesktopName="桌面"
|
||||
elif [ -d "/root/Desktop" ]; then
|
||||
gDesktopName="Desktop"
|
||||
fi
|
||||
if [ -d "/root/${gDesktopName}" ]; then
|
||||
config_desktop_shortcut "/root" "root"
|
||||
fi
|
||||
|
||||
for FILENAME in /home/*; do
|
||||
|
||||
if [ -f "${FILENAME}/.config/user-dirs.dirs" ]; then
|
||||
if [ ! $HOME ]; then HOME=${FILENAME}; fi
|
||||
source "${FILENAME}/.config/user-dirs.dirs"
|
||||
if [ ! -d "${XDG_DESKTOP_DIR}" ]; then
|
||||
mkdir -p "${XDG_DESKTOP_DIR}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
gDesktopName="${XDG_DESKTOP_DIR//${HOME}\//}"
|
||||
else
|
||||
if [ -d "${FILENAME}/桌面" ]; then
|
||||
gDesktopName="桌面"
|
||||
elif [ -d "${FILENAME}/Desktop" ]; then
|
||||
gDesktopName="Desktop"
|
||||
fi
|
||||
fi
|
||||
|
||||
update_desktop_name ${FILENAME}
|
||||
if [ -d "${FILENAME}/${gDesktopName}" ]; then
|
||||
local curUserName=$(echo ${FILENAME} | awk '{print substr($FILENAME, 7, 32)}')
|
||||
for desktop in "${aiDesktop[@]}"; do
|
||||
rm -rf ${FILENAME}/${gDesktopName}/${desktop}
|
||||
copy_install_to_desktop ${FILENAME} ${desktop} ${curUserName}
|
||||
done
|
||||
config_desktop_shortcut ${FILENAME} "$(echo ${FILENAME} | awk '{print substr($FILENAME, 7, 32)}')"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
ai_config_desktop
|
||||
## ------------------------- ##
|
||||
|
||||
function postinst_init() {
|
||||
export APP_NAME=bianbu-ai-support
|
||||
export APP_DATA=/usr/share/applications
|
||||
}
|
||||
|
||||
function postinst_configure() {
|
||||
config_desktop
|
||||
}
|
||||
|
||||
function postinst_triggered() {
|
||||
for triggername in $1; do
|
||||
case "$triggername" in
|
||||
*)
|
||||
echo "unhandled/unknown trigger!" $triggername
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
function postinst_main() {
|
||||
if [ $# -eq 0 ]; then
|
||||
return;
|
||||
fi
|
||||
|
||||
postinst_init
|
||||
case $1 in
|
||||
configure ) shift; postinst_configure $@;;
|
||||
triggered ) shift; postinst_triggered $@;;
|
||||
esac
|
||||
}
|
||||
|
||||
args="$@"
|
||||
postinst_main $@
|
||||
|
|
debian/postrm (84 changed lines)
@ -1,35 +1,83 @@
|
|||
#!/bin/bash
|
||||
|
||||
ai_xdg_dirs=/usr/share
|
||||
ai_xdg_dir=
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
function ai_demo_init() {
|
||||
set -e
|
||||
}
|
||||
aiDesktop=(
|
||||
"object-detection.desktop"
|
||||
#"hand-tracker.desktop"
|
||||
"pose-tracker.desktop"
|
||||
)
|
||||
|
||||
function ai_demo_remove() {
|
||||
if [ -x /usr/bin/update-mime-database ] ; then
|
||||
: #update-mime-database "${ai_xdg_dir}/mime"
|
||||
function remove_desktop_shortcut() {
|
||||
if [ $# -ne 1 ]; then
|
||||
return;
|
||||
fi
|
||||
local curFileName=$1
|
||||
|
||||
if [ -x /usr/bin/update-desktop-database ] ; then
|
||||
: #update-desktop-database -q "${ai_xdg_dir}/applications"
|
||||
for desktop in "${aiDesktop[@]}"; do
|
||||
local initSetup="${curFileName}/.config/${APP_NAME}/applications/${desktop%.desktop}-initial-setup-done"
|
||||
if [ ! -f ${initSetup} ]; then
|
||||
echo "[WARN] ${APP_NAME}: setup file for ${desktop} not found!"
|
||||
else
|
||||
# remove all desktop shortcuts of different languages
|
||||
while IFS= read -r line; do
|
||||
# line format: </path/to/desktop/shortcut> [date]
|
||||
rm -rf $(echo $line | awk '{print $1}')
|
||||
done < ${initSetup}
|
||||
fi
|
||||
|
||||
for HOMEDIR in /home/*; do
|
||||
:
|
||||
done
|
||||
}
|
||||
|
||||
function ai_demo_main() {
|
||||
function remove_config() {
|
||||
rm -rf /root/.config/${APP_NAME}
|
||||
for FILENAME in /home/*; do
|
||||
# remove user config of application
|
||||
rm -rf ${FILENAME}/.config/${APP_NAME}
|
||||
done
|
||||
}
|
||||
|
||||
## ------------------------- ##
|
||||
|
||||
function postrm_init() {
|
||||
export APP_NAME=bianbu-ai-support
|
||||
export APP_DATA=/usr/share/applications
|
||||
}
|
||||
|
||||
function postrm_upgrade() {
|
||||
if [ -x /usr/bin/update-mime-database ] ; then
|
||||
: #update-mime-database "/usr/share/mime"
|
||||
fi
|
||||
|
||||
if [ -x /usr/bin/update-desktop-database ] ; then
|
||||
: #update-desktop-database -q "/usr/share/applications"
|
||||
fi
|
||||
}
|
||||
|
||||
function postrm_remove() {
|
||||
# remove desktop shortcut
|
||||
remove_desktop_shortcut "/root"
|
||||
for FILENAME in /home/*; do
|
||||
remove_desktop_shortcut ${FILENAME}
|
||||
done
|
||||
|
||||
postrm_upgrade
|
||||
}
|
||||
|
||||
function postrm_purge() {
|
||||
remove_config
|
||||
}
|
||||
|
||||
function postrm_main() {
|
||||
if [ $# -eq 0 ] ; then
|
||||
return;
|
||||
fi
|
||||
|
||||
ai_demo_init
|
||||
postrm_init
|
||||
case $1 in
|
||||
remove | upgrade ) shift; ai_demo_remove $@;;
|
||||
purge ) ;;
|
||||
remove ) shift; postrm_remove $@;;
|
||||
upgrade ) shift; postrm_upgrade $@;;
|
||||
purge ) shift; postrm_purge $@;;
|
||||
abort-install ) ;;
|
||||
abort-upgrade ) ;;
|
||||
failed-upgrade ) ;;
|
||||
|
@ -37,4 +85,4 @@ function ai_demo_main() {
|
|||
}
|
||||
|
||||
args="$@"
|
||||
ai_demo_main $@
|
||||
postrm_main $@
|
||||
|
|
debian/preinst (31 changed lines)
@@ -1 +1,32 @@
#!/bin/bash

set -e
#set -x

function preinst_init() {
  export APP_NAME=bianbu-ai-support
  export APP_DATA=/usr/share/applications
}

function preinst_install() {
  :
}

function preinst_upgrade() {
  :
}

function preinst_main() {
  if [ $# -eq 0 ]; then
    return;
  fi

  preinst_init
  case $1 in
    install ) shift; preinst_install $@;;
    upgrade ) shift; preinst_upgrade $@;;
  esac
}

args="$@"
preinst_main $@
debian/prerm (64 changed lines)
@ -1,65 +1,33 @@
|
|||
#!/bin/bash
|
||||
|
||||
export XDG_DATA_HOME=${XDG_DATA_HOME:-/usr/share/}
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
function ai_prerm_init() {
|
||||
set -e
|
||||
function prerm_init() {
|
||||
export APP_NAME=bianbu-ai-support
|
||||
export APP_DATA=/usr/share/applications
|
||||
}
|
||||
|
||||
aiDesktop=(
|
||||
"object-detection.desktop"
|
||||
"hand-tracker.desktop"
|
||||
"pose-tracker.desktop"
|
||||
)
|
||||
function ai_prerm_uninstall_desktop() {
|
||||
gDesktopName="桌面"
|
||||
if [ -d "/root/桌面" ]; then
|
||||
gDesktopName="桌面"
|
||||
elif [ -d "/root/Desktop" ]; then
|
||||
gDesktopName="Desktop"
|
||||
fi
|
||||
|
||||
#desktop
|
||||
if [ -d "/root/${gDesktopName}" ]; then
|
||||
for desktop in "${aiDesktop[@]}"; do
|
||||
rm -rf /root/${gDesktopName}/${desktop}
|
||||
done
|
||||
fi
|
||||
|
||||
for FILENAME in /home/*; do
|
||||
if [ -f "${FILENAME}/.config/user-dirs.dirs" ]; then
|
||||
source "${FILENAME}/.config/user-dirs.dirs"
|
||||
if [ ! -d "${XDG_DESKTOP_DIR}" ]; then
|
||||
mkdir -p "${XDG_DESKTOP_DIR}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
gDesktopName="${XDG_DESKTOP_DIR//${HOME}\//}"
|
||||
else
|
||||
if [ -d "${FILENAME}/桌面" ]; then
|
||||
gDesktopName="桌面"
|
||||
elif [ -d "${FILENAME}/Desktop" ]; then
|
||||
gDesktopName="Desktop"
|
||||
fi
|
||||
fi
|
||||
|
||||
for desktop in "${aiDesktop[@]}"; do
|
||||
rm -rf "${FILENAME}/${gDesktopName}/${desktop}"
|
||||
done
|
||||
rm -rf ${FILENAME}/.config/bianbu-ai-support
|
||||
done
|
||||
function prerm_remove() {
|
||||
:
|
||||
}
|
||||
|
||||
function ai_prerm_main() {
|
||||
function prerm_upgrade() {
|
||||
:
|
||||
}
|
||||
|
||||
function prerm_main() {
|
||||
if [ $# -eq 0 ]; then
|
||||
return;
|
||||
fi
|
||||
|
||||
ai_prerm_init
|
||||
prerm_init
|
||||
case $1 in
|
||||
remove ) shift; ai_prerm_uninstall_desktop $@;;
|
||||
upgrade ) shift;;
|
||||
remove ) shift; prerm_remove $@;;
|
||||
upgrade ) shift; prerm_upgrade $@;;
|
||||
failed-upgrade ) ;;
|
||||
esac
|
||||
}
|
||||
|
||||
args="$@"
|
||||
ai_prerm_main $@
|
||||
prerm_main $@
|
||||
|
|
debian/rules (2 changed lines)
@@ -9,7 +9,7 @@
	dh $@

override_dh_auto_configure:
-	dh_auto_configure -- -DORT_HOME=/usr -DDEMO=ON
+	dh_auto_configure -- -DORT_HOME=/usr -DDEMO=ON -DXDG=ON

override_dh_shlibdeps:
	dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info
|
|
@@ -4,11 +4,14 @@
 # Brief: Build demos and run smoke test.
 #

-set -e
+set -e #u
 #set -x

+DEMO_DIR=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+
 # Note: update the following settings if necessary
-_NAME=bianbu # cuspace
-SDK=$(dirname $(which ${_NAME})) #$(dirname ${BASH_SOURCE[0]})
+_NAME=cuspace
+SDK=$(dirname $(which ${_NAME}))

 function config_native() {
   BIANBUAI_HOME=$SDK/bianbu-ai-support
@@ -34,7 +37,7 @@ fi

 function build() {
   mkdir build && pushd build
-  cmake .. -DBIANBUAI_HOME=${BIANBUAI_HOME} -DORT_HOME=${ORT_HOME} -DOpenCV_DIR=${OPENCV_DIR} -DCMAKE_C_COMPILER=${CROSS_TOOL}gcc -DCMAKE_CXX_COMPILER=${CROSS_TOOL}g++ -DCMAKE_SYSROOT=${SYSROOT}
+  cmake ${DEMO_DIR} -DBIANBUAI_HOME=${BIANBUAI_HOME} -DORT_HOME=${ORT_HOME} -DOpenCV_DIR=${OPENCV_DIR} -DCMAKE_C_COMPILER=${CROSS_TOOL}gcc -DCMAKE_CXX_COMPILER=${CROSS_TOOL}g++ -DCMAKE_SYSROOT=${SYSROOT}
   make -j4
   popd
   echo "[INFO] Building demos done."
@ -15,25 +15,30 @@ class DataLoader {
|
|||
public:
|
||||
DataLoader(const int& resize_height, const int& resize_width) {
|
||||
enable = true;
|
||||
updated = false;
|
||||
resize_height_ = resize_height;
|
||||
resize_width_ = resize_width;
|
||||
preview_fps_ = 0;
|
||||
detection_fps_ = 0;
|
||||
}
|
||||
~DataLoader() {}
|
||||
bool ifenable() { return enable; }
|
||||
void set_disable() { enable = false; }
|
||||
void set_preview_fps(int preview_fps) { preview_fps_ = preview_fps; }
|
||||
void set_detection_fps(int detection_fps) { detection_fps_ = detection_fps; }
|
||||
int get_preview_fps() { return preview_fps_; }
|
||||
int get_detection_fps() { return detection_fps_; }
|
||||
int get_resize_height() { return resize_height_; }
|
||||
int get_resize_width() { return resize_width_; }
|
||||
virtual cv::Mat fetch_frame() = 0;
|
||||
virtual cv::Mat peek_frame() = 0;
|
||||
bool isUpdated() { return updated; }
|
||||
void setUpdate() { updated = true; }
|
||||
void setNoUpdate() { updated = false; }
|
||||
bool ifEnable() { return enable; }
|
||||
void setDisable() { enable = false; }
|
||||
void setPreviewFps(int preview_fps) { preview_fps_ = preview_fps; }
|
||||
void setDetectionFps(int detection_fps) { detection_fps_ = detection_fps; }
|
||||
int getPreviewFps() { return preview_fps_; }
|
||||
int getDetectionFps() { return detection_fps_; }
|
||||
int getResizeHeight() { return resize_height_; }
|
||||
int getResizeWidth() { return resize_width_; }
|
||||
virtual cv::Mat fetchFrame() = 0;
|
||||
virtual cv::Mat peekFrame() = 0;
|
||||
|
||||
private:
|
||||
bool enable;
|
||||
bool updated;
|
||||
int resize_height_;
|
||||
int resize_width_;
|
||||
int preview_fps_;
|
||||
|
@ -64,12 +69,12 @@ class ExclusiveDataLoader : public DataLoader {
|
|||
return -1;
|
||||
}
|
||||
}
|
||||
cv::Mat fetch_frame() {
|
||||
cv::Mat fetchFrame() {
|
||||
cv::Mat frame;
|
||||
capture_.read(frame);
|
||||
return frame;
|
||||
}
|
||||
cv::Mat peek_frame() { return fetch_frame(); }
|
||||
cv::Mat peekFrame() { return fetchFrame(); }
|
||||
|
||||
private:
|
||||
cv::VideoCapture capture_;
|
||||
|
@ -82,7 +87,7 @@ class ExclusiveDataLoader : public DataLoader {
|
|||
#include <sys/ioctl.h>
|
||||
#include <unistd.h> //for close
|
||||
|
||||
static bool is_valid_camera(const std::string& path) {
|
||||
static bool isValidCamera(const std::string& path) {
|
||||
int fd = open(path.c_str(), O_RDWR);
|
||||
if (fd == -1) {
|
||||
return false;
|
||||
|
@ -119,21 +124,12 @@ class SharedDataLoader : public DataLoader {
|
|||
return init(std::stoi(path));
|
||||
}
|
||||
capture_.open(path);
|
||||
if (capture_.isOpened()) {
|
||||
int width = 1280;
|
||||
int height = 720;
|
||||
capture_.set(cv::CAP_PROP_FRAME_WIDTH, width);
|
||||
capture_.set(cv::CAP_PROP_FRAME_HEIGHT, height);
|
||||
return 0;
|
||||
} else {
|
||||
std::cout << "Open video capture failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
return setCapture();
|
||||
}
|
||||
|
||||
int init(int camera_id) {
|
||||
#ifndef _WIN32
|
||||
capture_.open(camera_id);
|
||||
capture_.open(camera_id, cv::CAP_V4L2);
|
||||
if (!capture_.isOpened()) {
|
||||
std::cout
|
||||
<< "Open camera capture failed, try to figure out right camera id"
|
||||
|
@ -141,8 +137,8 @@ class SharedDataLoader : public DataLoader {
|
|||
std::string path = "/dev/video";
|
||||
for (int i = 0; i <= 100; ++i) {
|
||||
std::string device_path = path + std::to_string(i);
|
||||
if (is_valid_camera(device_path)) {
|
||||
capture_.open(i);
|
||||
if (isValidCamera(device_path)) {
|
||||
capture_.open(i, cv::CAP_V4L2);
|
||||
if (capture_.isOpened()) {
|
||||
break;
|
||||
}
|
||||
|
@ -152,30 +148,24 @@ class SharedDataLoader : public DataLoader {
|
|||
#else
|
||||
capture_.open(camera_id);
|
||||
#endif
|
||||
if (capture_.isOpened()) {
|
||||
int width = 640;
|
||||
int height = 480;
|
||||
capture_.set(cv::CAP_PROP_FRAME_WIDTH, width);
|
||||
capture_.set(cv::CAP_PROP_FRAME_HEIGHT, height);
|
||||
return 0;
|
||||
} else {
|
||||
std::cout << "Open camera capture failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
return setCapture();
|
||||
}
|
||||
|
||||
cv::Mat fetch_frame() {
|
||||
cv::Mat fetchFrame() {
|
||||
cv::Mat frame, temp;
|
||||
capture_.read(frame);
|
||||
if (!frame.empty()) {
|
||||
resize_unscale(frame, temp, get_resize_height(), get_resize_width());
|
||||
resizeUnscale(frame, temp, getResizeHeight(), getResizeWidth());
|
||||
setUpdate();
|
||||
} else {
|
||||
setNoUpdate();
|
||||
}
|
||||
frame_mutex_.lock();
|
||||
frame_ = temp.clone();
|
||||
frame_mutex_.unlock();
|
||||
return frame;
|
||||
}
|
||||
cv::Mat peek_frame() {
|
||||
cv::Mat peekFrame() {
|
||||
cv::Mat frame;
|
||||
frame_mutex_.lock();
|
||||
frame = frame_.clone(); // 深拷贝
|
||||
|
@ -183,6 +173,40 @@ class SharedDataLoader : public DataLoader {
|
|||
return frame;
|
||||
}
|
||||
|
||||
int setCapture() {
|
||||
if (capture_.isOpened()) {
|
||||
int width = 640; // Because of k1x performance reasons, the resolution is
|
||||
// set to 640*480
|
||||
if (capture_.get(cv::CAP_PROP_FRAME_WIDTH) > 640) {
|
||||
if (capture_.set(cv::CAP_PROP_FRAME_WIDTH, width) &&
|
||||
capture_.get(cv::CAP_PROP_FRAME_WIDTH) == width) {
|
||||
std::cout << "The video capture width is set to " << width
|
||||
<< " successfully" << std::endl;
|
||||
} else {
|
||||
std::cout << "[ WARNING ] Video capture width set to " << width
|
||||
<< " failed, the resolution is "
|
||||
<< capture_.get(cv::CAP_PROP_FRAME_WIDTH) << "*"
|
||||
<< capture_.get(cv::CAP_PROP_FRAME_HEIGHT) << std::endl;
|
||||
}
|
||||
}
|
||||
if (capture_.set(cv::CAP_PROP_FOURCC,
|
||||
cv::VideoWriter::fourcc('M', 'J', 'P', 'G')) &&
|
||||
capture_.get(cv::CAP_PROP_FOURCC) ==
|
||||
cv::VideoWriter::fourcc('M', 'J', 'P', 'G')) {
|
||||
std::cout << "Video capture format has been set to MJPG successfully"
|
||||
<< std::endl;
|
||||
} else {
|
||||
std::cout << "[ WARNING ] Video capture format set to MJPG failed, "
|
||||
"using default format"
|
||||
<< std::endl;
|
||||
}
|
||||
return 0;
|
||||
} else {
|
||||
std::cout << "Open camera capture failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
std::shared_ptr<cv::Mat> frame;
|
||||
cv::Mat frame_;
|
||||
|
|
|
@ -1,42 +1,35 @@
|
|||
#include <iostream>
|
||||
|
||||
#include "task/vision/image_classification_task.h"
|
||||
#include "utils/check_utils.h"
|
||||
#include "utils/time.h"
|
||||
#include "utils/utils.h"
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
std::string filePath, labelFilepath, imageFilepath;
|
||||
if (argc == 4) {
|
||||
filePath = argv[1];
|
||||
labelFilepath = argv[2];
|
||||
imageFilepath = argv[3];
|
||||
} else if (argc > 4) {
|
||||
filePath = argv[1];
|
||||
labelFilepath = argv[2];
|
||||
imageFilepath = argv[3];
|
||||
if (!checkImageFileExtension(imageFilepath)) {
|
||||
std::cout << "[ ERROR ] The ImageFilepath is not correct. Make sure you "
|
||||
"are setting the path to an imgae file (.jpg/.jpeg/.png)"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (!exists_check(imageFilepath)) {
|
||||
std::cout << "[ ERROR ] The Image File does not exist. Make sure you are "
|
||||
"setting the correct path to the file"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
ImageClassificationOption option;
|
||||
std::string config_file_path, image_file_path;
|
||||
std::unique_ptr<ImageClassificationTask> imageclassificationtask;
|
||||
if (argc == 3) {
|
||||
config_file_path = argv[1];
|
||||
image_file_path = argv[2];
|
||||
imageclassificationtask = std::unique_ptr<ImageClassificationTask>(
|
||||
new ImageClassificationTask(config_file_path));
|
||||
} else if (argc == 4) {
|
||||
option.model_path = argv[1];
|
||||
option.label_path = argv[2];
|
||||
image_file_path = argv[3];
|
||||
imageclassificationtask = std::unique_ptr<ImageClassificationTask>(
|
||||
new ImageClassificationTask(option));
|
||||
} else {
|
||||
std::cout << "run with " << argv[0]
|
||||
<< " <modelFilepath> <labelFilepath> <imageFilepath>"
|
||||
std::cout << "Please run with " << argv[0]
|
||||
<< " <model_file_path> <label_file_path> <image_file_path> or "
|
||||
<< argv[0] << " <config_file_path> <image_file_path>"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
cv::Mat imgRaw;
|
||||
std::unique_ptr<imageClassificationTask> imageclassification =
|
||||
std::unique_ptr<imageClassificationTask>(
|
||||
new imageClassificationTask(filePath, labelFilepath));
|
||||
if (imageclassificationtask->getInitFlag() != 0) {
|
||||
return -1;
|
||||
}
|
||||
cv::Mat img_raw;
|
||||
#ifdef DEBUG
|
||||
std::cout << "." << std::endl;
|
||||
#endif
|
||||
|
@ -44,11 +37,13 @@ int main(int argc, char* argv[]) {
|
|||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Load input data");
|
||||
#endif
|
||||
imgRaw = cv::imread(imageFilepath);
|
||||
img_raw = cv::imread(image_file_path);
|
||||
}
|
||||
if (!imageclassification->getInitFlag()) {
|
||||
ImageClassificationResult result = imageclassification->Classify(imgRaw);
|
||||
std::cout << "classify result: " << result.label_text << std::endl;
|
||||
if (img_raw.empty()) {
|
||||
std::cout << "[ ERROR ] Read image failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
ImageClassificationResult result = imageclassificationtask->Classify(img_raw);
|
||||
std::cout << "Classify result: " << result.label_text << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -13,7 +13,6 @@ inline void draw_boxes_inplace(cv::Mat &mat_inplace,
|
|||
return;
|
||||
}
|
||||
for (const auto &box : boxes) {
|
||||
if (box.flag) {
|
||||
cv::rectangle(mat_inplace, box.rect(), cv::Scalar(255, 255, 0), 2);
|
||||
if (box.label_text) {
|
||||
std::string label_text(box.label_text);
|
||||
|
@ -22,7 +21,6 @@ inline void draw_boxes_inplace(cv::Mat &mat_inplace,
|
|||
.5f, cv::Scalar(0, 69, 255), 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SUPPORT_DEMO_OBJECT_DETECTION_HPP_
|
||||
|
|
|
@ -3,142 +3,85 @@
|
|||
|
||||
#include "object_detection.hpp"
|
||||
#include "task/vision/object_detection_task.h"
|
||||
#include "utils/check_utils.h"
|
||||
#include "utils/time.h"
|
||||
#include "utils/utils.h"
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
std::vector<Boxi> resultBoxes;
|
||||
std::string filePath, modelFilepath, imageFilepath, saveImgpath,
|
||||
labelFilepath, configFilepath;
|
||||
cv::Mat imgRaw;
|
||||
std::vector<Boxi> bboxes;
|
||||
std::string image_file_path, save_img_path, config_file_path;
|
||||
ObjectDetectionOption option;
|
||||
std::unique_ptr<ObjectDetectionTask> objectdetectiontask;
|
||||
cv::Mat img_raw;
|
||||
#ifdef DEBUG
|
||||
std::cout << "." << std::endl;
|
||||
#endif
|
||||
if (argc == 4) {
|
||||
filePath = argv[1];
|
||||
imageFilepath = argv[2];
|
||||
saveImgpath = argv[3];
|
||||
if (!checkImageFileExtension(imageFilepath) ||
|
||||
!checkImageFileExtension(saveImgpath)) {
|
||||
std::cout << "[ ERROR ] The ImageFilepath is not correct. Make sure you "
|
||||
"are setting the path to an imgae file (.jpg/.jpeg/.png)"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (!exists_check(imageFilepath)) {
|
||||
std::cout << "[ ERROR ] The Image File does not exist. Make sure you are "
|
||||
"setting the correct path to the file"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Load input data");
|
||||
#endif
|
||||
imgRaw = cv::imread(imageFilepath);
|
||||
}
|
||||
std::unique_ptr<objectDetectionTask> objectdetectiontask =
|
||||
std::unique_ptr<objectDetectionTask>(new objectDetectionTask(filePath));
|
||||
resultBoxes = objectdetectiontask->Detect(imgRaw).result_bboxes;
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Output result");
|
||||
#endif
|
||||
for (int i = 0; i < static_cast<int>(resultBoxes.size()); i++) {
|
||||
if (resultBoxes[i].flag) {
|
||||
std::cout << "bbox[" << std::setw(2) << i << "]"
|
||||
<< " "
|
||||
<< "x1y1x2y2: "
|
||||
<< "(" << std::setw(4) << resultBoxes[i].x1 << ","
|
||||
<< std::setw(4) << resultBoxes[i].y1 << "," << std::setw(4)
|
||||
<< resultBoxes[i].x2 << "," << std::setw(4)
|
||||
<< resultBoxes[i].y2 << ")"
|
||||
<< ", "
|
||||
<< "score: " << std::fixed << std::setprecision(3)
|
||||
<< std::setw(4) << resultBoxes[i].score << ", "
|
||||
<< "label_text: " << resultBoxes[i].label_text << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Box drawing");
|
||||
#endif
|
||||
draw_boxes_inplace(imgRaw, resultBoxes);
|
||||
}
|
||||
|
||||
cv::imwrite(saveImgpath, imgRaw);
|
||||
// cv::imshow("detected.jpg",imgRaw);
|
||||
// cv::waitKey(0);
|
||||
config_file_path = argv[1];
|
||||
image_file_path = argv[2];
|
||||
save_img_path = argv[3];
|
||||
objectdetectiontask = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(config_file_path));
|
||||
} else if (argc == 5) {
|
||||
filePath = argv[1];
|
||||
labelFilepath = argv[2];
|
||||
imageFilepath = argv[3];
|
||||
saveImgpath = argv[4];
|
||||
if (!checkImageFileExtension(imageFilepath) ||
|
||||
!checkImageFileExtension(saveImgpath)) {
|
||||
std::cout << "[ ERROR ] The ImageFilepath is not correct. Make sure you "
|
||||
"are setting the path to an imgae file (.jpg/.jpeg/.png)"
|
||||
option.model_path = argv[1];
|
||||
option.label_path = argv[2];
|
||||
image_file_path = argv[3];
|
||||
save_img_path = argv[4];
|
||||
objectdetectiontask =
|
||||
std::unique_ptr<ObjectDetectionTask>(new ObjectDetectionTask(option));
|
||||
} else {
|
||||
std::cout << "Please run with " << argv[0]
|
||||
<< " <model_file_path> <label_file_path> <image_file_path> "
|
||||
"<save_img_path> or "
|
||||
<< argv[0]
|
||||
<< " <config_file_path> <image_file_path> <save_img_path>"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (!exists_check(imageFilepath)) {
|
||||
std::cout << "[ ERROR ] The Image File does not exist. Make sure you are "
|
||||
"setting the correct path to the file"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Load input data");
|
||||
#endif
|
||||
imgRaw = cv::imread(imageFilepath);
|
||||
}
|
||||
std::unique_ptr<objectDetectionTask> objectdetectiontask =
|
||||
std::unique_ptr<objectDetectionTask>(
|
||||
new objectDetectionTask(filePath, labelFilepath));
|
||||
if (objectdetectiontask->getInitFlag() != 0) {
|
||||
return -1;
|
||||
}
|
||||
resultBoxes = objectdetectiontask->Detect(imgRaw).result_bboxes;
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Load input data");
|
||||
#endif
|
||||
img_raw = cv::imread(image_file_path);
|
||||
}
|
||||
if (img_raw.empty()) {
|
||||
std::cout << "[ ERROR ] Read image failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
bboxes = objectdetectiontask->Detect(img_raw).result_bboxes;
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Output result");
|
||||
#endif
|
||||
for (int i = 0; i < static_cast<int>(resultBoxes.size()); i++) {
|
||||
if (resultBoxes[i].flag) {
|
||||
for (size_t i = 0; i < bboxes.size(); i++) {
|
||||
std::cout << "bbox[" << std::setw(2) << i << "]"
|
||||
<< " "
|
||||
<< "x1y1x2y2: "
|
||||
<< "(" << std::setw(4) << resultBoxes[i].x1 << ","
|
||||
<< std::setw(4) << resultBoxes[i].y1 << "," << std::setw(4)
|
||||
<< resultBoxes[i].x2 << "," << std::setw(4)
|
||||
<< resultBoxes[i].y2 << ")"
|
||||
<< "(" << std::setw(4) << bboxes[i].x1 << "," << std::setw(4)
|
||||
<< bboxes[i].y1 << "," << std::setw(4) << bboxes[i].x2 << ","
|
||||
<< std::setw(4) << bboxes[i].y2 << ")"
|
||||
<< ", "
|
||||
<< "score: " << std::fixed << std::setprecision(3)
|
||||
<< std::setw(4) << resultBoxes[i].score << ", "
|
||||
<< "label_text: " << resultBoxes[i].label_text << std::endl;
|
||||
}
|
||||
<< std::setw(4) << bboxes[i].score << ", "
|
||||
<< "label_text: " << bboxes[i].label_text << std::endl;
|
||||
}
|
||||
}
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Box drawing");
|
||||
#endif
|
||||
draw_boxes_inplace(imgRaw, resultBoxes);
|
||||
draw_boxes_inplace(img_raw, bboxes);
|
||||
}
|
||||
|
||||
cv::imwrite(saveImgpath, imgRaw);
|
||||
// cv::imshow("detected.jpg",imgRaw);
|
||||
// cv::waitKey(0);
|
||||
} else {
|
||||
std::cout
|
||||
<< "run with " << argv[0]
|
||||
<< " <modelFilepath> <labelFilepath> <imageFilepath> <saveImgpath> or "
|
||||
<< argv[0] << " <configFilepath> <imageFilepath> <saveImgpath>"
|
||||
try {
|
||||
cv::imwrite(save_img_path, img_raw);
|
||||
} catch (cv::Exception& e) {
|
||||
std::cout << "[ ERROR ] Write result image failed : " << e.what()
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
// cv::imshow("detected.jpg",img_raw);
|
||||
// cv::waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdlib.h>
|
||||
#ifndef _WIN32
|
||||
#include <sys/prctl.h> // for: prctl
|
||||
#endif
|
||||
#include <unistd.h> // for: getopt
|
||||
|
||||
#include <algorithm> // for: swap
|
||||
|
@ -21,15 +23,29 @@
|
|||
|
||||
#include "utils/utils.h"
|
||||
|
||||
void setThreadName(const char* name) {
|
||||
#ifndef _WIN32
|
||||
prctl(PR_SET_NAME, name);
|
||||
#endif
|
||||
}
|
||||
|
||||
class Detector {
|
||||
public:
|
||||
explicit Detector(const std::string& filePath) { filePath_ = filePath; }
|
||||
explicit Detector(const std::string& config_file_path) {
|
||||
config_file_path_ = config_file_path;
|
||||
}
|
||||
explicit Detector(ObjectDetectionOption& option) { option_ = option; }
|
||||
~Detector() {}
|
||||
// 初始化/反初始化
|
||||
int init() {
|
||||
objectdetectiontask_ = std::unique_ptr<objectDetectionTask>(
|
||||
new objectDetectionTask(filePath_));
|
||||
return get_init_flag();
|
||||
if (!config_file_path_.empty()) {
|
||||
objectdetectiontask_ = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(config_file_path_));
|
||||
} else {
|
||||
objectdetectiontask_ = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(option_));
|
||||
}
|
||||
return getInitFlag();
|
||||
}
|
||||
|
||||
int uninit() { return 0; }
|
||||
|
@ -48,9 +64,9 @@ class Detector {
|
|||
|
||||
// 查询检测结果
|
||||
int detected() { return objs_array_.size(); }
|
||||
int get_init_flag() { return objectdetectiontask_->getInitFlag(); }
|
||||
int getInitFlag() { return objectdetectiontask_->getInitFlag(); }
|
||||
// 移走检测结果
|
||||
ObjectDetectionResult get_object() {
|
||||
ObjectDetectionResult getObject() {
|
||||
ObjectDetectionResult objs_moved;
|
||||
objs_mutex_.lock();
|
||||
objs_moved = objs_array_.back();
|
||||
|
@ -63,35 +79,46 @@ class Detector {
|
|||
private:
|
||||
std::mutex objs_mutex_;
|
||||
std::queue<struct ObjectDetectionResult> objs_array_;
|
||||
std::unique_ptr<objectDetectionTask> objectdetectiontask_;
|
||||
std::string filePath_;
|
||||
std::unique_ptr<ObjectDetectionTask> objectdetectiontask_;
|
||||
std::string config_file_path_;
|
||||
ObjectDetectionOption option_;
|
||||
};
|
||||
|
||||
// 检测线程
|
||||
void Detection(DataLoader& dataloader, Detector& detector) {
|
||||
if (detector.init() != 0) {
|
||||
std::cout << "[ERROR] detector init error" << std::endl;
|
||||
dataloader.set_disable();
|
||||
}
|
||||
void Inference(DataLoader& dataloader, Detector& detector) {
|
||||
setThreadName("DetectionThread");
|
||||
cv::Mat frame;
|
||||
while (dataloader.ifenable()) {
|
||||
while (dataloader.ifEnable()) {
|
||||
auto start = std::chrono::steady_clock::now();
|
||||
frame = dataloader.peek_frame(); // 取(拷贝)一帧数据
|
||||
if ((frame).empty()) {
|
||||
dataloader.set_disable();
|
||||
if (!dataloader.isUpdated()) {
|
||||
continue;
|
||||
}
|
||||
frame = dataloader.peekFrame(); // 取(拷贝)一帧数据
|
||||
if ((frame).empty()) {
|
||||
dataloader.setDisable();
|
||||
break;
|
||||
}
|
||||
int flag = detector.infer(frame); // 推理并保存检测结果
|
||||
auto end = std::chrono::steady_clock::now();
|
||||
auto detection_duration =
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||||
dataloader.set_detection_fps(1000 / (detection_duration.count()));
|
||||
dataloader.setDetectionFps(1000 / (detection_duration.count()));
|
||||
if (flag == -1) {
|
||||
std::cout << "[Error] infer frame failed" << std::endl;
|
||||
std::cout << "[ ERROR ] Infer frame failed" << std::endl;
|
||||
break; // 摄像头结束拍摄或者故障
|
||||
}
|
||||
}
|
||||
std::cout << "detection thread quit" << std::endl;
|
||||
}
|
||||
|
||||
// 检测线程
|
||||
void Detection(DataLoader& dataloader, Detector& detector) {
|
||||
setThreadName("OnnxruntimeThread");
|
||||
if (detector.init() != 0) {
|
||||
std::cout << "[ ERROR ] Detector init error" << std::endl;
|
||||
dataloader.setDisable();
|
||||
}
|
||||
std::thread t1(Inference, std::ref(dataloader), std::ref(detector));
|
||||
t1.join();
|
||||
std::cout << "Detection thread quit" << std::endl;
|
||||
}
|
||||
|
||||
// 预览线程
|
||||
|
@ -103,26 +130,26 @@ void Preview(DataLoader& dataloader, Detector& detector) {
|
|||
int count = 0;
|
||||
int dur = 0;
|
||||
int enable_show = 1;
|
||||
const char* showfps = getenv("SHOWFPS");
|
||||
const char* show = getenv("SHOW");
|
||||
const char* showfps = getenv("SUPPORT_SHOWFPS");
|
||||
const char* show = getenv("SUPPORT_SHOW");
|
||||
if (show && strcmp(show, "-1") == 0) {
|
||||
enable_show = -1;
|
||||
}
|
||||
while (dataloader.ifenable()) {
|
||||
while (dataloader.ifEnable()) {
|
||||
auto start = std::chrono::steady_clock::now();
|
||||
frame = dataloader.fetch_frame(); // 取(搬走)一帧数据
|
||||
frame = dataloader.fetchFrame(); // 取(搬走)一帧数据
|
||||
if ((frame).empty()) {
|
||||
dataloader.set_disable();
|
||||
dataloader.setDisable();
|
||||
break;
|
||||
}
|
||||
if (detector.detected()) // 判断原因: detector.detected 不用锁,
|
||||
// detector.get_object 需要锁;
|
||||
{
|
||||
// 是否有检测结果
|
||||
objs = detector.get_object(); // 取(搬走)检测结果(移动赋值)
|
||||
objs = detector.getObject(); // 取(搬走)检测结果(移动赋值)
|
||||
if (objs.result_bboxes.size()) {
|
||||
int input_height = dataloader.get_resize_height();
|
||||
int input_width = dataloader.get_resize_width();
|
||||
int input_height = dataloader.getResizeHeight();
|
||||
int input_width = dataloader.getResizeWidth();
|
||||
int img_height = frame.rows;
|
||||
int img_width = frame.cols;
|
||||
float resize_ratio = std::min(
|
||||
|
@ -130,7 +157,7 @@ void Preview(DataLoader& dataloader, Detector& detector) {
|
|||
static_cast<float>(input_width) / static_cast<float>(img_width));
|
||||
float dw = (input_width - resize_ratio * img_width) / 2;
|
||||
float dh = (input_height - resize_ratio * img_height) / 2;
|
||||
for (int i = 0; i < static_cast<int>(objs.result_bboxes.size()); i++) {
|
||||
for (size_t i = 0; i < objs.result_bboxes.size(); i++) {
|
||||
objs.result_bboxes[i].x1 =
|
||||
(objs.result_bboxes[i].x1 - dw) / resize_ratio;
|
||||
objs.result_bboxes[i].x2 =
|
||||
|
@ -145,8 +172,7 @@ void Preview(DataLoader& dataloader, Detector& detector) {
|
|||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Output result");
|
||||
#endif
|
||||
for (int i = 0; i < static_cast<int>(objs.result_bboxes.size()); i++) {
|
||||
if (objs.result_bboxes[i].flag) {
|
||||
for (size_t i = 0; i < objs.result_bboxes.size(); i++) {
|
||||
std::cout << "bbox[" << std::setw(2) << i << "]"
|
||||
<< " "
|
||||
<< "x1y1x2y2: "
|
||||
|
@ -162,7 +188,6 @@ void Preview(DataLoader& dataloader, Detector& detector) {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// 调用 detector.detected 和 detector.get_object 期间,
|
||||
// 检测结果依然可能被刷新
|
||||
now = std::chrono::steady_clock::now();
|
||||
|
@ -171,8 +196,8 @@ void Preview(DataLoader& dataloader, Detector& detector) {
|
|||
if (duration.count() < 1000) {
|
||||
draw_boxes_inplace((frame), objs.result_bboxes); // 画框
|
||||
}
|
||||
int preview_fps = dataloader.get_preview_fps();
|
||||
int detection_fps = dataloader.get_detection_fps();
|
||||
int preview_fps = dataloader.getPreviewFps();
|
||||
int detection_fps = dataloader.getDetectionFps();
|
||||
if (showfps != nullptr) {
|
||||
cv::putText(frame, "preview fps: " + std::to_string(preview_fps),
|
||||
cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5f,
|
||||
|
@ -191,40 +216,28 @@ void Preview(DataLoader& dataloader, Detector& detector) {
|
|||
count++;
|
||||
dur = dur + preview_duration.count();
|
||||
if (dur >= 1000) {
|
||||
dataloader.set_preview_fps(count);
|
||||
dataloader.setPreviewFps(count);
|
||||
dur = 0;
|
||||
count = 0;
|
||||
}
|
||||
if (enable_show != -1) {
|
||||
if (cv::getWindowProperty("Detection", cv::WND_PROP_VISIBLE) < 1) {
|
||||
dataloader.set_disable();
|
||||
dataloader.setDisable();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::cout << "preview thread quit" << std::endl;
|
||||
std::cout << "Preview thread quit" << std::endl;
|
||||
if (enable_show != -1) {
|
||||
cv::destroyAllWindows();
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef _WIN32
|
||||
void setThreadName(std::thread& thread, const char* name) {
|
||||
pthread_setname_np(thread.native_handle(), name);
|
||||
}
|
||||
#endif
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
std::string filePath, input, inputType;
|
||||
std::string config_file_path, input, input_type;
|
||||
ObjectDetectionOption option;
|
||||
int resize_height{320}, resize_width{320};
|
||||
if (argc == 4) {
|
||||
filePath = argv[1];
|
||||
input = argv[2];
|
||||
inputType = argv[3];
|
||||
} else if (argc > 4) {
|
||||
filePath = argv[1];
|
||||
input = argv[2];
|
||||
inputType = argv[3];
|
||||
std::unique_ptr<Detector> detector;
|
||||
int o;
|
||||
const char* optstring = "w:h:";
|
||||
while ((o = getopt(argc, argv, optstring)) != -1) {
|
||||
|
@ -236,33 +249,42 @@ int main(int argc, char* argv[]) {
|
|||
resize_height = atoi(optarg);
|
||||
break;
|
||||
case '?':
|
||||
std::cout << "[ERROR] Unsupported usage" << std::endl;
|
||||
std::cout << "[ ERROR ] Unsupported usage" << std::endl;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (argc - optind == 3) {
|
||||
config_file_path = argv[optind];
|
||||
input = argv[optind + 1];
|
||||
input_type = argv[optind + 2];
|
||||
detector = std::unique_ptr<Detector>(new Detector(config_file_path));
|
||||
} else if (argc - optind == 4) {
|
||||
option.model_path = argv[optind];
|
||||
option.label_path = argv[optind + 1];
|
||||
input = argv[optind + 2];
|
||||
input_type = argv[optind + 3];
|
||||
detector = std::unique_ptr<Detector>(new Detector(option));
|
||||
} else {
|
||||
std::cout
|
||||
<< "run with " << argv[0]
|
||||
<< " <configFilepath> <input> <inputType> (video "
|
||||
"or cameraId) option(-h <resize_height>) option(-w <resize_width>)"
|
||||
std::cout << "Please run with " << argv[0]
|
||||
<< " <model_file_path> <label_file_path> <input> <input_type> "
|
||||
"(video or camera_id) option(-h <resize_height>) option(-w "
|
||||
"<resize_width>) or "
|
||||
<< argv[0]
|
||||
<< " <config_file_path> <input> <input_type> (video "
|
||||
"or camera_id) option(-h <resize_height>) option(-w "
|
||||
"<resize_width>)"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
Detector detector{filePath};
|
||||
SharedDataLoader dataloader{resize_height, resize_width};
|
||||
if (dataloader.init(input) != 0) {
|
||||
std::cout << "[ERROR] dataloader init error" << std::endl;
|
||||
std::cout << "[ ERROR ] Dataloader init error" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::thread t1(Preview, std::ref(dataloader), std::ref(detector));
|
||||
// std::this_thread::sleep_for(std::chrono::seconds(5));
|
||||
std::thread t2(Detection, std::ref(dataloader), std::ref(detector));
|
||||
#ifndef _WIN32
|
||||
setThreadName(t1, "PreviewThread");
|
||||
setThreadName(t2, "DetectionThread");
|
||||
#endif
|
||||
t1.join();
|
||||
t2.join();
|
||||
std::thread t(Detection, std::ref(dataloader), std::ref(*detector));
|
||||
setThreadName("PreviewThread");
|
||||
Preview(dataloader, *detector);
|
||||
t.join();
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -7,21 +7,49 @@
|
|||
#endif
|
||||
|
||||
#include "utils/utils.h"
|
||||
int DetectVideo(const std::string &filepath, const std::string &videoPath,
|
||||
const std::string &srcPath) {
|
||||
std::unique_ptr<objectDetectionTask> objectdetectiontask =
|
||||
std::unique_ptr<objectDetectionTask>(new objectDetectionTask(filepath));
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
std::unique_ptr<ObjectDetectionTask> objectdetectiontask;
|
||||
std::string config_file_path, video_file_path, dst_file_path;
|
||||
ObjectDetectionOption option;
|
||||
#ifdef DEBUG
|
||||
std::cout << "." << std::endl;
|
||||
#endif
|
||||
if (argc == 4) {
|
||||
config_file_path = argv[1];
|
||||
video_file_path = argv[2];
|
||||
dst_file_path = argv[3];
|
||||
objectdetectiontask = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(config_file_path));
|
||||
}
|
||||
if (argc == 5) {
|
||||
option.model_path = argv[1];
|
||||
option.label_path = argv[2];
|
||||
video_file_path = argv[3];
|
||||
dst_file_path = argv[4];
|
||||
objectdetectiontask =
|
||||
std::unique_ptr<ObjectDetectionTask>(new ObjectDetectionTask(option));
|
||||
} else {
|
||||
std::cout << "Please run with " << argv[0]
|
||||
<< " <model_file_path> <label_file_path> <video_file_path> "
|
||||
"<dst_file_path> (end with .avi) or "
|
||||
<< argv[0]
|
||||
<< " <config_file_path> <video_file_path> "
|
||||
"<dst_file_path> (end with .avi)"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (objectdetectiontask->getInitFlag() != 0) {
|
||||
return -1;
|
||||
}
|
||||
cv::VideoCapture capture(videoPath);
|
||||
cv::VideoCapture capture(video_file_path);
|
||||
if (!capture.isOpened()) {
|
||||
std::cout << "Open video capture failed" << std::endl;
|
||||
std::cout << "[ ERROR ] Open video capture failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
cv::Mat frame;
|
||||
if (!capture.read(frame)) {
|
||||
std::cout << "Read frame failed" << std::endl;
|
||||
std::cout << "[ ERROR ] Read frame failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
double rate = capture.get(cv::CAP_PROP_FPS);
|
||||
|
@ -29,36 +57,33 @@ int DetectVideo(const std::string &filepath, const std::string &videoPath,
|
|||
int fps = rate;
|
||||
int frameWidth = frame.rows;
|
||||
int frameHeight = frame.cols;
|
||||
cv::VideoWriter writer(srcPath, cv::VideoWriter::fourcc('D', 'I', 'V', 'X'),
|
||||
fps, cv::Size(frameHeight, frameWidth), 1);
|
||||
cv::VideoWriter writer(dst_file_path,
|
||||
cv::VideoWriter::fourcc('D', 'I', 'V', 'X'), fps,
|
||||
cv::Size(frameHeight, frameWidth), 1);
|
||||
while (true) {
|
||||
capture >> frame;
|
||||
if (frame.empty()) {
|
||||
break;
|
||||
}
|
||||
std::vector<Boxi> resultBoxes =
|
||||
objectdetectiontask->Detect(frame).result_bboxes;
|
||||
std::vector<Boxi> bboxes = objectdetectiontask->Detect(frame).result_bboxes;
|
||||
{
|
||||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Output result");
|
||||
#endif
|
||||
for (int i = 0; i < static_cast<int>(resultBoxes.size()); i++) {
|
||||
if (resultBoxes[i].flag) {
|
||||
for (size_t i = 0; i < bboxes.size(); i++) {
|
||||
std::cout << "bbox[" << std::setw(2) << i << "]"
|
||||
<< " "
|
||||
<< "x1y1x2y2: "
|
||||
<< "(" << std::setw(4) << resultBoxes[i].x1 << ","
|
||||
<< std::setw(4) << resultBoxes[i].y1 << "," << std::setw(4)
|
||||
<< resultBoxes[i].x2 << "," << std::setw(4)
|
||||
<< resultBoxes[i].y2 << ")"
|
||||
<< "(" << std::setw(4) << bboxes[i].x1 << "," << std::setw(4)
|
||||
<< bboxes[i].y1 << "," << std::setw(4) << bboxes[i].x2 << ","
|
||||
<< std::setw(4) << bboxes[i].y2 << ")"
|
||||
<< ", "
|
||||
<< "score: " << std::fixed << std::setprecision(3)
|
||||
<< std::setw(4) << resultBoxes[i].score << ", "
|
||||
<< "label_text: " << resultBoxes[i].label_text << std::endl;
|
||||
<< std::setw(4) << bboxes[i].score << ", "
|
||||
<< "label_text: " << bboxes[i].label_text << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
draw_boxes_inplace(frame, resultBoxes);
|
||||
draw_boxes_inplace(frame, bboxes);
|
||||
writer.write(frame);
|
||||
cv::waitKey(
|
||||
delay); // 因为图像处理需要消耗一定时间,所以图片展示速度比保存视频要慢
|
||||
|
@ -68,26 +93,3 @@ int DetectVideo(const std::string &filepath, const std::string &videoPath,
|
|||
writer.release();
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
std::string filepath, videoFilepath, dstFilepath;
|
||||
#ifdef DEBUG
|
||||
std::cout << "." << std::endl;
|
||||
#endif
|
||||
if (argc == 4) {
|
||||
filepath = argv[1];
|
||||
videoFilepath = argv[2];
|
||||
dstFilepath = argv[3];
|
||||
int flag = DetectVideo(filepath, videoFilepath, dstFilepath);
|
||||
if (flag != 0) {
|
||||
std::cout << "[Error] Detect fail" << std::endl;
|
||||
}
|
||||
} else {
|
||||
std::cout << "run with " << argv[0]
|
||||
<< " <configFilepath> <videoFilepath> "
|
||||
"<dstFilepath> (end with .avi)"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
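For context, the video demo above follows a plain OpenCV read-detect-write loop: it probes the FPS and frame size from the first frame, then re-encodes each annotated frame into an .avi. A trimmed sketch of that loop (file names are placeholders and the detection call is elided):

#include "opencv2/opencv.hpp"

int main() {
  cv::VideoCapture capture("input.mp4");              // placeholder input path
  if (!capture.isOpened()) return -1;
  cv::Mat frame;
  if (!capture.read(frame)) return -1;                // first frame gives the size
  double fps = capture.get(cv::CAP_PROP_FPS);
  cv::VideoWriter writer("output.avi",                // placeholder output path
                         cv::VideoWriter::fourcc('D', 'I', 'V', 'X'), fps,
                         frame.size(), true);
  do {
    // ... run detection here and draw the boxes on `frame` ...
    writer.write(frame);
  } while (capture.read(frame));
  writer.release();
  return 0;
}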
@ -3,7 +3,6 @@
|
|||
|
||||
#include "task/vision/object_detection_task.h"
|
||||
#include "task/vision/pose_estimation_task.h"
|
||||
#include "utils/check_utils.h"
|
||||
#include "utils/time.h"
|
||||
#include "utils/utils.h"
|
||||
|
||||
|
@ -12,28 +11,44 @@ int main(int argc, char* argv[]) {
|
|||
{0, 1}, {0, 2}, {1, 3}, {2, 4}, {5, 7}, {7, 9},
|
||||
{6, 8}, {8, 10}, {5, 6}, {5, 11}, {6, 12}, {11, 12},
|
||||
{11, 13}, {13, 15}, {12, 14}, {14, 16}};
|
||||
std::vector<PosePoint> resultPoints;
|
||||
std::vector<Boxi> resultBoxes;
|
||||
std::string detFilePath, poseFilePath, imageFilepath, saveImgpath;
|
||||
cv::Mat imgRaw, img;
|
||||
std::vector<PosePoint> points;
|
||||
std::vector<Boxi> bboxes;
|
||||
std::string det_file_path, pose_file_path, image_file_path, save_img_path;
|
||||
PoseEstimationOption estimation_option;
|
||||
ObjectDetectionOption detection_option;
|
||||
std::unique_ptr<ObjectDetectionTask> objectdetectiontask;
|
||||
std::unique_ptr<PoseEstimationTask> poseestimationtask;
|
||||
cv::Mat img_raw, img;
|
||||
#ifdef DEBUG
|
||||
std::cout << "." << std::endl;
|
||||
#endif
|
||||
if (argc == 5) {
|
||||
detFilePath = argv[1];
|
||||
poseFilePath = argv[2];
|
||||
imageFilepath = argv[3];
|
||||
saveImgpath = argv[4];
|
||||
if (!checkImageFileExtension(imageFilepath) ||
|
||||
!checkImageFileExtension(saveImgpath)) {
|
||||
std::cout << "[ ERROR ] The ImageFilepath is not correct. Make sure you "
|
||||
"are setting the path to an imgae file (.jpg/.jpeg/.png)"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (!exists_check(imageFilepath)) {
|
||||
std::cout << "[ ERROR ] The Image File does not exist. Make sure you are "
|
||||
"setting the correct path to the file"
|
||||
det_file_path = argv[1];
|
||||
pose_file_path = argv[2];
|
||||
image_file_path = argv[3];
|
||||
save_img_path = argv[4];
|
||||
objectdetectiontask = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(det_file_path));
|
||||
poseestimationtask = std::unique_ptr<PoseEstimationTask>(
|
||||
new PoseEstimationTask(pose_file_path));
|
||||
} else if (argc == 6) {
|
||||
detection_option.model_path = argv[1];
|
||||
detection_option.label_path = argv[2];
|
||||
estimation_option.model_path = argv[3];
|
||||
image_file_path = argv[4];
|
||||
save_img_path = argv[5];
|
||||
objectdetectiontask = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(detection_option));
|
||||
poseestimationtask = std::unique_ptr<PoseEstimationTask>(
|
||||
new PoseEstimationTask(estimation_option));
|
||||
} else {
|
||||
std::cout << "Please run with " << argv[0]
|
||||
<< " <det_model_file_path> <det_label_file_path> "
|
||||
"<pose_model_file_path> <image_file_path> "
|
||||
"<save_img_path> or "
|
||||
<< argv[0]
|
||||
<< " <det_config_file_path> <pose_config_file_path> "
|
||||
"<image_file_path> <save_img_path>"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
|
@ -41,64 +56,60 @@ int main(int argc, char* argv[]) {
|
|||
#ifdef DEBUG
|
||||
TimeWatcher t("|-- Load input data");
|
||||
#endif
|
||||
imgRaw = cv::imread(imageFilepath);
|
||||
resize_unscale(imgRaw, img, 320, 320);
|
||||
img_raw = cv::imread(image_file_path);
|
||||
if (img_raw.empty()) {
|
||||
std::cout << "[ ERROR ] Read image failed" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
resizeUnscale(img_raw, img, 320, 320);
|
||||
}
|
||||
std::unique_ptr<objectDetectionTask> objectdetectiontask =
|
||||
std::unique_ptr<objectDetectionTask>(
|
||||
new objectDetectionTask(detFilePath));
|
||||
if (objectdetectiontask->getInitFlag() != 0) {
|
||||
return -1;
|
||||
}
|
||||
resultBoxes = objectdetectiontask->Detect(img).result_bboxes;
|
||||
std::unique_ptr<poseEstimationTask> poseestimationtask =
|
||||
std::unique_ptr<poseEstimationTask>(
|
||||
new poseEstimationTask(poseFilePath));
|
||||
bboxes = objectdetectiontask->Detect(img).result_bboxes;
|
||||
if (poseestimationtask->getInitFlag() != 0) {
|
||||
return -1;
|
||||
}
|
||||
Boxi box;
|
||||
for (int i = 0; i < static_cast<int>(resultBoxes.size()); i++) {
|
||||
box = resultBoxes[i];
|
||||
for (size_t i = 0; i < bboxes.size(); i++) {
|
||||
box = bboxes[i];
|
||||
if (box.label != 0) {
|
||||
continue;
|
||||
}
|
||||
resultPoints = poseestimationtask->Estimate(img, box).result_points;
|
||||
if (resultPoints.size()) {
|
||||
points = poseestimationtask->Estimate(img, box).result_points;
|
||||
if (points.size()) {
|
||||
int input_height = 320;
|
||||
int input_width = 320;
|
||||
int img_height = imgRaw.rows;
|
||||
int img_width = imgRaw.cols;
|
||||
int img_height = img_raw.rows;
|
||||
int img_width = img_raw.cols;
|
||||
float resize_ratio = std::min(
|
||||
static_cast<float>(input_height) / static_cast<float>(img_height),
|
||||
static_cast<float>(input_width) / static_cast<float>(img_width));
|
||||
float dw = (input_width - resize_ratio * img_width) / 2;
|
||||
float dh = (input_height - resize_ratio * img_height) / 2;
|
||||
for (int i = 0; i < static_cast<int>(resultPoints.size()); i++) {
|
||||
resultPoints[i].x = (resultPoints[i].x - dw) / resize_ratio;
|
||||
resultPoints[i].y = (resultPoints[i].y - dh) / resize_ratio;
|
||||
for (size_t i = 0; i < points.size(); i++) {
|
||||
points[i].x = (points[i].x - dw) / resize_ratio;
|
||||
points[i].y = (points[i].y - dh) / resize_ratio;
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < static_cast<int>(resultPoints.size()); ++i) {
|
||||
cv::circle(imgRaw, cv::Point(resultPoints[i].x, resultPoints[i].y), 2,
|
||||
for (size_t i = 0; i < points.size(); ++i) {
|
||||
cv::circle(img_raw, cv::Point(points[i].x, points[i].y), 2,
|
||||
cv::Scalar{0, 0, 255}, 2, cv::LINE_AA);
|
||||
}
|
||||
|
||||
for (int i = 0; i < static_cast<int>(coco_17_joint_links.size()); ++i) {
|
||||
for (size_t i = 0; i < coco_17_joint_links.size(); ++i) {
|
||||
std::pair<int, int> joint_links = coco_17_joint_links[i];
|
||||
cv::line(imgRaw,
|
||||
cv::Point(resultPoints[joint_links.first].x,
|
||||
resultPoints[joint_links.first].y),
|
||||
cv::Point(resultPoints[joint_links.second].x,
|
||||
resultPoints[joint_links.second].y),
|
||||
cv::line(
|
||||
img_raw,
|
||||
cv::Point(points[joint_links.first].x, points[joint_links.first].y),
|
||||
cv::Point(points[joint_links.second].x, points[joint_links.second].y),
|
||||
cv::Scalar{0, 255, 0}, 2, cv::LINE_AA);
|
||||
}
|
||||
}
|
||||
cv::imwrite(saveImgpath, imgRaw);
|
||||
} else {
|
||||
std::cout << "run with " << argv[0]
|
||||
<< " <detConfigFilepath> <poseConfigFilepath> <imageFilepath> "
|
||||
"<saveImgpath> "
|
||||
try {
|
||||
cv::imwrite(save_img_path, img_raw);
|
||||
} catch (cv::Exception& e) {
|
||||
std::cout << "[ ERROR ] Write result image failed : " << e.what()
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
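The coordinate fix-up above undoes the 320x320 letterbox resize: each keypoint predicted on the padded input is shifted by the padding offsets (dw, dh) and divided by the resize ratio to land back on the original image. A small self-contained sketch of that inverse mapping (the 320x320 size matches the demo; the sample point and frame size are made up):

#include <algorithm>
#include <iostream>

struct Point { float x, y; };

// Map a point from the letterboxed input back to original image coordinates.
Point UnLetterbox(Point p, int input_w, int input_h, int img_w, int img_h) {
  float r = std::min(static_cast<float>(input_h) / img_h,
                     static_cast<float>(input_w) / img_w);
  float dw = (input_w - r * img_w) / 2.0f;   // horizontal padding
  float dh = (input_h - r * img_h) / 2.0f;   // vertical padding
  return {(p.x - dw) / r, (p.y - dh) / r};
}

int main() {
  Point p = UnLetterbox({160.0f, 100.0f}, 320, 320, 1280, 720);
  std::cout << p.x << ", " << p.y << std::endl;  // point in the 1280x720 frame
  return 0;
}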
@ -1,5 +1,7 @@
|
|||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdlib.h>
|
||||
#ifndef _WIN32
|
||||
#include <sys/prctl.h> // for: prctl
|
||||
#endif
|
||||
#include <unistd.h> // for: getopt
|
||||
|
||||
#include <algorithm> // for: swap
|
||||
|
@ -22,23 +24,41 @@
|
|||
|
||||
#include "utils/utils.h"
|
||||
|
||||
void setThreadName(const char* name) {
|
||||
#ifndef _WIN32
|
||||
prctl(PR_SET_NAME, name);
|
||||
#endif
|
||||
}
|
||||
|
||||
class Tracker {
|
||||
public:
|
||||
Tracker(const std::string& detFilePath, const std::string& poseFilePath) {
|
||||
detFilePath_ = detFilePath;
|
||||
poseFilePath_ = poseFilePath;
|
||||
Tracker(const std::string& det_file_path, const std::string& pose_file_path) {
|
||||
det_file_path_ = det_file_path;
|
||||
pose_file_path_ = pose_file_path;
|
||||
}
|
||||
Tracker(const ObjectDetectionOption& detection_option,
|
||||
const PoseEstimationOption& estimation_option) {
|
||||
detection_option_ = detection_option;
|
||||
estimation_option_ = estimation_option;
|
||||
}
|
||||
~Tracker() {}
|
||||
// 初始化/反初始化
|
||||
int init() {
|
||||
objectdetectiontask_ = std::unique_ptr<objectDetectionTask>(
|
||||
new objectDetectionTask(detFilePath_));
|
||||
poseestimationtask_ = std::unique_ptr<poseEstimationTask>(
|
||||
new poseEstimationTask(poseFilePath_));
|
||||
return get_init_flag();
|
||||
if (!det_file_path_.empty()) {
|
||||
objectdetectiontask_ = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(det_file_path_));
|
||||
poseestimationtask_ = std::unique_ptr<PoseEstimationTask>(
|
||||
new PoseEstimationTask(pose_file_path_));
|
||||
} else {
|
||||
objectdetectiontask_ = std::unique_ptr<ObjectDetectionTask>(
|
||||
new ObjectDetectionTask(detection_option_));
|
||||
poseestimationtask_ = std::unique_ptr<PoseEstimationTask>(
|
||||
new PoseEstimationTask(estimation_option_));
|
||||
}
|
||||
return getInitFlag();
|
||||
}
|
||||
|
||||
int get_init_flag() {
|
||||
int getInitFlag() {
|
||||
return (objectdetectiontask_->getInitFlag() ||
|
||||
poseestimationtask_->getInitFlag());
|
||||
}
|
||||
|
@ -75,7 +95,7 @@ class Tracker {
|
|||
int estimated() { return poses_array_.size(); }
|
||||
|
||||
// 移走检测结果
|
||||
struct PoseEstimationResult get_pose() {
|
||||
struct PoseEstimationResult getPose() {
|
||||
struct PoseEstimationResult poses_moved;
|
||||
poses_mutex_.lock();
|
||||
poses_moved = poses_array_.back();
|
||||
|
@ -88,41 +108,52 @@ class Tracker {
|
|||
private:
|
||||
std::mutex poses_mutex_;
|
||||
std::queue<struct PoseEstimationResult> poses_array_;
|
||||
std::unique_ptr<objectDetectionTask> objectdetectiontask_;
|
||||
std::unique_ptr<poseEstimationTask> poseestimationtask_;
|
||||
std::string poseFilePath_;
|
||||
std::string detFilePath_;
|
||||
std::string labelFilepath_;
|
||||
std::unique_ptr<ObjectDetectionTask> objectdetectiontask_;
|
||||
std::unique_ptr<PoseEstimationTask> poseestimationtask_;
|
||||
std::string pose_file_path_;
|
||||
std::string det_file_path_;
|
||||
ObjectDetectionOption detection_option_;
|
||||
PoseEstimationOption estimation_option_;
|
||||
};
|
||||
|
||||
// 检测线程
|
||||
void Track(DataLoader& dataloader, Tracker& tracker) {
|
||||
if (tracker.init() != 0) {
|
||||
std::cout << "[ERROR] tracker init error" << std::endl;
|
||||
return;
|
||||
}
|
||||
void Inference(DataLoader& dataloader, Tracker& tracker) {
|
||||
setThreadName("TrackerThread");
|
||||
cv::Mat frame;
|
||||
while (dataloader.ifenable()) {
|
||||
while (dataloader.ifEnable()) {
|
||||
auto start = std::chrono::steady_clock::now();
|
||||
frame = dataloader.peek_frame(); // 取(拷贝)一帧数据
|
||||
if ((frame).empty()) {
|
||||
dataloader.set_disable();
|
||||
if (!dataloader.isUpdated()) {
|
||||
continue;
|
||||
}
|
||||
frame = dataloader.peekFrame(); // 取(拷贝)一帧数据
|
||||
if ((frame).empty()) {
|
||||
dataloader.setDisable();
|
||||
break;
|
||||
}
|
||||
int flag = tracker.infer(frame); // 推理并保存检测结果
|
||||
auto end = std::chrono::steady_clock::now();
|
||||
auto detection_duration =
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
|
||||
dataloader.set_detection_fps(1000 / (detection_duration.count()));
|
||||
dataloader.setDetectionFps(1000 / (detection_duration.count()));
|
||||
if (flag == 0) {
|
||||
std::cout << "[Warning] unable to catch person" << std::endl; // 无人
|
||||
std::cout << "[ WARNING ] Unable to catch person" << std::endl; // 无人
|
||||
}
|
||||
if (flag == -1) {
|
||||
std::cout << "[Error] infer frame failed" << std::endl;
|
||||
std::cout << "[ ERROR ] Infer frame failed" << std::endl;
|
||||
break; // 摄像头结束拍摄或者故障
|
||||
}
|
||||
}
|
||||
std::cout << "track thread quit" << std::endl;
|
||||
}
|
||||
|
||||
// 检测线程
|
||||
void Track(DataLoader& dataloader, Tracker& tracker) {
|
||||
setThreadName("OnnxruntimeThread");
|
||||
if (tracker.init() != 0) {
|
||||
std::cout << "[ ERROR ] Tracker init error" << std::endl;
|
||||
return;
|
||||
}
|
||||
std::thread t1(Inference, std::ref(dataloader), std::ref(tracker));
|
||||
t1.join();
|
||||
std::cout << "Track thread quit" << std::endl;
|
||||
}
|
||||
|
||||
// 预览线程
|
||||
|
@ -134,26 +165,26 @@ void Preview(DataLoader& dataloader, Tracker& tracker) {
|
|||
int count = 0;
|
||||
int dur = 0;
|
||||
int enable_show = 1;
|
||||
const char* showfps = getenv("SHOWFPS");
|
||||
const char* show = getenv("SHOW");
|
||||
const char* showfps = getenv("SUPPORT_SHOWFPS");
|
||||
const char* show = getenv("SUPPORT_SHOW");
|
||||
if (show && strcmp(show, "-1") == 0) {
|
||||
enable_show = -1;
|
||||
}
|
||||
while (dataloader.ifenable()) {
|
||||
while (dataloader.ifEnable()) {
|
||||
auto start = std::chrono::steady_clock::now();
|
||||
frame = dataloader.fetch_frame(); // 取(搬走)一帧数据
|
||||
frame = dataloader.fetchFrame(); // 取(搬走)一帧数据
|
||||
if ((frame).empty()) {
|
||||
dataloader.set_disable();
|
||||
dataloader.setDisable();
|
||||
break;
|
||||
}
|
||||
if (tracker.estimated()) // 判断原因: detector.detected 不用锁,
|
||||
// detector.get_object 需要锁;
|
||||
{
|
||||
// 是否有检测结果
|
||||
poses = tracker.get_pose(); // 取(搬走)检测结果(移动赋值)
|
||||
poses = tracker.getPose(); // 取(搬走)检测结果(移动赋值)
|
||||
if (poses.result_points.size()) {
|
||||
int input_height = dataloader.get_resize_height();
|
||||
int input_width = dataloader.get_resize_width();
|
||||
int input_height = dataloader.getResizeHeight();
|
||||
int input_width = dataloader.getResizeWidth();
|
||||
int img_height = frame.rows;
|
||||
int img_width = frame.cols;
|
||||
float resize_ratio = std::min(
|
||||
|
@ -161,7 +192,7 @@ void Preview(DataLoader& dataloader, Tracker& tracker) {
|
|||
static_cast<float>(input_width) / static_cast<float>(img_width));
|
||||
float dw = (input_width - resize_ratio * img_width) / 2;
|
||||
float dh = (input_height - resize_ratio * img_height) / 2;
|
||||
for (int i = 0; i < static_cast<int>(poses.result_points.size()); i++) {
|
||||
for (size_t i = 0; i < poses.result_points.size(); i++) {
|
||||
poses.result_points[i].x =
|
||||
(poses.result_points[i].x - dw) / resize_ratio;
|
||||
poses.result_points[i].y =
|
||||
|
@ -177,8 +208,8 @@ void Preview(DataLoader& dataloader, Tracker& tracker) {
|
|||
if (duration.count() < 1000 && poses.result_points.size()) {
|
||||
draw_points_inplace((frame), poses.result_points); // 画框
|
||||
}
|
||||
int preview_fps = dataloader.get_preview_fps();
|
||||
int detection_fps = dataloader.get_detection_fps();
|
||||
int preview_fps = dataloader.getPreviewFps();
|
||||
int detection_fps = dataloader.getDetectionFps();
|
||||
if (showfps != nullptr) {
|
||||
cv::putText(frame, "preview fps: " + std::to_string(preview_fps),
|
||||
cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5f,
|
||||
|
@ -197,42 +228,29 @@ void Preview(DataLoader& dataloader, Tracker& tracker) {
|
|||
count++;
|
||||
dur = dur + preview_duration.count();
|
||||
if (dur >= 1000) {
|
||||
dataloader.set_preview_fps(count);
|
||||
dataloader.setPreviewFps(count);
|
||||
dur = 0;
|
||||
count = 0;
|
||||
}
|
||||
if (enable_show != -1) {
|
||||
if (cv::getWindowProperty("Track", cv::WND_PROP_VISIBLE) < 1) {
|
||||
dataloader.set_disable();
|
||||
dataloader.setDisable();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::cout << "preview thread quit" << std::endl;
|
||||
std::cout << "Preview thread quit" << std::endl;
|
||||
if (enable_show != -1) {
|
||||
cv::destroyAllWindows();
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef _WIN32
|
||||
void setThreadName(std::thread& thread, const char* name) {
|
||||
pthread_setname_np(thread.native_handle(), name);
|
||||
}
|
||||
#endif
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
std::string detFilePath, poseFilePath, input, inputType;
|
||||
std::string det_file_path, pose_file_path, input, input_type;
|
||||
int resize_height{320}, resize_width{320};
|
||||
if (argc == 5) {
|
||||
detFilePath = argv[1];
|
||||
poseFilePath = argv[2];
|
||||
input = argv[3];
|
||||
inputType = argv[4];
|
||||
} else if (argc > 5) {
|
||||
detFilePath = argv[1];
|
||||
poseFilePath = argv[2];
|
||||
input = argv[3];
|
||||
inputType = argv[4];
|
||||
ObjectDetectionOption detection_option;
|
||||
PoseEstimationOption estimation_option;
|
||||
std::unique_ptr<Tracker> tracker;
|
||||
int o;
|
||||
const char* optstring = "w:h:";
|
||||
while ((o = getopt(argc, argv, optstring)) != -1) {
|
||||
|
@ -244,32 +262,47 @@ int main(int argc, char* argv[]) {
|
|||
resize_height = atoi(optarg);
|
||||
break;
|
||||
case '?':
|
||||
std::cout << "[ERROR] Unsupported usage" << std::endl;
|
||||
std::cout << "[ ERROR ] Unsupported usage" << std::endl;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (argc - optind == 4) {
|
||||
det_file_path = argv[optind];
|
||||
pose_file_path = argv[optind + 1];
|
||||
input = argv[optind + 2];
|
||||
input_type = argv[optind + 3];
|
||||
tracker =
|
||||
std::unique_ptr<Tracker>(new Tracker(det_file_path, pose_file_path));
|
||||
} else if (argc - optind == 5) {
|
||||
detection_option.model_path = argv[optind];
|
||||
detection_option.label_path = argv[optind + 1];
|
||||
estimation_option.model_path = argv[optind + 2];
|
||||
input = argv[optind + 3];
|
||||
input_type = argv[optind + 4];
|
||||
tracker = std::unique_ptr<Tracker>(
|
||||
new Tracker(detection_option, estimation_option));
|
||||
} else {
|
||||
std::cout << "run with " << argv[0]
|
||||
<< " <detFilepath> <poseFilepath> <input> <inputType> (video or "
|
||||
"cameraId option(-h <resize_height>) option(-w <resize_width>)"
|
||||
std::cout
|
||||
<< "Please run with " << argv[0]
|
||||
<< " <det_model_file_path> <det_label_file_path> "
|
||||
"<pose_model_file_path> <input> <input_type> (video or cameraId "
|
||||
"option(-h <resize_height>) option(-w <resize_width>) or "
|
||||
<< argv[0]
|
||||
<< " <det_config_file_path> <pose_config_file_path> <input> "
|
||||
"<input_type> (video or cameraId option(-h <resize_height>) "
|
||||
"option(-w <resize_width>)"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
Tracker tracker{detFilePath, poseFilePath};
|
||||
SharedDataLoader dataloader{resize_height, resize_width};
|
||||
if (dataloader.init(input) != 0) {
|
||||
std::cout << "[ERROR] dataloader init error" << std::endl;
|
||||
std::cout << "[ ERROR ] dataloader init error" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::thread t1(Preview, std::ref(dataloader), std::ref(tracker));
|
||||
// std::this_thread::sleep_for(std::chrono::seconds(5));
|
||||
std::thread t2(Track, std::ref(dataloader), std::ref(tracker));
|
||||
#ifndef _WIN32
|
||||
setThreadName(t1, "PreviewThread");
|
||||
setThreadName(t2, "TrackerThread");
|
||||
#endif
|
||||
t1.join();
|
||||
t2.join();
|
||||
std::thread t(Track, std::ref(dataloader), std::ref(*tracker));
|
||||
setThreadName("PreviewThread");
|
||||
Preview(dataloader, *tracker);
|
||||
t.join();
|
||||
return 0;
|
||||
}
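A side note on the setThreadName() helper introduced in this file: prctl(PR_SET_NAME, ...) renames the calling thread, whereas the previous helper used pthread_setname_np() on a std::thread handle; on Linux both truncate the name to 15 characters plus the terminator. A hedged, Linux-only sketch of the two variants:

#include <pthread.h>
#include <sys/prctl.h>
#include <thread>

int main() {
  // Variant 1: a thread names itself, as the new setThreadName() helper does.
  std::thread t([] { prctl(PR_SET_NAME, "WorkerThread"); /* work */ });
  // Variant 2: name another thread through its handle, as the old helper did.
  pthread_setname_np(t.native_handle(), "WorkerThread");
  t.join();
  return 0;
}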
@ -1,23 +0,0 @@
|
|||
#ifndef SUPPORT_DEMO_UTILS_CHECK_UTILS_H_
|
||||
#define SUPPORT_DEMO_UTILS_CHECK_UTILS_H_
|
||||
#include <cstdlib>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
static bool checkImageFileExtension(const std::string& filename) {
|
||||
size_t pos = filename.rfind('.');
|
||||
if (filename.empty()) {
|
||||
std::cout << "[ ERROR ] The Image file path is empty" << std::endl;
|
||||
return false;
|
||||
}
|
||||
if (pos == std::string::npos) return false;
|
||||
std::string ext = filename.substr(pos + 1);
|
||||
if (ext == "jpeg" || ext == "jpg" || ext == "png") {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SUPPORT_DEMO_UTILS_CHECK_UTILS_H_
|
|
@ -7,11 +7,11 @@
|
|||
#include "opencv2/opencv.hpp"
|
||||
#include "task/vision/image_classification_types.h"
|
||||
|
||||
class imageClassificationTask {
|
||||
class ImageClassificationTask {
|
||||
public:
|
||||
imageClassificationTask(const std::string& filePath,
|
||||
const std::string& labelFilepath);
|
||||
~imageClassificationTask() = default;
|
||||
explicit ImageClassificationTask(const std::string& config_file_path);
|
||||
explicit ImageClassificationTask(const ImageClassificationOption& option);
|
||||
~ImageClassificationTask() = default;
|
||||
ImageClassificationResult Classify(const cv::Mat& img_raw);
|
||||
int getInitFlag();
@ -9,4 +9,12 @@ struct ImageClassificationResult {
|
|||
int label;
|
||||
float score;
|
||||
};
|
||||
|
||||
struct ImageClassificationOption {
|
||||
std::string model_path;
|
||||
std::string label_path;
|
||||
int intra_threads_num = 2;
|
||||
int inter_threads_num = 2;
|
||||
};
|
||||
|
||||
#endif // SUPPORT_INCLUDE_TASK_VISION_IMAGE_CLASSIFICATION_TYPES_H_
@ -8,13 +8,12 @@
|
|||
#include "opencv2/opencv.hpp"
|
||||
#include "task/vision/object_detection_types.h"
|
||||
|
||||
class objectDetectionTask {
|
||||
class ObjectDetectionTask {
|
||||
public:
|
||||
objectDetectionTask(const std::string &filePath,
|
||||
const std::string &labelFilepath);
|
||||
explicit objectDetectionTask(const std::string &filePath);
|
||||
~objectDetectionTask() = default;
|
||||
ObjectDetectionResult Detect(const cv::Mat &raw_img);
|
||||
explicit ObjectDetectionTask(const std::string &config_file_path);
|
||||
explicit ObjectDetectionTask(const ObjectDetectionOption &option);
|
||||
~ObjectDetectionTask() = default;
|
||||
ObjectDetectionResult Detect(const cv::Mat &img_raw);
|
||||
ObjectDetectionResult Detect(
|
||||
const std::vector<std::vector<float>> &input_tensors,
|
||||
const int img_height, const int img_width);
@ -3,6 +3,7 @@
|
|||
|
||||
#include <chrono>
|
||||
#include <limits> // for numeric_limits<>
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
#include <vector>
|
||||
|
||||
|
@ -27,15 +28,14 @@ struct BoundingBoxType {
|
|||
value_type x2;
|
||||
value_type y2;
|
||||
score_type score;
|
||||
const char *label_text;
|
||||
const char* label_text;
|
||||
unsigned int label; // for general object detection.
|
||||
bool flag; // future use.
|
||||
// convert type.
|
||||
template <typename O1, typename O2 = score_type>
|
||||
BoundingBoxType<O1, O2> convert_type() const;
|
||||
|
||||
template <typename O1, typename O2 = score_type>
|
||||
value_type iou_of(const BoundingBoxType<O1, O2> &other) const;
|
||||
value_type iou_of(const BoundingBoxType<O1, O2>& other) const;
|
||||
|
||||
value_type width() const;
|
||||
|
||||
|
@ -58,8 +58,7 @@ struct BoundingBoxType {
|
|||
y2(static_cast<value_type>(0)),
|
||||
score(static_cast<score_type>(0)),
|
||||
label_text(nullptr),
|
||||
label(0),
|
||||
flag(false) {
|
||||
label(0) {
|
||||
__assert_type<value_type, score_type>();
|
||||
}
|
||||
}; // End BoundingBox.
|
||||
|
@ -84,14 +83,13 @@ inline BoundingBoxType<O1, O2> BoundingBoxType<T1, T2>::convert_type() const {
|
|||
other.score = static_cast<other_score_type>(score);
|
||||
other.label_text = label_text;
|
||||
other.label = label;
|
||||
other.flag = flag;
|
||||
return other;
|
||||
}
|
||||
|
||||
template <typename T1, typename T2>
|
||||
template <typename O1, typename O2>
|
||||
inline typename BoundingBoxType<T1, T2>::value_type
|
||||
BoundingBoxType<T1, T2>::iou_of(const BoundingBoxType<O1, O2> &other) const {
|
||||
BoundingBoxType<T1, T2>::iou_of(const BoundingBoxType<O1, O2>& other) const {
|
||||
BoundingBoxType<value_type, score_type> tbox =
|
||||
other.template convert_type<value_type, score_type>();
|
||||
value_type inner_x1 = x1 > tbox.x1 ? x1 : tbox.x1;
|
||||
|
@ -159,7 +157,18 @@ BoundingBoxType<T1, T2>::area() const {
|
|||
|
||||
struct ObjectDetectionResult {
|
||||
std::vector<Boxi> result_bboxes;
|
||||
std::chrono::time_point< std::chrono::steady_clock > timestamp;
|
||||
std::chrono::time_point<std::chrono::steady_clock> timestamp;
|
||||
};
|
||||
|
||||
struct ObjectDetectionOption {
|
||||
std::string model_path;
|
||||
std::string label_path;
|
||||
int intra_threads_num = 2;
|
||||
int inter_threads_num = 2;
|
||||
float score_threshold = -1.f;
|
||||
float nms_threshold = -1.f;
|
||||
std::vector<int> class_name_whitelist;
|
||||
std::vector<int> class_name_blacklist;
|
||||
};
|
||||
|
||||
#endif // SUPPORT_INCLUDE_TASK_VISION_OBJECT_DETECTION_TYPES_H_
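The new ObjectDetectionOption struct above lets callers configure a task without a JSON config file. A hedged usage sketch, assuming the public ObjectDetectionTask API shown in the headers changed by this commit; the model path, label path, and image name are placeholders:

#include <iostream>
#include "opencv2/opencv.hpp"
#include "task/vision/object_detection_task.h"

int main() {
  ObjectDetectionOption option;
  option.model_path = "yolov6.onnx";         // placeholder model path
  option.label_path = "coco.txt";            // placeholder label path
  option.score_threshold = 0.39f;            // a negative value keeps the default
  ObjectDetectionTask task(option);
  if (task.getInitFlag() != 0) return -1;    // init failed
  cv::Mat img = cv::imread("person0.jpg");   // placeholder image
  if (img.empty()) return -1;
  for (const auto& box : task.Detect(img).result_bboxes)
    std::cout << box.label_text << " " << box.score << std::endl;
  return 0;
}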
@ -8,12 +8,13 @@
|
|||
#include "task/vision/object_detection_types.h"
|
||||
#include "task/vision/pose_estimation_types.h"
|
||||
|
||||
class poseEstimationTask {
|
||||
class PoseEstimationTask {
|
||||
public:
|
||||
explicit poseEstimationTask(const std::string &filePath);
|
||||
~poseEstimationTask() = default;
|
||||
explicit PoseEstimationTask(const std::string &config_file_path);
|
||||
explicit PoseEstimationTask(const PoseEstimationOption &option);
|
||||
~PoseEstimationTask() = default;
|
||||
int getInitFlag();
|
||||
PoseEstimationResult Estimate(const cv::Mat &raw_img, const Boxi &box);
|
||||
PoseEstimationResult Estimate(const cv::Mat &img_raw, const Boxi &box);
|
||||
|
||||
private:
|
||||
class impl;
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
#ifndef SUPPORT_INCLUDE_TASK_VISION_POSE_ESTIMATION_TYPES_H_
|
||||
#define SUPPORT_INCLUDE_TASK_VISION_POSE_ESTIMATION_TYPES_H_
|
||||
|
||||
#include <chrono> //for chrono
|
||||
#include <vector> //for vector
|
||||
#include <chrono> // for chrono
|
||||
#include <string> // for string
|
||||
#include <vector> // for vector
|
||||
|
||||
struct PosePoint {
|
||||
int x;
|
||||
|
@ -20,7 +21,13 @@ typedef PosePoint Vector2D;
|
|||
|
||||
struct PoseEstimationResult {
|
||||
std::vector<PosePoint> result_points;
|
||||
std::chrono::time_point< std::chrono::steady_clock > timestamp;
|
||||
std::chrono::time_point<std::chrono::steady_clock> timestamp;
|
||||
};
|
||||
|
||||
struct PoseEstimationOption {
|
||||
std::string model_path;
|
||||
int intra_threads_num = 2;
|
||||
int inter_threads_num = 2;
|
||||
};
|
||||
|
||||
#endif // SUPPORT_INCLUDE_TASK_VISION_POSE_ESTIMATION_TYPES_H_
@ -1,16 +1,24 @@
|
|||
#ifndef SUPPORT_INCLUDE_UTILS_UTILS_H_
|
||||
#define SUPPORT_INCLUDE_UTILS_UTILS_H_
|
||||
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
|
||||
#include "opencv2/opencv.hpp"
|
||||
#include "task/vision/image_classification_types.h"
|
||||
#include "task/vision/object_detection_types.h"
|
||||
#include "task/vision/pose_estimation_types.h"
|
||||
|
||||
bool exists_check(const std::string &name);
|
||||
|
||||
void resize_unscale(const cv::Mat &mat, cv::Mat &mat_rs, int target_height,
|
||||
void resizeUnscale(const cv::Mat &mat, cv::Mat &mat_rs, int target_height,
|
||||
int target_width);
|
||||
|
||||
int configToOption(const std::string &config_file_path,
|
||||
PoseEstimationOption &option);
|
||||
|
||||
int configToOption(const std::string &config_file_path,
|
||||
ObjectDetectionOption &option);
|
||||
|
||||
int configToOption(const std::string &config_file_path,
|
||||
ImageClassificationOption &option);
|
||||
|
||||
#endif // SUPPORT_INCLUDE_UTILS_UTILS_H_
@ -25,28 +25,69 @@
|
|||
# Brief: Bianbu AI Auto Toolkit.
|
||||
#
|
||||
|
||||
#set -e
|
||||
set -e
|
||||
#set -x
|
||||
entry_point=bianbu-ai-support # $(basename ${BASH_SOURCE[0]})
|
||||
app_data_dir=/usr/share/applications
|
||||
|
||||
cmd=$1
|
||||
shift
|
||||
|
||||
#xdg-user-dirs-update
|
||||
test -f ${XDG_CONFIG_HOME:-~/.config}/user-dirs.dirs && . ${XDG_CONFIG_HOME:-~/.config}/user-dirs.dirs
|
||||
CONFIG_HOME=${XDG_CONFIG_HOME:-~/.config}
|
||||
test -f ${CONFIG_HOME}/user-dirs.dirs && . ${CONFIG_HOME}/user-dirs.dirs
|
||||
|
||||
function auto_desktop() {
|
||||
desktop=$1
|
||||
shortcut=$(basename $desktop)
|
||||
if [[ ! -f ${XDG_CONFIG_HOME:-~/.config}/bianbu-ai-support/applications/${shortcut%.desktop}-initial-setup-done ]]; then
|
||||
function auto_config_desktop() {
|
||||
# desktop dir not exist or missing shortcut parameter
|
||||
if [ ! -d "${XDG_DESKTOP_DIR}" ] || [ $# -ne 1 ]; then return; fi
|
||||
if [ ! $HOME ]; then return; fi
|
||||
|
||||
local desktop=$1
|
||||
local shortcut=$(basename $desktop)
|
||||
local init_setup=${CONFIG_HOME}/${entry_point}/applications/${shortcut%.desktop}-initial-setup-done
|
||||
local action="create" # "update"
|
||||
|
||||
if [ -f ${init_setup} ] && [ "$(cat ${init_setup} | grep ${XDG_DESKTOP_DIR})" ]; then
|
||||
# i.e. desktop is already configured(initial-setup-done)
|
||||
#return
|
||||
if [ -e ${XDG_DESKTOP_DIR}/${shortcut} ]; then
|
||||
if cmp -s "${XDG_DESKTOP_DIR}/${shortcut}" "${app_data_dir}/${shortcut}"; then
|
||||
# desktop exist and exactly same
|
||||
if [ ! "$(gio info ${XDG_DESKTOP_DIR}/${shortcut} | grep 'metadata::trusted: true')" ]; then
|
||||
gio set ${XDG_DESKTOP_DIR}/${shortcut} metadata::trusted true
|
||||
fi
|
||||
return
|
||||
fi
|
||||
# i.e. desktop exist but need to be updated
|
||||
action="update"
|
||||
else
|
||||
# desktop has been removed
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
# install icon if not exist or need update
|
||||
if [ ! -f ${XDG_DESKTOP_DIR}/${shortcut} ] || [ "${action}" = "update" ]; then
|
||||
xdg-desktop-icon install --novendor ${desktop}
|
||||
gio set $(xdg-user-dir DESKTOP)/${shortcut} metadata::trusted true
|
||||
if test $? -eq 0; then
|
||||
chmod +x ${XDG_DESKTOP_DIR}/${shortcut}
|
||||
else
|
||||
action="${action} failed"
|
||||
fi
|
||||
if [[ -f $(xdg-user-dir DESKTOP)/${shortcut} ]]; then
|
||||
mkdir -p ${XDG_CONFIG_HOME:-~/.config}/bianbu-ai-support/applications
|
||||
touch ${XDG_CONFIG_HOME:-~/.config}/bianbu-ai-support/applications/${shortcut%.desktop}-initial-setup-done
|
||||
fi
|
||||
if [ -f ${XDG_DESKTOP_DIR}/${shortcut} ]; then
|
||||
if [ ! "$(gio info ${XDG_DESKTOP_DIR}/${shortcut} | grep 'metadata::trusted: true')" ]; then
|
||||
gio set ${XDG_DESKTOP_DIR}/${shortcut} metadata::trusted true
|
||||
fi
|
||||
fi
|
||||
# update init setup info
|
||||
mkdir -p ${CONFIG_HOME}/${entry_point}/applications
|
||||
echo ${XDG_DESKTOP_DIR}/${shortcut} "auto" ${action} $(date) >> ${init_setup}
|
||||
}
|
||||
|
||||
if [[ $cmd =~ "desktop" ]]; then
|
||||
auto_desktop $@
|
||||
if [[ $cmd == "desktop" ]]; then
|
||||
auto_config_desktop $@
|
||||
else
|
||||
echo "[ERROR] Unknown command: ${cmd}"
|
||||
exit 1
|
||||
fi
@ -5,5 +5,5 @@
|
|||
"label_path": "/usr/share/ai-support/models/coco.txt",
|
||||
"intra_threads_num": 2,
|
||||
"graph_optimization_level": "",
|
||||
"score_threshold": 0.3
|
||||
"score_threshold": 0.39
|
||||
}
|
|
@ -1,8 +1,10 @@
|
|||
[Desktop Entry]
|
||||
Type=Application
|
||||
#Encoding=UTF-8
|
||||
Version=1.0.3
|
||||
Version=1.0.12
|
||||
Name=object-detection
|
||||
Exec=/usr/bin/detection_stream_demo /usr/share/ai-support/models/yolov6.json 0 cameraId
|
||||
Name[en]=object-detection
|
||||
Name[zh_CN]=目标检测
|
||||
Exec=/usr/bin/detection_stream_demo /usr/share/ai-support/models/yolov6.json 0 camera_id
|
||||
Terminal=true
|
||||
Icon=/usr/share/icons/bianbu-ai-demo/object-detection.png
@ -1,8 +1,10 @@
|
|||
[Desktop Entry]
|
||||
Type=Application
|
||||
#Encoding=UTF-8
|
||||
Version=1.0.3
|
||||
Version=1.0.12
|
||||
Name=pose-tracker
|
||||
Name[en]=pose-tracker
|
||||
Name[zh_CN]=姿态追踪
|
||||
Exec=/usr/bin/tracker_stream_demo /usr/share/ai-support/models/yolov6.json /usr/share/ai-support/models/rtmpose.json 0 cameraId
|
||||
Terminal=true
|
||||
Icon=/usr/share/icons/bianbu-ai-demo/pose-tracker.png
@ -1,26 +1,22 @@
|
|||
#include "src/core/engine.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <codecvt>
|
||||
inline std::wstring to_wstring(const std::string& input) {
|
||||
std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
|
||||
return converter.from_bytes(input);
|
||||
}
|
||||
#endif /* _WIN32 */
|
||||
#include "src/utils/utils.h"
|
||||
#include "utils/utils.h"
|
||||
|
||||
int Engine::Init(std::string instanceName, std::string modelFilepath) {
|
||||
return ortwrapper_.Init(instanceName,
|
||||
int Engine::Init(const std::string &instance_name,
|
||||
const std::string &model_file_path,
|
||||
const int intra_threads_num, const int inter_threads_num) {
|
||||
return ortwrapper_.Init(instance_name,
|
||||
#ifdef _WIN32
|
||||
to_wstring(modelFilepath)
|
||||
to_wstring(model_file_path), intra_threads_num,
|
||||
inter_threads_num
|
||||
#else
|
||||
modelFilepath
|
||||
model_file_path, intra_threads_num, inter_threads_num
|
||||
#endif /* _WIN32 */
|
||||
);
|
||||
}
|
||||
|
||||
int Engine::Init(json config) { return ortwrapper_.Init(config); }
|
||||
|
||||
std::vector<Ort::Value> Engine::Interpreter(
|
||||
std::vector<std::vector<float>>& input_values_handler) {
|
||||
std::vector<std::vector<float>> &input_values_handler) {
|
||||
return ortwrapper_.Invoke(input_values_handler);
|
||||
}
@ -6,14 +6,12 @@
|
|||
|
||||
#include "opencv2/opencv.hpp"
|
||||
#include "src/core/ort_wrapper.h"
|
||||
#include "src/utils/json.hpp"
|
||||
using json = nlohmann::json;
|
||||
class Engine {
|
||||
public:
|
||||
Engine() { OrtWrapper ortwrapper_; }
|
||||
~Engine() {}
|
||||
int Init(std::string instanceName, std::string modelFilepath);
|
||||
int Init(json config);
|
||||
int Init(const std::string &instance_name, const std::string &model_file_path,
|
||||
const int intra_threads_num = 2, const int inter_threads_num = 2);
|
||||
size_t GetInputCount() { return ortwrapper_.GetInputCount(); }
|
||||
size_t GetOutputCount() { return ortwrapper_.GetOutputCount(); }
|
||||
std::vector<std::vector<int64_t>> GetInputDims() {
@ -1,110 +1,59 @@
|
|||
#include "src/core/ort_wrapper.h"
|
||||
|
||||
#include <stdlib.h> // for: getenv atoi
|
||||
|
||||
#include <memory>
|
||||
#include <utility> // for move
|
||||
|
||||
#ifdef _WIN32
|
||||
#include "src/utils/utils.h"
|
||||
#endif /* _WIN32 */
|
||||
#include "utils/time.h"
|
||||
#ifdef HAS_SPACEMIT_EP
|
||||
#include "spacemit_ort_env.h"
|
||||
#endif
|
||||
|
||||
int OrtWrapper::Init(std::string instanceName,
|
||||
std::basic_string<ORTCHAR_T> modelFilepath) {
|
||||
int OrtWrapper::Init(const std::string &instance_name,
|
||||
const std::basic_string<ORTCHAR_T> &model_file_path,
|
||||
const int intra_threads_num, const int inter_threads_num) {
|
||||
std::unique_ptr<Ort::Env> env(new Ort::Env(
|
||||
OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, instanceName.c_str()));
|
||||
OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, instance_name.c_str()));
|
||||
// Creation: The Ort::Session is created here
|
||||
env_ = std::move(env);
|
||||
sessionOptions_.SetIntraOpNumThreads(2);
|
||||
sessionOptions_.AddConfigEntry("session.intra_op.allow_spinning", "0");
|
||||
sessionOptions_.SetInterOpNumThreads(2);
|
||||
sessionOptions_.AddConfigEntry("session.inter_op.allow_spinning", "0");
|
||||
sessionOptions_.SetIntraOpNumThreads(intra_threads_num);
|
||||
sessionOptions_.SetInterOpNumThreads(inter_threads_num);
|
||||
#ifdef HAS_SPACEMIT_EP
|
||||
const char *disable_spacemit_ep = getenv("SUPPORT_DISABLE_SPACEMIT_EP");
|
||||
if (disable_spacemit_ep != nullptr && strcmp(disable_spacemit_ep, "1") == 0) {
|
||||
std::cout << "Disable spacemit ep now" << std::endl;
|
||||
} else {
|
||||
SessionOptionsSpaceMITEnvInit(sessionOptions_);
|
||||
// auto providers = Ort::GetAvailableProviders();
|
||||
std::cout << "Enable spacemit ep now" << std::endl;
|
||||
}
|
||||
#else
|
||||
std::cout << "Disable spacemit ep now" << std::endl;
|
||||
std::cout << "Unsupport spacemit ep now" << std::endl;
|
||||
#endif
|
||||
// Sets graph optimization level
|
||||
// Available levels are
|
||||
// ORT_DISABLE_ALL -> To disable all optimizations
|
||||
// ORT_ENABLE_BASIC -> To enable basic optimizations (Such as redundant node
|
||||
// removals) ORT_ENABLE_EXTENDED -> To enable extended optimizations
|
||||
// (Includes level 1 + more complex optimizations like node fusions)
|
||||
// ORT_ENABLE_ALL -> To Enable All possible optimizations
|
||||
// sessionOptions_.SetGraphOptimizationLevel(
|
||||
// GraphOptimizationLevel::ORT_DISABLE_ALL);
|
||||
std::unique_ptr<Ort::Session> session(
|
||||
new Ort::Session(*env_, modelFilepath.c_str(), sessionOptions_));
|
||||
session_ = std::move(session);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int OrtWrapper::Init(json config) {
|
||||
std::string instanceName;
|
||||
if (config.contains("instance_name")) {
|
||||
instanceName = config["instance_name"];
|
||||
}
|
||||
std::basic_string<ORTCHAR_T> modelFilepath = config["model_path"];
|
||||
std::unique_ptr<Ort::Env> env(new Ort::Env(
|
||||
OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, instanceName.c_str()));
|
||||
// Creation: The Ort::Session is created here
|
||||
env_ = std::move(env);
|
||||
if (!config.contains("disable_spcacemit_ep") ||
|
||||
config["disable_spcacemit_ep"] == false) {
|
||||
#ifdef HAS_SPACEMIT_EP
|
||||
SessionOptionsSpaceMITEnvInit(sessionOptions_);
|
||||
// auto providers = Ort::GetAvailableProviders();
|
||||
std::cout << "Enable spacemit ep now" << std::endl;
|
||||
const char *opt_model_path = getenv("SUPPORT_OPT_MODEL_PATH");
|
||||
if (opt_model_path != nullptr) {
|
||||
#ifdef _WIN32
|
||||
std::wstring wstr = to_wstring(opt_model_path);
|
||||
sessionOptions_.SetOptimizedModelFilePath(wstr.c_str());
|
||||
#else
|
||||
std::cout << "[Warning] Unsupport spacemit ep now" << std::endl;
|
||||
#endif
|
||||
} else {
|
||||
std::cout << "Disable spacemit ep now" << std::endl;
|
||||
sessionOptions_.SetOptimizedModelFilePath(opt_model_path);
|
||||
#endif /* _WIN32 */
|
||||
}
|
||||
if (config.contains("intra_threads_num")) {
|
||||
int intraThreadsnum = config["intra_threads_num"];
|
||||
sessionOptions_.SetIntraOpNumThreads(intraThreadsnum);
|
||||
sessionOptions_.AddConfigEntry("session.intra_op.allow_spinning", "0");
|
||||
} else {
|
||||
sessionOptions_.SetIntraOpNumThreads(4);
|
||||
sessionOptions_.AddConfigEntry("session.intra_op.allow_spinning", "0");
|
||||
}
|
||||
sessionOptions_.SetInterOpNumThreads(1);
|
||||
sessionOptions_.AddConfigEntry("session.inter_op.allow_spinning", "0");
|
||||
if (config.contains("profiling_projects")) {
|
||||
std::basic_string<ORTCHAR_T> profiling_projects =
|
||||
config["profiling_projects"];
|
||||
if (profiling_projects.size()) {
|
||||
sessionOptions_.EnableProfiling(profiling_projects.c_str());
|
||||
}
|
||||
}
|
||||
if (config.contains("opt_model_path")) {
|
||||
std::basic_string<ORTCHAR_T> opt_model_path = config["opt_model_path"];
|
||||
if (opt_model_path.size()) {
|
||||
sessionOptions_.SetOptimizedModelFilePath(opt_model_path.c_str());
|
||||
}
|
||||
}
|
||||
if (config.contains("log_level")) {
|
||||
int log_level = config["log_level"];
|
||||
if (log_level >= 0 && log_level <= 4) {
|
||||
sessionOptions_.SetLogSeverityLevel(log_level);
|
||||
}
|
||||
}
|
||||
// Sets graph optimization level
|
||||
// Available levels are
|
||||
// ORT_DISABLE_ALL -> To disable all optimizations
|
||||
// ORT_ENABLE_BASIC -> To enable basic optimizations (Such as redundant node
|
||||
// removals) ORT_ENABLE_EXTENDED -> To enable extended optimizations
|
||||
// (Includes level 1 + more complex optimizations like node fusions)
|
||||
// ORT_ENABLE_ALL -> To Enable All possible optimizations
|
||||
if (config.contains("graph_optimization_level")) {
|
||||
if (config["graph_optimization_level"] == "ort_disable_all") {
|
||||
const char *graph_optimization_level =
|
||||
getenv("SUPPORT_GRAPH_OPTIMIZATION_LEVEL");
|
||||
if (graph_optimization_level != nullptr) {
|
||||
if (strcmp(graph_optimization_level, "ort_disable_all") == 0) {
|
||||
sessionOptions_.SetGraphOptimizationLevel(
|
||||
GraphOptimizationLevel::ORT_DISABLE_ALL);
|
||||
} else if (config["graph_optimization_level"] == "ort_enable_basic") {
|
||||
} else if (strcmp(graph_optimization_level, "ort_enable_basic") == 0) {
|
||||
sessionOptions_.SetGraphOptimizationLevel(
|
||||
GraphOptimizationLevel::ORT_ENABLE_BASIC);
|
||||
} else if (config["graph_optimization_level"] == "ort_enable_extended") {
|
||||
} else if (strcmp(graph_optimization_level, "ort_enable_extended") == 0) {
|
||||
sessionOptions_.SetGraphOptimizationLevel(
|
||||
GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
|
||||
} else {
|
||||
|
@ -112,8 +61,33 @@ int OrtWrapper::Init(json config) {
|
|||
GraphOptimizationLevel::ORT_ENABLE_ALL);
|
||||
}
|
||||
}
|
||||
std::unique_ptr<Ort::Session> session(
|
||||
new Ort::Session(*env_, modelFilepath.c_str(), sessionOptions_));
|
||||
const char *profiling_projects = getenv("SUPPORT_PROFILING_PROJECTS");
|
||||
if (profiling_projects != nullptr) {
|
||||
#ifdef _WIN32
|
||||
std::wstring wstr = to_wstring(profiling_projects);
|
||||
sessionOptions_.EnableProfiling(wstr.c_str());
|
||||
#else
|
||||
sessionOptions_.EnableProfiling(profiling_projects);
|
||||
#endif /* _WIN32 */
|
||||
}
|
||||
|
||||
const char *log_level_str = getenv("SUPPORT_LOG_LEVEL");
|
||||
if (log_level_str != nullptr) {
|
||||
int log_level = atoi(log_level_str);
|
||||
if (log_level >= 0 && log_level <= 4) {
|
||||
sessionOptions_.SetLogSeverityLevel(log_level);
|
||||
}
|
||||
}
|
||||
std::unique_ptr<Ort::Session> session;
|
||||
try {
|
||||
session = std::make_unique<Ort::Session>(*env_, model_file_path.c_str(),
|
||||
sessionOptions_);
|
||||
} catch (Ort::Exception &e) {
|
||||
std::cout << "[ ERROR ] Init failed, onnxruntime error code = "
|
||||
<< e.GetOrtErrorCode() << ", error message: " << e.what()
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
session_ = std::move(session);
|
||||
return 0;
|
||||
}
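The rewritten Init() above moves what used to be per-config JSON options into environment variables (SUPPORT_GRAPH_OPTIMIZATION_LEVEL, SUPPORT_PROFILING_PROJECTS, SUPPORT_LOG_LEVEL, SUPPORT_OPT_MODEL_PATH). A stripped-down sketch of the same idea against the ONNX Runtime C++ API; this is not the library's own Init(), and "model.onnx" is a placeholder:

#include <cstdlib>
#include <cstring>
#include "onnxruntime_cxx_api.h"

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "demo");
  Ort::SessionOptions opts;
  opts.SetIntraOpNumThreads(2);
  opts.SetInterOpNumThreads(2);
  // Pick the graph optimization level from the environment, default to ALL.
  const char* level = std::getenv("SUPPORT_GRAPH_OPTIMIZATION_LEVEL");
  if (level && std::strcmp(level, "ort_disable_all") == 0)
    opts.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_DISABLE_ALL);
  else
    opts.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
  if (const char* log_level = std::getenv("SUPPORT_LOG_LEVEL"))
    opts.SetLogSeverityLevel(std::atoi(log_level));
  Ort::Session session(env, "model.onnx", opts);  // placeholder model path
  return 0;
}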
|
||||
|
@ -202,7 +176,7 @@ std::vector<Ort::Value> OrtWrapper::Invoke(
|
|||
Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(
|
||||
OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
|
||||
|
||||
for (int i = 0; i < static_cast<int>(num_inputs); i++) {
|
||||
for (size_t i = 0; i < num_inputs; i++) {
|
||||
input_tensors.push_back(Ort::Value::CreateTensor<float>(
|
||||
memoryInfo, input_tensor_values[i].data(), input_tensor_size[i],
|
||||
input_node_dims[i].data(), input_node_dims[i].size()));
|
@ -10,16 +10,14 @@
|
|||
|
||||
#include "onnxruntime_cxx_api.h"
|
||||
#include "opencv2/opencv.hpp"
|
||||
#include "src/utils/json.hpp"
|
||||
using json = nlohmann::json;
|
||||
|
||||
class OrtWrapper {
|
||||
public:
|
||||
OrtWrapper() {}
|
||||
~OrtWrapper() {}
|
||||
int Init(std::string instanceName,
|
||||
std::basic_string<ORTCHAR_T> modelFilepath);
|
||||
int Init(json config);
|
||||
int Init(const std::string& instance_name,
|
||||
const std::basic_string<ORTCHAR_T>& model_file_path,
|
||||
const int intra_threads_num, const int inter_threads_num);
|
||||
size_t GetInputCount() { return session_->GetInputCount(); }
|
||||
size_t GetOutputCount() { return session_->GetOutputCount(); }
|
||||
std::vector<std::vector<int64_t>> GetInputDims();
|
||||
|
|
|
@ -25,7 +25,7 @@ ImageClassificationResult ClassificationPostprocessor::Postprocess(
|
|||
which are usually the buffer from std::vector instances. */
|
||||
Ort::Value &pred = output_tensors.at(0);
|
||||
const float *output_pred_ptr = pred.GetTensorData<float>();
|
||||
for (int i = 0; i < static_cast<int>(labels.size()); i++) {
|
||||
for (size_t i = 0; i < labels.size(); i++) {
|
||||
activation = output_pred_ptr[i];
|
||||
expSum += std::exp(activation);
|
||||
if (activation > maxActivation) {
|
||||
|
|
|
@ -75,7 +75,6 @@ void DetectionPostprocessor::Postprocess(
|
|||
box.score = conf;
|
||||
box.label = label;
|
||||
box.label_text = labels[label].c_str();
|
||||
box.flag = true;
|
||||
bbox_collection.push_back(box);
|
||||
count += 1; // limit boxes for nms.
|
||||
if (count > max_nms) break;
|
||||
|
@ -98,7 +97,6 @@ void DetectionPostprocessor::Postprocess(
|
|||
result_box.label = detected_boxes[i].label;
|
||||
result_box.score = detected_boxes[i].score;
|
||||
result_box.label_text = detected_boxes[i].label_text;
|
||||
result_box.flag = detected_boxes[i].flag;
|
||||
result_boxes.push_back(result_box);
|
||||
}
|
||||
}
|
||||
|
@ -111,7 +109,7 @@ void DetectionPostprocessor::PostprocessYolov6(
|
|||
TimeWatcher t("|-- Postprocess");
|
||||
#endif
|
||||
if (score_threshold == -1.f) {
|
||||
score_threshold = 0.59f;
|
||||
score_threshold = 0.39f;
|
||||
}
|
||||
std::vector<Boxf> bbox_collection;
|
||||
bbox_collection.clear();
|
||||
|
@ -148,7 +146,6 @@ void DetectionPostprocessor::PostprocessYolov6(
|
|||
}
|
||||
result_box.label = output_labels.At<int>({0, i});
|
||||
result_box.label_text = labels[result_box.label].c_str();
|
||||
result_box.flag = true;
|
||||
result_boxes.push_back(result_box);
|
||||
}
|
||||
}
|
||||
|
@ -222,7 +219,6 @@ void DetectionPostprocessor::PostprocessNanoDetPlus(
|
|||
box.label = label;
|
||||
box.label_text = labels[label].c_str();
|
||||
box.score = cls_conf;
|
||||
box.flag = true;
|
||||
box.x1 = ((ct_x - dis_pred[0]) * strides[i] - dw) / resize_ratio;
|
||||
box.x1 = std::max(box.x1, .0f);
|
||||
box.y1 = ((ct_y - dis_pred[1]) * strides[i] - dh) / resize_ratio;
|
||||
|
@ -250,7 +246,6 @@ void DetectionPostprocessor::PostprocessNanoDetPlus(
|
|||
result_box.label = detected_boxes[i].label;
|
||||
result_box.score = detected_boxes[i].score;
|
||||
result_box.label_text = detected_boxes[i].label_text;
|
||||
result_box.flag = detected_boxes[i].flag;
|
||||
result_boxes.push_back(result_box);
|
||||
}
|
||||
}
|
||||
|
@ -294,7 +289,6 @@ void DetectionPostprocessor::PostprocessRtmDet(
|
|||
int classes = label_result[i];
|
||||
if (classes != 0) continue;
|
||||
Boxf box;
|
||||
box.flag = true;
|
||||
box.score = det_result[i * reshap_dims + 4];
|
||||
if (box.score < score_threshold) continue; // filter
|
||||
box.x1 = (det_result[i * reshap_dims] - dw) / resize_ratio;
|
||||
|
@ -324,7 +318,6 @@ void DetectionPostprocessor::PostprocessRtmDet(
|
|||
result_box.label = detected_boxes[i].label;
|
||||
result_box.score = detected_boxes[i].score;
|
||||
result_box.label_text = detected_boxes[i].label_text;
|
||||
result_box.flag = detected_boxes[i].flag;
|
||||
result_boxes.push_back(result_box);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ void DetectionPreprocessor::PreprocessNanoDetPlus(
|
|||
TimeWatcher t("| |-- Resize unscale");
|
||||
#endif
|
||||
if (input_height != mat.cols || input_width != mat.rows) {
|
||||
resize_unscale(mat, resizedImage, input_height, input_width);
|
||||
resizeUnscale(mat, resizedImage, input_height, input_width);
|
||||
} else {
|
||||
resizedImage = mat;
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ void DetectionPreprocessor::Preprocess(
|
|||
TimeWatcher t("| |-- Resize unscale");
|
||||
#endif
|
||||
if (input_height != mat.cols || input_width != mat.rows) {
|
||||
resize_unscale(mat, resizedImageBGR, input_height, input_width);
|
||||
resizeUnscale(mat, resizedImageBGR, input_height, input_width);
|
||||
} else {
|
||||
resizedImageBGR = mat;
|
||||
}
|
||||
|
@ -85,7 +85,7 @@ void DetectionPreprocessor::Preprocess(
|
|||
TimeWatcher t("| |-- Resize unscale");
|
||||
#endif
|
||||
if (input_height != mat.cols || input_width != mat.rows) {
|
||||
resize_unscale(mat, resizedImageBGR, input_height, input_width);
|
||||
resizeUnscale(mat, resizedImageBGR, input_height, input_width);
|
||||
} else {
|
||||
resizedImageBGR = mat;
|
||||
}
|
||||
|
|
|
@ -62,7 +62,7 @@ void EstimationPostprocessor::Postprocess(
|
|||
// anti affine transformation to obtain the coordinates on the original
|
||||
// picture
|
||||
cv::Mat affine_transform_reverse = crop_result_pair.second;
|
||||
for (int i = 0; i < static_cast<int>(result_points.size()); ++i) {
|
||||
for (size_t i = 0; i < result_points.size(); ++i) {
|
||||
cv::Mat origin_point_Mat = cv::Mat::ones(3, 1, CV_64FC1);
|
||||
origin_point_Mat.at<double>(0, 0) = result_points[i].x;
|
||||
origin_point_Mat.at<double>(1, 0) = result_points[i].y;
|
||||
|
|
|
@ -60,10 +60,6 @@ std::pair<cv::Mat, cv::Mat> EstimationPreprocessor::CropImageByDetectBox(
|
|||
return result_pair;
|
||||
}
|
||||
|
||||
if (!box.flag) {
|
||||
return result_pair;
|
||||
}
|
||||
|
||||
// deep copy
|
||||
cv::Mat input_mat_copy;
|
||||
input_image.copyTo(input_mat_copy);
|
||||
|
|
|
@ -1,29 +1,38 @@
|
|||
#include "src/task/vision/imageclassification/image_classification.h"
|
||||
|
||||
#include "utils/time.h"
|
||||
#include <iostream>
|
||||
|
||||
int imageClassification::Init(const std::string modelFilepath,
|
||||
const std::string labelFilepath) {
|
||||
instanceName_ = "image-classification-inference";
|
||||
modelFilepath_ = modelFilepath;
|
||||
labelFilepath_ = labelFilepath;
|
||||
labels_ = readLabels(labelFilepath_);
|
||||
initFlag_ = GetEngine()->Init(instanceName_, modelFilepath_);
|
||||
return initFlag_;
|
||||
#include "utils/time.h"
|
||||
#include "utils/utils.h"
|
||||
|
||||
int ImageClassification::InitFromOption(
|
||||
const ImageClassificationOption &option) {
|
||||
option_ = option;
|
||||
init_flag_ = 1;
|
||||
instance_name_ = "image-classification-inference";
|
||||
labels_ = readLabels(option_.label_path);
|
||||
if (labels_.empty()) {
|
||||
std::cout << "[ ERROR ] label file is empty, init fail" << std::endl;
|
||||
return init_flag_;
|
||||
}
|
||||
init_flag_ =
|
||||
GetEngine()->Init(instance_name_, option_.model_path,
|
||||
option.intra_threads_num, option.inter_threads_num);
|
||||
return init_flag_;
|
||||
}
|
||||
|
||||
void imageClassification::Preprocess(const cv::Mat &img_raw) {
|
||||
void ImageClassification::Preprocess(const cv::Mat &img_raw) {
|
||||
auto input_dims = GetInputShape();
|
||||
preprocessor_.Preprocess(img_raw, input_dims, input_tensors_);
|
||||
}
|
||||
|
||||
ImageClassificationResult imageClassification::Postprocess() {
|
||||
ImageClassificationResult ImageClassification::Postprocess() {
|
||||
return postprocessor_.Postprocess(Infer(input_tensors_), labels_);
|
||||
}
|
||||
|
||||
ImageClassificationResult imageClassification::Classify(
|
||||
ImageClassificationResult ImageClassification::Classify(
|
||||
const cv::Mat &img_raw) {
|
||||
if (initFlag_ != 0) {
|
||||
if (init_flag_ != 0) {
|
||||
std::cout << "[ ERROR ] Init fail return empty result" << std::endl;
|
||||
ImageClassificationResult empty_result{"", -1, .0f};
|
||||
return empty_result;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
#ifndef SUPPORT_SRC_TASK_VISION_IMAGECLASSIFICATION_IMAGE_CLASSIFICATION_H_
|
||||
#define SUPPORT_SRC_TASK_VISION_IMAGECLASSIFICATION_IMAGE_CLASSIFICATION_H_
|
||||
#ifndef SUPPORT_SRC_TASK_VISION_ImageClassification_IMAGE_CLASSIFICATION_H_
|
||||
#define SUPPORT_SRC_TASK_VISION_ImageClassification_IMAGE_CLASSIFICATION_H_
|
||||
|
||||
#include <cmath>
|
||||
#include <exception>
|
||||
|
@ -19,14 +19,14 @@
|
|||
#include "src/utils/utils.h"
|
||||
#include "task/vision/image_classification_types.h"
|
||||
|
||||
class imageClassification
|
||||
class ImageClassification
|
||||
: public BaseVisionTaskApi<ImageClassificationResult> {
|
||||
public:
|
||||
imageClassification() : BaseVisionTaskApi<ImageClassificationResult>() {
|
||||
initFlag_ = -1;
|
||||
ImageClassification() : BaseVisionTaskApi<ImageClassificationResult>() {
|
||||
init_flag_ = -1;
|
||||
}
|
||||
~imageClassification() {}
|
||||
int Init(const std::string modelFilepath, const std::string labelFilepath);
|
||||
~ImageClassification() {}
|
||||
int InitFromOption(const ImageClassificationOption& option);
|
||||
ImageClassificationResult Classify(const cv::Mat& img_raw);
|
||||
|
||||
protected:
|
||||
|
@ -36,14 +36,13 @@ class imageClassification
|
|||
private:
|
||||
ClassificationPreprocessor preprocessor_;
|
||||
ClassificationPostprocessor postprocessor_;
|
||||
std::string instanceName_;
|
||||
std::string modelFilepath_;
|
||||
ImageClassificationOption option_;
|
||||
std::string instance_name_;
|
||||
cv::Mat img_raw_;
|
||||
std::string labelFilepath_;
|
||||
std::vector<std::string> labels_;
|
||||
std::vector<Ort::Value> output_tensors_;
|
||||
std::vector<std::vector<float>> input_tensors_;
|
||||
int initFlag_ = false;
|
||||
int init_flag_;
|
||||
};
|
||||
|
||||
#endif // SUPPORT_SRC_TASK_VISION_IMAGECLASSIFICATION_IMAGE_CLASSIFICATION_H_
|
||||
#endif // SUPPORT_SRC_TASK_VISION_ImageClassification_IMAGE_CLASSIFICATION_H_
|
||||
|
|
|
@@ -4,39 +4,35 @@
#include "src/task/vision/imageclassification/image_classification.h"
#include "src/utils/utils.h"

class imageClassificationTask::impl {
class ImageClassificationTask::impl {
 public:
  std::unique_ptr<imageClassification> imageclassification_;
  std::unique_ptr<ImageClassification> imageclassification_;
};

imageClassificationTask::imageClassificationTask(
    const std::string& filePath, const std::string& labelFilepath)
ImageClassificationTask::ImageClassificationTask(
    const std::string& config_file_path)
    : pimpl_(std::make_unique<impl>()) {
  init_flag_ = -1;
  pimpl_->imageclassification_ =
      std::unique_ptr<imageClassification>(new imageClassification());
  if (!checkLabelFileExtension(labelFilepath)) {
    std::cout << "[ ERROR ] The LabelFilepath is not set correctly and the "
                 "labels file should end with extension .txt"
              << std::endl;
  } else if (!checkModelFileExtension(filePath)) {
    std::cout << "[ ERROR ] The ModelFilepath is not correct. Make sure you "
                 "are setting the path to an onnx model file (.onnx)"
              << std::endl;
  } else if (!exists_check(filePath) || !exists_check(labelFilepath)) {
    std::cout << "[ ERROR ] The File does not exist. Make sure you are setting "
                 "the correct path to the file"
              << std::endl;
  } else {
    init_flag_ = pimpl_->imageclassification_->Init(filePath, labelFilepath);
    if (init_flag_ != 0) {
      std::cout << "[Error] Init fail" << std::endl;
    }
      std::unique_ptr<ImageClassification>(new ImageClassification());
  ImageClassificationOption option;
  if (!configToOption(config_file_path, option)) {
    init_flag_ = pimpl_->imageclassification_->InitFromOption(option);
  }
}

int imageClassificationTask::getInitFlag() { return init_flag_; }
ImageClassificationTask::ImageClassificationTask(
    const ImageClassificationOption& option)
    : pimpl_(std::make_unique<impl>()) {
  init_flag_ = -1;
  pimpl_->imageclassification_ =
      std::unique_ptr<ImageClassification>(new ImageClassification());
  init_flag_ = pimpl_->imageclassification_->InitFromOption(option);
}

ImageClassificationResult imageClassificationTask::Classify(
int ImageClassificationTask::getInitFlag() { return init_flag_; }

ImageClassificationResult ImageClassificationTask::Classify(
    const cv::Mat& img_raw) {
  return pimpl_->imageclassification_->Classify(img_raw);
}

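Note: with the constructors above, a caller no longer passes raw model/label paths; it either fills an ImageClassificationOption or points at a JSON config. A minimal sketch of the new call pattern (the public header path and the file names are assumptions, not taken from this commit):

#include <iostream>
#include <opencv2/opencv.hpp>

#include "task/vision/image_classification_task.h"  // assumed public header path

int main() {
  ImageClassificationOption option;
  option.model_path = "model.onnx";  // placeholder paths
  option.label_path = "labels.txt";

  ImageClassificationTask task(option);  // or: ImageClassificationTask task("config.json");
  if (task.getInitFlag() != 0) {
    std::cout << "init failed" << std::endl;
    return -1;
  }

  cv::Mat img = cv::imread("input.jpg");
  ImageClassificationResult result = task.Classify(img);
  // Per the brace-init in Classify(), the result carries a label string,
  // a label index and a score; the field names live in the types header.
  (void)result;
  return 0;
}
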
@@ -5,23 +5,25 @@

#include "src/utils/json.hpp"
#include "utils/time.h"
#include "utils/utils.h"
using json = nlohmann::json;

std::vector<std::vector<float>> ObjectDetection::Process(
    const cv::Mat &raw_img) {
    const cv::Mat &img_raw) {
  input_tensors_.clear();
  if (initFlag_ != 0) {
  if (init_flag_ != 0) {
    std::cout << "[ ERROR ] Init fail return empty tensors" << std::endl;
    return input_tensors_;
  }
  if (modelFilepath_.find("yolov4") != modelFilepath_.npos) {
    preprocessor_.Preprocess(raw_img, inputDims_, input_tensors_, HWC);
  } else if (modelFilepath_.find("yolov6") != modelFilepath_.npos) {
    preprocessor_.Preprocess(raw_img, inputDims_, input_tensors_, CHW);
  } else if (modelFilepath_.find("nanodet-plus") != modelFilepath_.npos) {
    preprocessor_.PreprocessNanoDetPlus(raw_img, inputDims_, input_tensors_);
  } else if (modelFilepath_.find("rtmdet") != modelFilepath_.npos) {
    preprocessor_.Preprocess(raw_img, inputDims_, input_tensors_, CHW);
  if (option_.model_path.find("yolov4") != option_.model_path.npos) {
    preprocessor_.Preprocess(img_raw, input_dims_, input_tensors_, HWC);
  } else if (option_.model_path.find("yolov6") != option_.model_path.npos) {
    preprocessor_.Preprocess(img_raw, input_dims_, input_tensors_, CHW);
  } else if (option_.model_path.find("nanodet-plus") !=
             option_.model_path.npos) {
    preprocessor_.PreprocessNanoDetPlus(img_raw, input_dims_, input_tensors_);
  } else if (option_.model_path.find("rtmdet") != option_.model_path.npos) {
    preprocessor_.Preprocess(img_raw, input_dims_, input_tensors_, CHW);
  } else {
    std::cout << "[ ERROR ] Unsupported model return empty tensors"
              << std::endl;
@@ -30,10 +32,10 @@ std::vector<std::vector<float>> ObjectDetection::Process(
  return input_tensors_;
}

ObjectDetectionResult ObjectDetection::Detect(const cv::Mat &raw_img) {
ObjectDetectionResult ObjectDetection::Detect(const cv::Mat &img_raw) {
  result_boxes_.clear();
  input_tensors_.clear();
  Preprocess(raw_img);
  Preprocess(img_raw);
  return Postprocess();
}

@@ -42,28 +44,29 @@ ObjectDetectionResult ObjectDetection::Detect(
    const int img_width) {
  result_boxes_.clear();
  input_tensors_ = input_tensors;
  if (initFlag_ != 0) {
  if (init_flag_ != 0) {
    std::cout << "[ ERROR ] Init fail return empty result" << std::endl;
    result_.result_bboxes = result_boxes_;
    result_.timestamp = std::chrono::steady_clock::now();
    return result_;
  }
  if (modelFilepath_.find("yolov4") != modelFilepath_.npos) {
    postprocessor_.Postprocess(Infer(input_tensors_), result_boxes_, inputDims_,
                               img_height, img_width, labels_, score_threshold_,
                               nms_threshold_);
  } else if (modelFilepath_.find("yolov6") != modelFilepath_.npos) {
  if (option_.model_path.find("yolov4") != option_.model_path.npos) {
    postprocessor_.Postprocess(Infer(input_tensors_), result_boxes_,
                               input_dims_, img_height, img_width, labels_,
                               option_.score_threshold, option_.nms_threshold);
  } else if (option_.model_path.find("yolov6") != option_.model_path.npos) {
    postprocessor_.PostprocessYolov6(Infer(input_tensors_), result_boxes_,
                                     inputDims_, img_height, img_width, labels_,
                                     score_threshold_);
  } else if (modelFilepath_.find("nanodet-plus") != modelFilepath_.npos) {
                                     input_dims_, img_height, img_width,
                                     labels_, option_.score_threshold);
  } else if (option_.model_path.find("nanodet-plus") !=
             option_.model_path.npos) {
    postprocessor_.PostprocessNanoDetPlus(
        Infer(input_tensors_), result_boxes_, inputDims_, img_height, img_width,
        labels_, score_threshold_, nms_threshold_);
  } else if (modelFilepath_.find("rtmdet") != modelFilepath_.npos) {
    postprocessor_.PostprocessRtmDet(Infer(input_tensors_), result_boxes_,
                                     inputDims_, img_height, img_width, labels_,
                                     score_threshold_, nms_threshold_);
        Infer(input_tensors_), result_boxes_, input_dims_, img_height,
        img_width, labels_, option_.score_threshold, option_.nms_threshold);
  } else if (option_.model_path.find("rtmdet") != option_.model_path.npos) {
    postprocessor_.PostprocessRtmDet(
        Infer(input_tensors_), result_boxes_, input_dims_, img_height,
        img_width, labels_, option_.score_threshold, option_.nms_threshold);
  } else {
    std::cout << "[ ERROR ] Unsupported model return empty result" << std::endl;
  }
@@ -72,123 +75,109 @@ ObjectDetectionResult ObjectDetection::Detect(
  return result_;
}

void ObjectDetection::Preprocess(const cv::Mat &raw_img) {
  if (initFlag_ != 0) {
    std::cout << "[ ERROR ] Init fail" << std::endl;
void ObjectDetection::Preprocess(const cv::Mat &img_raw) {
  if (init_flag_ != 0) {
    return;
  }
  img_height_ = raw_img.rows;
  img_width_ = raw_img.cols;
  if (modelFilepath_.find("yolov4") != modelFilepath_.npos) {
    preprocessor_.Preprocess(raw_img, inputDims_, input_tensors_, HWC);
  } else if (modelFilepath_.find("yolov6") != modelFilepath_.npos) {
    preprocessor_.Preprocess(raw_img, inputDims_, input_tensors_, CHW);
  } else if (modelFilepath_.find("nanodet-plus") != modelFilepath_.npos) {
    preprocessor_.PreprocessNanoDetPlus(raw_img, inputDims_, input_tensors_);
  } else if (modelFilepath_.find("rtmdet") != modelFilepath_.npos) {
    preprocessor_.Preprocess(raw_img, inputDims_, input_tensors_, CHW);
  img_height_ = img_raw.rows;
  img_width_ = img_raw.cols;
  if (option_.model_path.find("yolov4") != option_.model_path.npos) {
    preprocessor_.Preprocess(img_raw, input_dims_, input_tensors_, HWC);
  } else if (option_.model_path.find("yolov6") != option_.model_path.npos) {
    preprocessor_.Preprocess(img_raw, input_dims_, input_tensors_, CHW);
  } else if (option_.model_path.find("nanodet-plus") !=
             option_.model_path.npos) {
    preprocessor_.PreprocessNanoDetPlus(img_raw, input_dims_, input_tensors_);
  } else if (option_.model_path.find("rtmdet") != option_.model_path.npos) {
    preprocessor_.Preprocess(img_raw, input_dims_, input_tensors_, CHW);
  } else {
    std::cout << "[ ERROR ] Unsupported model" << std::endl;
  }
}

ObjectDetectionResult ObjectDetection::Postprocess() {
  if (initFlag_ != 0) {
  if (init_flag_ != 0) {
    std::cout << "[ ERROR ] Init fail return empty result" << std::endl;
    result_.result_bboxes = result_boxes_;
    result_.timestamp = std::chrono::steady_clock::now();
    return result_;
  }
  if (modelFilepath_.find("yolov4") != modelFilepath_.npos) {
    postprocessor_.Postprocess(Infer(input_tensors_), result_boxes_, inputDims_,
                               img_height_, img_width_, labels_,
                               score_threshold_, nms_threshold_);
  } else if (modelFilepath_.find("yolov6") != modelFilepath_.npos) {
  if (option_.model_path.find("yolov4") != option_.model_path.npos) {
    postprocessor_.Postprocess(Infer(input_tensors_), result_boxes_,
                               input_dims_, img_height_, img_width_, labels_,
                               option_.score_threshold, option_.nms_threshold);
  } else if (option_.model_path.find("yolov6") != option_.model_path.npos) {
    postprocessor_.PostprocessYolov6(Infer(input_tensors_), result_boxes_,
                                     inputDims_, img_height_, img_width_,
                                     labels_, score_threshold_);
  } else if (modelFilepath_.find("nanodet-plus") != modelFilepath_.npos) {
                                     input_dims_, img_height_, img_width_,
                                     labels_, option_.score_threshold);
  } else if (option_.model_path.find("nanodet-plus") !=
             option_.model_path.npos) {
    postprocessor_.PostprocessNanoDetPlus(
        Infer(input_tensors_), result_boxes_, inputDims_, img_height_,
        img_width_, labels_, score_threshold_, nms_threshold_);
  } else if (modelFilepath_.find("rtmdet") != modelFilepath_.npos) {
    postprocessor_.PostprocessRtmDet(Infer(input_tensors_), result_boxes_,
                                     inputDims_, img_height_, img_width_,
                                     labels_, score_threshold_, nms_threshold_);
        Infer(input_tensors_), result_boxes_, input_dims_, img_height_,
        img_width_, labels_, option_.score_threshold, option_.nms_threshold);
  } else if (option_.model_path.find("rtmdet") != option_.model_path.npos) {
    postprocessor_.PostprocessRtmDet(
        Infer(input_tensors_), result_boxes_, input_dims_, img_height_,
        img_width_, labels_, option_.score_threshold, option_.nms_threshold);
  } else {
    std::cout << "[ ERROR ] Unsupported model return empty result" << std::endl;
  }
  if (!class_name_blacklist_.empty()) {
    for (int i = 0; i < static_cast<int>(class_name_blacklist_.size()); i++) {
      for (int j = 0; j < static_cast<int>(result_boxes_.size()); j++) {
        if (class_name_blacklist_[i] ==
            static_cast<int>(result_boxes_[j].label)) {
          result_boxes_[j].flag = false;
        }
      }
    }
  }
  if (!class_name_whitelist_.empty()) {
    for (int j = 0; j < static_cast<int>(result_boxes_.size()); j++) {
      result_boxes_[j].flag = false;
    }
    for (int i = 0; i < static_cast<int>(class_name_whitelist_.size()); i++) {
      for (int j = 0; j < static_cast<int>(result_boxes_.size()); j++) {
        if (class_name_whitelist_[i] ==
            static_cast<int>(result_boxes_[j].label)) {
          result_boxes_[j].flag = true;
        }
      }
    }
  }
  ApllyList();
  result_.result_bboxes = result_boxes_;
  result_.timestamp = std::chrono::steady_clock::now();
  return result_;
}

int ObjectDetection::InitFromCommand(const std::string &modelFilepath,
                                     const std::string &labelFilepath) {
  instanceName_ = "object-detection-inference";
  modelFilepath_ = modelFilepath;
  labelFilepath_ = labelFilepath;
  score_threshold_ = -1.f;
  nms_threshold_ = -1.f;
  initFlag_ = GetEngine()->Init(instanceName_, modelFilepath_);
  inputDims_ = GetEngine()->GetInputDims();
  labels_ = readLabels(labelFilepath_);
  return initFlag_;
void ObjectDetection::ApllyList() {
  if (option_.class_name_blacklist.empty() &&
      option_.class_name_whitelist.empty()) {
    return;
  }
  for (auto it = result_boxes_.begin(); it != result_boxes_.end();) {
    if (!class_name_list_[static_cast<int>(it->label)]) {
      it = result_boxes_.erase(it);
    } else {
      ++it;
    }
  }
}

int ObjectDetection::InitFromConfig(const std::string &configFilepath) {
  std::ifstream f(configFilepath);
  json config = json::parse(f);
  if (configCheck(config)) {
    initFlag_ = 1;
    std::cout << "[ ERROR ] Config check fail" << std::endl;
    return initFlag_;
int ObjectDetection::InitFromOption(const ObjectDetectionOption &option) {
  init_flag_ = 1;
  option_ = option;
  instance_name_ = "object-detection-inference";
  labels_ = readLabels(option_.label_path);
  if (labels_.empty()) {
    std::cout << "[ ERROR ] label file is empty, init fail" << std::endl;
    return init_flag_;
  }
  int label_size = labels_.size();
  if (!option_.class_name_whitelist.empty()) {
    std::vector<int> list(label_size, 0);
    class_name_list_ = list;
    for (size_t i = 0; i < option_.class_name_whitelist.size(); i++) {
      if (option_.class_name_whitelist[i] < label_size &&
          option_.class_name_whitelist[i] >= 0) {
        class_name_list_[option_.class_name_whitelist[i]] = 1;
      }
    }
  modelFilepath_ = config["model_path"];
  labelFilepath_ = config["label_path"];
  if (config.contains("score_threshold")) {
    score_threshold_ = config["score_threshold"];
  } else {
    score_threshold_ = -1.f;
    std::vector<int> list(label_size, 1);
    class_name_list_ = list;
  }
  if (config.contains("nms_threshold")) {
    nms_threshold_ = config["nms_threshold"];
  } else {
    nms_threshold_ = -1.f;
  if (!option_.class_name_blacklist.empty()) {
    for (size_t i = 0; i < option_.class_name_blacklist.size(); i++) {
      if (option_.class_name_blacklist[i] < label_size &&
          option_.class_name_blacklist[i] >= 0) {
        class_name_list_[option_.class_name_blacklist[i]] = 0;
      }
  if (config.contains("class_name_whitelist")) {
    class_name_whitelist_ =
        config["class_name_whitelist"].get<std::vector<int>>();
  }
  if (config.contains("class_name_blacklist")) {
    class_name_blacklist_ =
        config["class_name_blacklist"].get<std::vector<int>>();
  }
  labels_ = readLabels(labelFilepath_);
  initFlag_ = GetEngine()->Init(config);
  inputDims_ = GetEngine()->GetInputDims();
  return initFlag_;
  init_flag_ =
      GetEngine()->Init(instance_name_, option_.model_path,
                        option.intra_threads_num, option.inter_threads_num);
  if (!init_flag_) {
    input_dims_ = GetEngine()->GetInputDims();
  }
  return init_flag_;
}

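Note: the filtering behaviour changes here. The old Postprocess() only toggled each box's flag according to class_name_blacklist_/class_name_whitelist_; the new code collapses both lists into class_name_list_ (1 = keep, 0 = drop) at init time, and ApllyList() then erases filtered boxes from result_boxes_ outright. A standalone sketch of that collapse, with hypothetical label count and lists:

#include <vector>

int main() {
  // Hypothetical setup: 5 labels, whitelist {0, 2}, blacklist {2}.
  int label_size = 5;
  std::vector<int> whitelist = {0, 2};
  std::vector<int> blacklist = {2};

  // With a whitelist, start from "drop everything" and enable listed classes;
  // without one, every class starts enabled.
  std::vector<int> keep(label_size, whitelist.empty() ? 1 : 0);
  for (int id : whitelist) {
    if (id >= 0 && id < label_size) keep[id] = 1;
  }
  // The blacklist is applied afterwards, so it wins on overlap.
  for (int id : blacklist) {
    if (id >= 0 && id < label_size) keep[id] = 0;
  }
  // keep == {1, 0, 0, 0, 0}: only boxes labelled 0 survive ApllyList().
  return 0;
}
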
@@ -16,28 +16,26 @@
class ObjectDetection : public BaseVisionTaskApi<ObjectDetectionResult> {
 public:
  ObjectDetection() : BaseVisionTaskApi<ObjectDetectionResult>() {
    initFlag_ = -1;
    init_flag_ = -1;
  }
  ~ObjectDetection() {}
  ObjectDetectionResult Detect(const cv::Mat &raw_img);
  int InitFromCommand(const std::string &modelFilepath,
                      const std::string &labelFilepath);
  int InitFromConfig(const std::string &configFilepath);
  std::vector<std::vector<float>> Process(const cv::Mat &raw_img);
  ObjectDetectionResult Detect(const cv::Mat &img_raw);
  int InitFromOption(const ObjectDetectionOption &option);
  std::vector<std::vector<float>> Process(const cv::Mat &img_raw);
  ObjectDetectionResult Detect(
      const std::vector<std::vector<float>> &input_tensors,
      const int img_height, const int img_width);

 protected:
  void Preprocess(const cv::Mat &raw_img) override;
  void ApllyList();
  void Preprocess(const cv::Mat &img_raw) override;
  ObjectDetectionResult Postprocess() override;

 private:
  std::string instanceName_;
  std::string modelFilepath_;
  std::string labelFilepath_;
  std::string instance_name_;
  std::vector<std::string> labels_;
  std::vector<std::vector<int64_t>> inputDims_;
  ObjectDetectionOption option_;
  std::vector<std::vector<int64_t>> input_dims_;
  std::vector<std::vector<float>> input_tensors_;
  DetectionPreprocessor preprocessor_;
  DetectionPostprocessor postprocessor_;
@@ -45,11 +43,8 @@ class ObjectDetection : public BaseVisionTaskApi<ObjectDetectionResult> {
  ObjectDetectionResult result_;
  int img_height_;
  int img_width_;
  int initFlag_;
  float score_threshold_;
  float nms_threshold_;
  std::vector<int> class_name_whitelist_;
  std::vector<int> class_name_blacklist_;
  int init_flag_;
  std::vector<int> class_name_list_;
};

#endif // SUPPORT_SRC_TASK_VISION_OBJECTDETECTION_OBJECT_DETECTION_H_

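Note: the option struct itself is not part of this hunk. Judging only from the members read above and in configToOption() further down, it presumably carries at least the fields below; this is a sketch, not the actual header.

#include <string>
#include <vector>

// Sketch of the fields ObjectDetection and configToOption() reference.
// Names match the usages in this commit; defaults are assumptions.
struct ObjectDetectionOptionSketch {
  std::string model_path;
  std::string label_path;
  float score_threshold = -1.f;  // -1 presumably means "use built-in default"
  float nms_threshold = -1.f;
  int intra_threads_num = 0;
  int inter_threads_num = 0;
  std::vector<int> class_name_whitelist;  // label indices to keep
  std::vector<int> class_name_blacklist;  // label indices to drop
};
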
@@ -4,105 +4,43 @@
#include "src/task/vision/objectdetection/object_detection.h"
#include "src/utils/utils.h"

class objectDetectionTask::impl {
class ObjectDetectionTask::impl {
 public:
  std::unique_ptr<ObjectDetection> objectdetection_;
};

objectDetectionTask::objectDetectionTask(const std::string &filePath,
                                         const std::string &labelFilepath)
ObjectDetectionTask::ObjectDetectionTask(const ObjectDetectionOption &option)
    : pimpl_(std::make_unique<impl>()) {
  init_flag_ = -1;
  pimpl_->objectdetection_ =
      std::unique_ptr<ObjectDetection>(new ObjectDetection());
  if (!checkLabelFileExtension(labelFilepath)) {
    std::cout << "[ ERROR ] The LabelFilepath is not set correctly and the "
                 "labels file should end with extension .txt"
              << std::endl;
  } else if (filePath.length() > 4) {
    std::string suffixStr = filePath.substr(filePath.length() - 4, 4);
    if (strcmp(suffixStr.c_str(), "onnx") == 0) {
      if (!checkModelFileExtension(filePath)) {
        std::cout << "[ ERROR ] The ModelFilepath is not correct. Make sure "
                     "you are setting the path to an onnx model file (.onnx)"
                  << std::endl;
      } else if (!exists_check(filePath) || !exists_check(labelFilepath)) {
        std::cout << "[ ERROR ] The File does not exist. Make sure you are "
                     "setting the correct path to the file"
                  << std::endl;
      } else {
        init_flag_ =
            pimpl_->objectdetection_->InitFromCommand(filePath, labelFilepath);
        if (init_flag_ != 0) {
          std::cout << "[Error] Init fail" << std::endl;
        }
      }
    } else if (strcmp(suffixStr.c_str(), "json") == 0) {
      if (!checkConfigFileExtension(filePath)) {
        std::cout << "[ ERROR ] The ConfigFilepath is not correct. Make sure "
                     "you are setting the path to an json file (.json)"
                  << std::endl;
      } else if (!exists_check(filePath)) {
        std::cout << "[ ERROR ] The File does not exist. Make sure you are "
                     "setting the correct path to the file"
                  << std::endl;
      } else {
        init_flag_ = pimpl_->objectdetection_->InitFromConfig(filePath);
        if (init_flag_ != 0) {
          std::cout << "[Error] Init fail" << std::endl;
        }
      }
    } else {
      std::cout << "[ ERROR ] Unsupport file" << std::endl;
    }
  } else {
    std::cout << "[ ERROR ] Unsupport filepath" << std::endl;
  }
  init_flag_ = pimpl_->objectdetection_->InitFromOption(option);
}

objectDetectionTask::objectDetectionTask(const std::string &filePath)
ObjectDetectionTask::ObjectDetectionTask(const std::string &config_file_path)
    : pimpl_(std::make_unique<impl>()) {
  init_flag_ = -1;
  pimpl_->objectdetection_ =
      std::unique_ptr<ObjectDetection>(new ObjectDetection());
  if (filePath.length() > 4) {
    std::string suffixStr = filePath.substr(filePath.length() - 4, 4);
    if (strcmp(suffixStr.c_str(), "json") == 0) {
      if (!checkConfigFileExtension(filePath)) {
        std::cout << "[ ERROR ] The ConfigFilepath is not correct. Make sure "
                     "you are setting the path to an json file (.json)"
                  << std::endl;
      } else if (!exists_check(filePath)) {
        std::cout << "[ ERROR ] The File does not exist. Make sure you are "
                     "setting the correct path to the file"
                  << std::endl;
      } else {
        init_flag_ = pimpl_->objectdetection_->InitFromConfig(filePath);
        if (init_flag_ != 0) {
          std::cout << "[Error] Init fail" << std::endl;
        }
      }
    } else {
      std::cout << "[ ERROR ] Unsupport file" << std::endl;
    }
  } else {
    std::cout << "[ ERROR ] Unsupport filepath" << std::endl;
  ObjectDetectionOption option;
  if (!configToOption(config_file_path, option)) {
    init_flag_ = pimpl_->objectdetection_->InitFromOption(option);
  }
}

int objectDetectionTask::getInitFlag() { return init_flag_; }
int ObjectDetectionTask::getInitFlag() { return init_flag_; }

ObjectDetectionResult objectDetectionTask::Detect(const cv::Mat &raw_img) {
  return pimpl_->objectdetection_->Detect(raw_img);
ObjectDetectionResult ObjectDetectionTask::Detect(const cv::Mat &img_raw) {
  return pimpl_->objectdetection_->Detect(img_raw);
}

ObjectDetectionResult objectDetectionTask::Detect(
ObjectDetectionResult ObjectDetectionTask::Detect(
    const std::vector<std::vector<float>> &input_tensors, const int img_height,
    const int img_width) {
  return pimpl_->objectdetection_->Detect(input_tensors, img_height, img_width);
}

std::vector<std::vector<float>> objectDetectionTask::Process(
std::vector<std::vector<float>> ObjectDetectionTask::Process(
    const cv::Mat &img_raw) {
  return pimpl_->objectdetection_->Process(img_raw);
}

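Note: a minimal caller against the reworked detection task might look as follows. Header path and file names are assumptions; note that model selection is keyed on substrings of model_path ("yolov4", "yolov6", "nanodet-plus", "rtmdet"), so the file name has to contain one of them.

#include <iostream>
#include <opencv2/opencv.hpp>

#include "task/vision/object_detection_task.h"  // assumed public header path

int main() {
  ObjectDetectionOption option;
  option.model_path = "yolov6n.onnx";  // placeholder; name must contain "yolov6"
  option.label_path = "coco.txt";      // placeholder
  option.score_threshold = 0.4f;       // optional; negative keeps the default

  ObjectDetectionTask task(option);
  if (task.getInitFlag() != 0) return -1;

  cv::Mat img = cv::imread("input.jpg");
  ObjectDetectionResult result = task.Detect(img);
  std::cout << result.result_bboxes.size() << " boxes" << std::endl;
  return 0;
}
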
@@ -1,14 +1,13 @@
#include "src/task/vision/poseestimation/pose_estimation.h"

#include <chrono>
#include <fstream>

#include "src/utils/json.hpp"
#include "utils/time.h"

using json = nlohmann::json;

PoseEstimationResult PoseEstimation::Estimate(const cv::Mat &raw_img,
PoseEstimationResult PoseEstimation::Estimate(const cv::Mat &img_raw,
                                              const Boxi &box) {
  result_points_.clear();
  input_tensors_.clear();
@@ -18,17 +17,16 @@ PoseEstimationResult PoseEstimation::Estimate(const cv::Mat &raw_img,
    std::cout << "|-- Preprocess" << std::endl;
    TimeWatcher t("|--");
#endif
    Preprocess(raw_img);
    Preprocess(img_raw);
  }
  return Postprocess();
}

void PoseEstimation::Preprocess(const cv::Mat &img_raw) {
  if (initFlag_ != 0) {
    std::cout << "[ ERROR ] Init fail" << std::endl;
  if (init_flag_ != 0) {
    return;
  }
  if (modelFilepath_.find("rtmpose") != modelFilepath_.npos) {
  if (option_.model_path.find("rtmpose") != option_.model_path.npos) {
    processor_.Preprocess(img_raw, box_, input_tensors_, crop_result_pair_,
                          CHW);
  } else {
@@ -37,13 +35,13 @@ void PoseEstimation::Preprocess(const cv::Mat &img_raw) {
}

PoseEstimationResult PoseEstimation::Postprocess() {
  if (initFlag_ != 0) {
  if (init_flag_ != 0) {
    std::cout << "[ ERROR ] Init fail" << std::endl;
    result_.result_points = result_points_;
    result_.timestamp = std::chrono::steady_clock::now();
    return result_;
  }
  if (modelFilepath_.find("rtmpose") != modelFilepath_.npos) {
  if (option_.model_path.find("rtmpose") != option_.model_path.npos) {
    postprocessor_.Postprocess(Infer(input_tensors_), crop_result_pair_,
                               result_points_);
  } else {
@@ -54,24 +52,12 @@ PoseEstimationResult PoseEstimation::Postprocess() {
  return result_;
}

int PoseEstimation::InitFromCommand(const std::string &modelFilepath) {
  instanceName_ = "pose-estimation-inference";
  modelFilepath_ = modelFilepath;
  initFlag_ = GetEngine()->Init(instanceName_, modelFilepath_);
  inputDims_ = GetEngine()->GetInputDims();
  return initFlag_;
}

int PoseEstimation::InitFromConfig(const std::string &configFilepath) {
  std::ifstream f(configFilepath);
  json config = json::parse(f);
  if (configCheck(config)) {
    initFlag_ = 1;
    std::cout << "[ ERROR ] Config check fail" << std::endl;
    return initFlag_;
  }
  modelFilepath_ = config["model_path"];
  initFlag_ = GetEngine()->Init(config);
  inputDims_ = GetEngine()->GetInputDims();
  return initFlag_;
int PoseEstimation::InitFromOption(const PoseEstimationOption &option) {
  init_flag_ = 1;
  instance_name_ = "pose-estimation-inference";
  option_ = option;
  init_flag_ =
      GetEngine()->Init(instance_name_, option_.model_path,
                        option.intra_threads_num, option.inter_threads_num);
  return init_flag_;
}

@@ -17,29 +17,26 @@
class PoseEstimation : public BaseVisionTaskApi<PoseEstimationResult> {
 public:
  PoseEstimation() : BaseVisionTaskApi<PoseEstimationResult>() {
    initFlag_ = -1;
    init_flag_ = -1;
  }
  ~PoseEstimation() {}
  PoseEstimationResult Estimate(const cv::Mat &raw_img, const Boxi &box);
  int InitFromCommand(const std::string &modelFilepath);
  int InitFromConfig(const std::string &configFilepath);
  PoseEstimationResult Estimate(const cv::Mat &img_raw, const Boxi &box);
  int InitFromOption(const PoseEstimationOption &option);

 protected:
  void Preprocess(const cv::Mat &img_raw) override;
  PoseEstimationResult Postprocess() override;

 private:
  std::string instanceName_;
  std::string modelFilepath_;
  std::vector<std::string> labels_;
  std::vector<std::vector<int64_t>> inputDims_;
  std::string instance_name_;
  PoseEstimationOption option_;
  std::vector<std::vector<float>> input_tensors_;
  EstimationPreprocessor processor_;
  EstimationPostprocessor postprocessor_;
  std::vector<PosePoint> result_points_;
  PoseEstimationResult result_;
  Boxi box_;
  int initFlag_;
  int init_flag_;
  std::pair<cv::Mat, cv::Mat> crop_result_pair_;
};

src/task/vision/poseestimation/pose_estimation_task.cc (new file, 36 lines)
@@ -0,0 +1,36 @@
#include "task/vision/pose_estimation_task.h"

#include "include/utils/utils.h"
#include "src/task/vision/poseestimation/pose_estimation.h"
#include "src/utils/utils.h"

class PoseEstimationTask::impl {
 public:
  std::unique_ptr<PoseEstimation> poseestimation_;
};

PoseEstimationTask::PoseEstimationTask(const PoseEstimationOption &option)
    : pimpl_(std::make_unique<impl>()) {
  init_flag_ = -1;
  pimpl_->poseestimation_ =
      std::unique_ptr<PoseEstimation>(new PoseEstimation());
  init_flag_ = pimpl_->poseestimation_->InitFromOption(option);
}

PoseEstimationTask::PoseEstimationTask(const std::string &config_file_path)
    : pimpl_(std::make_unique<impl>()) {
  init_flag_ = -1;
  pimpl_->poseestimation_ =
      std::unique_ptr<PoseEstimation>(new PoseEstimation());
  PoseEstimationOption option;
  if (!configToOption(config_file_path, option)) {
    init_flag_ = pimpl_->poseestimation_->InitFromOption(option);
  }
}

int PoseEstimationTask::getInitFlag() { return init_flag_; }

PoseEstimationResult PoseEstimationTask::Estimate(const cv::Mat &img_raw,
                                                  const Boxi &box) {
  return pimpl_->poseestimation_->Estimate(img_raw, box);
}

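Note: the new task wrapper pairs naturally with detection, since Estimate() takes the image plus one Boxi. A hedged end-to-end sketch (header paths and file names assumed; it also assumes the detection boxes are directly usable as Boxi):

#include <opencv2/opencv.hpp>

#include "task/vision/object_detection_task.h"  // assumed header paths
#include "task/vision/pose_estimation_task.h"

int main() {
  ObjectDetectionOption det_option;
  det_option.model_path = "yolov6n.onnx";  // placeholder; dispatch keys on "yolov6"
  det_option.label_path = "coco.txt";
  ObjectDetectionTask detector(det_option);

  PoseEstimationOption pose_option;
  pose_option.model_path = "rtmpose-t.onnx";  // placeholder; dispatch keys on "rtmpose"
  PoseEstimationTask estimator(pose_option);

  if (detector.getInitFlag() != 0 || estimator.getInitFlag() != 0) return -1;

  cv::Mat img = cv::imread("input.jpg");
  ObjectDetectionResult det = detector.Detect(img);
  for (const auto& box : det.result_bboxes) {
    PoseEstimationResult pose = estimator.Estimate(img, box);
    // pose.result_points holds the estimated keypoints (PosePoint).
    (void)pose;
  }
  return 0;
}
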
@@ -1,61 +0,0 @@
#include "include/utils/utils.h"
#include "src/task/vision/poseestimation/pose_estimation.h"
#include "src/utils/utils.h"
#include "task/vision/pose_estimation_task.h"

class poseEstimationTask::impl {
 public:
  std::unique_ptr<PoseEstimation> poseestimation_;
};

poseEstimationTask::poseEstimationTask(const std::string &filePath)
    : pimpl_(std::make_unique<impl>()) {
  init_flag_ = -1;
  pimpl_->poseestimation_ =
      std::unique_ptr<PoseEstimation>(new PoseEstimation());
  if (filePath.length() > 4) {
    std::string suffixStr = filePath.substr(filePath.length() - 4, 4);
    if (strcmp(suffixStr.c_str(), "onnx") == 0) {
      if (!checkModelFileExtension(filePath)) {
        std::cout << "[ ERROR ] The ModelFilepath is not correct. Make sure "
                     "you are setting the path to an onnx model file (.onnx)"
                  << std::endl;
      } else if (!exists_check(filePath)) {
        std::cout << "[ ERROR ] The File does not exist. Make sure you are "
                     "setting the correct path to the file"
                  << std::endl;
      } else {
        init_flag_ = pimpl_->poseestimation_->InitFromCommand(filePath);
        if (init_flag_ != 0) {
          std::cout << "[Error] Init fail" << std::endl;
        }
      }
    } else if (strcmp(suffixStr.c_str(), "json") == 0) {
      if (!checkConfigFileExtension(filePath)) {
        std::cout << "[ ERROR ] The ConfigFilepath is not correct. Make sure "
                     "you are setting the path to an json file (.json)"
                  << std::endl;
      } else if (!exists_check(filePath)) {
        std::cout << "[ ERROR ] The File does not exist. Make sure you are "
                     "setting the correct path to the file"
                  << std::endl;
      } else {
        init_flag_ = pimpl_->poseestimation_->InitFromConfig(filePath);
        if (init_flag_ != 0) {
          std::cout << "[Error] Init fail" << std::endl;
        }
      }
    } else {
      std::cout << "[ ERROR ] Unsupport file" << std::endl;
    }
  } else {
    std::cout << "[ ERROR ] Unsupport filepath" << std::endl;
  }
}

int poseEstimationTask::getInitFlag() { return init_flag_; }

PoseEstimationResult poseEstimationTask::Estimate(const cv::Mat &raw_img,
                                                  const Boxi &box) {
  return pimpl_->poseestimation_->Estimate(raw_img, box);
}

@@ -79,7 +79,6 @@ void blending_nms(std::vector<Boxf> &input, std::vector<Boxf> &output,
      rects.y2 += buf[l].y2 * rate;
      rects.score += buf[l].score * rate;
    }
    rects.flag = true;
    output.push_back(rects);

    // keep top k

@@ -2,7 +2,8 @@

#include <cmath>
#include <cstdint>  // for: uint32_t
#include <fstream>
#include <fstream>  // for ifstream
#include <iostream>
#include <string>
#include <vector>

@@ -11,75 +12,16 @@
#include "src/utils/utils.h"
using json = nlohmann::json;

bool checkLabelFileExtension(const std::string& filename) {
  size_t pos = filename.rfind('.');
  if (filename.empty()) {
    std::cout << "[ ERROR ] The Label file path is empty" << std::endl;
    return false;
  }
  if (pos == std::string::npos) return false;
  std::string ext = filename.substr(pos + 1);
  if (ext == "txt") {
    return true;
  } else {
    return false;
  }
}

std::vector<std::string> readLabels(const std::string& labelFilepath) {
std::vector<std::string> readLabels(const std::string& label_file_path) {
  std::vector<std::string> labels;
  std::string line;
  std::ifstream fp(labelFilepath);
  std::ifstream fp(label_file_path);
  while (std::getline(fp, line)) {
    labels.push_back(line);
  }
  return labels;
}

bool checkModelFileExtension(const std::string& filename) {
  size_t pos = filename.rfind('.');
  if (filename.empty()) {
    std::cout << "[ ERROR ] The Model file path is empty" << std::endl;
    return false;
  }
  if (pos == std::string::npos) return false;
  std::string ext = filename.substr(pos + 1);
  if (ext == "onnx") {
    return true;
  } else {
    return false;
  }
}

int checkConfigFileExtension(const std::string& filename) {
  size_t pos = filename.rfind('.');
  if (filename.empty()) {
    std::cout << "[ ERROR ] The Config file path is empty" << std::endl;
    return false;
  }
  if (pos == std::string::npos) return false;
  std::string ext = filename.substr(pos + 1);
  if (ext == "json") {
    return true;
  } else {
    return false;
  }
}

int configCheck(const json& config) {
  if (!config.contains("model_path") || !config.contains("label_path")) {
    return 1;
  } else if (!checkModelFileExtension(config["model_path"]) ||
             !checkLabelFileExtension(config["label_path"])) {
    return 1;
  } else if (!exists_check(config["model_path"]) ||
             !exists_check(config["label_path"])) {
    return 1;
  } else {
    return 0;
  }
}

float sigmoid(float x) { return (1 / (1 + exp(-x))); }

float fast_exp(float x) {
@@ -91,12 +33,7 @@ float fast_exp(float x) {
  return v.f;
}

bool exists_check(const std::string& name) {
  struct stat buffer;
  return (stat(name.c_str(), &buffer) == 0);
}

void resize_unscale(const cv::Mat& mat, cv::Mat& mat_rs, int target_height,
void resizeUnscale(const cv::Mat& mat, cv::Mat& mat_rs, int target_height,
                    int target_width) {
  if (mat.empty()) return;
  int img_height = static_cast<int>(mat.rows);
@@ -127,3 +64,84 @@ void resize_unscale(const cv::Mat& mat, cv::Mat& mat_rs, int target_height,

  new_unpad_mat.copyTo(mat_rs(cv::Rect(dw, dh, new_unpad_w, new_unpad_h)));
}

int getConfig(const std::string& config_file_path, json& config) {
  std::ifstream f(config_file_path);
  try {
    config = json::parse(f);
  } catch (json::parse_error& ex) {
    std::cout << "[ ERROR ] Init fail, parse json config file fail"
              << std::endl;
    return 0;
  }
  return 1;
}

int configToOption(const std::string& config_file_path,
                   ImageClassificationOption& option) {
  json config;
  if (!getConfig(config_file_path, config)) {
    return -1;
  }
  std::string model_path = config["model_path"];
  option.model_path = model_path;
  std::string label_path = config["label_path"];
  option.label_path = label_path;
  if (config.contains("intra_threads_num")) {
    option.intra_threads_num = config["intra_threads_num"];
  }
  if (config.contains("inter_threads_num")) {
    option.inter_threads_num = config["inter_threads_num"];
  }
  return 0;
}

int configToOption(const std::string& config_file_path,
                   ObjectDetectionOption& option) {
  json config;
  if (!getConfig(config_file_path, config)) {
    return -1;
  }
  std::string model_path = config["model_path"];
  option.model_path = model_path;
  std::string label_path = config["label_path"];
  option.label_path = label_path;
  if (config.contains("intra_threads_num")) {
    option.intra_threads_num = config["intra_threads_num"];
  }
  if (config.contains("inter_threads_num")) {
    option.inter_threads_num = config["inter_threads_num"];
  }
  if (config.contains("score_threshold")) {
    option.score_threshold = config["score_threshold"];
  }
  if (config.contains("nms_threshold")) {
    option.nms_threshold = config["nms_threshold"];
  }
  if (config.contains("class_name_whitelist")) {
    option.class_name_whitelist =
        config["class_name_whitelist"].get<std::vector<int>>();
  }
  if (config.contains("class_name_blacklist")) {
    option.class_name_blacklist =
        config["class_name_blacklist"].get<std::vector<int>>();
  }
  return 0;
}

int configToOption(const std::string& config_file_path,
                   PoseEstimationOption& option) {
  json config;
  if (!getConfig(config_file_path, config)) {
    return -1;
  }
  std::string model_path = config["model_path"];
  option.model_path = model_path;
  if (config.contains("intra_threads_num")) {
    option.intra_threads_num = config["intra_threads_num"];
  }
  if (config.contains("inter_threads_num")) {
    option.inter_threads_num = config["inter_threads_num"];
  }
  return 0;
}

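Note: for the detection overload of configToOption() above, a config file that exercises every recognised key would look roughly like this; model_path and label_path are read unconditionally, everything else is optional. Values are placeholders.

{
  "model_path": "yolov6n.onnx",
  "label_path": "coco.txt",
  "intra_threads_num": 2,
  "inter_threads_num": 2,
  "score_threshold": 0.45,
  "nms_threshold": 0.45,
  "class_name_whitelist": [0, 1],
  "class_name_blacklist": []
}
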
@@ -4,21 +4,24 @@
#include <string>

#include "include/utils/utils.h"
#include "src/utils/json.hpp"
using json = nlohmann::json;

bool checkLabelFileExtension(const std::string& filename);

std::vector<std::string> readLabels(const std::string& labelFilepath);

bool checkModelFileExtension(const std::string& filename);

int checkConfigFileExtension(const std::string& filename);

int configCheck(const json& config);

float sigmoid(float x);

float fast_exp(float x);

#ifdef _WIN32
#include <codecvt>
inline std::wstring to_wstring(const std::string& input) {
  std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
  return converter.from_bytes(input);
}
inline std::wstring to_wstring(const char* input) {
  std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
  std::string str(input);
  return converter.from_bytes(str);
}
#endif /* _WIN32 */

#endif // SUPPORT_SRC_UTILS_UTILS_H_

@@ -1,4 +1,4 @@
#include "../utils/json.hpp"
#include "../src/utils/json.hpp"

#include <fstream>
#include <iostream>