Mirror of https://github.com/blakeblackshear/frigate.git
ROCm AMD/GPU based build and detector, WIP
Commit 42f1168898 (parent 487c626e00)
docker/rocm/Dockerfile (new file, 88 lines)
@@ -0,0 +1,88 @@
# syntax=docker/dockerfile:1.4

# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG ROCM=5.7.3
ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION

#######################################################################
FROM ubuntu:focal as rocm

RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install gnupg wget

RUN mkdir --parents --mode=0755 /etc/apt/keyrings

RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null
COPY docker/rocm/rocm.list /etc/apt/sources.list.d/
COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/

RUN apt-get update

RUN apt-get -y install --no-install-recommends migraphx
RUN apt-get -y install --no-install-recommends migraphx-dev

#######################################################################
FROM --platform=linux/amd64 debian:11 as debian-base

RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod

RUN apt-get -y install python3

#######################################################################
FROM debian-base as debian-build

ARG ROCM

COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
RUN ln -s /opt/rocm-$ROCM /opt/rocm

RUN apt-get -y install g++ cmake
RUN apt-get -y install python3-pybind11 python3.9-distutils python3-dev

WORKDIR /opt/build

COPY docker/rocm/migraphx .

RUN mkdir build && cd build && cmake .. && make install

#######################################################################
FROM deps AS rocm-deps

ARG ROCM
ARG AMDGPU
ARG HSA_OVERRIDE_GFX_VERSION

RUN apt-get update
# no upgrade?!?!
RUN apt-get -y install libnuma1

RUN mkdir -p /opt/rocm-$ROCM

# Docker does not copy symbolic links, so we have to resort to tar
RUN --mount=from=rocm,src=/opt/rocm-$ROCM,dst=/opt/rocm-copy cd /opt/rocm-copy && tar cf - lib/libMIOpen*.so* lib/libamd*.so* lib/libhip*.so* lib/libhsa*.so* lib/libmigraphx*.so* lib/librocm*.so* lib/librocblas*.so* | (cd /opt/rocm-$ROCM/ && tar xf -)
#COPY --from=rocm /opt/rocm-$ROCM/lib/libMIOpen*.so* /opt/rocm-$ROCM/lib/libamd*.so* /opt/rocm-$ROCM/lib/libhip*.so* /opt/rocm-$ROCM/lib/libhsa*.so* /opt/rocm-$ROCM/lib/libmigraphx*.so* /opt/rocm-$ROCM/lib/librocm*.so* /opt/rocm-$ROCM/lib/librocblas*.so* /opt/rocm-$ROCM/lib/

COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/

COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/

RUN ln -s /opt/rocm-$ROCM /opt/rocm

WORKDIR /opt/frigate/
COPY --from=rootfs / /

ENV HSA_ENABLE_SDMA=0
ENV HSA_OVERRIDE_GFX_VERSION=$HSA_OVERRIDE_GFX_VERSION

ADD https://github.com/harakas/models/raw/main/ultralytics/yolov8.1/yolov8n_320x320.onnx /
ADD https://github.com/harakas/models/raw/main/ultralytics/yolov8.1/yolov8n_labels.txt /
ADD https://github.com/harakas/models/raw/main/ultralytics/yolov8.1/yolov8s_320x320.onnx /
ADD https://github.com/harakas/models/raw/main/ultralytics/yolov8.1/yolov8s_labels.txt /
ADD https://github.com/harakas/models/raw/main/ultralytics/yolov8.1/yolov8m_320x320.onnx /
ADD https://github.com/harakas/models/raw/main/ultralytics/yolov8.1/yolov8m_labels.txt /
docker/rocm/migraphx/CMakeLists.txt (new file, 26 lines)
@@ -0,0 +1,26 @@
cmake_minimum_required(VERSION 3.1)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

if(NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE Release)
endif()

set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)

project(migraphx_py)

include_directories(/opt/rocm/include)

find_package(pybind11 REQUIRED)
pybind11_add_module(migraphx migraphx_py.cpp)

target_link_libraries(migraphx PRIVATE /opt/rocm/lib/libmigraphx.so /opt/rocm/lib/libmigraphx_tf.so /opt/rocm/lib/libmigraphx_onnx.so)

install(TARGETS migraphx
  COMPONENT python
  LIBRARY DESTINATION /opt/rocm/lib
)
docker/rocm/migraphx/migraphx_py.cpp (new file, 582 lines)
@@ -0,0 +1,582 @@
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <migraphx/program.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/load_save.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/json.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>

#ifdef HAVE_GPU
#include <migraphx/gpu/hip.hpp>
#endif

using half = half_float::half;
namespace py = pybind11;

#ifdef __clang__
#define MIGRAPHX_PUSH_UNUSED_WARNING \
    _Pragma("clang diagnostic push") \
    _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#define MIGRAPHX_POP_WARNING _Pragma("clang diagnostic pop")
#else
#define MIGRAPHX_PUSH_UNUSED_WARNING
#define MIGRAPHX_POP_WARNING
#endif
#define MIGRAPHX_PYBIND11_MODULE(...) \
    MIGRAPHX_PUSH_UNUSED_WARNING \
    PYBIND11_MODULE(__VA_ARGS__) \
    MIGRAPHX_POP_WARNING

#define MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM(x, t) .value(#x, migraphx::shape::type_t::x)
namespace migraphx {

migraphx::value to_value(py::kwargs kwargs);
migraphx::value to_value(py::list lst);

template <class T, class F>
void visit_py(T x, F f)
{
    if(py::isinstance<py::kwargs>(x))
    {
        f(to_value(x.template cast<py::kwargs>()));
    }
    else if(py::isinstance<py::list>(x))
    {
        f(to_value(x.template cast<py::list>()));
    }
    else if(py::isinstance<py::bool_>(x))
    {
        f(x.template cast<bool>());
    }
    else if(py::isinstance<py::int_>(x) or py::hasattr(x, "__index__"))
    {
        f(x.template cast<int>());
    }
    else if(py::isinstance<py::float_>(x))
    {
        f(x.template cast<float>());
    }
    else if(py::isinstance<py::str>(x))
    {
        f(x.template cast<std::string>());
    }
    else if(py::isinstance<migraphx::shape::dynamic_dimension>(x))
    {
        f(migraphx::to_value(x.template cast<migraphx::shape::dynamic_dimension>()));
    }
    else
    {
        MIGRAPHX_THROW("VISIT_PY: Unsupported data type!");
    }
}

migraphx::value to_value(py::list lst)
{
    migraphx::value v = migraphx::value::array{};
    for(auto val : lst)
    {
        visit_py(val, [&](auto py_val) { v.push_back(py_val); });
    }

    return v;
}

migraphx::value to_value(py::kwargs kwargs)
{
    migraphx::value v = migraphx::value::object{};

    for(auto arg : kwargs)
    {
        auto&& key = py::str(arg.first);
        auto&& val = arg.second;
        visit_py(val, [&](auto py_val) { v[key] = py_val; });
    }
    return v;
}
} // namespace migraphx

namespace pybind11 {
namespace detail {

template <>
struct npy_format_descriptor<half>
{
    static std::string format()
    {
        // following: https://docs.python.org/3/library/struct.html#format-characters
        return "e";
    }
    static constexpr auto name() { return _("half"); }
};

} // namespace detail
} // namespace pybind11

template <class F>
void visit_type(const migraphx::shape& s, F f)
{
    s.visit_type(f);
}

template <class T, class F>
void visit(const migraphx::raw_data<T>& x, F f)
{
    x.visit(f);
}

template <class F>
void visit_types(F f)
{
    migraphx::shape::visit_types(f);
}

template <class T>
py::buffer_info to_buffer_info(T& x)
{
    migraphx::shape s = x.get_shape();
    assert(s.type() != migraphx::shape::tuple_type);
    if(s.dynamic())
        MIGRAPHX_THROW("MIGRAPHX PYTHON: dynamic shape argument passed to to_buffer_info");
    auto strides = s.strides();
    std::transform(
        strides.begin(), strides.end(), strides.begin(), [&](auto i) { return i * s.type_size(); });
    py::buffer_info b;
    visit_type(s, [&](auto as) {
        // migraphx use int8_t data to store bool type, we need to
        // explicitly specify the data type as bool for python
        if(s.type() == migraphx::shape::bool_type)
        {
            b = py::buffer_info(x.data(),
                                as.size(),
                                py::format_descriptor<bool>::format(),
                                s.ndim(),
                                s.lens(),
                                strides);
        }
        else
        {
            b = py::buffer_info(x.data(),
                                as.size(),
                                py::format_descriptor<decltype(as())>::format(),
                                s.ndim(),
                                s.lens(),
                                strides);
        }
    });
    return b;
}

migraphx::shape to_shape(const py::buffer_info& info)
{
    migraphx::shape::type_t t;
    std::size_t n = 0;
    visit_types([&](auto as) {
        if(info.format == py::format_descriptor<decltype(as())>::format() or
           (info.format == "l" and py::format_descriptor<decltype(as())>::format() == "q") or
           (info.format == "L" and py::format_descriptor<decltype(as())>::format() == "Q"))
        {
            t = as.type_enum();
            n = sizeof(as());
        }
        else if(info.format == "?" and py::format_descriptor<decltype(as())>::format() == "b")
        {
            t = migraphx::shape::bool_type;
            n = sizeof(bool);
        }
    });

    if(n == 0)
    {
        MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type " + info.format);
    }

    auto strides = info.strides;
    std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t {
        return n > 0 ? i / n : 0;
    });

    // scalar support
    if(info.shape.empty())
    {
        return migraphx::shape{t};
    }
    else
    {
        return migraphx::shape{t, info.shape, strides};
    }
}

MIGRAPHX_PYBIND11_MODULE(migraphx, m)
{
    py::class_<migraphx::shape> shape_cls(m, "shape");
    shape_cls
        .def(py::init([](py::kwargs kwargs) {
            auto v = migraphx::to_value(kwargs);
            auto t = migraphx::shape::parse_type(v.get("type", "float"));
            if(v.contains("dyn_dims"))
            {
                auto dyn_dims =
                    migraphx::from_value<std::vector<migraphx::shape::dynamic_dimension>>(
                        v.at("dyn_dims"));
                return migraphx::shape(t, dyn_dims);
            }
            auto lens = v.get<std::size_t>("lens", {1});
            if(v.contains("strides"))
                return migraphx::shape(t, lens, v.at("strides").to_vector<std::size_t>());
            else
                return migraphx::shape(t, lens);
        }))
        .def("type", &migraphx::shape::type)
        .def("lens", &migraphx::shape::lens)
        .def("strides", &migraphx::shape::strides)
        .def("ndim", &migraphx::shape::ndim)
        .def("elements", &migraphx::shape::elements)
        .def("bytes", &migraphx::shape::bytes)
        .def("type_string", &migraphx::shape::type_string)
        .def("type_size", &migraphx::shape::type_size)
        .def("dyn_dims", &migraphx::shape::dyn_dims)
        .def("packed", &migraphx::shape::packed)
        .def("transposed", &migraphx::shape::transposed)
        .def("broadcasted", &migraphx::shape::broadcasted)
        .def("standard", &migraphx::shape::standard)
        .def("scalar", &migraphx::shape::scalar)
        .def("dynamic", &migraphx::shape::dynamic)
        .def("__eq__", std::equal_to<migraphx::shape>{})
        .def("__ne__", std::not_equal_to<migraphx::shape>{})
        .def("__repr__", [](const migraphx::shape& s) { return migraphx::to_string(s); });

    py::enum_<migraphx::shape::type_t>(shape_cls, "type_t")
        MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM);

    py::class_<migraphx::shape::dynamic_dimension>(shape_cls, "dynamic_dimension")
        .def(py::init<>())
        .def(py::init<std::size_t, std::size_t>())
        .def(py::init<std::size_t, std::size_t, std::set<std::size_t>>())
        .def_readwrite("min", &migraphx::shape::dynamic_dimension::min)
        .def_readwrite("max", &migraphx::shape::dynamic_dimension::max)
        .def_readwrite("optimals", &migraphx::shape::dynamic_dimension::optimals)
        .def("is_fixed", &migraphx::shape::dynamic_dimension::is_fixed);

    py::class_<migraphx::argument>(m, "argument", py::buffer_protocol())
        .def_buffer([](migraphx::argument& x) -> py::buffer_info { return to_buffer_info(x); })
        .def(py::init([](py::buffer b) {
            py::buffer_info info = b.request();
            return migraphx::argument(to_shape(info), info.ptr);
        }))
        .def("get_shape", &migraphx::argument::get_shape)
        .def("data_ptr",
             [](migraphx::argument& x) { return reinterpret_cast<std::uintptr_t>(x.data()); })
        .def("tolist",
             [](migraphx::argument& x) {
                 py::list l{x.get_shape().elements()};
                 visit(x, [&](auto data) { l = py::cast(data.to_vector()); });
                 return l;
             })
        .def("__eq__", std::equal_to<migraphx::argument>{})
        .def("__ne__", std::not_equal_to<migraphx::argument>{})
        .def("__repr__", [](const migraphx::argument& x) { return migraphx::to_string(x); });

    py::class_<migraphx::target>(m, "target");

    py::class_<migraphx::instruction_ref>(m, "instruction_ref")
        .def("shape", [](migraphx::instruction_ref i) { return i->get_shape(); })
        .def("op", [](migraphx::instruction_ref i) { return i->get_operator(); });

    py::class_<migraphx::module, std::unique_ptr<migraphx::module, py::nodelete>>(m, "module")
        .def("print", [](const migraphx::module& mm) { std::cout << mm << std::endl; })
        .def(
            "add_instruction",
            [](migraphx::module& mm,
               const migraphx::operation& op,
               std::vector<migraphx::instruction_ref>& args,
               std::vector<migraphx::module*>& mod_args) {
                return mm.add_instruction(op, args, mod_args);
            },
            py::arg("op"),
            py::arg("args"),
            py::arg("mod_args") = std::vector<migraphx::module*>{})
        .def(
            "add_literal",
            [](migraphx::module& mm, py::buffer data) {
                py::buffer_info info = data.request();
                auto literal_shape = to_shape(info);
                return mm.add_literal(literal_shape, reinterpret_cast<char*>(info.ptr));
            },
            py::arg("data"))
        .def(
            "add_parameter",
            [](migraphx::module& mm, const std::string& name, const migraphx::shape shape) {
                return mm.add_parameter(name, shape);
            },
            py::arg("name"),
            py::arg("shape"))
        .def(
            "add_return",
            [](migraphx::module& mm, std::vector<migraphx::instruction_ref>& args) {
                return mm.add_return(args);
            },
            py::arg("args"))
        .def("__repr__", [](const migraphx::module& mm) { return migraphx::to_string(mm); });

    py::class_<migraphx::program>(m, "program")
        .def(py::init([]() { return migraphx::program(); }))
        .def("get_parameter_names", &migraphx::program::get_parameter_names)
        .def("get_parameter_shapes", &migraphx::program::get_parameter_shapes)
        .def("get_output_shapes", &migraphx::program::get_output_shapes)
        .def("is_compiled", &migraphx::program::is_compiled)
        .def(
            "compile",
            [](migraphx::program& p,
               const migraphx::target& t,
               bool offload_copy,
               bool fast_math,
               bool exhaustive_tune) {
                migraphx::compile_options options;
                options.offload_copy = offload_copy;
                options.fast_math = fast_math;
                options.exhaustive_tune = exhaustive_tune;
                p.compile(t, options);
            },
            py::arg("t"),
            py::arg("offload_copy") = true,
            py::arg("fast_math") = true,
            py::arg("exhaustive_tune") = false)
        .def("get_main_module", [](const migraphx::program& p) { return p.get_main_module(); })
        .def(
            "create_module",
            [](migraphx::program& p, const std::string& name) { return p.create_module(name); },
            py::arg("name"))
        .def("run",
             [](migraphx::program& p, py::dict params) {
                 migraphx::parameter_map pm;
                 for(auto x : params)
                 {
                     std::string key = x.first.cast<std::string>();
                     py::buffer b = x.second.cast<py::buffer>();
                     py::buffer_info info = b.request();
                     pm[key] = migraphx::argument(to_shape(info), info.ptr);
                 }
                 return p.eval(pm);
             })
        .def("run_async",
             [](migraphx::program& p,
                py::dict params,
                std::uintptr_t stream,
                std::string stream_name) {
                 migraphx::parameter_map pm;
                 for(auto x : params)
                 {
                     std::string key = x.first.cast<std::string>();
                     py::buffer b = x.second.cast<py::buffer>();
                     py::buffer_info info = b.request();
                     pm[key] = migraphx::argument(to_shape(info), info.ptr);
                 }
                 migraphx::execution_environment exec_env{
                     migraphx::any_ptr(reinterpret_cast<void*>(stream), stream_name), true};
                 return p.eval(pm, exec_env);
             })
        .def("sort", &migraphx::program::sort)
        .def("print", [](const migraphx::program& p) { std::cout << p << std::endl; })
        .def("__eq__", std::equal_to<migraphx::program>{})
        .def("__ne__", std::not_equal_to<migraphx::program>{})
        .def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); });

    py::class_<migraphx::operation> op(m, "op");
    op.def(py::init([](const std::string& name, py::kwargs kwargs) {
          migraphx::value v = migraphx::value::object{};
          if(kwargs)
          {
              v = migraphx::to_value(kwargs);
          }
          return migraphx::make_op(name, v);
      }))
        .def("name", &migraphx::operation::name);

    py::enum_<migraphx::op::pooling_mode>(op, "pooling_mode")
        .value("average", migraphx::op::pooling_mode::average)
        .value("max", migraphx::op::pooling_mode::max)
        .value("lpnorm", migraphx::op::pooling_mode::lpnorm);

    py::enum_<migraphx::op::rnn_direction>(op, "rnn_direction")
        .value("forward", migraphx::op::rnn_direction::forward)
        .value("reverse", migraphx::op::rnn_direction::reverse)
        .value("bidirectional", migraphx::op::rnn_direction::bidirectional);

    m.def(
        "argument_from_pointer",
        [](const migraphx::shape shape, const int64_t address) {
            return migraphx::argument(shape, reinterpret_cast<void*>(address));
        },
        py::arg("shape"),
        py::arg("address"));

    m.def(
        "parse_tf",
        [](const std::string& filename,
           bool is_nhwc,
           unsigned int batch_size,
           std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
           std::vector<std::string> output_names) {
            return migraphx::parse_tf(
                filename, migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names});
        },
        "Parse tf protobuf (default format is nhwc)",
        py::arg("filename"),
        py::arg("is_nhwc") = true,
        py::arg("batch_size") = 1,
        py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
        py::arg("output_names") = std::vector<std::string>());

    m.def(
        "parse_onnx",
        [](const std::string& filename,
           unsigned int default_dim_value,
           migraphx::shape::dynamic_dimension default_dyn_dim_value,
           std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
           std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
               map_dyn_input_dims,
           bool skip_unknown_operators,
           bool print_program_on_error,
           int64_t max_loop_iterations) {
            migraphx::onnx_options options;
            options.default_dim_value = default_dim_value;
            options.default_dyn_dim_value = default_dyn_dim_value;
            options.map_input_dims = map_input_dims;
            options.map_dyn_input_dims = map_dyn_input_dims;
            options.skip_unknown_operators = skip_unknown_operators;
            options.print_program_on_error = print_program_on_error;
            options.max_loop_iterations = max_loop_iterations;
            return migraphx::parse_onnx(filename, options);
        },
        "Parse onnx file",
        py::arg("filename"),
        py::arg("default_dim_value") = 0,
        py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
        py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
        py::arg("map_dyn_input_dims") =
            std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
        py::arg("skip_unknown_operators") = false,
        py::arg("print_program_on_error") = false,
        py::arg("max_loop_iterations") = 10);

    m.def(
        "parse_onnx_buffer",
        [](const std::string& onnx_buffer,
           unsigned int default_dim_value,
           migraphx::shape::dynamic_dimension default_dyn_dim_value,
           std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
           std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
               map_dyn_input_dims,
           bool skip_unknown_operators,
           bool print_program_on_error) {
            migraphx::onnx_options options;
            options.default_dim_value = default_dim_value;
            options.default_dyn_dim_value = default_dyn_dim_value;
            options.map_input_dims = map_input_dims;
            options.map_dyn_input_dims = map_dyn_input_dims;
            options.skip_unknown_operators = skip_unknown_operators;
            options.print_program_on_error = print_program_on_error;
            return migraphx::parse_onnx_buffer(onnx_buffer, options);
        },
        "Parse onnx file",
        py::arg("filename"),
        py::arg("default_dim_value") = 0,
        py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
        py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
        py::arg("map_dyn_input_dims") =
            std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
        py::arg("skip_unknown_operators") = false,
        py::arg("print_program_on_error") = false);

    m.def(
        "load",
        [](const std::string& name, const std::string& format) {
            migraphx::file_options options;
            options.format = format;
            return migraphx::load(name, options);
        },
        "Load MIGraphX program",
        py::arg("filename"),
        py::arg("format") = "msgpack");

    m.def(
        "save",
        [](const migraphx::program& p, const std::string& name, const std::string& format) {
            migraphx::file_options options;
            options.format = format;
            return migraphx::save(p, name, options);
        },
        "Save MIGraphX program",
        py::arg("p"),
        py::arg("filename"),
        py::arg("format") = "msgpack");

    m.def("get_target", &migraphx::make_target);
    m.def("create_argument", [](const migraphx::shape& s, const std::vector<double>& values) {
        if(values.size() != s.elements())
            MIGRAPHX_THROW("Values and shape elements do not match");
        migraphx::argument a{s};
        a.fill(values.begin(), values.end());
        return a;
    });
    m.def("generate_argument", &migraphx::generate_argument, py::arg("s"), py::arg("seed") = 0);
    m.def("fill_argument", &migraphx::fill_argument, py::arg("s"), py::arg("value"));
    m.def("quantize_fp16",
          &migraphx::quantize_fp16,
          py::arg("prog"),
          py::arg("ins_names") = std::vector<std::string>{"all"});
    m.def("quantize_int8",
          &migraphx::quantize_int8,
          py::arg("prog"),
          py::arg("t"),
          py::arg("calibration") = std::vector<migraphx::parameter_map>{},
          py::arg("ins_names") = std::vector<std::string>{"dot", "convolution"});

#ifdef HAVE_GPU
    m.def("allocate_gpu", &migraphx::gpu::allocate_gpu, py::arg("s"), py::arg("host") = false);
    m.def("to_gpu", &migraphx::gpu::to_gpu, py::arg("arg"), py::arg("host") = false);
    m.def("from_gpu", &migraphx::gpu::from_gpu);
    m.def("gpu_sync", [] { migraphx::gpu::gpu_sync(); });
#endif

#ifdef VERSION_INFO
    m.attr("__version__") = VERSION_INFO;
#else
    m.attr("__version__") = "dev";
#endif
}
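For orientation, this is how the migraphx module produced by this file ends up being driven from Python. The sketch below simply mirrors the calls the rocm detector plugin (frigate/detectors/plugins/rocm.py, further down) makes; the model path points at one of the ONNX files bundled by the Dockerfile above and is illustrative only:

    import sys
    import numpy as np

    sys.path.append("/opt/rocm/lib")  # the bindings are installed next to the ROCm libraries
    import migraphx

    # parse an ONNX graph and compile it for the GPU target
    model = migraphx.parse_onnx("/yolov8n_320x320.onnx")
    model.compile(migraphx.get_target("gpu"), offload_copy=True, fast_math=True)

    # the compiled program reports its input name and shape
    name = model.get_parameter_names()[0]
    shape = tuple(model.get_parameter_shapes()[name].lens())

    # run a dummy NCHW float32 tensor through the program and inspect the first output
    result = model.run({name: np.zeros(shape, dtype=np.float32)})[0]
    print(result.get_shape().lens())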
docker/rocm/rocm-pin-600 (new file, 3 lines)
@@ -0,0 +1,3 @@
Package: *
Pin: release o=repo.radeon.com
Pin-Priority: 600
docker/rocm/rocm.hcl (new file, 34 lines)
@@ -0,0 +1,34 @@
variable "AMDGPU" {
  default = "gfx900"
}
variable "ROCM" {
  default = "5.7.3"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
  default = ""
}
target deps {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/amd64"]
  target = "deps"
}

target rootfs {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/amd64"]
  target = "rootfs"
}

target rocm {
  dockerfile = "docker/rocm/Dockerfile"
  contexts = {
    deps = "target:deps",
    rootfs = "target:rootfs"
  }
  platforms = ["linux/amd64"]
  args = {
    AMDGPU = AMDGPU,
    ROCM = ROCM,
    HSA_OVERRIDE_GFX_VERSION = HSA_OVERRIDE_GFX_VERSION
  }
}
docker/rocm/rocm.list (new file, 1 line)
@@ -0,0 +1 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/5.7.3 focal main
docker/rocm/rocm.mk (new file, 17 lines)
@@ -0,0 +1,17 @@
BOARDS += rocm

# AMD/ROCm is chunky, so we build a couple of smaller images for specific chipsets
ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0

local-rocm: version
	$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) rocm;)
	AMDGPU=gfx docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm rocm

build-rocm: version
	$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(chipset) docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
	AMDGPU=gfx docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm

push-rocm: build-rocm
	$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(chipset) docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
	AMDGPU=gfx docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
frigate/detectors/plugins/rocm.py (new file, 94 lines)
@@ -0,0 +1,94 @@
import logging

import sys
import os
import numpy as np
import ctypes
from pydantic import Field
from typing_extensions import Literal
import glob

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig

logger = logging.getLogger(__name__)

DETECTOR_KEY = "rocm"


class ROCmDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class ROCmDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: ROCmDetectorConfig):
        try:
            sys.path.append("/opt/rocm/lib")
            import migraphx

            logger.info("AMD/ROCm: loaded migraphx module")
        except ModuleNotFoundError:
            logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
            raise

        assert detector_config.model.path is not None, (
            "No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
            + ", ".join(glob.glob("/*.onnx"))
            + " and "
            + ", ".join(glob.glob("/*_labels.txt"))
        )
        path = detector_config.model.path
        os.makedirs("/config/model_cache/rocm", exist_ok=True)
        mxr_path = "/config/model_cache/rocm/" + os.path.basename(
            os.path.splitext(path)[0] + ".mxr"
        )
        if os.path.exists(mxr_path):
            logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
            self.model = migraphx.load(mxr_path)
        else:
            logger.info(f"AMD/ROCm: loading model from {path}")
            if path.endswith(".onnx"):
                self.model = migraphx.parse_onnx(path)
            elif path.endswith(".tf") or path.endswith(".tf2") or path.endswith(".tflite"):
                self.model = migraphx.parse_tf(path)
            else:
                raise Exception(f"AMD/ROCm: unknown model format {path}")
            logger.info("AMD/ROCm: compiling the model")
            self.model.compile(
                migraphx.get_target("gpu"), offload_copy=True, fast_math=True
            )
            logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
            migraphx.save(self.model, mxr_path)
        logger.info("AMD/ROCm: model loaded")

    def detect_raw(self, tensor_input):
        model_input_name = self.model.get_parameter_names()[0]
        model_input_shape = tuple(
            self.model.get_parameter_shapes()[model_input_name].lens()
        )

        # adapt to nchw/nhwc shape dynamically
        if (
            tensor_input.shape[0],
            tensor_input.shape[3],
            tensor_input.shape[1],
            tensor_input.shape[2],
        ) == model_input_shape:
            tensor_input = np.transpose(tensor_input, (0, 3, 1, 2))

        assert (
            tensor_input.shape == model_input_shape
        ), f"invalid shapes for input ({tensor_input.shape}) and model ({model_input_shape})"

        tensor_input = (1 / 255.0) * np.ascontiguousarray(tensor_input, dtype=np.float32)

        detector_result = self.model.run({model_input_name: tensor_input})[0]

        # wrap the migraphx output buffer as a numpy array without copying
        addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
        npr = np.ctypeslib.as_array(addr, shape=detector_result.get_shape().lens())

        # YOLOv8-style output layout: (1, 4 box coordinates + class count, box count)
        model_box_count = npr.shape[2]
        model_class_count = npr.shape[1] - 4

        probs = npr[0, 4:, :]
        all_ids = np.argmax(probs, axis=0)
        all_confidences = np.take(
            probs.T, model_class_count * np.arange(0, model_box_count) + all_ids
        )
        all_boxes = npr[0, 0:4, :].T
        mask = all_confidences > 0.25
        class_ids = all_ids[mask]
        confidences = all_confidences[mask]
        cx, cy, w, h = all_boxes[mask].T

        detections = np.stack(
            (class_ids, confidences, cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2),
            axis=1,
        )
        if detections.shape[0] > 20:
            logger.warning(
                f"Found {detections.shape[0]} boxes, discarding last {detections.shape[0] - 20} entries to limit to 20"
            )
            # keep best confidences
            detections = detections[detections[:, 1].argsort()[::-1]]
        detections.resize((20, 6))

        return detections
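For intuition about the decoding in detect_raw above, the same steps can be run on a tiny synthetic YOLOv8-style output of shape (1, 4 + class count, box count); the flattened np.take lookup is written here in its equivalent fancy-indexing form, and all numbers are made up:

    import numpy as np

    # synthetic output: 4 box coordinates plus 3 classes for 5 candidate boxes
    npr = np.zeros((1, 7, 5), dtype=np.float32)
    npr[0, 0:4, 2] = [160, 120, 40, 60]  # cx, cy, w, h of box 2
    npr[0, 4 + 1, 2] = 0.9               # class 1 scores high on box 2

    probs = npr[0, 4:, :]                # (class count, box count)
    all_ids = np.argmax(probs, axis=0)   # best class per box
    all_confidences = probs[all_ids, np.arange(probs.shape[1])]
    all_boxes = npr[0, 0:4, :].T         # per box: cx, cy, w, h

    mask = all_confidences > 0.25
    cx, cy, w, h = all_boxes[mask].T
    # box 2 survives: class 1, confidence 0.9, top-left corner at (140.0, 90.0)
    print(all_ids[mask], all_confidences[mask], cx - w / 2, cy - h / 2)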