
Commit b935290

[DLStreamer] Remove deprecated VPUX (#1528)
1 parent c37b893 commit b935290

12 files changed: +8 −173 lines changed

libraries/dl-streamer/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -104,7 +104,6 @@ cmake_dependent_option(ENABLE_TESTS "Parameter to enable tests building" ON "UNI
 cmake_dependent_option(ENABLE_FUZZING "Parameter to enable fuzzy tests building" OFF "UNIX" OFF)
 cmake_dependent_option(ENABLE_RDKAFKA_INSTALLATION "Enables rdkafka installation" OFF "UNIX" OFF)
 option(ENABLE_AUDIO_INFERENCE_ELEMENTS "Enables audio inference elements" ON)
-option(ENABLE_VPUX "Enables VPUX specific features" OFF)
 option(ENABLE_REALSENSE "Parameter to enable RelaseSense plugin compilation" OFF)
 option(ENABLE_GENAI "Enables GenAI elements" OFF)
 # Enable GenAI by default on Windows.
Lines changed: 1 addition & 2 deletions
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  ******************************************************************************/
@@ -8,7 +8,6 @@
 #define CONFIG_H
 
 #cmakedefine ENABLE_VAAPI
-#cmakedefine ENABLE_VPUX
 #cmakedefine ENABLE_ITT
 
 #endif
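For context, a minimal sketch of how such a #cmakedefine template behaves once CMake's configure_file() processes it (illustrative only; the option states below are assumptions, not part of this commit): a flag whose option() is ON becomes a plain #define, a flag that is OFF is emitted as a commented-out #undef, and a flag whose option() no longer exists, like ENABLE_VPUX here, simply disappears from the generated header.

/* Sketch of the generated config.h, assuming ENABLE_VAAPI=ON and ENABLE_ITT=OFF */
#ifndef CONFIG_H
#define CONFIG_H

#define ENABLE_VAAPI     /* "#cmakedefine ENABLE_VAAPI" with the option ON */
/* #undef ENABLE_ITT */  /* "#cmakedefine ENABLE_ITT" with the option OFF */

#endif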

libraries/dl-streamer/include/dlstreamer/gst/mappers/gst_to_cpu.h

Lines changed: 0 additions & 10 deletions
@@ -112,16 +112,6 @@ class MemoryMapperGSTToCPU : public BaseMemoryMapper {
         tensors[i] = std::make_shared<CPUTensor>(info.tensors[i], data);
     }
     auto dst = new BaseFrame(info.media_type, info.format, tensors);
-
-#ifdef ENABLE_VPUX // also get DMA FD
-    GstMemory *mem = gst_buffer_peek_memory(src->gst_buffer(), 0);
-    if (!mem)
-        throw std::runtime_error("Failed to get GstBuffer memory");
-    if (gst_is_dmabuf_memory(mem)) {
-        int dma_fd = gst_dmabuf_memory_get_fd(mem.get());
-        set_handle("dma_fd", dma_fd);
-    }
-#endif
     auto deleter = [frame_ptr](BaseFrame *dst) {
         gst_video_frame_unmap(frame_ptr.get());
         delete dst;
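For reference, the DMA-fd lookup that the removed block performed can be written as a small standalone helper against the public GStreamer allocator API; this is an illustrative sketch only (the helper name is made up here), not code from the repository.

#include <gst/gst.h>
#include <gst/allocators/gstdmabuf.h>

#include <optional>

// Return the dmabuf file descriptor backing the first GstMemory of a buffer,
// or std::nullopt if the buffer is not dmabuf-backed.
std::optional<int> try_get_dmabuf_fd(GstBuffer *buffer) {
    GstMemory *mem = gst_buffer_peek_memory(buffer, 0); // borrowed reference, no unref needed
    if (!mem || !gst_is_dmabuf_memory(mem))
        return std::nullopt;
    int fd = gst_dmabuf_memory_get_fd(mem); // the fd remains owned by the GstMemory
    return fd >= 0 ? std::optional<int>(fd) : std::nullopt;
}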

libraries/dl-streamer/src/monolithic/gst/audio_inference_elements/base/gva_audio_base_inference.c

Lines changed: 1 addition & 3 deletions
@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  ******************************************************************************/
@@ -13,7 +13,6 @@
 #define DEFAULT_SLIDING_WINDOW 1
 #define DEFAULT_THRESHOLD 0.5
 #define DEFAULT_DEVICE "CPU"
-#define DEFAULT_DMA_FD 0
 
 enum { PROP_0, PROP_MODEL, PROP_MODEL_PROC, PROP_SLIDING_WINDOW, PROP_THRESHOLD, PROP_DEVICE };
 
@@ -41,7 +40,6 @@ static void gva_audio_base_inference_init(GvaAudioBaseInference *audio_base_infe
     audio_base_inference->threshold = DEFAULT_THRESHOLD;
     audio_base_inference->device = g_strdup(DEFAULT_DEVICE);
     audio_base_inference->values_checked = FALSE;
-    audio_base_inference->dma_fd = DEFAULT_DMA_FD;
 }
 
 static void gva_audio_base_inference_class_init(GvaAudioBaseInferenceClass *klass) {

libraries/dl-streamer/src/monolithic/gst/audio_inference_elements/base/gva_audio_base_inference.h

Lines changed: 0 additions & 1 deletion
@@ -29,7 +29,6 @@ typedef struct _GvaAudioBaseInference {
     gchar *device;
 
     // other fields
-    int dma_fd; // used if VPUX remote blob enabled
     gboolean values_checked;
     guint sample_length;
     // smart pointers cannot be used because of mixed c and c++ code

libraries/dl-streamer/src/monolithic/gst/audio_inference_elements/base/processor.cpp

Lines changed: 2 additions & 13 deletions
@@ -132,17 +132,6 @@ GstFlowReturn infer_audio(GvaAudioBaseInference *audio_base_inference, GstBuffer
         throw std::runtime_error("Invalid Audio buffer");
     auto map_context = std::unique_ptr<GstMapInfo, std::function<void(GstMapInfo *)>>(
         &map, [buf](GstMapInfo *map) { gst_buffer_unmap(buf, map); });
-#ifdef ENABLE_VPUX
-    auto mem = GstMemoryUniquePtr(gst_buffer_get_memory(buf, 0), gst_memory_unref);
-    if (not mem.get())
-        throw std::runtime_error("Failed to get GstBuffer memory");
-    if (gst_is_dmabuf_memory(mem.get())) {
-        int fd = gst_dmabuf_memory_get_fd(mem);
-        if (fd <= 0)
-            throw std::runtime_error("Failed to get file desc associated with GstBuffer memory");
-        audio_base_inference->dma_fd = fd;
-    }
-#endif
     auto samples = reinterpret_cast<int16_t *>(map.data);
     uint32_t num_samples = map.size / sizeof(int16_t);
     check_and_adjust_properties(num_samples, audio_base_inference);
@@ -155,9 +144,9 @@ GstFlowReturn infer_audio(GvaAudioBaseInference *audio_base_inference, GstBuffer
     std::vector<float> normalized_samples = audio_base_inference->pre_proc(&frame);
     auto normalized_samples_u8 = inf_handle->convertFloatToU8(normalized_samples);
     if (normalized_samples_u8.empty())
-        inf_handle->setInputBlob(normalized_samples.data(), audio_base_inference->dma_fd);
+        inf_handle->setInputBlob(normalized_samples.data());
     else
-        inf_handle->setInputBlob(normalized_samples_u8.data(), audio_base_inference->dma_fd);
+        inf_handle->setInputBlob(normalized_samples_u8.data());
     inf_handle->infer();
     audio_base_inference->post_proc(&frame, inf_handle->getInferenceOutput());
 }
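The map_context guard kept above relies on a unique_ptr with a custom deleter so that gst_buffer_unmap() runs on every exit path, including exceptions. A self-contained sketch of that pattern (illustrative, not taken from the repository):

#include <gst/gst.h>

#include <cstdint>
#include <functional>
#include <memory>
#include <stdexcept>

// Map a GstBuffer for reading and unmap it automatically when the guard leaves scope.
void read_samples(GstBuffer *buf) {
    GstMapInfo map;
    if (!gst_buffer_map(buf, &map, GST_MAP_READ))
        throw std::runtime_error("Invalid audio buffer");
    auto unmap_guard = std::unique_ptr<GstMapInfo, std::function<void(GstMapInfo *)>>(
        &map, [buf](GstMapInfo *m) { gst_buffer_unmap(buf, m); });

    auto samples = reinterpret_cast<const int16_t *>(map.data); // 16-bit PCM assumed
    size_t num_samples = map.size / sizeof(int16_t);
    (void)samples;
    (void)num_samples;
    // ... process samples; the buffer is unmapped even if processing throws
}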

libraries/dl-streamer/src/monolithic/gst/common/gva_caps.h

Lines changed: 1 addition & 3 deletions
@@ -25,9 +25,7 @@
 #define VASURFACE_CAPS
 #endif
 
-#ifdef ENABLE_VPUX
-#define DMA_BUFFER_CAPS GST_VIDEO_CAPS_MAKE_WITH_FEATURES(DMABUF_FEATURE_STR, "{ DMA_DRM }") "; "
-#elif defined ENABLE_VAAPI
+#ifdef ENABLE_VAAPI
 #define DMA_BUFFER_CAPS GST_VIDEO_CAPS_MAKE_WITH_FEATURES(DMABUF_FEATURE_STR, "{ DMA_DRM }") "; "
 #else
 #define DMA_BUFFER_CAPS
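For illustration (not part of this commit), the branch that remains builds ordinary dmabuf-feature video caps. A minimal program that constructs the same kind of caps string, assuming DMABUF_FEATURE_STR in the repository resolves to the standard GST_CAPS_FEATURE_MEMORY_DMABUF ("memory:DMABuf") feature:

#include <gst/gst.h>
#include <gst/video/video.h>
#include <gst/allocators/gstdmabuf.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
    gst_init(&argc, &argv);

    /* Same shape as the kept DMA_BUFFER_CAPS branch: raw video caps restricted to
     * dmabuf-backed memory with the DMA_DRM format. */
    GstCaps *caps = gst_caps_from_string(
        GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_DMABUF, "{ DMA_DRM }"));

    gchar *str = gst_caps_to_string(caps);
    printf("%s\n", str);

    g_free(str);
    gst_caps_unref(caps);
    return 0;
}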

libraries/dl-streamer/src/monolithic/gst/inference_elements/base/inference_impl.cpp

Lines changed: 0 additions & 8 deletions
@@ -326,11 +326,7 @@ GetPreferredImagePreproc(CapsFeature caps, const std::vector<ModelInputProcessor
         }
         break;
     case DMA_BUF_CAPS_FEATURE:
-#ifdef ENABLE_VPUX
-        result = ImagePreprocessorType::IE;
-#else
         result = ImagePreprocessorType::VAAPI_SYSTEM;
-#endif
         break;
     case D3D11_MEMORY_CAPS_FEATURE:
         result = ImagePreprocessorType::D3D11;
@@ -556,11 +552,7 @@ MemoryType GetMemoryType(CapsFeature caps_feature) {
     case CapsFeature::SYSTEM_MEMORY_CAPS_FEATURE:
         return MemoryType::SYSTEM;
     case CapsFeature::DMA_BUF_CAPS_FEATURE:
-#ifdef ENABLE_VPUX
-        return MemoryType::SYSTEM;
-#else
         return MemoryType::DMA_BUFFER;
-#endif
     case CapsFeature::VA_SURFACE_CAPS_FEATURE:
     case CapsFeature::VA_MEMORY_CAPS_FEATURE:
         return MemoryType::VAAPI;

libraries/dl-streamer/src/monolithic/inference_backend/image_inference/openvino/openvino_image_inference.cpp

Lines changed: 0 additions & 97 deletions
@@ -1522,103 +1522,6 @@ void OpenVINOImageInference::FreeRequest(std::shared_ptr<BatchRequest> request)
     request_processed_.notify_all();
 }
 
-#if 0
-InferenceEngine::RemoteContext::Ptr
-OpenVINOImageInference::CreateRemoteContext(const InferenceBackend::InferenceConfig &config) {
-    InferenceEngine::RemoteContext::Ptr remote_context;
-    const std::string &device = config.at(KEY_BASE).at(KEY_DEVICE);
-
-#ifdef ENABLE_VPUX
-    std::string vpu_device_name;
-    bool has_vpu_device_id = false;
-    std::tie(has_vpu_device_id, vpu_device_name) = Utils::parseDeviceName(device);
-    if (!vpu_device_name.empty()) {
-        const std::string msg = "VPUX device defined as " + vpu_device_name;
-        GVA_INFO(msg.c_str());
-
-        const std::string base_device = "VPUX";
-        std::string device = vpu_device_name;
-        if (!has_vpu_device_id) {
-            // Retrieve ID of the first available device
-            std::vector<std::string> device_list =
-                IeCoreSingleton::Instance().GetMetric(base_device, METRIC_KEY(AVAILABLE_DEVICES));
-            if (!device_list.empty())
-                device = device_list.at(0);
-            // else device is already set to VPU-0
-        }
-        const InferenceEngine::ParamMap params = {{InferenceEngine::KMB_PARAM_KEY(DEVICE_ID), device}};
-        remote_context = IeCoreSingleton::Instance().CreateContext(base_device, params);
-    }
-#endif
-
-#ifdef ENABLE_VAAPI
-    const bool is_gpu_device = device.rfind("GPU", 0) == 0;
-    // There are 3 possible scenarios:
-    // 1. memory_type == VAAPI: we are going to use the surface sharing mode and we have to provide
-    // VADisplay to GPU plugin so it can work with VaSurfaces that we are going to submit via BLOBs.
-    // 2.1. memory_type == SYSTEM and display is available: VAAPI serves only as pre-processing and
-    // we don't have to provide VADisplay to the plugin in such case. However, by providing VADisplay,
-    // we can make sure that the plugin will choose the same GPU for inference as for decoding by
-    // vaapi elements.
-    // 2.2. memory_type == SYSTEM and display is not available: user chose to perform decode on CPU
-    // and inference on GPU. IE pre-processing is used in this case.
-    // In cases 1 and 2.1 we don't need to specify GPU number (GPU.0, GPU.1) to achieve device affinity.
-    // In case 2.2 user may choose desired GPU for inference. Currently matching GPU ID to actual
-    // hardware is not possible due to OpenVINO™ API lacking.
-    if (is_gpu_device && (memory_type == MemoryType::VAAPI || memory_type == MemoryType::SYSTEM)) {
-        if (context_) {
-            using namespace InferenceEngine;
-
-            // TODO: Bug in OpenVINO™ 2021.4.X. Caused by using GPU_THROUGHPUT_STREAMS=GPU_THROUGHPUT_AUTO.
-            // During CreateContext() call, IE creates queues for each of GPU_THROUGHPUT_STREAMS. By
-            // default GPU_THROUGHPUT_STREAMS is set to 1, so IE creates only 1 queue. Then, during
-            // LoadNetwork() call we pass GPU_THROUGHPUT_STREAMS=GPU_THROUGHPUT_AUTO and GPU plugin may
-            // set number of streams to more than 1 (for example, 2). Then for each of the streams (2)
-            // GPU plugin tries to get IE queue, but fails because number of streams is greater than
-            // number of created queues.
-            // Because of that user may get an error:
-            // Unable to create network with stream_id=1
-            // Workaround for this is to set GPU_THROUGHPUT_STREAMS to IE prior to CreateContext() call.
-            auto &ie_config = config.at(KEY_INFERENCE);
-            auto it = ie_config.find(KEY_GPU_THROUGHPUT_STREAMS);
-            if (it != ie_config.end())
-                IeCoreSingleton::Instance().SetConfig({*it}, "GPU");
-
-            auto va_display = context_->handle(dlstreamer::VAAPIContext::key::va_display);
-            if (!va_display)
-                throw std::runtime_error("Error getting va_display from context");
-
-            InferenceEngine::ParamMap contextParams = {
-                {GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)},
-                {GPU_PARAM_KEY(VA_DEVICE), static_cast<InferenceEngine::gpu_handle_param>(va_display)}};
-
-            // GPU tile affinity
-            if (device == "GPU.x") {
-#ifdef ENABLE_GPU_TILE_AFFINITY
-                VaDpyWrapper dpyWrapper(va_display);
-                int tile_id = dpyWrapper.currentSubDevice();
-                // If tile_id is -1 (single-tile GPU is used or the driver doesn't support this feature)
-                // then GPU plugin will choose the device and tile on its own and there will be no affinity
-                contextParams.insert({GPU_PARAM_KEY(TILE_ID), tile_id});
-#else
-                GVA_WARNING("Current version of OpenVINO™ toolkit doesn't support tile affinity, version 2022.1 or "
-                            "higher is required");
-#endif
-            }
-
-            remote_context = IeCoreSingleton::Instance().CreateContext("GPU", contextParams);
-        } else if (memory_type == MemoryType::VAAPI) {
-            throw std::runtime_error("Display must be provided for GPU device with vaapi-surface-sharing backend");
-        }
-    }
-#else
-    UNUSED(device);
-#endif
-
-    return remote_context;
-}
-#endif
-
 bool OpenVINOImageInference::IsQueueFull() {
     return freeRequests.empty();
 }
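The deleted #if 0 block targeted the legacy InferenceEngine API. For orientation only, a hedged sketch of how the VA-display sharing it described is usually expressed with the OpenVINO 2.x C++ API; this is not code from this commit, and both the model path and the way the VADisplay is obtained are placeholders supplied by the caller:

#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_gpu/ocl/va.hpp>

#include <va/va.h>
#include <string>

// Sketch: compile a model on the GPU plugin with a shared VADisplay so inference
// runs on the same device that VAAPI decode uses.
ov::CompiledModel compile_on_shared_gpu(ov::Core &core, const std::string &model_path, VADisplay va_display) {
    auto model = core.read_model(model_path);
    // Wrap the application's VADisplay in an OpenVINO remote context for the GPU plugin.
    ov::intel_gpu::ocl::VAContext va_context(core, va_display);
    return core.compile_model(model, va_context);
}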

libraries/dl-streamer/src/monolithic/inference_backend/include/inference_backend/image.h

Lines changed: 1 addition & 1 deletion
@@ -73,7 +73,7 @@ struct Image {
         void *d3d11_device;
     };
     };
-    int dma_fd = -1; // if type==DMA_BUFFER or VPUX device is used
+    int dma_fd = -1; // if type==DMA_BUFFER
 
     int format = 0; // FourCC
     uint64_t drm_format_modifier = 0;
