@@ -251,16 +251,16 @@ TEST(OrtEpLibrary, KernelPluginEp_Inference) {
251251 std::unordered_map<std::string, std::string> ep_options;
252252 session_options.AppendExecutionProvider_V2(*ort_env, {plugin_ep_device}, ep_options);
253253
254- // This model has Squeeze -> Mul. The example plugin EP supports both using registered kernels.
254+ // This model has Squeeze, Mul, and Relu nodes. The example plugin EP supports all nodes using registered kernels.
255255 Ort::Session session(*ort_env, ORT_TSTR("testdata/squeeze_mul_relu.onnx"), session_options);
256256
257257 // Create inputs
258258 Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
259259 std::array<int64_t, 3> a_shape = {3, 1, 2};
260260 std::array<int64_t, 2> b_shape = {3, 2};
261261
262- std::array<float, 6> a_data = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
263- std::array<float, 6> b_data = {2.f, 3.f, 4.f, -5.f, -6.f, 7.f};
262+ std::array<float, 6> a_data = {1.f, -2.f, 3.f, 4.f, -5.f, 6.f};
263+ std::array<float, 6> b_data = {2.f, 3.f, 4.f, -5.f, 6.f, 7.f};
264264
265265 std::vector<Ort::Value> ort_inputs{};
266266 ort_inputs.emplace_back(
@@ -279,7 +279,7 @@ TEST(OrtEpLibrary, KernelPluginEp_Inference) {
279279 Ort::Value& ort_output = ort_outputs[0];
280280 const float* output_data = ort_output.GetTensorData<float>();
281281 gsl::span<const float> output_span(output_data, 6);
282- EXPECT_THAT(output_span, ::testing::ElementsAre(2, 6, 12, 0, 0, 42));
282+ EXPECT_THAT(output_span, ::testing::ElementsAre(4, 0, 24, 0, 0, 84));
283283}
284284} // namespace test
285285} // namespace onnxruntime
0 commit comments