author    Håvard Pettersen <havardpe@oath.com>    2022-02-10 11:44:36 +0000
committer Håvard Pettersen <havardpe@oath.com>    2022-02-10 11:44:36 +0000
commit    f4fd8f8e29fa87ec79afce4172ce3d72ab6693f0 (patch)
tree      02fd48553341f4c6da973e0cd7e5a818e4461cd8 /eval
parent    fed60ae90b4e230129b4e9e6b72dd125d354e6c0 (diff)
add code to simplify onnx model testing
add a test model that combines probing and inference of dimension sizes
Diffstat (limited to 'eval')
-rw-r--r--  eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp | 22
-rw-r--r--  eval/src/tests/tensor/onnx_wrapper/probe_model.onnx      | 30
-rwxr-xr-x  eval/src/tests/tensor/onnx_wrapper/probe_model.py        | 35
-rw-r--r--  eval/src/vespa/eval/eval/test/CMakeLists.txt             |  1
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_onnx.cpp              | 54
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_onnx.h                | 13
6 files changed, 155 insertions, 0 deletions
diff --git a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
index da957673f95..e50c41e2e09 100644
--- a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
+++ b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
@@ -2,6 +2,7 @@
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/int8float.h>
+#include <vespa/eval/eval/test/eval_onnx.h>
#include <vespa/eval/onnx/onnx_wrapper.h>
#include <vespa/eval/onnx/onnx_model_cache.h>
#include <vespa/vespalib/util/bfloat16.h>
@@ -28,6 +29,7 @@ std::string int_types_model = source_dir + "/int_types.onnx";
std::string guess_batch_model = source_dir + "/guess_batch.onnx";
std::string unstable_types_model = source_dir + "/unstable_types.onnx";
std::string float_to_int8_model = source_dir + "/float_to_int8.onnx";
+std::string probe_model = source_dir + "/probe_model.onnx";
void dump_info(const char *ctx, const std::vector<TensorInfo> &info) {
fprintf(stderr, "%s:\n", ctx);
@@ -504,4 +506,24 @@ TEST(OnnxModelCacheTest, share_and_evict_onnx_models) {
EXPECT_EQ(OnnxModelCache::count_refs(), 0);
}
+TensorSpec val(const vespalib::string &expr) {
+ auto result = TensorSpec::from_expr(expr);
+ EXPECT_FALSE(ValueType::from_spec(result.type()).is_error());
+ return result;
+}
+
+TEST(OnnxTest, eval_onnx_with_probe_model) {
+ Onnx model(probe_model, Onnx::Optimize::ENABLE);
+ auto in1 = val("tensor<float>( x[2], y[3]):[[ 1, 2, 3],[ 4, 5, 6]]");
+ auto in2 = val("tensor<float>( x[2], y[3]):[[ 7, 8, 9],[ 4, 5, 6]]");
+ auto out1 = val("tensor<float>(d0[2],d1[3]):[[ 8,10,12],[ 8,10,12]]");
+ auto out2 = val("tensor<float>(d0[2],d1[3]):[[-6,-6,-6],[ 0, 0, 0]]");
+ auto out3 = val("tensor<float>(d0[2],d1[3]):[[ 7,16,27],[16,25,36]]");
+ auto result = test::eval_onnx(model, {in1, in2});
+ ASSERT_EQ(result.size(), 3);
+ EXPECT_EQ(result[0], out1);
+ EXPECT_EQ(result[1], out2);
+ EXPECT_EQ(result[2], out3);
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/tensor/onnx_wrapper/probe_model.onnx b/eval/src/tests/tensor/onnx_wrapper/probe_model.onnx
new file mode 100644
index 00000000000..89dab2e7c4c
--- /dev/null
+++ b/eval/src/tests/tensor/onnx_wrapper/probe_model.onnx
(binary ONNX protobuf data omitted; this file is generated by probe_model.py, shown below)
diff --git a/eval/src/tests/tensor/onnx_wrapper/probe_model.py b/eval/src/tests/tensor/onnx_wrapper/probe_model.py
new file mode 100755
index 00000000000..529fa23b2b1
--- /dev/null
+++ b/eval/src/tests/tensor/onnx_wrapper/probe_model.py
@@ -0,0 +1,35 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+import onnx
+from onnx import helper, TensorProto
+
+IN1 = helper.make_tensor_value_info('in1', TensorProto.FLOAT, [-1, 'inner'])
+IN2 = helper.make_tensor_value_info('in2', TensorProto.FLOAT, ['outer', -1])
+OUT1 = helper.make_tensor_value_info('out1', TensorProto.FLOAT, [-1, 'inner'])
+OUT2 = helper.make_tensor_value_info('out2', TensorProto.FLOAT, ['outer', -1])
+OUT3 = helper.make_tensor_value_info('out3', TensorProto.FLOAT, [-1, -1])
+
+nodes = [
+ helper.make_node(
+ 'Add',
+ ['in1', 'in2'],
+ ['out1'],
+ ),
+ helper.make_node(
+ 'Sub',
+ ['in1', 'in2'],
+ ['out2'],
+ ),
+ helper.make_node(
+ 'Mul',
+ ['in1', 'in2'],
+ ['out3'],
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'probe_model',
+ [IN1, IN2],
+ [OUT1, OUT2, OUT3],
+)
+model_def = helper.make_model(graph_def, producer_name='probe_model.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'probe_model.onnx')
diff --git a/eval/src/vespa/eval/eval/test/CMakeLists.txt b/eval/src/vespa/eval/eval/test/CMakeLists.txt
index e8a291adf2a..ff1505a4010 100644
--- a/eval/src/vespa/eval/eval/test/CMakeLists.txt
+++ b/eval/src/vespa/eval/eval/test/CMakeLists.txt
@@ -3,6 +3,7 @@ vespa_add_library(eval_eval_test OBJECT
SOURCES
cell_type_space.cpp
eval_fixture.cpp
+ eval_onnx.cpp
eval_spec.cpp
gen_spec.cpp
reference_evaluation.cpp
diff --git a/eval/src/vespa/eval/eval/test/eval_onnx.cpp b/eval/src/vespa/eval/eval/test/eval_onnx.cpp
new file mode 100644
index 00000000000..74a83b130c2
--- /dev/null
+++ b/eval/src/vespa/eval/eval/test/eval_onnx.cpp
@@ -0,0 +1,54 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "eval_onnx.h"
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/value_codec.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".eval.eval.test.eval_onnx");
+
+namespace vespalib::eval::test {
+
+std::vector<TensorSpec> eval_onnx(const Onnx &model, const std::vector<TensorSpec> &params) {
+ if (params.size() != model.inputs().size()) {
+ LOG(error, "model with %zu inputs run with %zu parameters", model.inputs().size(), params.size());
+ return {}; // wrong number of parameters
+ }
+ Onnx::WirePlanner planner;
+ for (size_t i = 0; i < model.inputs().size(); ++i) {
+ if (!planner.bind_input_type(ValueType::from_spec(params[i].type()), model.inputs()[i])) {
+ LOG(error, "unable to bind input type: %s -> %s", params[i].type().c_str(), model.inputs()[i].type_as_string().c_str());
+ return {}; // inconsistent input types
+ }
+ }
+ planner.prepare_output_types(model);
+ for (size_t i = 0; i < model.outputs().size(); ++i) {
+ if (planner.make_output_type(model.outputs()[i]).is_error()) {
+ LOG(error, "unable to make output type: %s -> error", model.outputs()[i].type_as_string().c_str());
+ return {}; // unable to infer/probe output type
+ }
+ }
+ planner.prepare_output_types(model);
+ auto wire_info = planner.get_wire_info(model);
+ try {
+ Onnx::EvalContext context(model, wire_info);
+ std::vector<Value::UP> inputs;
+ for (const auto &param: params) {
+ inputs.push_back(value_from_spec(param, FastValueBuilderFactory::get()));
+ }
+ for (size_t i = 0; i < model.inputs().size(); ++i) {
+ context.bind_param(i, *inputs[i]);
+ }
+ context.eval();
+ std::vector<TensorSpec> results;
+ for (size_t i = 0; i < model.outputs().size(); ++i) {
+ results.push_back(spec_from_value(context.get_result(i)));
+ }
+ return results;
+ } catch (const Ort::Exception &ex) {
+ LOG(error, "model run failed: %s", ex.what());
+ return {}; // evaluation failed
+ }
+}
+
+} // namespace
diff --git a/eval/src/vespa/eval/eval/test/eval_onnx.h b/eval/src/vespa/eval/eval/test/eval_onnx.h
new file mode 100644
index 00000000000..bb346b7f21e
--- /dev/null
+++ b/eval/src/vespa/eval/eval/test/eval_onnx.h
@@ -0,0 +1,13 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vector>
+
+namespace vespalib::eval::test {
+
+std::vector<TensorSpec> eval_onnx(const Onnx &model, const std::vector<TensorSpec> &params);
+
+} // namespace
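
For reference, a minimal usage sketch of the new test::eval_onnx helper from another GTest-based test. This is hypothetical and not part of this commit: the model file "my_model.onnx", the test name, and the expected output value are placeholders.

#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/test/eval_onnx.h>
#include <vespa/eval/onnx/onnx_wrapper.h>
#include <vespa/vespalib/gtest/gtest.h>

using namespace vespalib::eval;

TEST(MyOnnxTest, eval_my_model) {
    // hypothetical model file with a single input and a single output
    Onnx model("my_model.onnx", Onnx::Optimize::ENABLE);
    auto input = TensorSpec::from_expr("tensor<float>(x[2],y[3]):[[1,2,3],[4,5,6]]");
    // eval_onnx returns one TensorSpec per model output; an empty vector signals failure
    auto results = test::eval_onnx(model, {input});
    ASSERT_EQ(results.size(), 1);
    // placeholder expected value; substitute the actual output of the model under test
    EXPECT_EQ(results[0], TensorSpec::from_expr("tensor<float>(d0[2],d1[3]):[[2,4,6],[8,10,12]]"));
}

GTEST_MAIN_RUN_ALL_TESTS()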