author    Håvard Pettersen <havardpe@oath.com>  2020-11-26 15:21:53 +0000
committer Håvard Pettersen <havardpe@oath.com>  2020-11-27 15:18:53 +0000
commit    3f9451ec5e2e67cd9a3c91891590e0f6dd7244a7
tree      3933ddcfa943d66cb11573d7aeebb3ae9077fbc7
parent    7a1a70055770a82bf42bff668abaf011af3f6e55
reference evaluation
 eval/CMakeLists.txt                                                    |   1
 eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp |   2
 eval/src/tests/eval/reference_evaluation/CMakeLists.txt                |  10
 eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp | 196
 eval/src/tests/eval/reference_operations/reference_operations_test.cpp |  38
 eval/src/vespa/eval/eval/tensor_nodes.h                                |  11
 eval/src/vespa/eval/eval/tensor_spec.cpp                               |  10
 eval/src/vespa/eval/eval/tensor_spec.h                                 |   1
 eval/src/vespa/eval/eval/test/CMakeLists.txt                           |   1
 eval/src/vespa/eval/eval/test/eval_fixture.cpp                         |  14
 eval/src/vespa/eval/eval/test/eval_fixture.h                           |   4
 eval/src/vespa/eval/eval/test/eval_spec.h                              |   9
 eval/src/vespa/eval/eval/test/reference_evaluation.cpp                 | 358
 eval/src/vespa/eval/eval/test/reference_evaluation.h                   |  16
 eval/src/vespa/eval/eval/test/reference_operations.cpp                 |  30
 eval/src/vespa/eval/eval/test/reference_operations.h                   |   7
 16 files changed, 693 insertions(+), 15 deletions(-)
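
For orientation (not part of the commit): a minimal, hypothetical sketch of how the ReferenceEvaluation entry point introduced below can be driven from a standalone program. The expression and parameter values are made up for illustration; the calls used (Function::parse, TensorSpec::add, ReferenceEvaluation::eval, TensorSpec::as_double) are the ones added or exercised in the diffs that follow.

// Hypothetical usage sketch of the new reference evaluation API.
#include <vespa/eval/eval/function.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/test/reference_evaluation.h>
#include <cstdio>

using vespalib::eval::Function;
using vespalib::eval::TensorSpec;
using vespalib::eval::test::ReferenceEvaluation;

int main() {
    // parse an expression with two parameters (illustrative expression)
    auto fun = Function::parse({"a", "b"}, "reduce(a*b,sum,x)");
    // bind each parameter to a TensorSpec
    auto a = TensorSpec("tensor(x[2])").add({{"x", 0}}, 1.0).add({{"x", 1}}, 2.0);
    auto b = TensorSpec("tensor(x[2])").add({{"x", 0}}, 3.0).add({{"x", 1}}, 4.0);
    // evaluate directly on the function AST, with no tensor-function conversion
    TensorSpec result = ReferenceEvaluation::eval(*fun, {a, b});
    fprintf(stderr, "result: %g\n", result.as_double()); // expected: 1*3 + 2*4 = 11
    return 0;
}
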
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index 3d7cbf03071..bbf20dc7e8a 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -28,6 +28,7 @@ vespa_define_module(
src/tests/eval/node_tools
src/tests/eval/node_types
src/tests/eval/param_usage
+ src/tests/eval/reference_evaluation
src/tests/eval/reference_operations
src/tests/eval/simple_tensor
src/tests/eval/simple_value
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index 3a3433704f6..7092d354b10 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -84,7 +84,7 @@ struct MyEvalTest : test::EvalSpec::EvalTest {
}
};
-TEST_FF("require that compiled evaluation passes all conformance tests", MyEvalTest(), test::EvalSpec()) {
+TEST_FF("require that interpreted evaluation passes all conformance tests", MyEvalTest(), test::EvalSpec()) {
f1.print_fail = true;
f2.add_all_cases();
f2.each_case(f1);
diff --git a/eval/src/tests/eval/reference_evaluation/CMakeLists.txt b/eval/src/tests/eval/reference_evaluation/CMakeLists.txt
new file mode 100644
index 00000000000..0ca6987d689
--- /dev/null
+++ b/eval/src/tests/eval/reference_evaluation/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+vespa_add_executable(eval_reference_evaluation_test_app TEST
+ SOURCES
+ reference_evaluation_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_reference_evaluation_test_app COMMAND eval_reference_evaluation_test_app)
diff --git a/eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp b/eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp
new file mode 100644
index 00000000000..a51b7cfbc89
--- /dev/null
+++ b/eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp
@@ -0,0 +1,196 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/test/reference_evaluation.h>
+#include <vespa/eval/eval/function.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/test/eval_spec.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+
+//-----------------------------------------------------------------------------
+
+TensorSpec ref_eval(const Function &fun, const std::vector<TensorSpec> &params) {
+ return ReferenceEvaluation::eval(fun, params);
+}
+
+TensorSpec ref_eval(std::shared_ptr<const Function> fun, const std::vector<TensorSpec> &params) {
+ return ref_eval(*fun, params);
+}
+
+TensorSpec ref_eval(const vespalib::string &expr, const std::vector<TensorSpec> &params) {
+ return ref_eval(*Function::parse(expr), params);
+}
+
+TensorSpec make_val(const vespalib::string &expr) {
+ return ref_eval(*Function::parse(expr), {});
+}
+
+//-----------------------------------------------------------------------------
+
+struct MyEvalTest : EvalSpec::EvalTest {
+ size_t pass_cnt = 0;
+ size_t fail_cnt = 0;
+ bool print_pass = false;
+ bool print_fail = false;
+ void next_expression(const std::vector<vespalib::string> &,
+ const vespalib::string &) override {}
+ void handle_case(const std::vector<vespalib::string> &param_names,
+ const std::vector<double> &param_values,
+ const vespalib::string &expression,
+ double expected_result) override
+ {
+ auto function = Function::parse(param_names, expression);
+ ASSERT_FALSE(function->has_error());
+ std::vector<TensorSpec> params;
+ for (double param: param_values) {
+ params.push_back(TensorSpec("double").add({}, param));
+ }
+ auto eval_result = ref_eval(function, params);
+ ASSERT_EQ(eval_result.type(), "double");
+ double result = eval_result.as_double();
+ if (is_same(expected_result, result)) {
+ print_pass && fprintf(stderr, "verifying: %s -> %g ... PASS\n",
+ as_string(param_names, param_values, expression).c_str(),
+ expected_result);
+ ++pass_cnt;
+ } else {
+ print_fail && fprintf(stderr, "verifying: %s -> %g ... FAIL: got %g\n",
+ as_string(param_names, param_values, expression).c_str(),
+ expected_result, result);
+ ++fail_cnt;
+ }
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+TEST(ReferenceEvaluationTest, reference_evaluation_passes_all_eval_spec_tests) {
+ MyEvalTest test;
+ EvalSpec spec;
+ test.print_fail = true;
+ spec.add_all_cases();
+ spec.each_case(test);
+ EXPECT_GT(test.pass_cnt, 1000);
+ EXPECT_EQ(test.fail_cnt, 0);
+}
+
+//-----------------------------------------------------------------------------
+
+// 'make_val' will be used to generate tensor specs for inputs and
+// expected outputs for other tests. In the production evaluation
+// pipeline this kind of tensor create will be converted to a constant
+// value when converting the Function to a TensorFunction. With the
+// reference evaluation the Function is evaluated directly with no
+// constant folding.
+
+TEST(ReferenceEvaluationTest, constant_create_expression_works) {
+ auto expect = TensorSpec("tensor(x{},y[2])")
+ .add({{"x", "a"}, {"y", 0}}, 1.0)
+ .add({{"x", "a"}, {"y", 1}}, 2.0);
+ auto result = make_val("tensor(x{},y[2]):{a:[1,2]}");
+ EXPECT_EQ(result, expect);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(ReferenceEvaluationTest, parameter_expression_works) {
+ auto a = make_val("tensor(x[2]):[1,2]");
+ auto b = make_val("tensor(x[2]):[3,4]");
+ auto fun_a = Function::parse({"a", "b"}, "a");
+ auto fun_b = Function::parse({"a", "b"}, "b");
+ EXPECT_EQ(ref_eval(fun_a, {a, b}), a);
+ EXPECT_EQ(ref_eval(fun_b, {a, b}), b);
+}
+
+TEST(ReferenceEvaluationTest, reduce_expression_works) {
+ auto a = make_val("tensor(x[2],y[2]):[[1,2],[3,4]]");
+ auto expect = make_val("tensor(x[2]):[3,7]");
+ EXPECT_EQ(ref_eval("reduce(a,sum,y)", {a}), expect);
+}
+
+TEST(ReferenceEvaluationTest, map_expression_works) {
+ auto a = make_val("tensor(x[2]):[1,10]");
+ auto expect = make_val("tensor(x[2]):[5,23]");
+ EXPECT_EQ(ref_eval("map(a,f(x)(x*2+3))", {a}), expect);
+}
+
+TEST(ReferenceEvaluationTest, join_expression_works) {
+ auto a = make_val("tensor(x[2]):[1,2]");
+ auto b = make_val("tensor(y[2]):[3,4]");
+ auto expect = make_val("tensor(x[2],y[2]):[[4,5],[5,6]]");
+ EXPECT_EQ(ref_eval("join(a,b,f(x,y)(x+y))", {a, b}), expect);
+}
+
+TEST(ReferenceEvaluationTest, merge_expression_works) {
+ auto a = make_val("tensor(x{}):{a:1,b:2,c:3}");
+ auto b = make_val("tensor(x{}):{c:3,d:4}");
+ auto expect = make_val("tensor(x{}):{a:1,b:2,c:6,d:4}");
+ EXPECT_EQ(ref_eval("merge(a,b,f(x,y)(x+y))", {a, b}), expect);
+}
+
+TEST(ReferenceEvaluationTest, concat_expression_works) {
+ auto a = make_val("tensor(x[2]):[1,2]");
+ auto b = make_val("tensor(x[2]):[3,4]");
+ auto expect = make_val("tensor(x[4]):[1,2,3,4]");
+ EXPECT_EQ(ref_eval("concat(a,b,x)", {a, b}), expect);
+}
+
+TEST(ReferenceEvaluationTest, rename_expression_works) {
+ auto a = make_val("tensor(x[2]):[1,2]");
+ auto expect = make_val("tensor(y[2]):[1,2]");
+ EXPECT_EQ(ref_eval("rename(a,x,y)", {a}), expect);
+}
+
+TEST(ReferenceEvaluationTest, create_expression_works) {
+ auto a = make_val("5");
+ auto expect = make_val("tensor(x[3]):[5,10,15]");
+ EXPECT_EQ(ref_eval("tensor(x[3]):[a,2*a,3*a]", {a}), expect);
+}
+
+TEST(ReferenceEvaluationTest, tensor_create_will_pad_with_zero) {
+ auto a = make_val("5");
+ auto expect = make_val("tensor(x[3]):[0,5,0]");
+ EXPECT_EQ(ref_eval("tensor(x[3]):{{x:1}:a}", {a}), expect);
+}
+
+TEST(ReferenceEvaluationTest, lambda_expression_works) {
+ auto a = make_val("5");
+ auto expect = make_val("tensor(x[3]):[5,10,15]");
+ EXPECT_EQ(ref_eval("tensor(x[3])((x+1)*a)", {a}), expect);
+}
+
+TEST(ReferenceEvaluationTest, peek_expression_works) {
+ auto a = make_val("tensor(x{},y[2]):{a:[3,7]}");
+ auto b = make_val("1");
+ auto expect = make_val("7");
+ EXPECT_EQ(ref_eval("a{x:a,y:(b)}", {a, b}), expect);
+}
+
+TEST(ReferenceEvaluationTest, verbatim_peek_of_dense_dimension_works) {
+ auto a = make_val("tensor(x[4]):[1,2,3,4]");
+ auto expect = make_val("3");
+ EXPECT_EQ(ref_eval("a{x:2}", {a}), expect);
+}
+
+TEST(ReferenceEvaluationTest, out_of_bounds_peek_works) {
+ auto a = make_val("tensor(x[4]):[1,2,3,4]");
+ auto b = make_val("4");
+ auto expect = make_val("0");
+ EXPECT_EQ(ref_eval("a{x:(b)}", {a, b}), expect);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(ReferenceEvaluationTest, compound_expression_works) {
+ auto a = make_val("10");
+ auto b = make_val("20");
+ auto expect = make_val("20");
+ EXPECT_EQ(ref_eval("reduce(concat(a,b,x)+5,avg,x)", {a, b}), expect);
+}
+
+//-----------------------------------------------------------------------------
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
index 984408f8d65..de7098e4357 100644
--- a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
+++ b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
@@ -251,7 +251,7 @@ TEST(ReferencePeekTest, verbatim_labels) {
spec.emplace("e", "nomatch");
// peek all mapped dimensions, non-matching verbatim labels
output = ReferenceOperations::peek(input, spec, {});
- expect = TensorSpec("double");
+ expect = TensorSpec("double").add({}, 0.0);
EXPECT_EQ(output, expect);
input = dense_2d_some_cells(false);
@@ -489,5 +489,41 @@ TEST(ReferenceRenameTest, swap_and_rename_dimensions) {
//-----------------------------------------------------------------------------
+TEST(ReferenceLambdaTest, make_double) {
+ auto fun = [&](const std::vector<size_t> &indexes) {
+ EXPECT_EQ(indexes.size(), 0);
+ return double(5);
+ };
+ auto expect = TensorSpec("double").add({}, 5.0);
+ EXPECT_EQ(ReferenceOperations::lambda("double", fun), expect);
+}
+
+TEST(ReferenceLambdaTest, make_vector) {
+ auto fun = [&](const std::vector<size_t> &indexes) {
+ EXPECT_EQ(indexes.size(), 1);
+ return double(indexes[0] + 1.0);
+ };
+ auto expect = TensorSpec("tensor(x[3])")
+ .add({{"x", 0}}, 1.0)
+ .add({{"x", 1}}, 2.0)
+ .add({{"x", 2}}, 3.0);
+ EXPECT_EQ(ReferenceOperations::lambda("tensor(x[3])", fun), expect);
+}
+
+TEST(ReferenceLambdaTest, make_matrix) {
+ auto fun = [&](const std::vector<size_t> &indexes) {
+ EXPECT_EQ(indexes.size(), 2);
+ return double(indexes[0] * 10 + indexes[1] + 1.0);
+ };
+ auto expect = TensorSpec("tensor(x[2],y[2])")
+ .add({{"x", 0}, {"y", 0}}, 1.0)
+ .add({{"x", 0}, {"y", 1}}, 2.0)
+ .add({{"x", 1}, {"y", 0}}, 11.0)
+ .add({{"x", 1}, {"y", 1}}, 12.0);
+ EXPECT_EQ(ReferenceOperations::lambda("tensor(x[2],y[2])", fun), expect);
+}
+
+//-----------------------------------------------------------------------------
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/vespa/eval/eval/tensor_nodes.h b/eval/src/vespa/eval/eval/tensor_nodes.h
index 07be7e77b71..618e03f229e 100644
--- a/eval/src/vespa/eval/eval/tensor_nodes.h
+++ b/eval/src/vespa/eval/eval/tensor_nodes.h
@@ -22,6 +22,7 @@ private:
public:
TensorMap(Node_UP child, std::shared_ptr<Function const> lambda)
: _child(std::move(child)), _lambda(std::move(lambda)) {}
+ const Node &child() const { return *_child; }
const Function &lambda() const { return *_lambda; }
vespalib::string dump(DumpContext &ctx) const override {
vespalib::string str;
@@ -52,6 +53,8 @@ private:
public:
TensorJoin(Node_UP lhs, Node_UP rhs, std::shared_ptr<Function const> lambda)
: _lhs(std::move(lhs)), _rhs(std::move(rhs)), _lambda(std::move(lambda)) {}
+ const Node &lhs() const { return *_lhs; }
+ const Node &rhs() const { return *_rhs; }
const Function &lambda() const { return *_lambda; }
vespalib::string dump(DumpContext &ctx) const override {
vespalib::string str;
@@ -84,6 +87,8 @@ private:
public:
TensorMerge(Node_UP lhs, Node_UP rhs, std::shared_ptr<Function const> lambda)
: _lhs(std::move(lhs)), _rhs(std::move(rhs)), _lambda(std::move(lambda)) {}
+ const Node &lhs() const { return *_lhs; }
+ const Node &rhs() const { return *_rhs; }
const Function &lambda() const { return *_lambda; }
vespalib::string dump(DumpContext &ctx) const override {
vespalib::string str;
@@ -116,8 +121,9 @@ private:
public:
TensorReduce(Node_UP child, Aggr aggr_in, std::vector<vespalib::string> dimensions_in)
: _child(std::move(child)), _aggr(aggr_in), _dimensions(std::move(dimensions_in)) {}
- const std::vector<vespalib::string> &dimensions() const { return _dimensions; }
+ const Node &child() const { return *_child; }
Aggr aggr() const { return _aggr; }
+ const std::vector<vespalib::string> &dimensions() const { return _dimensions; }
vespalib::string dump(DumpContext &ctx) const override {
vespalib::string str;
str += "reduce(";
@@ -150,6 +156,7 @@ private:
public:
TensorRename(Node_UP child, std::vector<vespalib::string> from_in, std::vector<vespalib::string> to_in)
: _child(std::move(child)), _from(std::move(from_in)), _to(std::move(to_in)) {}
+ const Node &child() const { return *_child; }
const std::vector<vespalib::string> &from() const { return _from; }
const std::vector<vespalib::string> &to() const { return _to; }
vespalib::string dump(DumpContext &ctx) const override {
@@ -196,6 +203,8 @@ private:
public:
TensorConcat(Node_UP lhs, Node_UP rhs, const vespalib::string &dimension_in)
: _lhs(std::move(lhs)), _rhs(std::move(rhs)), _dimension(dimension_in) {}
+ const Node &lhs() const { return *_lhs; }
+ const Node &rhs() const { return *_rhs; }
const vespalib::string &dimension() const { return _dimension; }
vespalib::string dump(DumpContext &ctx) const override {
vespalib::string str;
diff --git a/eval/src/vespa/eval/eval/tensor_spec.cpp b/eval/src/vespa/eval/eval/tensor_spec.cpp
index 22faee004b4..30e788e3a43 100644
--- a/eval/src/vespa/eval/eval/tensor_spec.cpp
+++ b/eval/src/vespa/eval/eval/tensor_spec.cpp
@@ -115,6 +115,16 @@ TensorSpec & TensorSpec::operator = (const TensorSpec &) = default;
TensorSpec::~TensorSpec() { }
+double
+TensorSpec::as_double() const
+{
+ double result = 0.0;
+ for (const auto &[key, value]: _cells) {
+ result += value.value;
+ }
+ return result;
+}
+
TensorSpec &
TensorSpec::add(Address address, double value) {
auto [iter, inserted] = _cells.emplace(std::move(address), value);
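
Side note, not part of the commit: as implemented above, the new TensorSpec::as_double() sums the values of all cells, so a scalar spec yields its single cell and a multi-cell spec yields the sum. A small hypothetical check:

#include <vespa/eval/eval/tensor_spec.h>
#include <cassert>

using vespalib::eval::TensorSpec;

int main() {
    auto scalar = TensorSpec("double").add({}, 42.0);
    assert(scalar.as_double() == 42.0); // single cell
    auto vec = TensorSpec("tensor(x[2])").add({{"x", 0}}, 1.0).add({{"x", 1}}, 2.0);
    assert(vec.as_double() == 3.0);     // sum of all cells
    return 0;
}
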
diff --git a/eval/src/vespa/eval/eval/tensor_spec.h b/eval/src/vespa/eval/eval/tensor_spec.h
index 41c65a1c4fb..490150ec786 100644
--- a/eval/src/vespa/eval/eval/tensor_spec.h
+++ b/eval/src/vespa/eval/eval/tensor_spec.h
@@ -68,6 +68,7 @@ public:
TensorSpec(const TensorSpec &);
TensorSpec & operator = (const TensorSpec &);
~TensorSpec();
+ double as_double() const;
TensorSpec &add(Address address, double value);
const vespalib::string &type() const { return _type; }
const Cells &cells() const { return _cells; }
diff --git a/eval/src/vespa/eval/eval/test/CMakeLists.txt b/eval/src/vespa/eval/eval/test/CMakeLists.txt
index f3b0750d503..735c793f74b 100644
--- a/eval/src/vespa/eval/eval/test/CMakeLists.txt
+++ b/eval/src/vespa/eval/eval/test/CMakeLists.txt
@@ -3,6 +3,7 @@ vespa_add_library(eval_eval_test OBJECT
SOURCES
eval_fixture.cpp
eval_spec.cpp
+ reference_evaluation.cpp
reference_operations.cpp
tensor_conformance.cpp
test_io.cpp
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.cpp b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
index b7655a6ee2f..b7b2571ba93 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
@@ -2,6 +2,7 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include "eval_fixture.h"
+#include "reference_evaluation.h"
#include <vespa/eval/eval/make_tensor_function.h>
#include <vespa/eval/eval/optimize_tensor_function.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -229,4 +230,17 @@ EvalFixture::num_params() const
return _param_values.size();
}
+TensorSpec
+EvalFixture::ref(const vespalib::string &expr, const ParamRepo &param_repo)
+{
+ auto fun = Function::parse(expr);
+ std::vector<TensorSpec> params;
+ for (size_t i = 0; i < fun->num_params(); ++i) {
+ auto pos = param_repo.map.find(fun->param_name(i));
+ ASSERT_TRUE(pos != param_repo.map.end());
+ params.push_back(pos->second.value);
+ }
+ return ReferenceEvaluation::eval(*fun, params);
+}
+
} // namespace vespalib::eval::test
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.h b/eval/src/vespa/eval/eval/test/eval_fixture.h
index 9b57d7b4aae..c4ef2082a84 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.h
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.h
@@ -86,9 +86,7 @@ public:
const TensorSpec &result() const { return _result; }
const TensorSpec get_param(size_t idx) const;
size_t num_params() const;
- static TensorSpec ref(const vespalib::string &expr, const ParamRepo &param_repo) {
- return EvalFixture(SimpleTensorEngine::ref(), expr, param_repo, false, false).result();
- }
+ static TensorSpec ref(const vespalib::string &expr, const ParamRepo &param_repo);
static TensorSpec prod(const vespalib::string &expr, const ParamRepo &param_repo) {
return EvalFixture(FastValueBuilderFactory::get(), expr, param_repo, true, false).result();
}
diff --git a/eval/src/vespa/eval/eval/test/eval_spec.h b/eval/src/vespa/eval/eval/test/eval_spec.h
index d942129c934..7d4a8cb221f 100644
--- a/eval/src/vespa/eval/eval/test/eval_spec.h
+++ b/eval/src/vespa/eval/eval/test/eval_spec.h
@@ -7,9 +7,7 @@
#include <cassert>
#include <vector>
-namespace vespalib {
-namespace eval {
-namespace test {
+namespace vespalib::eval::test {
/**
* A collection of expressions with parameter bindings and their
@@ -123,7 +121,4 @@ public:
}
};
-} // namespace vespalib::eval::test
-} // namespace vespalib::eval
-} // namespace vespalib
-
+} // namespace
diff --git a/eval/src/vespa/eval/eval/test/reference_evaluation.cpp b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
new file mode 100644
index 00000000000..c46f3d08405
--- /dev/null
+++ b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
@@ -0,0 +1,358 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "reference_evaluation.h"
+#include "reference_operations.h"
+
+#include <vespa/eval/eval/string_stuff.h>
+#include <vespa/eval/eval/node_visitor.h>
+#include <vespa/eval/eval/node_traverser.h>
+#include <vespa/eval/eval/operation.h>
+
+#include <vespa/vespalib/util/exceptions.h>
+
+#include <functional>
+#include <variant>
+
+namespace vespalib::eval::test {
+
+namespace {
+
+using namespace nodes;
+
+//-----------------------------------------------------------------------------
+
+TensorSpec eval_node(const Node &node, const std::vector<TensorSpec> &params);
+
+struct EvalNode : public NodeVisitor {
+ const std::vector<TensorSpec> &params;
+ TensorSpec result;
+ EvalNode(const std::vector<TensorSpec> &params_in)
+ : params(params_in), result("error") {}
+ TensorSpec get_result() { return std::move(result); }
+
+ //-------------------------------------------------------------------------
+
+ using op1_t = std::function<double(double)>;
+ using op2_t = std::function<double(double,double)>;
+
+ static TensorSpec num(double value) {
+ return TensorSpec("double").add({}, value);
+ }
+
+ //-------------------------------------------------------------------------
+
+ void eval_const(TensorSpec spec) {
+ result = spec;
+ }
+
+ void eval_param(size_t idx) {
+ assert(idx < params.size());
+ result = params[idx];
+ }
+
+ void eval_if(const If &node) {
+ if (eval_node(node.cond(), params).as_double() != 0.0) {
+ result = eval_node(node.true_expr(), params);
+ } else {
+ result = eval_node(node.false_expr(), params);
+ }
+ }
+
+ void eval_map(const Node &a, op1_t op1) {
+ result = ReferenceOperations::map(eval_node(a, params), op1);
+ }
+
+ void eval_join(const Node &a, const Node &b, op2_t op2) {
+ result = ReferenceOperations::join(eval_node(a, params), eval_node(b, params), op2);
+ }
+
+ void eval_merge(const Node &a, const Node &b, op2_t op2) {
+ result = ReferenceOperations::merge(eval_node(a, params), eval_node(b, params), op2);
+ }
+
+ void eval_reduce(const Node &a, Aggr aggr, const std::vector<vespalib::string> &dimensions) {
+ result = ReferenceOperations::reduce(eval_node(a, params), aggr, dimensions);
+ }
+
+ void eval_rename(const Node &a, const std::vector<vespalib::string> &from, const std::vector<vespalib::string> &to) {
+ result = ReferenceOperations::rename(eval_node(a, params), from, to);
+ }
+
+ void eval_concat(const Node &a, const Node &b, const vespalib::string &dimension) {
+ result = ReferenceOperations::concat(eval_node(a, params), eval_node(b, params), dimension);
+ }
+
+ void eval_create(const TensorCreate &node) {
+ std::map<TensorSpec::Address, size_t> spec;
+ std::vector<TensorSpec> children;
+ for (size_t i = 0; i < node.num_children(); ++i) {
+ spec.emplace(node.get_child_address(i), i);
+ children.push_back(eval_node(node.get_child(i), params));
+ }
+ result = ReferenceOperations::create(node.type().to_spec(), spec, children).normalize();
+ }
+
+ void eval_lambda(const TensorLambda &node) {
+ auto fun = [&](const std::vector<size_t> &indexes) {
+ std::vector<TensorSpec> lambda_params;
+ for (size_t idx: indexes) {
+ lambda_params.push_back(num(idx));
+ }
+ for (size_t param: node.bindings()) {
+ assert(param < params.size());
+ lambda_params.push_back(params[param]);
+ }
+ return ReferenceEvaluation::eval(node.lambda(), lambda_params).as_double();
+ };
+ result = ReferenceOperations::lambda(node.type().to_spec(), fun);
+ }
+
+ void eval_peek(const TensorPeek &node) {
+ // TODO: fix Peek API so that the 'child index' sent in the
+ // spec is actually 'child index' (as defined by the function
+ // AST and Peek TensorFunction subclass) and not 'child index'
+ // - 1. This also means that the param (the object being
+ // peeked) should be sent as the first child and not as a
+ // separate parameter.
+ TensorSpec param = eval_node(node.param(), params);
+ ValueType param_type = ValueType::from_spec(param.type());
+ auto is_indexed = [&](const vespalib::string &dim_name) {
+ size_t dim_idx = param_type.dimension_index(dim_name);
+ return ((dim_idx != ValueType::Dimension::npos) &&
+ (param_type.dimensions()[dim_idx].is_indexed()));
+ };
+ std::vector<TensorSpec> children;
+ std::map<vespalib::string, std::variant<TensorSpec::Label, size_t>> spec;
+ for (const auto &[name, label]: node.dim_list()) {
+ if (label.is_expr()) {
+ spec.emplace(name, size_t(children.size()));
+ children.push_back(eval_node(*label.expr, params));
+ } else {
+ if (is_indexed(name)) {
+ spec.emplace(name, TensorSpec::Label(as_number(label.label)));
+ } else {
+ spec.emplace(name, TensorSpec::Label(label.label));
+ }
+ }
+ }
+ result = ReferenceOperations::peek(param, spec, children);
+ }
+
+ //-------------------------------------------------------------------------
+
+ void visit(const Number &node) override {
+ eval_const(num(node.value()));
+ }
+ void visit(const Symbol &node) override {
+ eval_param(node.id());
+ }
+ void visit(const String &node) override {
+ eval_const(num(node.hash()));
+ }
+ void visit(const In &node) override {
+ auto my_op1 = [&](double a) {
+ for (size_t i = 0; i < node.num_entries(); ++i) {
+ if (a == eval_node(node.get_entry(i), params).as_double()) {
+ return 1.0;
+ }
+ }
+ return 0.0;
+ };
+ eval_map(node.child(), my_op1);
+ }
+ void visit(const Neg &node) override {
+ eval_map(node.child(), operation::Neg::f);
+ }
+ void visit(const Not &node) override {
+ eval_map(node.child(), operation::Not::f);
+ }
+ void visit(const If &node) override {
+ eval_if(node);
+ }
+ void visit(const Error &) override {
+ abort();
+ }
+ void visit(const TensorMap &node) override {
+ auto my_op1 = [&](double a) {
+ return ReferenceEvaluation::eval(node.lambda(), {num(a)}).as_double();
+ };
+ eval_map(node.child(), my_op1);
+ }
+ void visit(const TensorJoin &node) override {
+ auto my_op2 = [&](double a, double b) {
+ return ReferenceEvaluation::eval(node.lambda(), {num(a), num(b)}).as_double();
+ };
+ eval_join(node.lhs(), node.rhs(), my_op2);
+ }
+ void visit(const TensorMerge &node) override {
+ auto my_op2 = [&](double a, double b) {
+ return ReferenceEvaluation::eval(node.lambda(), {num(a), num(b)}).as_double();
+ };
+ eval_merge(node.lhs(), node.rhs(), my_op2);
+ }
+ void visit(const TensorReduce &node) override {
+ eval_reduce(node.child(), node.aggr(), node.dimensions());
+ }
+ void visit(const TensorRename &node) override {
+ eval_rename(node.child(), node.from(), node.to());
+ }
+ void visit(const TensorConcat &node) override {
+ eval_concat(node.lhs(), node.rhs(), node.dimension());
+ }
+ void visit(const TensorCreate &node) override {
+ eval_create(node);
+ }
+ void visit(const TensorLambda &node) override {
+ eval_lambda(node);
+ }
+ void visit(const TensorPeek &node) override {
+ eval_peek(node);
+ }
+ void visit(const Add &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Add::f);
+ }
+ void visit(const Sub &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Sub::f);
+ }
+ void visit(const Mul &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Mul::f);
+ }
+ void visit(const Div &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Div::f);
+ }
+ void visit(const Mod &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Mod::f);
+ }
+ void visit(const Pow &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Pow::f);
+ }
+ void visit(const Equal &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Equal::f);
+ }
+ void visit(const NotEqual &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::NotEqual::f);
+ }
+ void visit(const Approx &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Approx::f);
+ }
+ void visit(const Less &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Less::f);
+ }
+ void visit(const LessEqual &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::LessEqual::f);
+ }
+ void visit(const Greater &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Greater::f);
+ }
+ void visit(const GreaterEqual &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::GreaterEqual::f);
+ }
+ void visit(const And &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::And::f);
+ }
+ void visit(const Or &node) override {
+ eval_join(node.lhs(), node.rhs(), operation::Or::f);
+ }
+ void visit(const Cos &node) override {
+ eval_map(node.get_child(0), operation::Cos::f);
+ }
+ void visit(const Sin &node) override {
+ eval_map(node.get_child(0), operation::Sin::f);
+ }
+ void visit(const Tan &node) override {
+ eval_map(node.get_child(0), operation::Tan::f);
+ }
+ void visit(const Cosh &node) override {
+ eval_map(node.get_child(0), operation::Cosh::f);
+ }
+ void visit(const Sinh &node) override {
+ eval_map(node.get_child(0), operation::Sinh::f);
+ }
+ void visit(const Tanh &node) override {
+ eval_map(node.get_child(0), operation::Tanh::f);
+ }
+ void visit(const Acos &node) override {
+ eval_map(node.get_child(0), operation::Acos::f);
+ }
+ void visit(const Asin &node) override {
+ eval_map(node.get_child(0), operation::Asin::f);
+ }
+ void visit(const Atan &node) override {
+ eval_map(node.get_child(0), operation::Atan::f);
+ }
+ void visit(const Exp &node) override {
+ eval_map(node.get_child(0), operation::Exp::f);
+ }
+ void visit(const Log10 &node) override {
+ eval_map(node.get_child(0), operation::Log10::f);
+ }
+ void visit(const Log &node) override {
+ eval_map(node.get_child(0), operation::Log::f);
+ }
+ void visit(const Sqrt &node) override {
+ eval_map(node.get_child(0), operation::Sqrt::f);
+ }
+ void visit(const Ceil &node) override {
+ eval_map(node.get_child(0), operation::Ceil::f);
+ }
+ void visit(const Fabs &node) override {
+ eval_map(node.get_child(0), operation::Fabs::f);
+ }
+ void visit(const Floor &node) override {
+ eval_map(node.get_child(0), operation::Floor::f);
+ }
+ void visit(const Atan2 &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Atan2::f);
+ }
+ void visit(const Ldexp &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Ldexp::f);
+ }
+ void visit(const Pow2 &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Pow::f);
+ }
+ void visit(const Fmod &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Mod::f);
+ }
+ void visit(const Min &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Min::f);
+ }
+ void visit(const Max &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Max::f);
+ }
+ void visit(const IsNan &node) override {
+ eval_map(node.get_child(0), operation::IsNan::f);
+ }
+ void visit(const Relu &node) override {
+ eval_map(node.get_child(0), operation::Relu::f);
+ }
+ void visit(const Sigmoid &node) override {
+ eval_map(node.get_child(0), operation::Sigmoid::f);
+ }
+ void visit(const Elu &node) override {
+ eval_map(node.get_child(0), operation::Elu::f);
+ }
+ void visit(const Erf &node) override {
+ eval_map(node.get_child(0), operation::Erf::f);
+ }
+};
+
+TensorSpec eval_node(const Node &node, const std::vector<TensorSpec> &params) {
+ EvalNode my_eval(params);
+ node.accept(my_eval);
+ return my_eval.get_result();
+}
+
+} // <unnamed>
+
+TensorSpec
+ReferenceEvaluation::eval(const Function &function, const std::vector<TensorSpec> &params) {
+ if (function.has_error()) {
+ throw IllegalArgumentException("function.has_error()");
+ }
+ if (function.num_params() != params.size()) {
+ throw IllegalArgumentException("function.num_params() != params.size()");
+ }
+ return eval_node(function.root(), params);
+}
+
+} // namespace
diff --git a/eval/src/vespa/eval/eval/test/reference_evaluation.h b/eval/src/vespa/eval/eval/test/reference_evaluation.h
new file mode 100644
index 00000000000..31089c29a93
--- /dev/null
+++ b/eval/src/vespa/eval/eval/test/reference_evaluation.h
@@ -0,0 +1,16 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vector>
+
+namespace vespalib::eval { class Function; }
+
+namespace vespalib::eval::test {
+
+struct ReferenceEvaluation {
+ static TensorSpec eval(const Function &function, const std::vector<TensorSpec> &params);
+};
+
+} // namespace
diff --git a/eval/src/vespa/eval/eval/test/reference_operations.cpp b/eval/src/vespa/eval/eval/test/reference_operations.cpp
index a43f7fd0273..5f1e9f1ad1a 100644
--- a/eval/src/vespa/eval/eval/test/reference_operations.cpp
+++ b/eval/src/vespa/eval/eval/test/reference_operations.cpp
@@ -232,6 +232,9 @@ TensorSpec ReferenceOperations::peek(const TensorSpec &param, const PeekSpec &pe
result.add(my_addr, cell.second);
}
}
+ if (result.cells().empty() && result_type.is_scalar()) {
+ result.add({}, 0.0);
+ }
return result;
}
@@ -283,5 +286,32 @@ TensorSpec ReferenceOperations::rename(const TensorSpec &a, const std::vector<ve
return result;
}
+TensorSpec ReferenceOperations::lambda(const vespalib::string &type_in, lambda_fun_t fun) {
+ ValueType type = ValueType::from_spec(type_in);
+ TensorSpec result(type.to_spec());
+ if (type.is_error()) {
+ return result;
+ }
+ std::vector<size_t> sizes;
+ for (const auto &dim: type.dimensions()) {
+ assert(dim.is_indexed());
+ sizes.push_back(dim.size);
+ }
+ TensorSpec::Address addr;
+ std::vector<size_t> indexes(type.dimensions().size());
+ std::function<void(size_t)> loop = [&](size_t idx) {
+ if (idx == sizes.size()) {
+ result.add(addr, fun(indexes));
+ } else {
+ for (size_t i = 0; i < sizes[idx]; ++i) {
+ addr.insert_or_assign(type.dimensions()[idx].name, TensorSpec::Label(i));
+ indexes[idx] = i;
+ loop(idx + 1);
+ }
+ }
+ };
+ loop(0);
+ return result;
+}
} // namespace
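
Illustration only, not part of the commit: the callback handed to the new ReferenceOperations::lambda above receives one index per dimension of the requested type, in the order the dimensions appear in the type spec, and its return value becomes the cell value at that address. A hypothetical standalone check:

#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/test/reference_operations.h>
#include <cassert>
#include <vector>

using vespalib::eval::ReferenceOperations;
using vespalib::eval::TensorSpec;

int main() {
    auto fun = [](const std::vector<size_t> &idx) {
        return double(idx[0] * 10 + idx[1]); // encode the cell address in its value
    };
    auto spec = ReferenceOperations::lambda("tensor(a[2],b[2])", fun);
    auto expect = TensorSpec("tensor(a[2],b[2])")
        .add({{"a", 0}, {"b", 0}}, 0.0)
        .add({{"a", 0}, {"b", 1}}, 1.0)
        .add({{"a", 1}, {"b", 0}}, 10.0)
        .add({{"a", 1}, {"b", 1}}, 11.0);
    assert(spec == expect);
    return 0;
}
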
diff --git a/eval/src/vespa/eval/eval/test/reference_operations.h b/eval/src/vespa/eval/eval/test/reference_operations.h
index dd1de1f7cde..4663d935383 100644
--- a/eval/src/vespa/eval/eval/test/reference_operations.h
+++ b/eval/src/vespa/eval/eval/test/reference_operations.h
@@ -11,12 +11,14 @@
#include <vector>
#include <map>
#include <variant>
+#include <functional>
namespace vespalib::eval {
struct ReferenceOperations {
- using map_fun_t = vespalib::eval::operation::op1_t;
- using join_fun_t = vespalib::eval::operation::op2_t;
+ using map_fun_t = std::function<double(double)>;
+ using join_fun_t = std::function<double(double,double)>;
+ using lambda_fun_t = std::function<double(const std::vector<size_t> &dimension_indexes)>;
// mapping from cell address to index of child that computes the cell value
using CreateSpec = tensor_function::Create::Spec;
@@ -32,6 +34,7 @@ struct ReferenceOperations {
static TensorSpec peek(const TensorSpec &param, const PeekSpec &spec, const std::vector<TensorSpec> &children);
static TensorSpec reduce(const TensorSpec &a, Aggr aggr, const std::vector<vespalib::string> &dims);
static TensorSpec rename(const TensorSpec &a, const std::vector<vespalib::string> &from, const std::vector<vespalib::string> &to);
+ static TensorSpec lambda(const vespalib::string &type, lambda_fun_t fun);
};
} // namespace