summaryrefslogtreecommitdiffstats
path: root/eval/src/tests/instruction
diff options
context:
space:
mode:
authorArne Juul <arnej@verizonmedia.com>2020-11-12 07:25:57 +0000
committerArne Juul <arnej@verizonmedia.com>2020-11-12 07:56:57 +0000
commit69e8bac5e12a465bd2e49900b369e053bee54549 (patch)
tree8141a04a27f183ada459b43bf4a6bc797e5efabb /eval/src/tests/instruction
parent5ed4818cb8dd8fd9eebd61f49a982f2615ba4ad2 (diff)
move "keep as-is" optimizers
* from eval/tensor/dense to eval/instruction * minimal changes to track move in this commit
Diffstat (limited to 'eval/src/tests/instruction')
-rw-r--r--eval/src/tests/instruction/dense_dot_product_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_dot_product_function/dense_dot_product_function_test.cpp205
-rw-r--r--eval/src/tests/instruction/dense_matmul_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_matmul_function/dense_matmul_function_test.cpp131
-rw-r--r--eval/src/tests/instruction/dense_multi_matmul_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp161
-rw-r--r--eval/src/tests/instruction/dense_simple_expand_function/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/dense_simple_expand_function/dense_simple_expand_function_test.cpp130
-rw-r--r--eval/src/tests/instruction/dense_tensor_peek_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_tensor_peek_function/dense_tensor_peek_function_test.cpp85
-rw-r--r--eval/src/tests/instruction/index_lookup_table/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/index_lookup_table/index_lookup_table_test.cpp118
12 files changed, 880 insertions, 0 deletions
diff --git a/eval/src/tests/instruction/dense_dot_product_function/CMakeLists.txt b/eval/src/tests/instruction/dense_dot_product_function/CMakeLists.txt
new file mode 100644
index 00000000000..396c404b6cf
--- /dev/null
+++ b/eval/src/tests/instruction/dense_dot_product_function/CMakeLists.txt
@@ -0,0 +1,8 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(eval_dense_dot_product_function_test_app TEST
    SOURCES
    dense_dot_product_function_test.cpp
    DEPENDS
    vespaeval
)
vespa_add_test(NAME eval_dense_dot_product_function_test_app COMMAND eval_dense_dot_product_function_test_app)
diff --git a/eval/src/tests/instruction/dense_dot_product_function/dense_dot_product_function_test.cpp b/eval/src/tests/instruction/dense_dot_product_function/dense_dot_product_function_test.cpp
new file mode 100644
index 00000000000..7f938a8e70b
--- /dev/null
+++ b/eval/src/tests/instruction/dense_dot_product_function/dense_dot_product_function_test.cpp
@@ -0,0 +1,205 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/eval/instruction/dense_dot_product_function.h>
+#include <vespa/eval/tensor/dense/dense_tensor.h>
+#include <vespa/eval/tensor/dense/dense_tensor_view.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("dense_dot_product_function_test");
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+
+const TensorEngine &prod_engine = DefaultTensorEngine::ref();
+
+struct MyVecSeq : Sequence {
+ double bias;
+ double operator[](size_t i) const override { return (i + bias); }
+ MyVecSeq(double cellBias) : bias(cellBias) {}
+};
+
+TensorSpec makeTensor(size_t numCells, double cellBias) {
+ return spec({x(numCells)}, MyVecSeq(cellBias));
+}
+
// cell biases used for the generated left/right hand side vectors
const double leftBias = 3.0;
const double rightBias = 5.0;

// Reference dot product of two generated vectors of length 'numCells':
// sum over i of (i + leftBias) * (i + rightBias), accumulated in the
// same (ascending) order as the production code iterates.
double calcDotProduct(size_t numCells) {
    double expected = 0.0;
    for (size_t idx = 0; idx < numCells; ++idx) {
        expected += (idx + leftBias) * (idx + rightBias);
    }
    return expected;
}
+
+void check_gen_with_result(size_t l, size_t r, double wanted) {
+ EvalFixture::ParamRepo param_repo;
+ param_repo.add("a", makeTensor(l, leftBias));
+ param_repo.add("b", makeTensor(r, rightBias));
+ vespalib::string expr = "reduce(a*b,sum,x)";
+ EvalFixture evaluator(prod_engine, expr, param_repo, true);
+ EXPECT_EQUAL(spec(wanted), evaluator.result());
+ EXPECT_EQUAL(evaluator.result(), EvalFixture::ref(expr, param_repo));
+ auto info = evaluator.find_all<DenseDotProductFunction>();
+ EXPECT_EQUAL(info.size(), 1u);
+};
+
+// this should not be possible to set up:
+// TEST("require that empty dot product is correct")
+
+TEST("require that basic dot product with equal sizes is correct") {
+ check_gen_with_result(2, 2, (3.0 * 5.0) + (4.0 * 6.0));
+}
+
+//-----------------------------------------------------------------------------
+
+void assertDotProduct(size_t numCells) {
+ check_gen_with_result(numCells, numCells, calcDotProduct(numCells));
+}
+
+void assertDotProduct(size_t lhsNumCells, size_t rhsNumCells) {
+ size_t numCells = std::min(lhsNumCells, rhsNumCells);
+ check_gen_with_result(lhsNumCells, rhsNumCells, calcDotProduct(numCells));
+}
+
+TEST("require that dot product with equal sizes is correct") {
+ TEST_DO(assertDotProduct(8));
+ TEST_DO(assertDotProduct(16));
+ TEST_DO(assertDotProduct(32));
+ TEST_DO(assertDotProduct(64));
+ TEST_DO(assertDotProduct(128));
+ TEST_DO(assertDotProduct(256));
+ TEST_DO(assertDotProduct(512));
+ TEST_DO(assertDotProduct(1024));
+
+ TEST_DO(assertDotProduct(8 + 3));
+ TEST_DO(assertDotProduct(16 + 3));
+ TEST_DO(assertDotProduct(32 + 3));
+ TEST_DO(assertDotProduct(64 + 3));
+ TEST_DO(assertDotProduct(128 + 3));
+ TEST_DO(assertDotProduct(256 + 3));
+ TEST_DO(assertDotProduct(512 + 3));
+ TEST_DO(assertDotProduct(1024 + 3));
+}
+
+//-----------------------------------------------------------------------------
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("v01_x1", spec({x(1)}, MyVecSeq(2.0)))
+ .add("v02_x3", spec({x(3)}, MyVecSeq(4.0)))
+ .add("v03_x3", spec({x(3)}, MyVecSeq(5.0)))
+ .add("v04_y3", spec({y(3)}, MyVecSeq(10)))
+ .add("v05_x5", spec({x(5)}, MyVecSeq(6.0)))
+ .add("v06_x5", spec({x(5)}, MyVecSeq(7.0)))
+ .add("v07_x5f", spec(float_cells({x(5)}), MyVecSeq(7.0)))
+ .add("v08_x5f", spec(float_cells({x(5)}), MyVecSeq(6.0)))
+ .add("m01_x3y3", spec({x(3),y(3)}, MyVecSeq(1.0)))
+ .add("m02_x3y3", spec({x(3),y(3)}, MyVecSeq(2.0)));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void assertOptimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseDotProductFunction>();
+ ASSERT_EQUAL(info.size(), 1u);
+ EXPECT_TRUE(info[0]->result_is_mutable());
+}
+
+void assertNotOptimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseDotProductFunction>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST("require that dot product works with tensor function") {
+ TEST_DO(assertOptimized("reduce(v05_x5*v06_x5,sum)"));
+ TEST_DO(assertOptimized("reduce(v05_x5*v06_x5,sum,x)"));
+ TEST_DO(assertOptimized("reduce(join(v05_x5,v06_x5,f(x,y)(x*y)),sum)"));
+ TEST_DO(assertOptimized("reduce(join(v05_x5,v06_x5,f(x,y)(x*y)),sum,x)"));
+}
+
+TEST("require that dot product with compatible dimensions is optimized") {
+ TEST_DO(assertOptimized("reduce(v01_x1*v01_x1,sum)"));
+ TEST_DO(assertOptimized("reduce(v02_x3*v03_x3,sum)"));
+ TEST_DO(assertOptimized("reduce(v05_x5*v06_x5,sum)"));
+}
+
+TEST("require that dot product with incompatible dimensions is NOT optimized") {
+ TEST_DO(assertNotOptimized("reduce(v02_x3*v04_y3,sum)"));
+ TEST_DO(assertNotOptimized("reduce(v04_y3*v02_x3,sum)"));
+ TEST_DO(assertNotOptimized("reduce(v02_x3*m01_x3y3,sum)"));
+ TEST_DO(assertNotOptimized("reduce(m01_x3y3*v02_x3,sum)"));
+}
+
+TEST("require that expressions similar to dot product are not optimized") {
+ TEST_DO(assertNotOptimized("reduce(v02_x3*v03_x3,prod)"));
+ TEST_DO(assertNotOptimized("reduce(v02_x3+v03_x3,sum)"));
+ TEST_DO(assertNotOptimized("reduce(join(v02_x3,v03_x3,f(x,y)(x+y)),sum)"));
+ TEST_DO(assertNotOptimized("reduce(join(v02_x3,v03_x3,f(x,y)(x*x)),sum)"));
+ TEST_DO(assertNotOptimized("reduce(join(v02_x3,v03_x3,f(x,y)(y*y)),sum)"));
+ // TEST_DO(assertNotOptimized("reduce(join(v02_x3,v03_x3,f(x,y)(y*x)),sum)"));
+}
+
+TEST("require that multi-dimensional dot product can be optimized") {
+ TEST_DO(assertOptimized("reduce(m01_x3y3*m02_x3y3,sum)"));
+ TEST_DO(assertOptimized("reduce(m02_x3y3*m01_x3y3,sum)"));
+}
+
+TEST("require that result must be double to trigger optimization") {
+ TEST_DO(assertOptimized("reduce(m01_x3y3*m01_x3y3,sum,x,y)"));
+ TEST_DO(assertNotOptimized("reduce(m01_x3y3*m01_x3y3,sum,x)"));
+ TEST_DO(assertNotOptimized("reduce(m01_x3y3*m01_x3y3,sum,y)"));
+}
+
+void verify_compatible(const vespalib::string &a, const vespalib::string &b) {
+ auto a_type = ValueType::from_spec(a);
+ auto b_type = ValueType::from_spec(b);
+ EXPECT_TRUE(!a_type.is_error());
+ EXPECT_TRUE(!b_type.is_error());
+ EXPECT_TRUE(DenseDotProductFunction::compatible_types(ValueType::double_type(), a_type, b_type));
+ EXPECT_TRUE(DenseDotProductFunction::compatible_types(ValueType::double_type(), b_type, a_type));
+}
+
+void verify_not_compatible(const vespalib::string &a, const vespalib::string &b) {
+ auto a_type = ValueType::from_spec(a);
+ auto b_type = ValueType::from_spec(b);
+ EXPECT_TRUE(!a_type.is_error());
+ EXPECT_TRUE(!b_type.is_error());
+ EXPECT_TRUE(!DenseDotProductFunction::compatible_types(ValueType::double_type(), a_type, b_type));
+ EXPECT_TRUE(!DenseDotProductFunction::compatible_types(ValueType::double_type(), b_type, a_type));
+}
+
+TEST("require that type compatibility test is appropriate") {
+ TEST_DO(verify_compatible("tensor(x[5])", "tensor(x[5])"));
+ TEST_DO(verify_compatible("tensor(x[5])", "tensor<float>(x[5])"));
+ TEST_DO(verify_compatible("tensor<float>(x[5])", "tensor(x[5])"));
+ TEST_DO(verify_compatible("tensor<float>(x[5])", "tensor<float>(x[5])"));
+ TEST_DO(verify_not_compatible("tensor(x[5])", "tensor(x[6])"));
+ TEST_DO(verify_not_compatible("tensor(x[5])", "tensor(y[5])"));
+ TEST_DO(verify_compatible("tensor(x[3],y[7],z[9])", "tensor(x[3],y[7],z[9])"));
+ TEST_DO(verify_not_compatible("tensor(x[3],y[7],z[9])", "tensor(x[5],y[7],z[9])"));
+ TEST_DO(verify_not_compatible("tensor(x[9],y[7],z[5])", "tensor(x[5],y[7],z[9])"));
+}
+
+TEST("require that optimization also works for tensors with non-double cells") {
+ TEST_DO(assertOptimized("reduce(v05_x5*v07_x5f,sum)"));
+ TEST_DO(assertOptimized("reduce(v07_x5f*v05_x5,sum)"));
+ TEST_DO(assertOptimized("reduce(v07_x5f*v08_x5f,sum)"));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_matmul_function/CMakeLists.txt b/eval/src/tests/instruction/dense_matmul_function/CMakeLists.txt
new file mode 100644
index 00000000000..7234e8b9e69
--- /dev/null
+++ b/eval/src/tests/instruction/dense_matmul_function/CMakeLists.txt
@@ -0,0 +1,8 @@
# Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(eval_dense_matmul_function_test_app TEST
    SOURCES
    dense_matmul_function_test.cpp
    DEPENDS
    vespaeval
)
vespa_add_test(NAME eval_dense_matmul_function_test_app COMMAND eval_dense_matmul_function_test_app)
diff --git a/eval/src/tests/instruction/dense_matmul_function/dense_matmul_function_test.cpp b/eval/src/tests/instruction/dense_matmul_function/dense_matmul_function_test.cpp
new file mode 100644
index 00000000000..fb1270defde
--- /dev/null
+++ b/eval/src/tests/instruction/dense_matmul_function/dense_matmul_function_test.cpp
@@ -0,0 +1,131 @@
+// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/simple_tensor.h>
+#include <vespa/eval/eval/simple_tensor_engine.h>
+#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/eval/instruction/dense_matmul_function.h>
+#include <vespa/eval/tensor/dense/dense_tensor.h>
+#include <vespa/eval/tensor/dense/dense_tensor_view.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+const TensorEngine &prod_engine = DefaultTensorEngine::ref();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add_matrix("a", 2, "d", 3) // inner/inner
+ .add_matrix("a", 2, "b", 5) // inner/outer
+ .add_matrix("b", 5, "c", 2) // outer/outer
+ .add_matrix("a", 2, "c", 3) // not matching
+ //------------------------------------------
+ .add_matrix("b", 5, "d", 3); // fixed param
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr,
+ size_t lhs_size, size_t common_size, size_t rhs_size,
+ bool lhs_inner, bool rhs_inner)
+{
+ EvalFixture slow_fixture(prod_engine, expr, param_repo, false);
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<DenseMatMulFunction>();
+ ASSERT_EQUAL(info.size(), 1u);
+ EXPECT_TRUE(info[0]->result_is_mutable());
+ EXPECT_EQUAL(info[0]->lhs_size(), lhs_size);
+ EXPECT_EQUAL(info[0]->common_size(), common_size);
+ EXPECT_EQUAL(info[0]->rhs_size(), rhs_size);
+ EXPECT_EQUAL(info[0]->lhs_common_inner(), lhs_inner);
+ EXPECT_EQUAL(info[0]->rhs_common_inner(), rhs_inner);
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture slow_fixture(prod_engine, expr, param_repo, false);
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<DenseMatMulFunction>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST("require that matmul can be optimized") {
+ TEST_DO(verify_optimized("reduce(a2d3*b5d3,sum,d)", 2, 3, 5, true, true));
+}
+
+TEST("require that matmul with lambda can be optimized") {
+ TEST_DO(verify_optimized("reduce(join(a2d3,b5d3,f(x,y)(x*y)),sum,d)", 2, 3, 5, true, true));
+}
+
+TEST("require that expressions similar to matmul are not optimized") {
+ TEST_DO(verify_not_optimized("reduce(a2d3*b5d3,sum,a)"));
+ TEST_DO(verify_not_optimized("reduce(a2d3*b5d3,sum,b)"));
+ TEST_DO(verify_not_optimized("reduce(a2d3*b5d3,prod,d)"));
+ TEST_DO(verify_not_optimized("reduce(a2d3*b5d3,sum)"));
+ TEST_DO(verify_not_optimized("reduce(join(a2d3,b5d3,f(x,y)(y*x)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(a2d3,b5d3,f(x,y)(x+y)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(a2d3,b5d3,f(x,y)(x*x)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(a2d3,b5d3,f(x,y)(y*y)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(a2d3,b5d3,f(x,y)(x*y*1)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(a2c3*b5d3,sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(a2c3*b5d3,sum,c)"));
+}
+
+TEST("require that xw product can be debug dumped") {
+ EvalFixture fixture(prod_engine, "reduce(a2d3*b5d3,sum,d)", param_repo, true);
+ auto info = fixture.find_all<DenseMatMulFunction>();
+ ASSERT_EQUAL(info.size(), 1u);
+ fprintf(stderr, "%s\n", info[0]->as_string().c_str());
+}
+
+vespalib::string make_expr(const vespalib::string &a, const vespalib::string &b, const vespalib::string &common,
+ bool float_a, bool float_b)
+{
+ return make_string("reduce(%s%s*%s%s,sum,%s)", a.c_str(), float_a ? "f" : "", b.c_str(), float_b ? "f" : "", common.c_str());
+}
+
+void verify_optimized_multi(const vespalib::string &a, const vespalib::string &b, const vespalib::string &common,
+ size_t lhs_size, size_t common_size, size_t rhs_size,
+ bool lhs_inner, bool rhs_inner)
+{
+ for (bool float_a: {false, true}) {
+ for (bool float_b: {false, true}) {
+ {
+ auto expr = make_expr(a, b, common, float_a, float_b);
+ TEST_STATE(expr.c_str());
+ TEST_DO(verify_optimized(expr, lhs_size, common_size, rhs_size, lhs_inner, rhs_inner));
+ }
+ {
+ auto expr = make_expr(b, a, common, float_b, float_a);
+ TEST_STATE(expr.c_str());
+ TEST_DO(verify_optimized(expr, lhs_size, common_size, rhs_size, lhs_inner, rhs_inner));
+ }
+ }
+ }
+}
+
+TEST("require that matmul inner/inner works correctly") {
+ TEST_DO(verify_optimized_multi("a2d3", "b5d3", "d", 2, 3, 5, true, true));
+}
+
+TEST("require that matmul inner/outer works correctly") {
+ TEST_DO(verify_optimized_multi("a2b5", "b5d3", "b", 2, 5, 3, true, false));
+}
+
+TEST("require that matmul outer/outer works correctly") {
+ TEST_DO(verify_optimized_multi("b5c2", "b5d3", "b", 2, 5, 3, false, false));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_multi_matmul_function/CMakeLists.txt b/eval/src/tests/instruction/dense_multi_matmul_function/CMakeLists.txt
new file mode 100644
index 00000000000..1619f42c897
--- /dev/null
+++ b/eval/src/tests/instruction/dense_multi_matmul_function/CMakeLists.txt
@@ -0,0 +1,8 @@
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(eval_dense_multi_matmul_function_test_app TEST
    SOURCES
    dense_multi_matmul_function_test.cpp
    DEPENDS
    vespaeval
)
vespa_add_test(NAME eval_dense_multi_matmul_function_test_app COMMAND eval_dense_multi_matmul_function_test_app)
diff --git a/eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp b/eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
new file mode 100644
index 00000000000..8baf8c5e694
--- /dev/null
+++ b/eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
@@ -0,0 +1,161 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/simple_tensor.h>
+#include <vespa/eval/eval/simple_tensor_engine.h>
+#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/eval/instruction/dense_multi_matmul_function.h>
+#include <vespa/eval/tensor/dense/dense_tensor.h>
+#include <vespa/eval/tensor/dense/dense_tensor_view.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+const TensorEngine &prod_engine = DefaultTensorEngine::ref();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"a", 2}, {"d", 3}}) // inner/inner
+ .add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"D", 1}, {"a", 2}, {"c", 1}, {"d", 3}, {"e", 1}}) // inner/inner, extra dims
+ .add_dense({{"B", 1}, {"C", 3}, {"a", 2}, {"d", 3}}) // inner/inner, missing A
+ .add_dense({{"A", 1}, {"a", 2}, {"d", 3}}) // inner/inner, single mat
+ .add_dense({{"A", 2}, {"D", 3}, {"a", 2}, {"b", 1}, {"c", 3}}) // inner/inner, inverted
+ .add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"a", 2}, {"b", 5}}) // inner/outer
+ .add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"b", 5}, {"c", 2}}) // outer/outer
+ .add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"a", 2}, {"c", 3}}) // not matching
+ //----------------------------------------------------------------------------------------
+ .add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"b", 5}, {"d", 3}}) // fixed param
+ .add_dense({{"B", 1}, {"C", 3}, {"b", 5}, {"d", 3}}) // fixed param, missing A
+ .add_dense({{"A", 1}, {"b", 5}, {"d", 3}}) // fixed param, single mat
+ .add_dense({{"B", 5}, {"D", 3}, {"a", 2}, {"b", 1}, {"c", 3}}); // fixed param, inverted
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr,
+ size_t lhs_size, size_t common_size, size_t rhs_size, size_t matmul_cnt,
+ bool lhs_inner, bool rhs_inner)
+{
+ EvalFixture slow_fixture(prod_engine, expr, param_repo, false);
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<DenseMultiMatMulFunction>();
+ ASSERT_EQUAL(info.size(), 1u);
+ EXPECT_TRUE(info[0]->result_is_mutable());
+ EXPECT_EQUAL(info[0]->lhs_size(), lhs_size);
+ EXPECT_EQUAL(info[0]->common_size(), common_size);
+ EXPECT_EQUAL(info[0]->rhs_size(), rhs_size);
+ EXPECT_EQUAL(info[0]->matmul_cnt(), matmul_cnt);
+ EXPECT_EQUAL(info[0]->lhs_common_inner(), lhs_inner);
+ EXPECT_EQUAL(info[0]->rhs_common_inner(), rhs_inner);
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture slow_fixture(prod_engine, expr, param_repo, false);
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<DenseMultiMatMulFunction>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST("require that multi matmul can be optimized") {
+ TEST_DO(verify_optimized("reduce(A2B1C3a2d3*A2B1C3b5d3,sum,d)", 2, 3, 5, 6, true, true));
+}
+
+TEST("require that single multi matmul can be optimized") {
+ TEST_DO(verify_optimized("reduce(A1a2d3*A1b5d3,sum,d)", 2, 3, 5, 1, true, true));
+}
+
+TEST("require that multi matmul with lambda can be optimized") {
+ TEST_DO(verify_optimized("reduce(join(A2B1C3a2d3,A2B1C3b5d3,f(x,y)(x*y)),sum,d)", 2, 3, 5, 6, true, true));
+}
+
+TEST("require that expressions similar to multi matmul are not optimized") {
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2d3*A2B1C3b5d3,sum,a)"));
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2d3*A2B1C3b5d3,sum,b)"));
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2d3*A2B1C3b5d3,prod,d)"));
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2d3*A2B1C3b5d3,sum)"));
+ TEST_DO(verify_not_optimized("reduce(join(A2B1C3a2d3,A2B1C3b5d3,f(x,y)(y*x)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(A2B1C3a2d3,A2B1C3b5d3,f(x,y)(x+y)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(A2B1C3a2d3,A2B1C3b5d3,f(x,y)(x*x)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(A2B1C3a2d3,A2B1C3b5d3,f(x,y)(y*y)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(join(A2B1C3a2d3,A2B1C3b5d3,f(x,y)(x*y*1)),sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2c3*A2B1C3b5d3,sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2c3*A2B1C3b5d3,sum,c)"));
+}
+
+TEST("require that multi matmul must have matching cell type") {
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2d3f*A2B1C3b5d3,sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2d3*A2B1C3b5d3f,sum,d)"));
+}
+
+TEST("require that multi matmul must have matching dimension prefix") {
+ TEST_DO(verify_not_optimized("reduce(B1C3a2d3*A2B1C3b5d3,sum,d)"));
+ TEST_DO(verify_not_optimized("reduce(A2B1C3a2d3*B1C3b5d3,sum,d)"));
+}
+
+TEST("require that multi matmul must have inner nesting of matmul dimensions") {
+ TEST_DO(verify_not_optimized("reduce(A2D3a2b1c3*B5D3a2b1c3,sum,D)"));
+ TEST_DO(verify_not_optimized("reduce(B5D3a2b1c3*A2D3a2b1c3,sum,D)"));
+}
+
+TEST("require that multi matmul ignores trivial dimensions") {
+ TEST_DO(verify_optimized("reduce(A2B1C3D1a2c1d3e1*A2B1C3b5d3,sum,d)", 2, 3, 5, 6, true, true));
+ TEST_DO(verify_optimized("reduce(A2B1C3b5d3*A2B1C3D1a2c1d3e1,sum,d)", 2, 3, 5, 6, true, true));
+}
+
+TEST("require that multi matmul function can be debug dumped") {
+ EvalFixture fixture(prod_engine, "reduce(A2B1C3a2d3*A2B1C3b5d3,sum,d)", param_repo, true);
+ auto info = fixture.find_all<DenseMultiMatMulFunction>();
+ ASSERT_EQUAL(info.size(), 1u);
+ fprintf(stderr, "%s\n", info[0]->as_string().c_str());
+}
+
+vespalib::string make_expr(const vespalib::string &a, const vespalib::string &b, const vespalib::string &common,
+ bool float_cells)
+{
+ return make_string("reduce(%s%s*%s%s,sum,%s)", a.c_str(), float_cells ? "f" : "", b.c_str(), float_cells ? "f" : "", common.c_str());
+}
+
+void verify_optimized_multi(const vespalib::string &a, const vespalib::string &b, const vespalib::string &common,
+ size_t lhs_size, size_t common_size, size_t rhs_size, size_t matmul_cnt,
+ bool lhs_inner, bool rhs_inner)
+{
+ for (bool float_cells: {false, true}) {
+ {
+ auto expr = make_expr(a, b, common, float_cells);
+ TEST_STATE(expr.c_str());
+ TEST_DO(verify_optimized(expr, lhs_size, common_size, rhs_size, matmul_cnt, lhs_inner, rhs_inner));
+ }
+ {
+ auto expr = make_expr(b, a, common, float_cells);
+ TEST_STATE(expr.c_str());
+ TEST_DO(verify_optimized(expr, lhs_size, common_size, rhs_size, matmul_cnt, lhs_inner, rhs_inner));
+ }
+ }
+}
+
+TEST("require that multi matmul inner/inner works correctly") {
+ TEST_DO(verify_optimized_multi("A2B1C3a2d3", "A2B1C3b5d3", "d", 2, 3, 5, 6, true, true));
+}
+
+TEST("require that multi matmul inner/outer works correctly") {
+ TEST_DO(verify_optimized_multi("A2B1C3a2b5", "A2B1C3b5d3", "b", 2, 5, 3, 6, true, false));
+}
+
+TEST("require that multi matmul outer/outer works correctly") {
+ TEST_DO(verify_optimized_multi("A2B1C3b5c2", "A2B1C3b5d3", "b", 2, 5, 3, 6, false, false));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_simple_expand_function/CMakeLists.txt b/eval/src/tests/instruction/dense_simple_expand_function/CMakeLists.txt
new file mode 100644
index 00000000000..9bb22da7d88
--- /dev/null
+++ b/eval/src/tests/instruction/dense_simple_expand_function/CMakeLists.txt
@@ -0,0 +1,9 @@
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(eval_dense_simple_expand_function_test_app TEST
    SOURCES
    dense_simple_expand_function_test.cpp
    DEPENDS
    vespaeval
    GTest::GTest
)
vespa_add_test(NAME eval_dense_simple_expand_function_test_app COMMAND eval_dense_simple_expand_function_test_app)
diff --git a/eval/src/tests/instruction/dense_simple_expand_function/dense_simple_expand_function_test.cpp b/eval/src/tests/instruction/dense_simple_expand_function/dense_simple_expand_function_test.cpp
new file mode 100644
index 00000000000..15979652c1a
--- /dev/null
+++ b/eval/src/tests/instruction/dense_simple_expand_function/dense_simple_expand_function_test.cpp
@@ -0,0 +1,130 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/simple_tensor.h>
+#include <vespa/eval/eval/simple_tensor_engine.h>
+#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/eval/instruction/dense_simple_expand_function.h>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::eval::tensor_function;
+using namespace vespalib::tensor;
+
+using Inner = DenseSimpleExpandFunction::Inner;
+
+const TensorEngine &prod_engine = DefaultTensorEngine::ref();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("a", spec(1.5))
+ .add("sparse", spec({x({"a"})}, N()))
+ .add("mixed", spec({y({"a"}),z(5)}, N()))
+ .add_vector("a", 5)
+ .add_vector("b", 3)
+ .add_cube("A", 1, "a", 5, "c", 1)
+ .add_cube("B", 1, "b", 3, "c", 1)
+ .add_matrix("a", 5, "c", 3)
+ .add_matrix("x", 3, "y", 2)
+ .add_cube("a", 1, "b", 1, "c", 1)
+ .add_cube("x", 1, "y", 1, "z", 1);
+}
+
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr, Inner inner) {
+ EvalFixture slow_fixture(prod_engine, expr, param_repo, false);
+ EvalFixture fixture(prod_engine, expr, param_repo, true, true);
+ EXPECT_EQ(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<DenseSimpleExpandFunction>();
+ ASSERT_EQ(info.size(), 1u);
+ EXPECT_TRUE(info[0]->result_is_mutable());
+ EXPECT_EQ(info[0]->inner(), inner);
+ ASSERT_EQ(fixture.num_params(), 2);
+ EXPECT_TRUE(!(fixture.get_param(0) == fixture.result()));
+ EXPECT_TRUE(!(fixture.get_param(1) == fixture.result()));
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture slow_fixture(prod_engine, expr, param_repo, false);
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ EXPECT_EQ(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<DenseSimpleExpandFunction>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST(ExpandTest, simple_expand_is_optimized) {
+ verify_optimized("join(a5,b3,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(b3,a5,f(x,y)(x*y))", Inner::LHS);
+}
+
+TEST(ExpandTest, multiple_dimensions_are_supported) {
+ verify_optimized("join(a5,x3y2,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(x3y2,a5,f(x,y)(x*y))", Inner::LHS);
+ verify_optimized("join(a5c3,x3y2,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(x3y2,a5c3,f(x,y)(x*y))", Inner::LHS);
+}
+
+TEST(ExpandTest, trivial_dimensions_are_ignored) {
+ verify_optimized("join(A1a5c1,B1b3c1,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(B1b3c1,A1a5c1,f(x,y)(x*y))", Inner::LHS);
+}
+
+TEST(ExpandTest, simple_expand_handles_asymmetric_operations_correctly) {
+ verify_optimized("join(a5,b3,f(x,y)(x-y))", Inner::RHS);
+ verify_optimized("join(b3,a5,f(x,y)(x-y))", Inner::LHS);
+ verify_optimized("join(a5,b3,f(x,y)(x/y))", Inner::RHS);
+ verify_optimized("join(b3,a5,f(x,y)(x/y))", Inner::LHS);
+}
+
+TEST(ExpandTest, simple_expand_can_have_various_cell_types) {
+ verify_optimized("join(a5,b3f,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(a5f,b3,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(a5f,b3f,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(b3,a5f,f(x,y)(x*y))", Inner::LHS);
+ verify_optimized("join(b3f,a5,f(x,y)(x*y))", Inner::LHS);
+ verify_optimized("join(b3f,a5f,f(x,y)(x*y))", Inner::LHS);
+}
+
+TEST(ExpandTest, simple_expand_is_never_inplace) {
+ verify_optimized("join(@a5,@b3,f(x,y)(x*y))", Inner::RHS);
+ verify_optimized("join(@b3,@a5,f(x,y)(x*y))", Inner::LHS);
+}
+
+TEST(ExpandTest, interleaved_dimensions_are_not_optimized) {
+ verify_not_optimized("join(a5c3,b3,f(x,y)(x*y))");
+ verify_not_optimized("join(b3,a5c3,f(x,y)(x*y))");
+}
+
+TEST(ExpandTest, matching_dimensions_are_not_expanding) {
+ verify_not_optimized("join(a5c3,a5,f(x,y)(x*y))");
+ verify_not_optimized("join(a5,a5c3,f(x,y)(x*y))");
+}
+
+TEST(ExpandTest, scalar_is_not_expanding) {
+ verify_not_optimized("join(a5,a,f(x,y)(x*y))");
+}
+
+TEST(ExpandTest, unit_tensor_is_not_expanding) {
+ verify_not_optimized("join(a5,x1y1z1,f(x,y)(x+y))");
+ verify_not_optimized("join(x1y1z1,a5,f(x,y)(x+y))");
+ verify_not_optimized("join(a1b1c1,x1y1z1,f(x,y)(x+y))");
+}
+
+TEST(ExpandTest, sparse_expand_is_not_optimized) {
+ verify_not_optimized("join(a5,sparse,f(x,y)(x*y))");
+ verify_not_optimized("join(sparse,a5,f(x,y)(x*y))");
+}
+
+TEST(ExpandTest, mixed_expand_is_not_optimized) {
+ verify_not_optimized("join(a5,mixed,f(x,y)(x*y))");
+ verify_not_optimized("join(mixed,a5,f(x,y)(x*y))");
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/instruction/dense_tensor_peek_function/CMakeLists.txt b/eval/src/tests/instruction/dense_tensor_peek_function/CMakeLists.txt
new file mode 100644
index 00000000000..a4d1bbf6fac
--- /dev/null
+++ b/eval/src/tests/instruction/dense_tensor_peek_function/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_tensor_peek_function_test_app TEST
+ SOURCES
+ dense_tensor_peek_function_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_tensor_peek_function_test_app COMMAND eval_dense_tensor_peek_function_test_app)
diff --git a/eval/src/tests/instruction/dense_tensor_peek_function/dense_tensor_peek_function_test.cpp b/eval/src/tests/instruction/dense_tensor_peek_function/dense_tensor_peek_function_test.cpp
new file mode 100644
index 00000000000..d2def05a614
--- /dev/null
+++ b/eval/src/tests/instruction/dense_tensor_peek_function/dense_tensor_peek_function_test.cpp
@@ -0,0 +1,85 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/simple_tensor.h>
+#include <vespa/eval/eval/simple_tensor_engine.h>
+#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/eval/instruction/dense_tensor_peek_function.h>
+#include <vespa/eval/tensor/dense/dense_tensor.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+const TensorEngine &prod_engine = DefaultTensorEngine::ref();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("a", spec(1.0))
+ .add("b", spec(2.0))
+ .add("c", spec(3.0))
+ .add("x3", spec(x(3), N()))
+ .add("x3f", spec(float_cells({x(3)}), N()))
+ .add("x3y2", spec({x(3),y(2)}, N()))
+ .add("x3y2f", spec(float_cells({x(3),y(2)}), N()))
+ .add("xm", spec(x({"1","2","3","-1","-2","-3"}), N()))
+ .add("xmy2", spec({x({"1","2","3"}), y(2)}, N()));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify(const vespalib::string &expr, double expect, size_t expect_optimized_cnt, size_t expect_not_optimized_cnt) {
+ EvalFixture fixture(prod_engine, expr, param_repo, true);
+ auto expect_spec = TensorSpec("double").add({}, expect);
+ EXPECT_EQUAL(EvalFixture::ref(expr, param_repo), expect_spec);
+ EXPECT_EQUAL(fixture.result(), expect_spec);
+ auto info = fixture.find_all<DenseTensorPeekFunction>();
+ EXPECT_EQUAL(info.size(), expect_optimized_cnt);
+ for (size_t i = 0; i < info.size(); ++i) {
+ EXPECT_TRUE(info[i]->result_is_mutable());
+ }
+ EXPECT_EQUAL(fixture.find_all<Peek>().size(), expect_not_optimized_cnt);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that tensor peek can be optimized for dense tensors") {
+ TEST_DO(verify("x3{x:0}", 1.0, 1, 0));
+ TEST_DO(verify("x3{x:(a)}", 2.0, 1, 0));
+ TEST_DO(verify("x3f{x:(c-1)}", 3.0, 1, 0));
+ TEST_DO(verify("x3{x:(c+5)}", 0.0, 1, 0));
+ TEST_DO(verify("x3{x:(a-2)}", 0.0, 1, 0));
+ TEST_DO(verify("x3y2{x:(a),y:(a-1)}", 3.0, 1, 0));
+ TEST_DO(verify("x3y2f{x:1,y:(a)}", 4.0, 1, 0));
+ TEST_DO(verify("x3y2f{x:(a-1),y:(b)}", 0.0, 1, 0));
+}
+
+TEST("require that tensor peek is not optimized for sparse tensor") {
+ TEST_DO(verify("xm{x:1}", 1.0, 0, 1));
+ TEST_DO(verify("xm{x:(c)}", 3.0, 0, 1));
+ TEST_DO(verify("xm{x:(c+1)}", 0.0, 0, 1));
+}
+
+TEST("require that tensor peek is not optimized for mixed tensor") {
+ TEST_DO(verify("xmy2{x:3,y:1}", 6.0, 0, 1));
+ TEST_DO(verify("xmy2{x:(c),y:(a)}", 6.0, 0, 1));
+ TEST_DO(verify("xmy2{x:(a),y:(b)}", 0.0, 0, 1));
+}
+
+TEST("require that indexes are truncated when converted to integers") {
+ TEST_DO(verify("x3{x:(a+0.7)}", 2.0, 1, 0));
+ TEST_DO(verify("x3{x:(a+0.3)}", 2.0, 1, 0));
+ TEST_DO(verify("xm{x:(a+0.7)}", 1.0, 0, 1));
+ TEST_DO(verify("xm{x:(a+0.3)}", 1.0, 0, 1));
+ TEST_DO(verify("xm{x:(-a-0.7)}", 4.0, 0, 1));
+ TEST_DO(verify("xm{x:(-a-0.3)}", 4.0, 0, 1));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/index_lookup_table/CMakeLists.txt b/eval/src/tests/instruction/index_lookup_table/CMakeLists.txt
new file mode 100644
index 00000000000..0343ed75978
--- /dev/null
+++ b/eval/src/tests/instruction/index_lookup_table/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_index_lookup_table_test_app TEST
+ SOURCES
+ index_lookup_table_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_index_lookup_table_test_app COMMAND eval_index_lookup_table_test_app)
diff --git a/eval/src/tests/instruction/index_lookup_table/index_lookup_table_test.cpp b/eval/src/tests/instruction/index_lookup_table/index_lookup_table_test.cpp
new file mode 100644
index 00000000000..988b6d3aa4a
--- /dev/null
+++ b/eval/src/tests/instruction/index_lookup_table/index_lookup_table_test.cpp
@@ -0,0 +1,118 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/instruction/index_lookup_table.h>
+#include <vespa/eval/eval/function.h>
+#include <vespa/eval/eval/value_type.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib::eval;
+using namespace vespalib::tensor;
+
+std::vector<uint32_t> make_table(std::vector<uint32_t> list) { return list; }
+
+TEST(IndexLookupTableTest, single_dimension_lookup_table_is_correct)
+{
+ auto idx_fun = Function::parse({"x"}, "5-x");
+ auto type = ValueType::from_spec("tensor(x[6])");
+ auto table = IndexLookupTable::create(*idx_fun, type);
+
+ EXPECT_EQ(IndexLookupTable::num_cached(), 1);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 1);
+ EXPECT_EQ(table->get(), make_table({5,4,3,2,1,0}));
+}
+
+TEST(IndexLookupTableTest, dual_dimension_lookup_table_is_correct)
+{
+ auto idx_fun = Function::parse({"x","y"}, "5-(x*2+y)");
+ auto type = ValueType::from_spec("tensor(x[3],y[2])");
+ auto table = IndexLookupTable::create(*idx_fun, type);
+
+ EXPECT_EQ(IndexLookupTable::num_cached(), 1);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 1);
+ EXPECT_EQ(table->get(), make_table({5,4,3,2,1,0}));
+}
+
+TEST(IndexLookupTableTest, multi_dimension_lookup_table_is_correct)
+{
+ auto idx_fun = Function::parse({"a","b","c","d"}, "11-(a*6+b*2+c*2+d)");
+ auto type = ValueType::from_spec("tensor(a[2],b[3],c[1],d[2])");
+ auto table = IndexLookupTable::create(*idx_fun, type);
+
+ EXPECT_EQ(IndexLookupTable::num_cached(), 1);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 1);
+ EXPECT_EQ(table->get(), make_table({11,10,9,8,7,6,5,4,3,2,1,0}));
+}
+
+TEST(IndexLookupTableTest, lookup_tables_can_be_shared)
+{
+ auto idx_fun1 = Function::parse({"x"}, "5-x");
+ auto type1 = ValueType::from_spec("tensor(x[6])");
+ auto table1 = IndexLookupTable::create(*idx_fun1, type1);
+
+ auto idx_fun2 = Function::parse({"x"}, "5-x");
+ auto type2 = ValueType::from_spec("tensor(x[6])");
+ auto table2 = IndexLookupTable::create(*idx_fun2, type2);
+
+ EXPECT_EQ(IndexLookupTable::num_cached(), 1);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 2);
+ EXPECT_EQ(&table1->get(), &table2->get());
+ EXPECT_EQ(table1->get(), make_table({5,4,3,2,1,0}));
+}
+
+TEST(IndexLookupTableTest, lookup_tables_with_different_index_functions_are_not_shared)
+{
+ auto idx_fun1 = Function::parse({"x"}, "5-x");
+ auto type1 = ValueType::from_spec("tensor(x[6])");
+ auto table1 = IndexLookupTable::create(*idx_fun1, type1);
+
+ auto idx_fun2 = Function::parse({"x"}, "x");
+ auto type2 = ValueType::from_spec("tensor(x[6])");
+ auto table2 = IndexLookupTable::create(*idx_fun2, type2);
+
+ EXPECT_EQ(IndexLookupTable::num_cached(), 2);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 2);
+ EXPECT_NE(&table1->get(), &table2->get());
+ EXPECT_EQ(table1->get(), make_table({5,4,3,2,1,0}));
+ EXPECT_EQ(table2->get(), make_table({0,1,2,3,4,5}));
+}
+
+TEST(IndexLookupTableTest, lookup_tables_with_different_value_types_are_not_shared)
+{
+ auto idx_fun1 = Function::parse({"x"}, "x");
+ auto type1 = ValueType::from_spec("tensor(x[6])");
+ auto table1 = IndexLookupTable::create(*idx_fun1, type1);
+
+ auto idx_fun2 = Function::parse({"x"}, "x");
+ auto type2 = ValueType::from_spec("tensor(x[5])");
+ auto table2 = IndexLookupTable::create(*idx_fun2, type2);
+
+ EXPECT_EQ(IndexLookupTable::num_cached(), 2);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 2);
+ EXPECT_NE(&table1->get(), &table2->get());
+ EXPECT_EQ(table1->get(), make_table({0,1,2,3,4,5}));
+ EXPECT_EQ(table2->get(), make_table({0,1,2,3,4}));
+}
+
+TEST(IndexLookupTableTest, identical_lookup_tables_might_not_be_shared)
+{
+ auto idx_fun1 = Function::parse({"x"}, "5-x");
+ auto type1 = ValueType::from_spec("tensor(x[6])");
+ auto table1 = IndexLookupTable::create(*idx_fun1, type1);
+
+ auto idx_fun2 = Function::parse({"x","y"}, "5-(x*2+y)");
+ auto type2 = ValueType::from_spec("tensor(x[3],y[2])");
+ auto table2 = IndexLookupTable::create(*idx_fun2, type2);
+
+ EXPECT_EQ(IndexLookupTable::num_cached(), 2);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 2);
+ EXPECT_NE(&table1->get(), &table2->get());
+ EXPECT_EQ(table1->get(), make_table({5,4,3,2,1,0}));
+ EXPECT_EQ(table2->get(), make_table({5,4,3,2,1,0}));
+}
+
+TEST(IndexLookupTableTest, unused_lookup_tables_are_discarded) {
+ EXPECT_EQ(IndexLookupTable::num_cached(), 0);
+ EXPECT_EQ(IndexLookupTable::count_refs(), 0);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()