aboutsummaryrefslogtreecommitdiffstats
path: root/eval/src/tests/instruction
diff options
context:
space:
mode:
authorArne Juul <arnej@verizonmedia.com>2020-12-09 14:54:07 +0000
committerArne Juul <arnej@verizonmedia.com>2020-12-09 14:54:07 +0000
commitb7d222356f3b0f9676a733c0e2a73405ab124b63 (patch)
treee8b6d717508ff28c3701ab02a09d5a93a0bffc73 /eval/src/tests/instruction
parentbdb8ab9e3373f95d6a06ade3bd07709565d4040e (diff)
move tests to match source location
Diffstat (limited to 'eval/src/tests/instruction')
-rw-r--r--eval/src/tests/instruction/dense_add_dimension_optimizer/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp109
-rw-r--r--eval/src/tests/instruction/dense_fast_rename_optimizer/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp75
-rw-r--r--eval/src/tests/instruction/dense_inplace_join_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_inplace_join_function/dense_inplace_join_function_test.cpp138
-rw-r--r--eval/src/tests/instruction/dense_pow_as_map_optimizer/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/dense_pow_as_map_optimizer/dense_pow_as_map_optimizer_test.cpp92
-rw-r--r--eval/src/tests/instruction/dense_remove_dimension_optimizer/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp81
-rw-r--r--eval/src/tests/instruction/dense_replace_type_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp77
-rw-r--r--eval/src/tests/instruction/dense_simple_join_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_simple_join_function/dense_simple_join_function_test.cpp225
-rw-r--r--eval/src/tests/instruction/dense_simple_map_function/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/dense_simple_map_function/dense_simple_map_function_test.cpp75
-rw-r--r--eval/src/tests/instruction/dense_single_reduce_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_single_reduce_function/dense_single_reduce_function_test.cpp170
-rw-r--r--eval/src/tests/instruction/dense_tensor_create_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/dense_tensor_create_function/dense_tensor_create_function_test.cpp60
-rw-r--r--eval/src/tests/instruction/vector_from_doubles_function/CMakeLists.txt8
-rw-r--r--eval/src/tests/instruction/vector_from_doubles_function/vector_from_doubles_function_test.cpp59
22 files changed, 1251 insertions, 0 deletions
diff --git a/eval/src/tests/instruction/dense_add_dimension_optimizer/CMakeLists.txt b/eval/src/tests/instruction/dense_add_dimension_optimizer/CMakeLists.txt
new file mode 100644
index 00000000000..1bc9f93b1a2
--- /dev/null
+++ b/eval/src/tests/instruction/dense_add_dimension_optimizer/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_add_dimension_optimizer_test_app TEST
+ SOURCES
+ dense_add_dimension_optimizer_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_add_dimension_optimizer_test_app COMMAND eval_dense_add_dimension_optimizer_test_app)
diff --git a/eval/src/tests/instruction/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp b/eval/src/tests/instruction/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp
new file mode 100644
index 00000000000..1812cc8c1db
--- /dev/null
+++ b/eval/src/tests/instruction/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp
@@ -0,0 +1,109 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/dense_replace_type_function.h>
+#include <vespa/eval/instruction/dense_fast_rename_optimizer.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("x5", spec({x(5)}, N()))
+ .add("x5f", spec(float_cells({x(5)}), N()))
+ .add("x5y1", spec({x(5),y(1)}, N()))
+ .add("y1z1", spec({y(1),z(1)}, N()))
+ .add("x_m", spec({x({"a"})}, N()));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseReplaceTypeFunction>();
+ EXPECT_EQUAL(info.size(), 1u);
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseReplaceTypeFunction>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST("require that dimension addition can be optimized") {
+ TEST_DO(verify_optimized("join(x5,tensor(y[1])(1),f(a,b)(a*b))"));
+ TEST_DO(verify_optimized("join(tensor(y[1])(1),x5,f(a,b)(a*b))"));
+ TEST_DO(verify_optimized("x5*tensor(y[1])(1)"));
+ TEST_DO(verify_optimized("tensor(y[1])(1)*x5"));
+ TEST_DO(verify_optimized("x5y1*tensor(z[1])(1)"));
+ TEST_DO(verify_optimized("tensor(z[1])(1)*x5y1"));
+}
+
+TEST("require that multi-dimension addition can be optimized") {
+ TEST_DO(verify_optimized("x5*tensor(a[1],b[1],c[1])(1)"));
+}
+
+TEST("require that dimension addition can be chained (and compacted)") {
+ TEST_DO(verify_optimized("tensor(z[1])(1)*x5*tensor(y[1])(1)"));
+}
+
+TEST("require that constant dimension addition is optimized") {
+ TEST_DO(verify_optimized("tensor(x[1])(1)*tensor(y[1])(1)"));
+ TEST_DO(verify_optimized("tensor(x[1])(1.1)*tensor(y[1])(1)"));
+ TEST_DO(verify_optimized("tensor(x[1])(1)*tensor(y[1])(1.1)"));
+ TEST_DO(verify_optimized("tensor(x[2])(1)*tensor(y[1])(1)"));
+ TEST_DO(verify_optimized("tensor(x[1])(1)*tensor(y[2])(1)"));
+}
+
+TEST("require that non-canonical dimension addition is not optimized") {
+ TEST_DO(verify_not_optimized("x5+tensor(y[1])(0)"));
+ TEST_DO(verify_not_optimized("tensor(y[1])(0)+x5"));
+ TEST_DO(verify_not_optimized("x5-tensor(y[1])(0)"));
+ TEST_DO(verify_not_optimized("x5/tensor(y[1])(1)"));
+ TEST_DO(verify_not_optimized("tensor(y[1])(1)/x5"));
+}
+
+TEST("require that dimension addition with overlapping dimensions is optimized") {
+ TEST_DO(verify_optimized("x5y1*tensor(y[1],z[1])(1)"));
+ TEST_DO(verify_optimized("tensor(y[1],z[1])(1)*x5y1"));
+}
+
+TEST("require that dimension addition with inappropriate dimensions is not optimized") {
+ TEST_DO(verify_not_optimized("x_m*tensor(y[1])(1)"));
+ TEST_DO(verify_not_optimized("tensor(y[1])(1)*x_m"));
+}
+
+TEST("require that dimension addition optimization requires unit constant tensor") {
+ TEST_DO(verify_not_optimized("x5*tensor(y[1])(0.9)"));
+ TEST_DO(verify_not_optimized("tensor(y[1])(1.1)*x5"));
+ TEST_DO(verify_not_optimized("x5*tensor(y[1],z[2])(1)"));
+ TEST_DO(verify_not_optimized("tensor(y[1],z[2])(1)*x5"));
+ TEST_DO(verify_not_optimized("x5*y1z1"));
+ TEST_DO(verify_not_optimized("y1z1*x5"));
+ TEST_DO(verify_not_optimized("tensor(x[1])(1.1)*tensor(y[1])(1.1)"));
+ TEST_DO(verify_not_optimized("tensor(x[2])(1)*tensor(y[2])(1)"));
+}
+
+TEST("require that optimization also works for float cells") {
+ TEST_DO(verify_optimized("x5*tensor<float>(a[1],b[1],c[1])(1)"));
+ TEST_DO(verify_optimized("x5f*tensor<float>(a[1],b[1],c[1])(1)"));
+}
+
+TEST("require that optimization is disabled if unit vector would promote tensor cell types") {
+ TEST_DO(verify_not_optimized("x5f*tensor(a[1],b[1],c[1])(1)"));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_fast_rename_optimizer/CMakeLists.txt b/eval/src/tests/instruction/dense_fast_rename_optimizer/CMakeLists.txt
new file mode 100644
index 00000000000..32cf6c45d1e
--- /dev/null
+++ b/eval/src/tests/instruction/dense_fast_rename_optimizer/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_fast_rename_optimizer_test_app TEST
+ SOURCES
+ dense_fast_rename_optimizer_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_fast_rename_optimizer_test_app COMMAND eval_dense_fast_rename_optimizer_test_app)
diff --git a/eval/src/tests/instruction/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp b/eval/src/tests/instruction/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp
new file mode 100644
index 00000000000..7339a057fa3
--- /dev/null
+++ b/eval/src/tests/instruction/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp
@@ -0,0 +1,75 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/dense_replace_type_function.h>
+#include <vespa/eval/instruction/dense_fast_rename_optimizer.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("x5", spec({x(5)}, N()))
+ .add("x5f", spec(float_cells({x(5)}), N()))
+ .add("x_m", spec({x({"a", "b", "c"})}, N()))
+ .add("x5y3", spec({x(5),y(3)}, N()));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseReplaceTypeFunction>();
+ EXPECT_EQUAL(info.size(), 1u);
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseReplaceTypeFunction>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST("require that non-transposing dense renames are optimized") {
+ TEST_DO(verify_optimized("rename(x5,x,y)"));
+ TEST_DO(verify_optimized("rename(x5,x,a)"));
+ TEST_DO(verify_optimized("rename(x5y3,y,z)"));
+ TEST_DO(verify_optimized("rename(x5y3,x,a)"));
+ TEST_DO(verify_optimized("rename(x5y3,(x,y),(a,b))"));
+ TEST_DO(verify_optimized("rename(x5y3,(x,y),(z,zz))"));
+ TEST_DO(verify_optimized("rename(x5y3,(x,y),(y,z))"));
+ TEST_DO(verify_optimized("rename(x5y3,(y,x),(b,a))"));
+}
+
+TEST("require that transposing dense renames are not optimized") {
+ TEST_DO(verify_not_optimized("rename(x5y3,x,z)"));
+ TEST_DO(verify_not_optimized("rename(x5y3,y,a)"));
+ TEST_DO(verify_not_optimized("rename(x5y3,(x,y),(y,x))"));
+ TEST_DO(verify_not_optimized("rename(x5y3,(x,y),(b,a))"));
+ TEST_DO(verify_not_optimized("rename(x5y3,(y,x),(a,b))"));
+}
+
+TEST("require that non-dense renames are not optimized") {
+ TEST_DO(verify_not_optimized("rename(x_m,x,y)"));
+}
+
+TEST("require that chained optimized renames are compacted into a single operation") {
+ TEST_DO(verify_optimized("rename(rename(x5,x,y),y,z)"));
+}
+
+TEST("require that optimization works for float cells") {
+ TEST_DO(verify_optimized("rename(x5f,x,y)"));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_inplace_join_function/CMakeLists.txt b/eval/src/tests/instruction/dense_inplace_join_function/CMakeLists.txt
new file mode 100644
index 00000000000..2808675bc78
--- /dev/null
+++ b/eval/src/tests/instruction/dense_inplace_join_function/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_inplace_join_function_test_app TEST
+ SOURCES
+ dense_inplace_join_function_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_inplace_join_function_test_app COMMAND eval_dense_inplace_join_function_test_app)
diff --git a/eval/src/tests/instruction/dense_inplace_join_function/dense_inplace_join_function_test.cpp b/eval/src/tests/instruction/dense_inplace_join_function/dense_inplace_join_function_test.cpp
new file mode 100644
index 00000000000..853607ae76d
--- /dev/null
+++ b/eval/src/tests/instruction/dense_inplace_join_function/dense_inplace_join_function_test.cpp
@@ -0,0 +1,138 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+double seq_value = 0.0;
+
+struct GlobalSequence : public Sequence {
+ GlobalSequence() {}
+ double operator[](size_t) const override {
+ seq_value += 1.0;
+ return seq_value;
+ }
+ ~GlobalSequence() {}
+};
+GlobalSequence seq;
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("con_x5_A", spec({x(5)}, seq))
+ .add("con_x5_B", spec({x(5)}, seq))
+ .add("con_x5_C", spec({x(5)}, seq))
+ .add("con_x5y3_A", spec({x(5),y(3)}, seq))
+ .add("con_x5y3_B", spec({x(5),y(3)}, seq))
+ .add_mutable("mut_dbl_A", spec(1.5))
+ .add_mutable("mut_dbl_B", spec(2.5))
+ .add_mutable("mut_x5_A", spec({x(5)}, seq))
+ .add_mutable("mut_x5_B", spec({x(5)}, seq))
+ .add_mutable("mut_x5_C", spec({x(5)}, seq))
+ .add_mutable("mut_x5f_D", spec(float_cells({x(5)}), seq))
+ .add_mutable("mut_x5f_E", spec(float_cells({x(5)}), seq))
+ .add_mutable("mut_x5y3_A", spec({x(5),y(3)}, seq))
+ .add_mutable("mut_x5y3_B", spec({x(5),y(3)}, seq))
+ .add_mutable("mut_x_sparse", spec({x({"a", "b", "c"})}, seq));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr, size_t param_idx) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ for (size_t i = 0; i < fixture.num_params(); ++i) {
+ TEST_STATE(vespalib::make_string("param %zu", i).c_str());
+ if (i == param_idx) {
+ EXPECT_EQUAL(fixture.get_param(i), fixture.result());
+ } else {
+ EXPECT_NOT_EQUAL(fixture.get_param(i), fixture.result());
+ }
+ }
+}
+
+void verify_p0_optimized(const vespalib::string &expr) {
+ verify_optimized(expr, 0);
+}
+
+void verify_p1_optimized(const vespalib::string &expr) {
+ verify_optimized(expr, 1);
+}
+
+void verify_p2_optimized(const vespalib::string &expr) {
+ verify_optimized(expr, 2);
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ for (size_t i = 0; i < fixture.num_params(); ++i) {
+ EXPECT_NOT_EQUAL(fixture.get_param(i), fixture.result());
+ }
+}
+
+TEST("require that mutable dense concrete tensors are optimized") {
+ TEST_DO(verify_p1_optimized("mut_x5_A-mut_x5_B"));
+ TEST_DO(verify_p0_optimized("mut_x5_A-con_x5_B"));
+ TEST_DO(verify_p1_optimized("con_x5_A-mut_x5_B"));
+ TEST_DO(verify_p1_optimized("mut_x5y3_A-mut_x5y3_B"));
+ TEST_DO(verify_p0_optimized("mut_x5y3_A-con_x5y3_B"));
+ TEST_DO(verify_p1_optimized("con_x5y3_A-mut_x5y3_B"));
+}
+
+TEST("require that self-join operations can be optimized") {
+ TEST_DO(verify_p0_optimized("mut_x5_A+mut_x5_A"));
+}
+
+TEST("require that join(tensor,scalar) operations are optimized") {
+ TEST_DO(verify_p0_optimized("mut_x5_A-mut_dbl_B"));
+ TEST_DO(verify_p1_optimized("mut_dbl_A-mut_x5_B"));
+}
+
+TEST("require that join with different tensor shapes are optimized") {
+ TEST_DO(verify_p1_optimized("mut_x5_A*mut_x5y3_B"));
+}
+
+TEST("require that inplace join operations can be chained") {
+ TEST_DO(verify_p2_optimized("mut_x5_A+(mut_x5_B+mut_x5_C)"));
+ TEST_DO(verify_p0_optimized("(mut_x5_A+con_x5_B)+con_x5_C"));
+ TEST_DO(verify_p1_optimized("con_x5_A+(mut_x5_B+con_x5_C)"));
+ TEST_DO(verify_p2_optimized("con_x5_A+(con_x5_B+mut_x5_C)"));
+}
+
+TEST("require that non-mutable tensors are not optimized") {
+ TEST_DO(verify_not_optimized("con_x5_A+con_x5_B"));
+}
+
+TEST("require that scalar values are not optimized") {
+ TEST_DO(verify_not_optimized("mut_dbl_A+mut_dbl_B"));
+ TEST_DO(verify_not_optimized("mut_dbl_A+5"));
+ TEST_DO(verify_not_optimized("5+mut_dbl_B"));
+}
+
+TEST("require that mapped tensors are not optimized") {
+ TEST_DO(verify_not_optimized("mut_x_sparse+mut_x_sparse"));
+}
+
+TEST("require that optimization works with float cells") {
+ TEST_DO(verify_p1_optimized("mut_x5f_D-mut_x5f_E"));
+}
+
+TEST("require that overwritten value must have same cell type as result") {
+ TEST_DO(verify_p0_optimized("mut_x5_A-mut_x5f_D"));
+ TEST_DO(verify_p1_optimized("mut_x5f_D-mut_x5_A"));
+ TEST_DO(verify_not_optimized("con_x5_A-mut_x5f_D"));
+ TEST_DO(verify_not_optimized("mut_x5f_D-con_x5_A"));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_pow_as_map_optimizer/CMakeLists.txt b/eval/src/tests/instruction/dense_pow_as_map_optimizer/CMakeLists.txt
new file mode 100644
index 00000000000..d6ce9f1924c
--- /dev/null
+++ b/eval/src/tests/instruction/dense_pow_as_map_optimizer/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_pow_as_map_optimizer_test_app TEST
+ SOURCES
+ dense_pow_as_map_optimizer_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_dense_pow_as_map_optimizer_test_app COMMAND eval_dense_pow_as_map_optimizer_test_app)
diff --git a/eval/src/tests/instruction/dense_pow_as_map_optimizer/dense_pow_as_map_optimizer_test.cpp b/eval/src/tests/instruction/dense_pow_as_map_optimizer/dense_pow_as_map_optimizer_test.cpp
new file mode 100644
index 00000000000..0e73a40b81a
--- /dev/null
+++ b/eval/src/tests/instruction/dense_pow_as_map_optimizer/dense_pow_as_map_optimizer_test.cpp
@@ -0,0 +1,92 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/dense_simple_map_function.h>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib::eval::operation;
+using namespace vespalib::eval::tensor_function;
+using namespace vespalib::eval::test;
+using namespace vespalib::eval;
+using namespace vespalib::tensor;
+//using namespace vespalib;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("a", spec(1.5))
+ .add("b", spec(2.5))
+ .add("sparse", spec({x({"a"})}, N()))
+ .add("mixed", spec({x({"a"}),y(5)}, N()))
+ .add_matrix("x", 5, "y", 3);
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr, op1_t op1, bool inplace = false) {
+ EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+ EvalFixture fixture(prod_factory, expr, param_repo, true, true);
+ EXPECT_EQ(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<DenseSimpleMapFunction>();
+ ASSERT_EQ(info.size(), 1u);
+ EXPECT_TRUE(info[0]->result_is_mutable());
+ EXPECT_EQ(info[0]->function(), op1);
+ EXPECT_EQ(info[0]->inplace(), inplace);
+ ASSERT_EQ(fixture.num_params(), 1);
+ if (inplace) {
+ EXPECT_EQ(fixture.get_param(0), fixture.result());
+ } else {
+ EXPECT_TRUE(!(fixture.get_param(0) == fixture.result()));
+ }
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+ EvalFixture fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQ(fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(fixture.result(), slow_fixture.result());
+ auto info = fixture.find_all<Map>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST(PowAsMapTest, squared_dense_tensor_is_optimized) {
+ verify_optimized("x5y3^2.0", Square::f);
+ verify_optimized("pow(x5y3,2.0)", Square::f);
+ verify_optimized("join(x5y3,2.0,f(x,y)(x^y))", Square::f);
+ verify_optimized("join(x5y3,2.0,f(x,y)(pow(x,y)))", Square::f);
+ verify_optimized("join(x5y3f,2.0,f(x,y)(pow(x,y)))", Square::f);
+ verify_optimized("join(@x5y3,2.0,f(x,y)(pow(x,y)))", Square::f, true);
+ verify_optimized("join(@x5y3f,2.0,f(x,y)(pow(x,y)))", Square::f, true);
+}
+
+TEST(PowAsMapTest, cubed_dense_tensor_is_optimized) {
+ verify_optimized("x5y3^3.0", Cube::f);
+ verify_optimized("pow(x5y3,3.0)", Cube::f);
+ verify_optimized("join(x5y3,3.0,f(x,y)(x^y))", Cube::f);
+ verify_optimized("join(x5y3,3.0,f(x,y)(pow(x,y)))", Cube::f);
+ verify_optimized("join(x5y3f,3.0,f(x,y)(pow(x,y)))", Cube::f);
+ verify_optimized("join(@x5y3,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
+ verify_optimized("join(@x5y3f,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
+}
+
+TEST(PowAsMapTest, hypercubed_dense_tensor_is_not_optimized) {
+ verify_not_optimized("join(x5y3,4.0,f(x,y)(pow(x,y)))");
+}
+
+TEST(PowAsMapTest, scalar_join_is_not_optimized) {
+ verify_not_optimized("join(a,2.0,f(x,y)(pow(x,y)))");
+}
+
+TEST(PowAsMapTest, sparse_join_is_not_optimized) {
+ verify_not_optimized("join(sparse,2.0,f(x,y)(pow(x,y)))");
+}
+
+TEST(PowAsMapTest, mixed_join_is_not_optimized) {
+ verify_not_optimized("join(mixed,2.0,f(x,y)(pow(x,y)))");
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/instruction/dense_remove_dimension_optimizer/CMakeLists.txt b/eval/src/tests/instruction/dense_remove_dimension_optimizer/CMakeLists.txt
new file mode 100644
index 00000000000..c945bd31609
--- /dev/null
+++ b/eval/src/tests/instruction/dense_remove_dimension_optimizer/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_remove_dimension_optimizer_test_app TEST
+ SOURCES
+ dense_remove_dimension_optimizer_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_remove_dimension_optimizer_test_app COMMAND eval_dense_remove_dimension_optimizer_test_app)
diff --git a/eval/src/tests/instruction/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp b/eval/src/tests/instruction/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp
new file mode 100644
index 00000000000..32f2fb20c10
--- /dev/null
+++ b/eval/src/tests/instruction/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp
@@ -0,0 +1,81 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/dense_replace_type_function.h>
+#include <vespa/eval/instruction/dense_fast_rename_optimizer.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("x1y5z1", spec({x(1),y(5),z(1)}, N()))
+ .add("x1y5z1f", spec(float_cells({x(1),y(5),z(1)}), N()))
+ .add("x1y1z1", spec({x(1),y(1),z(1)}, N()))
+ .add("x1y5z_m", spec({x(1),y(5),z({"a"})}, N()));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void verify_optimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseReplaceTypeFunction>();
+ EXPECT_EQUAL(info.size(), 1u);
+}
+
+void verify_not_optimized(const vespalib::string &expr) {
+ EvalFixture fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+ auto info = fixture.find_all<DenseReplaceTypeFunction>();
+ EXPECT_TRUE(info.empty());
+}
+
+TEST("require that dimension removal can be optimized for appropriate aggregators") {
+ TEST_DO(verify_optimized("reduce(x1y5z1,avg,x)"));
+ TEST_DO(verify_not_optimized("reduce(x1y5z1,count,x)")); // NB
+ TEST_DO(verify_optimized("reduce(x1y5z1,prod,x)"));
+ TEST_DO(verify_optimized("reduce(x1y5z1,sum,x)"));
+ TEST_DO(verify_optimized("reduce(x1y5z1,max,x)"));
+ TEST_DO(verify_optimized("reduce(x1y5z1,min,x)"));
+}
+
+TEST("require that multi-dimension removal can be optimized") {
+ TEST_DO(verify_optimized("reduce(x1y5z1,sum,x,z)"));
+}
+
+TEST("require that chained dimension removal can be optimized (and compacted)") {
+ TEST_DO(verify_optimized("reduce(reduce(x1y5z1,sum,x),sum,z)"));
+}
+
+TEST("require that reducing non-trivial dimension is not optimized") {
+ TEST_DO(verify_not_optimized("reduce(x1y5z1,sum,y)"));
+ TEST_DO(verify_not_optimized("reduce(x1y5z1,sum,x,y)"));
+ TEST_DO(verify_not_optimized("reduce(x1y5z1,sum,y,z)"));
+}
+
+TEST("require that full reduce is not optimized") {
+ TEST_DO(verify_not_optimized("reduce(x1y1z1,sum)"));
+ TEST_DO(verify_not_optimized("reduce(x1y1z1,sum,x,y,z)"));
+}
+
+TEST("require that inappropriate tensor types cannot be optimized") {
+ TEST_DO(verify_not_optimized("reduce(x1y5z_m,sum,x)"));
+ TEST_DO(verify_not_optimized("reduce(x1y5z_m,sum,z)"));
+}
+
+TEST("require that optimization works for float cells") {
+ TEST_DO(verify_optimized("reduce(x1y5z1f,avg,x)"));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_replace_type_function/CMakeLists.txt b/eval/src/tests/instruction/dense_replace_type_function/CMakeLists.txt
new file mode 100644
index 00000000000..dd4a8a58082
--- /dev/null
+++ b/eval/src/tests/instruction/dense_replace_type_function/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_replace_type_function_test_app TEST
+ SOURCES
+ dense_replace_type_function_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_replace_type_function_test_app COMMAND eval_dense_replace_type_function_test_app)
diff --git a/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
new file mode 100644
index 00000000000..46fd674e15c
--- /dev/null
+++ b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
@@ -0,0 +1,77 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/instruction/dense_replace_type_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+
+using namespace vespalib::eval::tensor_function;
+using namespace vespalib::eval::test;
+using namespace vespalib::eval;
+using namespace vespalib::tensor;
+using namespace vespalib;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+TypedCells getCellsRef(const eval::Value &value) {
+ return value.cells();
+}
+
+struct ChildMock : Leaf {
+ bool is_mutable;
+ ChildMock(const ValueType &type) : Leaf(type), is_mutable(true) {}
+ bool result_is_mutable() const override { return is_mutable; }
+ InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &, Stash &) const override { abort(); }
+};
+
+struct Fixture {
+ Value::UP my_value;
+ ValueType new_type;
+ ChildMock mock_child;
+ DenseReplaceTypeFunction my_fun;
+ std::vector<TensorFunction::Child::CREF> children;
+ InterpretedFunction::State state;
+ Fixture()
+ : my_value(value_from_spec(spec({x(10)}, N()), prod_factory)),
+ new_type(ValueType::from_spec("tensor(x[5],y[2])")),
+ mock_child(my_value->type()),
+ my_fun(new_type, mock_child),
+ children(),
+ state(prod_factory)
+ {
+ my_fun.push_children(children);
+ state.stack.push_back(*my_value);
+ my_fun.compile_self(prod_factory, state.stash).perform(state);
+ ASSERT_EQUAL(children.size(), 1u);
+ ASSERT_EQUAL(state.stack.size(), 1u);
+ ASSERT_TRUE(!new_type.is_error());
+ }
+};
+
+TEST_F("require that DenseReplaceTypeFunction works as expected", Fixture()) {
+ EXPECT_EQUAL(f1.my_fun.result_type(), f1.new_type);
+ EXPECT_EQUAL(f1.my_fun.result_is_mutable(), true);
+ f1.mock_child.is_mutable = false;
+ EXPECT_EQUAL(f1.my_fun.result_is_mutable(), false);
+ EXPECT_EQUAL(&f1.children[0].get().get(), &f1.mock_child);
+ EXPECT_EQUAL(getCellsRef(f1.state.stack[0]).data, getCellsRef(*f1.my_value).data);
+ EXPECT_EQUAL(getCellsRef(f1.state.stack[0]).size, getCellsRef(*f1.my_value).size);
+ EXPECT_EQUAL(f1.state.stack[0].get().type(), f1.new_type);
+ fprintf(stderr, "%s\n", f1.my_fun.as_string().c_str());
+}
+
+TEST("require that create_compact will collapse duplicate replace operations") {
+ Stash stash;
+ ValueType type = ValueType::double_type();
+ ChildMock leaf(type);
+ const DenseReplaceTypeFunction &a = DenseReplaceTypeFunction::create_compact(type, leaf, stash);
+ const DenseReplaceTypeFunction &b = DenseReplaceTypeFunction::create_compact(type, a, stash);
+ EXPECT_EQUAL(a.result_type(), type);
+ EXPECT_EQUAL(&a.child(), &leaf);
+ EXPECT_EQUAL(b.result_type(), type);
+ EXPECT_EQUAL(&b.child(), &leaf);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_simple_join_function/CMakeLists.txt b/eval/src/tests/instruction/dense_simple_join_function/CMakeLists.txt
new file mode 100644
index 00000000000..8a2df392145
--- /dev/null
+++ b/eval/src/tests/instruction/dense_simple_join_function/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_simple_join_function_test_app TEST
+ SOURCES
+ dense_simple_join_function_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_simple_join_function_test_app COMMAND eval_dense_simple_join_function_test_app)
diff --git a/eval/src/tests/instruction/dense_simple_join_function/dense_simple_join_function_test.cpp b/eval/src/tests/instruction/dense_simple_join_function/dense_simple_join_function_test.cpp
new file mode 100644
index 00000000000..e367f94d7d9
--- /dev/null
+++ b/eval/src/tests/instruction/dense_simple_join_function/dense_simple_join_function_test.cpp
@@ -0,0 +1,225 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/dense_simple_join_function.h>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+
+#include <vespa/vespalib/util/stringfmt.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+using vespalib::make_string_short::fmt;
+
+using Primary = DenseSimpleJoinFunction::Primary;
+using Overlap = DenseSimpleJoinFunction::Overlap;
+
+namespace vespalib::tensor {
+
+// Stream printers for the optimizer's nested enums so that failed
+// EXPECT_EQUAL checks print symbolic names instead of raw integers.
+// Placed in this namespace so argument-dependent lookup finds them
+// (Primary/Overlap are nested in DenseSimpleJoinFunction — see aliases above).
+std::ostream &operator<<(std::ostream &os, Primary primary)
+{
+    switch(primary) {
+    case Primary::LHS: return os << "LHS";
+    case Primary::RHS: return os << "RHS";
+    }
+    abort(); // unreachable for valid enum values
+}
+
+std::ostream &operator<<(std::ostream &os, Overlap overlap)
+{
+    switch(overlap) {
+    case Overlap::FULL: return os << "FULL";
+    case Overlap::INNER: return os << "INNER";
+    case Overlap::OUTER: return os << "OUTER";
+    }
+    abort(); // unreachable for valid enum values
+}
+
+}
+
+// Value builder used for all "production" evaluations in this test.
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+// Shared parameter repository: scalars plus sparse/mixed tensors (which the
+// optimizer must leave alone) and dense vectors/matrices/cubes in the shapes
+// the join tests combine. Repeated shapes with distinct cell generators
+// presumably register the '$2' variants referenced in expressions below —
+// TODO confirm against EvalFixture::ParamRepo.
+EvalFixture::ParamRepo make_params() {
+    return EvalFixture::ParamRepo()
+        .add("a", spec(1.5))
+        .add("b", spec(2.5))
+        .add("sparse", spec({x({"a"})}, N()))
+        .add("mixed", spec({x({"a"}),y(5)}, N()))
+        .add_cube("a", 1, "b", 1, "c", 1)
+        .add_cube("x", 1, "y", 1, "z", 1)
+        .add_cube("x", 3, "y", 5, "z", 3)
+        .add_vector("x", 5)
+        .add_dense({{"c", 5}, {"d", 1}})
+        .add_dense({{"b", 1}, {"c", 5}})
+        .add_matrix("x", 3, "y", 5, [](size_t idx) noexcept { return double((idx * 2) + 3); })
+        .add_matrix("x", 3, "y", 5, [](size_t idx) noexcept { return double((idx * 3) + 2); })
+        .add_vector("y", 5, [](size_t idx) noexcept { return double((idx * 2) + 3); })
+        .add_vector("y", 5, [](size_t idx) noexcept { return double((idx * 3) + 2); })
+        .add_matrix("y", 5, "z", 3, [](size_t idx) noexcept { return double((idx * 2) + 3); })
+        .add_matrix("y", 5, "z", 3, [](size_t idx) noexcept { return double((idx * 3) + 2); });
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+// Check that 'expr' is optimized into exactly one DenseSimpleJoinFunction
+// whose primary side, overlap classification, primary mutability and size
+// factor match expectations, and that optimized and unoptimized evaluation
+// agree with the reference result. If p_inplace >= 0, the result must alias
+// parameter p_inplace (in-place evaluation); all other parameters must be
+// left untouched.
+void verify_optimized(const vespalib::string &expr, Primary primary, Overlap overlap, bool pri_mut, size_t factor, int p_inplace = -1) {
+    EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+    EvalFixture fixture(prod_factory, expr, param_repo, true, true);
+    EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+    EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+    auto info = fixture.find_all<DenseSimpleJoinFunction>();
+    ASSERT_EQUAL(info.size(), 1u);
+    EXPECT_TRUE(info[0]->result_is_mutable());
+    EXPECT_EQUAL(info[0]->primary(), primary);
+    EXPECT_EQUAL(info[0]->overlap(), overlap);
+    EXPECT_EQUAL(info[0]->primary_is_mutable(), pri_mut);
+    EXPECT_EQUAL(info[0]->factor(), factor);
+    // -1 disables the in-place check; otherwise the index must be valid
+    EXPECT_TRUE((p_inplace == -1) || (fixture.num_params() > size_t(p_inplace)));
+    for (size_t i = 0; i < fixture.num_params(); ++i) {
+        if (i == size_t(p_inplace)) {
+            EXPECT_EQUAL(fixture.get_param(i), fixture.result());
+        } else {
+            EXPECT_NOT_EQUAL(fixture.get_param(i), fixture.result());
+        }
+    }
+}
+
+// Check that 'expr' still evaluates correctly but that NO
+// DenseSimpleJoinFunction was injected by the optimizer.
+void verify_not_optimized(const vespalib::string &expr) {
+    EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+    EvalFixture fixture(prod_factory, expr, param_repo, true);
+    EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+    EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+    auto info = fixture.find_all<DenseSimpleJoinFunction>();
+    EXPECT_TRUE(info.empty());
+}
+
+TEST("require that basic join is optimized") {
+    TEST_DO(verify_optimized("y5+y5$2", Primary::RHS, Overlap::FULL, false, 1));
+}
+
+TEST("require that unit join is optimized") {
+    // all dimensions trivial (size 1) on both sides
+    TEST_DO(verify_optimized("a1b1c1+x1y1z1", Primary::RHS, Overlap::FULL, false, 1));
+}
+
+TEST("require that trivial dimensions do not affect overlap calculation") {
+    // d1/b1 are trivial, so the shared c5 makes this a FULL overlap
+    TEST_DO(verify_optimized("c5d1+b1c5", Primary::RHS, Overlap::FULL, false, 1));
+}
+
+TEST("require that outer nesting is preferred to inner nesting") {
+    TEST_DO(verify_optimized("a1b1c1+y5", Primary::RHS, Overlap::OUTER, false, 5));
+}
+
+TEST("require that non-subset join is not optimized") {
+    TEST_DO(verify_not_optimized("x5+y5"));
+}
+
+TEST("require that subset join with complex overlap is not optimized") {
+    // y5 sits between x3 and z3; neither INNER nor OUTER applies
+    TEST_DO(verify_not_optimized("x3y5z3+y5"));
+}
+
+// One lhs/rhs operand combination: the two expressions, their dense cell
+// counts, the expected overlap classification, and the derived size factor
+// (ratio of the larger to the smaller operand; the constructor asserts the
+// sizes divide evenly).
+struct LhsRhs {
+    vespalib::string lhs;
+    vespalib::string rhs;
+    size_t lhs_size;
+    size_t rhs_size;
+    Overlap overlap;
+    size_t factor;
+    LhsRhs(const vespalib::string &lhs_in, const vespalib::string &rhs_in,
+           size_t lhs_size_in, size_t rhs_size_in, Overlap overlap_in) noexcept
+        : lhs(lhs_in), rhs(rhs_in), lhs_size(lhs_size_in), rhs_size(rhs_size_in), overlap(overlap_in), factor(1)
+    {
+        if (lhs_size > rhs_size) {
+            ASSERT_EQUAL(lhs_size % rhs_size, 0u);
+            factor = (lhs_size / rhs_size);
+        } else {
+            ASSERT_EQUAL(rhs_size % lhs_size, 0u);
+            factor = (rhs_size / lhs_size);
+        }
+    }
+};
+
+// Decorate a parameter name: '@' prefix marks the parameter mutable,
+// 'f' suffix selects float cells, '$2' selects the second registered
+// variant so lhs and rhs get distinct values.
+vespalib::string adjust_param(const vespalib::string &str, bool float_cells, bool mut_cells, bool is_rhs) {
+    vespalib::string result = str;
+    if (mut_cells) {
+        result = "@" + result;
+    }
+    if (float_cells) {
+        result += "f";
+    }
+    if (is_rhs) {
+        result += "$2";
+    }
+    return result;
+}
+
+// Exhaustive sweep over cell types, mutability, operator and operand shape;
+// for each combination the expected primary side, mutability and in-place
+// parameter are derived with the same rules the optimizer is supposed to use.
+TEST("require that various parameter combinations work") {
+    for (bool left_float: {false, true}) {
+        for (bool right_float: {false, true}) {
+            // result is float only when both inputs are float
+            bool float_result = (left_float && right_float);
+            for (bool left_mut: {false, true}) {
+                for (bool right_mut: {false, true}) {
+                    for (const char *op_pattern: {"%s+%s", "%s-%s", "%s*%s"}) {
+                        for (const LhsRhs &params:
+                             { LhsRhs("y5", "y5", 5, 5, Overlap::FULL),
+                               LhsRhs("y5", "x3y5", 5, 15, Overlap::INNER),
+                               LhsRhs("y5", "y5z3", 5, 15, Overlap::OUTER),
+                               LhsRhs("x3y5", "y5", 15, 5, Overlap::INNER),
+                               LhsRhs("y5z3", "y5", 15, 5, Overlap::OUTER)})
+                        {
+                            vespalib::string left = adjust_param(params.lhs, left_float, left_mut, false);
+                            vespalib::string right = adjust_param(params.rhs, right_float, right_mut, true);
+                            vespalib::string expr = fmt(op_pattern, left.c_str(), right.c_str());
+                            TEST_STATE(expr.c_str());
+                            // Expected primary: the larger operand wins; on FULL
+                            // overlap RHS is preferred unless only LHS is both
+                            // mutable and of the result cell type.
+                            Primary primary = Primary::RHS;
+                            if (params.overlap == Overlap::FULL) {
+                                bool w_lhs = ((left_float == float_result) && left_mut);
+                                bool w_rhs = ((right_float == float_result) && right_mut);
+                                if (w_lhs && !w_rhs) {
+                                    primary = Primary::LHS;
+                                }
+                            } else if (params.lhs_size > params.rhs_size) {
+                                primary = Primary::LHS;
+                            }
+                            bool pri_mut = (primary == Primary::LHS) ? left_mut : right_mut;
+                            bool pri_float = (primary == Primary::LHS) ? left_float : right_float;
+                            // In-place only when the primary is mutable and its
+                            // cell type already matches the result cell type.
+                            int p_inplace = -1;
+                            if (pri_mut && (pri_float == float_result)) {
+                                p_inplace = (primary == Primary::LHS) ? 0 : 1;
+                            }
+                            verify_optimized(expr, primary, params.overlap, pri_mut, params.factor, p_inplace);
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+TEST("require that scalar values are not optimized") {
+    TEST_DO(verify_not_optimized("a+b"));
+    TEST_DO(verify_not_optimized("a+y5"));
+    TEST_DO(verify_not_optimized("y5+b"));
+    TEST_DO(verify_not_optimized("a+sparse"));
+    TEST_DO(verify_not_optimized("sparse+a"));
+    TEST_DO(verify_not_optimized("a+mixed"));
+    TEST_DO(verify_not_optimized("mixed+a"));
+}
+
+TEST("require that mapped tensors are not optimized") {
+    TEST_DO(verify_not_optimized("sparse+sparse"));
+    TEST_DO(verify_not_optimized("sparse+y5"));
+    TEST_DO(verify_not_optimized("y5+sparse"));
+    TEST_DO(verify_not_optimized("sparse+mixed"));
+    TEST_DO(verify_not_optimized("mixed+sparse"));
+}
+
+// Name fixed for consistency with sibling tests ("require that ...").
+TEST("require that mixed tensors are not optimized") {
+    TEST_DO(verify_not_optimized("mixed+mixed"));
+    TEST_DO(verify_not_optimized("mixed+y5"));
+    TEST_DO(verify_not_optimized("y5+mixed"));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_simple_map_function/CMakeLists.txt b/eval/src/tests/instruction/dense_simple_map_function/CMakeLists.txt
new file mode 100644
index 00000000000..8d3bb8c92aa
--- /dev/null
+++ b/eval/src/tests/instruction/dense_simple_map_function/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_simple_map_function_test_app TEST
+ SOURCES
+ dense_simple_map_function_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_dense_simple_map_function_test_app COMMAND eval_dense_simple_map_function_test_app)
diff --git a/eval/src/tests/instruction/dense_simple_map_function/dense_simple_map_function_test.cpp b/eval/src/tests/instruction/dense_simple_map_function/dense_simple_map_function_test.cpp
new file mode 100644
index 00000000000..dc2a5ac77c1
--- /dev/null
+++ b/eval/src/tests/instruction/dense_simple_map_function/dense_simple_map_function_test.cpp
@@ -0,0 +1,75 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/dense_simple_map_function.h>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::eval::tensor_function;
+using namespace vespalib::tensor;
+
+// Value builder used for all "production" evaluations in this test.
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+// Parameters: scalars plus sparse/mixed tensors (must NOT be optimized)
+// and one dense matrix that the map tests operate on.
+EvalFixture::ParamRepo make_params() {
+    return EvalFixture::ParamRepo()
+        .add("a", spec(1.5))
+        .add("b", spec(2.5))
+        .add("sparse", spec({x({"a"})}, N()))
+        .add("mixed", spec({x({"a"}),y(5)}, N()))
+        .add_matrix("x", 5, "y", 3);
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+// Check that 'expr' is optimized into exactly one DenseSimpleMapFunction
+// with the expected inplace flag, and that optimized/unoptimized evaluation
+// agree with the reference result. When inplace, the single parameter must
+// alias the result; otherwise it must be left untouched.
+void verify_optimized(const vespalib::string &expr, bool inplace) {
+    EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+    EvalFixture fixture(prod_factory, expr, param_repo, true, true);
+    EXPECT_EQ(fixture.result(), EvalFixture::ref(expr, param_repo));
+    EXPECT_EQ(fixture.result(), slow_fixture.result());
+    auto info = fixture.find_all<DenseSimpleMapFunction>();
+    ASSERT_EQ(info.size(), 1u);
+    EXPECT_TRUE(info[0]->result_is_mutable());
+    EXPECT_EQ(info[0]->inplace(), inplace);
+    // unsigned literal for consistency with the size check above (avoids
+    // signed/unsigned comparison)
+    ASSERT_EQ(fixture.num_params(), 1u);
+    if (inplace) {
+        EXPECT_EQ(fixture.get_param(0), fixture.result());
+    } else {
+        EXPECT_TRUE(!(fixture.get_param(0) == fixture.result()));
+    }
+}
+
+// Check that 'expr' still evaluates correctly but that NO
+// DenseSimpleMapFunction was injected by the optimizer.
+void verify_not_optimized(const vespalib::string &expr) {
+    EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+    EvalFixture fixture(prod_factory, expr, param_repo, true);
+    EXPECT_EQ(fixture.result(), EvalFixture::ref(expr, param_repo));
+    EXPECT_EQ(fixture.result(), slow_fixture.result());
+    auto info = fixture.find_all<DenseSimpleMapFunction>();
+    EXPECT_TRUE(info.empty());
+}
+
+TEST(MapTest, dense_map_is_optimized) {
+    verify_optimized("map(x5y3,f(x)(x+10))", false);
+    verify_optimized("map(x5y3f,f(x)(x+10))", false); // float cells
+}
+
+TEST(MapTest, simple_dense_map_can_be_inplace) {
+    // '@' marks the parameter as mutable, enabling in-place evaluation
+    verify_optimized("map(@x5y3,f(x)(x+10))", true);
+    verify_optimized("map(@x5y3f,f(x)(x+10))", true);
+}
+
+TEST(MapTest, scalar_map_is_not_optimized) {
+    verify_not_optimized("map(a,f(x)(x+10))");
+}
+
+TEST(MapTest, sparse_map_is_not_optimized) {
+    verify_not_optimized("map(sparse,f(x)(x+10))");
+}
+
+TEST(MapTest, mixed_map_is_not_optimized) {
+    verify_not_optimized("map(mixed,f(x)(x+10))");
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/instruction/dense_single_reduce_function/CMakeLists.txt b/eval/src/tests/instruction/dense_single_reduce_function/CMakeLists.txt
new file mode 100644
index 00000000000..42b00699c31
--- /dev/null
+++ b/eval/src/tests/instruction/dense_single_reduce_function/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_single_reduce_function_test_app TEST
+ SOURCES
+ dense_single_reduce_function_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_single_reduce_function_test_app COMMAND eval_dense_single_reduce_function_test_app)
diff --git a/eval/src/tests/instruction/dense_single_reduce_function/dense_single_reduce_function_test.cpp b/eval/src/tests/instruction/dense_single_reduce_function/dense_single_reduce_function_test.cpp
new file mode 100644
index 00000000000..d9a6a14108f
--- /dev/null
+++ b/eval/src/tests/instruction/dense_single_reduce_function/dense_single_reduce_function_test.cpp
@@ -0,0 +1,170 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/instruction/dense_single_reduce_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+// Value builder used for all "production" evaluations in this test.
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+// Parameters: dense tensors of various ranks/sizes plus a mapped and a
+// mixed tensor that the reduce optimizer must leave alone.
+EvalFixture::ParamRepo make_params() {
+    return EvalFixture::ParamRepo()
+        .add_dense({{"a", 2}, {"b", 3}, {"c", 4}, {"d", 5}})
+        .add_dense({{"a", 9}, {"b", 9}, {"c", 9}, {"d", 9}})
+        .add_cube("a", 2, "b", 1, "c", 1)
+        .add_cube("a", 1, "b", 2, "c", 1)
+        .add_cube("a", 1, "b", 1, "c", 2)
+        .add_cube("a", 1, "b", 1, "c", 1)
+        .add_vector("a", 10)
+        .add("xy_mapped", spec({x({"a", "b"}),y({"x", "y"})}, N()))
+        .add("xyz_mixed", spec({x({"a", "b"}),y({"x", "y"}),z(3)}, N()));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+// Expected shape of one single-reduce step: the dense iteration space is
+// (outer_size x reduce_size x inner_size) with 'aggr' applied along the
+// middle (reduced) extent.
+struct ReduceSpec {
+    size_t outer_size;
+    size_t reduce_size;
+    size_t inner_size;
+    Aggr aggr;
+};
+
+// Check that 'expr' evaluates to the reference result and was rewritten into
+// exactly spec_list.size() DenseSingleReduceFunction steps whose geometry and
+// aggregator match, in order.
+void verify_optimized_impl(const vespalib::string &expr, const std::vector<ReduceSpec> &spec_list) {
+    EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+    EvalFixture fixture(prod_factory, expr, param_repo, true);
+    EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+    EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+    auto info = fixture.find_all<DenseSingleReduceFunction>();
+    ASSERT_EQUAL(info.size(), spec_list.size());
+    for (size_t i = 0; i < spec_list.size(); ++i) {
+        EXPECT_TRUE(info[i]->result_is_mutable());
+        EXPECT_EQUAL(info[i]->outer_size(), spec_list[i].outer_size);
+        EXPECT_EQUAL(info[i]->reduce_size(), spec_list[i].reduce_size);
+        EXPECT_EQUAL(info[i]->inner_size(), spec_list[i].inner_size);
+        // compare via int since Aggr has no stream printer here
+        EXPECT_EQUAL(int(info[i]->aggr()), int(spec_list[i].aggr));
+    }
+}
+
+// Convenience wrapper: expect a single reduce step.
+void verify_optimized(const vespalib::string &expr, const ReduceSpec &spec) {
+    verify_optimized_impl(expr, {spec});
+}
+
+// Convenience wrapper: expect a decomposition into two reduce steps.
+void verify_optimized(const vespalib::string &expr, const ReduceSpec &spec1, const ReduceSpec &spec2) {
+    verify_optimized_impl(expr, {spec1, spec2});
+}
+
+// Check that 'expr' still evaluates correctly but that NO
+// DenseSingleReduceFunction was injected by the optimizer.
+void verify_not_optimized(const vespalib::string &expr) {
+    EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+    EvalFixture fixture(prod_factory, expr, param_repo, true);
+    EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+    EXPECT_EQUAL(fixture.result(), slow_fixture.result());
+    auto info = fixture.find_all<DenseSingleReduceFunction>();
+    EXPECT_TRUE(info.empty());
+}
+
+TEST("require that reduce to scalar is not optimized") {
+    TEST_DO(verify_not_optimized("reduce(a10,sum,a)"));
+    TEST_DO(verify_not_optimized("reduce(a10,sum)"));
+}
+
+TEST("require that sparse reduce is not optimized") {
+    TEST_DO(verify_not_optimized("reduce(xy_mapped,sum,x)"));
+    TEST_DO(verify_not_optimized("reduce(xy_mapped,sum,y)"));
+}
+
+TEST("require that mixed reduce is not optimized") {
+    TEST_DO(verify_not_optimized("reduce(xyz_mixed,sum,x)"));
+    TEST_DO(verify_not_optimized("reduce(xyz_mixed,sum,y)"));
+    TEST_DO(verify_not_optimized("reduce(xyz_mixed,sum,z)"));
+}
+
+TEST("require that reducing trivial dimensions is not optimized") {
+    // all dimensions of a1b1c1 have size 1, so there is nothing to reduce
+    TEST_DO(verify_not_optimized("reduce(a1b1c1,avg,c)"));
+    TEST_DO(verify_not_optimized("reduce(a1b1c1,count,c)"));
+    TEST_DO(verify_not_optimized("reduce(a1b1c1,prod,c)"));
+    TEST_DO(verify_not_optimized("reduce(a1b1c1,sum,c)"));
+    TEST_DO(verify_not_optimized("reduce(a1b1c1,max,c)"));
+    TEST_DO(verify_not_optimized("reduce(a1b1c1,median,c)"));
+    TEST_DO(verify_not_optimized("reduce(a1b1c1,min,c)"));
+}
+
+TEST("require that atleast_8 dense single reduce works") {
+    // 9^4 tensor: expected {outer, reduce, inner} follows dimension position
+    TEST_DO(verify_optimized("reduce(a9b9c9d9,avg,a)", {1, 9, 729, Aggr::AVG}));
+    TEST_DO(verify_optimized("reduce(a9b9c9d9,avg,b)", {9, 9, 81, Aggr::AVG}));
+    TEST_DO(verify_optimized("reduce(a9b9c9d9,avg,c)", {81, 9, 9, Aggr::AVG}));
+    TEST_DO(verify_optimized("reduce(a9b9c9d9,avg,d)", {729, 9, 1, Aggr::AVG}));
+    TEST_DO(verify_optimized("reduce(a9b9c9d9,sum,c,d)", {81, 81, 1, Aggr::SUM}));
+}
+
+TEST("require that simple aggregators can be decomposed into multiple reduce operations") {
+    // non-adjacent dimensions (a and c) need two chained reduce steps
+    TEST_DO(verify_optimized("reduce(a2b3c4d5,sum,a,c)", {3, 4, 5, Aggr::SUM}, {1, 2, 60, Aggr::SUM}));
+    TEST_DO(verify_optimized("reduce(a2b3c4d5,min,a,c)", {3, 4, 5, Aggr::MIN}, {1, 2, 60, Aggr::MIN}));
+    TEST_DO(verify_optimized("reduce(a2b3c4d5,max,a,c)", {3, 4, 5, Aggr::MAX}, {1, 2, 60, Aggr::MAX}));
+}
+
+TEST("require that reduce dimensions can be listed in reverse order") {
+    TEST_DO(verify_optimized("reduce(a2b3c4d5,sum,c,a)", {3, 4, 5, Aggr::SUM}, {1, 2, 60, Aggr::SUM}));
+    TEST_DO(verify_optimized("reduce(a2b3c4d5,min,c,a)", {3, 4, 5, Aggr::MIN}, {1, 2, 60, Aggr::MIN}));
+    TEST_DO(verify_optimized("reduce(a2b3c4d5,max,c,a)", {3, 4, 5, Aggr::MAX}, {1, 2, 60, Aggr::MAX}));
+}
+
+TEST("require that non-simple aggregators cannot be decomposed into multiple reduce operations") {
+    TEST_DO(verify_not_optimized("reduce(a2b3c4d5,avg,a,c)"));
+    TEST_DO(verify_not_optimized("reduce(a2b3c4d5,count,a,c)"));
+    TEST_DO(verify_not_optimized("reduce(a2b3c4d5,median,a,c)"));
+}
+
+// Build "reduce(<arg>[f],<aggr>,<dim>)"; the 'f' suffix selects float cells.
+vespalib::string make_expr(const vespalib::string &arg, const vespalib::string &dim, bool float_cells, Aggr aggr) {
+    return make_string("reduce(%s%s,%s,%s)", arg.c_str(), float_cells ? "f" : "", AggrNames::name_of(aggr)->c_str(), dim.c_str());
+}
+
+// Run the single-reduce check for both cell types and every aggregator.
+// Aggr::PROD is skipped — presumably excluded from this optimization; TODO
+// confirm against DenseSingleReduceFunction::optimize.
+void verify_optimized_multi(const vespalib::string &arg, const vespalib::string &dim, size_t outer_size, size_t reduce_size, size_t inner_size) {
+    for (bool float_cells: {false, true}) {
+        for (Aggr aggr: Aggregator::list()) {
+            if (aggr != Aggr::PROD) {
+                auto expr = make_expr(arg, dim, float_cells, aggr);
+                TEST_DO(verify_optimized(expr, {outer_size, reduce_size, inner_size, aggr}));
+            }
+        }
+    }
+}
+
+TEST("require that normal dense single reduce works") {
+    TEST_DO(verify_optimized_multi("a2b3c4d5", "a", 1, 2, 60));
+    TEST_DO(verify_optimized_multi("a2b3c4d5", "b", 2, 3, 20));
+    TEST_DO(verify_optimized_multi("a2b3c4d5", "c", 6, 4, 5));
+    TEST_DO(verify_optimized_multi("a2b3c4d5", "d", 24, 5, 1));
+}
+
+TEST("require that dimension-combined dense single reduce works") {
+    // adjacent dimensions fuse into one reduce step
+    TEST_DO(verify_optimized_multi("a2b3c4d5", "a,b", 1, 6, 20));
+    TEST_DO(verify_optimized_multi("a2b3c4d5", "b,c", 2, 12, 5));
+    TEST_DO(verify_optimized_multi("a2b3c4d5", "c,d", 6, 20, 1));
+}
+
+TEST("require that minimal dense single reduce works") {
+    TEST_DO(verify_optimized_multi("a2b1c1", "a", 1, 2, 1));
+    TEST_DO(verify_optimized_multi("a1b2c1", "b", 1, 2, 1));
+    TEST_DO(verify_optimized_multi("a1b1c2", "c", 1, 2, 1));
+}
+
+TEST("require that trivial dimensions can be trivially reduced") {
+    // reducing a size-1 dimension together with a real one changes nothing
+    TEST_DO(verify_optimized_multi("a2b1c1", "a,b", 1, 2, 1));
+    TEST_DO(verify_optimized_multi("a2b1c1", "a,c", 1, 2, 1));
+    TEST_DO(verify_optimized_multi("a1b2c1", "b,a", 1, 2, 1));
+    TEST_DO(verify_optimized_multi("a1b2c1", "b,c", 1, 2, 1));
+    TEST_DO(verify_optimized_multi("a1b1c2", "c,a", 1, 2, 1));
+    TEST_DO(verify_optimized_multi("a1b1c2", "c,b", 1, 2, 1));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/dense_tensor_create_function/CMakeLists.txt b/eval/src/tests/instruction/dense_tensor_create_function/CMakeLists.txt
new file mode 100644
index 00000000000..883f331bda8
--- /dev/null
+++ b/eval/src/tests/instruction/dense_tensor_create_function/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_tensor_create_function_test_app TEST
+ SOURCES
+ dense_tensor_create_function_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_tensor_create_function_test_app COMMAND eval_dense_tensor_create_function_test_app)
diff --git a/eval/src/tests/instruction/dense_tensor_create_function/dense_tensor_create_function_test.cpp b/eval/src/tests/instruction/dense_tensor_create_function/dense_tensor_create_function_test.cpp
new file mode 100644
index 00000000000..c063415957a
--- /dev/null
+++ b/eval/src/tests/instruction/dense_tensor_create_function/dense_tensor_create_function_test.cpp
@@ -0,0 +1,60 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/dense_tensor_create_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+// Value builder used for all "production" evaluations in this test.
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+// Three scalar parameters referenced as cell values in the create tests.
+EvalFixture::ParamRepo make_params() {
+    return EvalFixture::ParamRepo()
+        .add("a", spec(1.0))
+        .add("b", spec(2.0))
+        .add("c", spec(3.0));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+// Check that 'expr' evaluates to the reference result and was rewritten into
+// the expected number of DenseTensorCreateFunction nodes, with the expected
+// number of generic Create nodes left untouched.
+void verify(const vespalib::string &expr, size_t expect_optimized_cnt, size_t expect_not_optimized_cnt) {
+    EvalFixture fixture(prod_factory, expr, param_repo, true);
+    EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+    auto info = fixture.find_all<DenseTensorCreateFunction>();
+    EXPECT_EQUAL(info.size(), expect_optimized_cnt);
+    for (size_t i = 0; i < info.size(); ++i) {
+        EXPECT_TRUE(info[i]->result_is_mutable());
+    }
+    EXPECT_EQUAL(fixture.find_all<Create>().size(), expect_not_optimized_cnt);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that tensor create can be optimized") {
+    TEST_DO(verify("tensor(x[3]):{{x:0}:1,{x:1}:2,{x:2}:3}", 0, 0)); // NB: const value
+    TEST_DO(verify("tensor(x[3]):{{x:0}:a,{x:1}:b,{x:2}:c}", 1, 0));
+    TEST_DO(verify("tensor<float>(x[3]):{{x:0}:a,{x:1}:b,{x:2}:c}", 1, 0));
+    TEST_DO(verify("tensor(x[3]):{{x:0}:a+b,{x:1}:b-c,{x:2}:c*a}", 1, 0));
+}
+
+TEST("require that tensor create can be optimized with missing cells (padded with 0.0)") {
+    TEST_DO(verify("tensor(x[3],y[5]):{{x:0,y:1}:a,{x:1,y:3}:b,{x:2,y:4}:c}", 1, 0));
+}
+
+// test names below fixed: "in not" -> "is not"
+TEST("require that tensor create is not optimized for sparse tensor") {
+    TEST_DO(verify("tensor(x{}):{{x:0}:a,{x:1}:b,{x:2}:c}", 0, 1));
+}
+
+TEST("require that tensor create is not optimized for mixed tensor") {
+    TEST_DO(verify("tensor(x{},y[3]):{{x:a,y:0}:a,{x:a,y:1}:b,{x:a,y:2}:c}", 0, 1));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/vector_from_doubles_function/CMakeLists.txt b/eval/src/tests/instruction/vector_from_doubles_function/CMakeLists.txt
new file mode 100644
index 00000000000..5b2e47ec498
--- /dev/null
+++ b/eval/src/tests/instruction/vector_from_doubles_function/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_vector_from_doubles_function_test_app TEST
+ SOURCES
+ vector_from_doubles_function_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_vector_from_doubles_function_test_app COMMAND eval_vector_from_doubles_function_test_app)
diff --git a/eval/src/tests/instruction/vector_from_doubles_function/vector_from_doubles_function_test.cpp b/eval/src/tests/instruction/vector_from_doubles_function/vector_from_doubles_function_test.cpp
new file mode 100644
index 00000000000..1cbfade6c45
--- /dev/null
+++ b/eval/src/tests/instruction/vector_from_doubles_function/vector_from_doubles_function_test.cpp
@@ -0,0 +1,59 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/vector_from_doubles_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/eval_fixture.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::tensor_function;
+
+// Value builder used for all "production" evaluations in this test.
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+// Four scalar parameters to concatenate, plus a vector used to check that
+// mixed scalar/vector concat is not optimized.
+EvalFixture::ParamRepo make_params() {
+    return EvalFixture::ParamRepo()
+        .add("a", spec(1.0))
+        .add("b", spec(2.0))
+        .add("c", spec(3.0))
+        .add("d", spec(4.0))
+        .add("x5", spec({x(5)}, N()));
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+// Check that 'expr' evaluates to the reference result and was rewritten into
+// the expected number of VectorFromDoublesFunction nodes, with the expected
+// number of generic Concat nodes left untouched.
+void verify(const vespalib::string &expr, size_t expect_optimized_cnt, size_t expect_not_optimized_cnt) {
+    EvalFixture fixture(prod_factory, expr, param_repo, true);
+    EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
+    auto info = fixture.find_all<VectorFromDoublesFunction>();
+    EXPECT_EQUAL(info.size(), expect_optimized_cnt);
+    for (size_t i = 0; i < info.size(); ++i) {
+        EXPECT_TRUE(info[i]->result_is_mutable());
+    }
+    EXPECT_EQUAL(fixture.find_all<Concat>().size(), expect_not_optimized_cnt);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that multiple concats are optimized") {
+    // nested concats of scalars along one dimension fold into a single node
+    TEST_DO(verify("concat(a,b,x)", 1, 0));
+    TEST_DO(verify("concat(a,concat(b,concat(c,d,x),x),x)", 1, 0));
+    TEST_DO(verify("concat(concat(concat(a,b,x),c,x),d,x)", 1, 0));
+    TEST_DO(verify("concat(concat(a,b,x),concat(c,d,x),x)", 1, 0));
+}
+
+TEST("require that concat along different dimension is not optimized") {
+    // inner x-concats are optimized (2), the outer y-concat is not (1)
+    TEST_DO(verify("concat(concat(a,b,x),concat(c,d,x),y)", 2, 1));
+}
+
+TEST("require that concat of vector and double is not optimized") {
+    TEST_DO(verify("concat(a,x5,x)", 0, 1));
+    TEST_DO(verify("concat(x5,b,x)", 0, 1));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }