summaryrefslogtreecommitdiffstats
path: root/eval/src/tests
diff options
context:
space:
mode:
Diffstat (limited to 'eval/src/tests')
-rw-r--r--eval/src/tests/eval/simple_value/simple_value_test.cpp65
-rw-r--r--eval/src/tests/eval/value_codec/CMakeLists.txt10
-rw-r--r--eval/src/tests/eval/value_codec/value_codec_test.cpp271
-rw-r--r--eval/src/tests/eval/value_type/value_type_test.cpp9
-rw-r--r--eval/src/tests/instruction/generic_join/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/generic_join/generic_join_test.cpp138
-rw-r--r--eval/src/tests/instruction/generic_rename/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/generic_rename/generic_rename_test.cpp145
-rw-r--r--eval/src/tests/tensor/direct_sparse_tensor_builder/direct_sparse_tensor_builder_test.cpp2
-rw-r--r--eval/src/tests/tensor/instruction_benchmark/.gitignore1
-rw-r--r--eval/src/tests/tensor/instruction_benchmark/CMakeLists.txt8
-rw-r--r--eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp321
-rw-r--r--eval/src/tests/tensor/packed_mappings/CMakeLists.txt19
-rw-r--r--eval/src/tests/tensor/packed_mappings/packed_mappings_test.cpp226
-rw-r--r--eval/src/tests/tensor/packed_mappings/packed_mixed_test.cpp153
-rw-r--r--eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp21
16 files changed, 1354 insertions, 53 deletions
diff --git a/eval/src/tests/eval/simple_value/simple_value_test.cpp b/eval/src/tests/eval/simple_value/simple_value_test.cpp
index 32a099afce3..4827fa3be3c 100644
--- a/eval/src/tests/eval/simple_value/simple_value_test.cpp
+++ b/eval/src/tests/eval/simple_value/simple_value_test.cpp
@@ -1,12 +1,16 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/instruction/generic_join.h>
+#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/tensor_model.hpp>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib;
using namespace vespalib::eval;
+using namespace vespalib::eval::instruction;
using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
@@ -62,24 +66,27 @@ TensorSpec simple_tensor_join(const TensorSpec &a, const TensorSpec &b, join_fun
}
TensorSpec simple_value_new_join(const TensorSpec &a, const TensorSpec &b, join_fun_t function) {
- auto lhs = new_value_from_spec(a, SimpleValueBuilderFactory());
- auto rhs = new_value_from_spec(b, SimpleValueBuilderFactory());
- auto result = new_join(*lhs, *rhs, function, SimpleValueBuilderFactory());
- return spec_from_new_value(*result);
+ Stash stash;
+ const auto &factory = SimpleValueBuilderFactory::get();
+ auto lhs = value_from_spec(a, factory);
+ auto rhs = value_from_spec(b, factory);
+ auto my_op = GenericJoin::make_instruction(lhs->type(), rhs->type(), function, factory, stash);
+ InterpretedFunction::EvalSingle single(my_op);
+ return spec_from_value(single.eval(std::vector<Value::CREF>({*lhs,*rhs})));
}
TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
TensorSpec expect = spec(layout, N());
- std::unique_ptr<NewValue> value = new_value_from_spec(expect, SimpleValueBuilderFactory());
- TensorSpec actual = spec_from_new_value(*value);
+ std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
+ TensorSpec actual = spec_from_value(*value);
EXPECT_EQ(actual, expect);
}
}
TEST(SimpleValueTest, simple_value_can_be_built_and_inspected) {
ValueType type = ValueType::from_spec("tensor<float>(x{},y[2],z{})");
- SimpleValueBuilderFactory factory;
+ const auto &factory = SimpleValueBuilderFactory::get();
std::unique_ptr<ValueBuilder<float>> builder = factory.create_value_builder<float>(type);
float seq = 0.0;
for (vespalib::string x: {"a", "b", "c"}) {
@@ -92,7 +99,7 @@ TEST(SimpleValueTest, simple_value_can_be_built_and_inspected) {
}
seq += 100.0;
}
- std::unique_ptr<NewValue> value = builder->build(std::move(builder));
+ std::unique_ptr<Value> value = builder->build(std::move(builder));
EXPECT_EQ(value->index().size(), 6);
auto view = value->index().create_view({0});
vespalib::stringref query = "b";
@@ -108,48 +115,6 @@ TEST(SimpleValueTest, simple_value_can_be_built_and_inspected) {
EXPECT_FALSE(view->next_result({&label}, subspace));
}
-TEST(SimpleValueTest, dense_join_plan_can_be_created) {
- auto lhs = ValueType::from_spec("tensor(a{},b[6],c[5],e[3],f[2],g{})");
- auto rhs = ValueType::from_spec("tensor(a{},b[6],c[5],d[4],h{})");
- auto plan = DenseJoinPlan(lhs, rhs);
- std::vector<size_t> expect_loop = {30,4,6};
- std::vector<size_t> expect_lhs_stride = {6,0,1};
- std::vector<size_t> expect_rhs_stride = {4,1,0};
- EXPECT_EQ(plan.lhs_size, 180);
- EXPECT_EQ(plan.rhs_size, 120);
- EXPECT_EQ(plan.out_size, 720);
- EXPECT_EQ(plan.loop_cnt, expect_loop);
- EXPECT_EQ(plan.lhs_stride, expect_lhs_stride);
- EXPECT_EQ(plan.rhs_stride, expect_rhs_stride);
-}
-
-TEST(SimpleValueTest, sparse_join_plan_can_be_created) {
- auto lhs = ValueType::from_spec("tensor(a{},b[6],c[5],e[3],f[2],g{})");
- auto rhs = ValueType::from_spec("tensor(b[6],c[5],d[4],g{},h{})");
- auto plan = SparseJoinPlan(lhs, rhs);
- using SRC = SparseJoinPlan::Source;
- std::vector<SRC> expect_sources = {SRC::LHS,SRC::BOTH,SRC::RHS};
- std::vector<size_t> expect_lhs_overlap = {1};
- std::vector<size_t> expect_rhs_overlap = {0};
- EXPECT_EQ(plan.sources, expect_sources);
- EXPECT_EQ(plan.lhs_overlap, expect_lhs_overlap);
- EXPECT_EQ(plan.rhs_overlap, expect_rhs_overlap);
-}
-
-TEST(SimpleValueTest, dense_join_plan_can_be_executed) {
- auto plan = DenseJoinPlan(ValueType::from_spec("tensor(a[2])"),
- ValueType::from_spec("tensor(b[3])"));
- std::vector<int> a({1, 2});
- std::vector<int> b({3, 4, 5});
- std::vector<int> c(6, 0);
- std::vector<int> expect = {3,4,5,6,8,10};
- ASSERT_EQ(plan.out_size, 6);
- int *dst = &c[0];
- auto cell_join = [&](size_t a_idx, size_t b_idx) { *dst++ = (a[a_idx] * b[b_idx]); };
- plan.execute(0, 0, cell_join);
- EXPECT_EQ(c, expect);
-}
-
TEST(SimpleValueTest, new_generic_join_works_for_simple_values) {
ASSERT_TRUE((join_layouts.size() % 2) == 0);
for (size_t i = 0; i < join_layouts.size(); i += 2) {
diff --git a/eval/src/tests/eval/value_codec/CMakeLists.txt b/eval/src/tests/eval/value_codec/CMakeLists.txt
new file mode 100644
index 00000000000..aa1adf10136
--- /dev/null
+++ b/eval/src/tests/eval/value_codec/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+vespa_add_executable(eval_value_codec_test_app TEST
+ SOURCES
+ value_codec_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_value_codec_test_app COMMAND eval_value_codec_test_app)
diff --git a/eval/src/tests/eval/value_codec/value_codec_test.cpp b/eval/src/tests/eval/value_codec/value_codec_test.cpp
new file mode 100644
index 00000000000..00f37e1f87a
--- /dev/null
+++ b/eval/src/tests/eval/value_codec/value_codec_test.cpp
@@ -0,0 +1,271 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <iostream>
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/vespalib/data/memory.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/objects/nbostream.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+
+const ValueBuilderFactory &factory = SimpleValueBuilderFactory::get();
+
+std::vector<Layout> layouts = {
+ {},
+ {x(3)},
+ {x(3),y(5)},
+ {x(3),y(5),z(7)},
+ float_cells({x(3),y(5),z(7)}),
+ {x({"a","b","c"})},
+ {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
+ float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
+ {x(3),y({"foo", "bar"}),z(7)},
+ {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
+ float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+};
+
+
+TEST(ValueCodecTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
+ for (const auto &layout: layouts) {
+ TensorSpec expect = spec(layout, N());
+ std::unique_ptr<Value> value = value_from_spec(expect, factory);
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
+}
+
+TEST(ValueCodecTest, simple_values_can_be_built_using_tensor_spec) {
+ TensorSpec spec("tensor(w{},x[2],y{},z[2])");
+ spec.add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
+ Value::UP tensor = value_from_spec(spec, factory);
+ TensorSpec full_spec("tensor(w{},x[2],y{},z[2])");
+ full_spec
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
+ Value::UP full_tensor = value_from_spec(full_spec, factory);
+    EXPECT_EQ(full_spec, spec_from_value(*tensor));
+    EXPECT_EQ(full_spec, spec_from_value(*full_tensor));
+}
+
+//-----------------------------------------------------------------------------
+
+vespalib::string make_type_spec(bool use_float, const vespalib::string &dims) {
+ vespalib::string type_spec = "tensor";
+ if (use_float) {
+ type_spec.append("<float>");
+ }
+ type_spec.append(dims);
+ return type_spec;
+}
+
+struct TensorExample {
+ virtual ~TensorExample();
+ virtual TensorSpec make_spec(bool use_float) const = 0;
+ virtual std::unique_ptr<Value> make_tensor(bool use_float) const = 0;
+ virtual void encode_default(nbostream &dst) const = 0;
+ virtual void encode_with_double(nbostream &dst) const = 0;
+ virtual void encode_with_float(nbostream &dst) const = 0;
+ void verify_encode_decode() const {
+ nbostream expect_default;
+ nbostream expect_double;
+ nbostream expect_float;
+ encode_default(expect_default);
+ encode_with_double(expect_double);
+ encode_with_float(expect_float);
+ nbostream data_double;
+ nbostream data_float;
+ encode_value(*make_tensor(false), data_double);
+ encode_value(*make_tensor(true), data_float);
+ EXPECT_EQ(Memory(data_double.peek(), data_double.size()),
+ Memory(expect_default.peek(), expect_default.size()));
+ EXPECT_EQ(Memory(data_float.peek(), data_float.size()),
+ Memory(expect_float.peek(), expect_float.size()));
+ EXPECT_EQ(spec_from_value(*decode_value(expect_default, factory)), make_spec(false));
+ EXPECT_EQ(spec_from_value(*decode_value(expect_double, factory)), make_spec(false));
+ EXPECT_EQ(spec_from_value(*decode_value(expect_float, factory)), make_spec(true));
+ }
+};
+TensorExample::~TensorExample() = default;
+
+//-----------------------------------------------------------------------------
+
+struct SparseTensorExample : TensorExample {
+ TensorSpec make_spec(bool use_float) const override {
+ return TensorSpec(make_type_spec(use_float, "(x{},y{})"))
+ .add({{"x","a"},{"y","a"}}, 1)
+ .add({{"x","a"},{"y","b"}}, 2)
+ .add({{"x","b"},{"y","a"}}, 3);
+ }
+ std::unique_ptr<Value> make_tensor(bool use_float) const override {
+ return value_from_spec(make_spec(use_float), factory);
+ }
+ template <typename T>
+ void encode_inner(nbostream &dst) const {
+ dst.putInt1_4Bytes(2);
+ dst.writeSmallString("x");
+ dst.writeSmallString("y");
+ dst.putInt1_4Bytes(3);
+ dst.writeSmallString("a");
+ dst.writeSmallString("a");
+ dst << (T) 1;
+ dst.writeSmallString("a");
+ dst.writeSmallString("b");
+ dst << (T) 2;
+ dst.writeSmallString("b");
+ dst.writeSmallString("a");
+ dst << (T) 3;
+ }
+ void encode_default(nbostream &dst) const override {
+ dst.putInt1_4Bytes(1);
+ encode_inner<double>(dst);
+ }
+ void encode_with_double(nbostream &dst) const override {
+ dst.putInt1_4Bytes(5);
+ dst.putInt1_4Bytes(0);
+ encode_inner<double>(dst);
+ }
+ void encode_with_float(nbostream &dst) const override {
+ dst.putInt1_4Bytes(5);
+ dst.putInt1_4Bytes(1);
+ encode_inner<float>(dst);
+ }
+};
+
+TEST(ValueCodecTest, sparse_tensors_can_be_encoded_and_decoded) {
+ SparseTensorExample f1;
+ f1.verify_encode_decode();
+}
+
+//-----------------------------------------------------------------------------
+
+struct DenseTensorExample : TensorExample {
+ TensorSpec make_spec(bool use_float) const override {
+ return TensorSpec(make_type_spec(use_float, "(x[3],y[2])"))
+ .add({{"x",0},{"y",0}}, 1)
+ .add({{"x",0},{"y",1}}, 2)
+ .add({{"x",1},{"y",0}}, 3)
+ .add({{"x",1},{"y",1}}, 4)
+ .add({{"x",2},{"y",0}}, 5)
+ .add({{"x",2},{"y",1}}, 6);
+ }
+ std::unique_ptr<Value> make_tensor(bool use_float) const override {
+ return value_from_spec(make_spec(use_float), factory);
+ }
+ template <typename T>
+ void encode_inner(nbostream &dst) const {
+ dst.putInt1_4Bytes(2);
+ dst.writeSmallString("x");
+ dst.putInt1_4Bytes(3);
+ dst.writeSmallString("y");
+ dst.putInt1_4Bytes(2);
+ dst << (T) 1;
+ dst << (T) 2;
+ dst << (T) 3;
+ dst << (T) 4;
+ dst << (T) 5;
+ dst << (T) 6;
+ }
+ void encode_default(nbostream &dst) const override {
+ dst.putInt1_4Bytes(2);
+ encode_inner<double>(dst);
+ }
+ void encode_with_double(nbostream &dst) const override {
+ dst.putInt1_4Bytes(6);
+ dst.putInt1_4Bytes(0);
+ encode_inner<double>(dst);
+ }
+ void encode_with_float(nbostream &dst) const override {
+ dst.putInt1_4Bytes(6);
+ dst.putInt1_4Bytes(1);
+ encode_inner<float>(dst);
+ }
+};
+
+TEST(ValueCodecTest, dense_tensors_can_be_encoded_and_decoded) {
+ DenseTensorExample f1;
+ f1.verify_encode_decode();
+}
+
+//-----------------------------------------------------------------------------
+
+struct MixedTensorExample : TensorExample {
+ TensorSpec make_spec(bool use_float) const override {
+ return TensorSpec(make_type_spec(use_float, "(x{},y{},z[2])"))
+ .add({{"x","a"},{"y","a"},{"z",0}}, 1)
+ .add({{"x","a"},{"y","a"},{"z",1}}, 2)
+ .add({{"x","a"},{"y","b"},{"z",0}}, 3)
+ .add({{"x","a"},{"y","b"},{"z",1}}, 4)
+ .add({{"x","b"},{"y","a"},{"z",0}}, 5)
+ .add({{"x","b"},{"y","a"},{"z",1}}, 6);
+ }
+ std::unique_ptr<Value> make_tensor(bool use_float) const override {
+ return value_from_spec(make_spec(use_float), factory);
+ }
+ template <typename T>
+ void encode_inner(nbostream &dst) const {
+ dst.putInt1_4Bytes(2);
+ dst.writeSmallString("x");
+ dst.writeSmallString("y");
+ dst.putInt1_4Bytes(1);
+ dst.writeSmallString("z");
+ dst.putInt1_4Bytes(2);
+ dst.putInt1_4Bytes(3);
+ dst.writeSmallString("a");
+ dst.writeSmallString("a");
+ dst << (T) 1;
+ dst << (T) 2;
+ dst.writeSmallString("a");
+ dst.writeSmallString("b");
+ dst << (T) 3;
+ dst << (T) 4;
+ dst.writeSmallString("b");
+ dst.writeSmallString("a");
+ dst << (T) 5;
+ dst << (T) 6;
+ }
+ void encode_default(nbostream &dst) const override {
+ dst.putInt1_4Bytes(3);
+ encode_inner<double>(dst);
+ }
+ void encode_with_double(nbostream &dst) const override {
+ dst.putInt1_4Bytes(7);
+ dst.putInt1_4Bytes(0);
+ encode_inner<double>(dst);
+ }
+ void encode_with_float(nbostream &dst) const override {
+ dst.putInt1_4Bytes(7);
+ dst.putInt1_4Bytes(1);
+ encode_inner<float>(dst);
+ }
+};
+
+TEST(ValueCodecTest, mixed_tensors_can_be_encoded_and_decoded) {
+ MixedTensorExample f1;
+ f1.verify_encode_decode();
+}
+
+//-----------------------------------------------------------------------------
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index 2b103a91b81..9f1519ee7c0 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -325,12 +325,17 @@ TEST("require that type-related predicate functions work as expected") {
TEST_DO(verify_predicates(type("tensor<float>(x[5],y{})"), false, false, true, false, false));
}
-TEST("require that mapped dimensions can be counted") {
+TEST("require that mapped and indexed dimensions can be counted") {
EXPECT_EQUAL(type("double").count_mapped_dimensions(), 0u);
+ EXPECT_EQUAL(type("double").count_indexed_dimensions(), 0u);
EXPECT_EQUAL(type("tensor(x[5],y[5])").count_mapped_dimensions(), 0u);
+ EXPECT_EQUAL(type("tensor(x[5],y[5])").count_indexed_dimensions(), 2u);
EXPECT_EQUAL(type("tensor(x{},y[5])").count_mapped_dimensions(), 1u);
- EXPECT_EQUAL(type("tensor(x[5],y{})").count_mapped_dimensions(), 1u);
+ EXPECT_EQUAL(type("tensor(x{},y[5])").count_indexed_dimensions(), 1u);
+ EXPECT_EQUAL(type("tensor(x[1],y{})").count_mapped_dimensions(), 1u);
+ EXPECT_EQUAL(type("tensor(x[1],y{})").count_indexed_dimensions(), 1u);
EXPECT_EQUAL(type("tensor(x{},y{})").count_mapped_dimensions(), 2u);
+ EXPECT_EQUAL(type("tensor(x{},y{})").count_indexed_dimensions(), 0u);
}
TEST("require that dense subspace size calculation works as expected") {
diff --git a/eval/src/tests/instruction/generic_join/CMakeLists.txt b/eval/src/tests/instruction/generic_join/CMakeLists.txt
new file mode 100644
index 00000000000..13fc6550d3c
--- /dev/null
+++ b/eval/src/tests/instruction/generic_join/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_generic_join_test_app TEST
+ SOURCES
+ generic_join_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_generic_join_test_app COMMAND eval_generic_join_test_app)
diff --git a/eval/src/tests/instruction/generic_join/generic_join_test.cpp b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
new file mode 100644
index 00000000000..4821bf092da
--- /dev/null
+++ b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
@@ -0,0 +1,138 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/instruction/generic_join.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::instruction;
+using namespace vespalib::eval::test;
+
+using vespalib::make_string_short::fmt;
+
+std::vector<Layout> join_layouts = {
+ {}, {},
+ {x(5)}, {x(5)},
+ {x(5)}, {y(5)},
+ {x(5)}, {x(5),y(5)},
+ {y(3)}, {x(2),z(3)},
+ {x(3),y(5)}, {y(5),z(7)},
+ float_cells({x(3),y(5)}), {y(5),z(7)},
+ {x(3),y(5)}, float_cells({y(5),z(7)}),
+ float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
+ {x({"a","b","c"})}, {x({"a","b","c"})},
+ {x({"a","b","c"})}, {x({"a","b"})},
+ {x({"a","b","c"})}, {y({"foo","bar","baz"})},
+ {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
+ {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
+ float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
+ {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
+ float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
+ {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
+ {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
+ float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
+ {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
+ float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})})
+};
+
+bool join_address(const TensorSpec::Address &a, const TensorSpec::Address &b, TensorSpec::Address &addr) {
+ for (const auto &dim_a: a) {
+ auto pos_b = b.find(dim_a.first);
+ if ((pos_b != b.end()) && !(pos_b->second == dim_a.second)) {
+ return false;
+ }
+ addr.insert_or_assign(dim_a.first, dim_a.second);
+ }
+ return true;
+}
+
+TensorSpec reference_join(const TensorSpec &a, const TensorSpec &b, join_fun_t function) {
+ ValueType res_type = ValueType::join(ValueType::from_spec(a.type()), ValueType::from_spec(b.type()));
+ EXPECT_FALSE(res_type.is_error());
+ TensorSpec result(res_type.to_spec());
+ for (const auto &cell_a: a.cells()) {
+ for (const auto &cell_b: b.cells()) {
+ TensorSpec::Address addr;
+ if (join_address(cell_a.first, cell_b.first, addr) &&
+ join_address(cell_b.first, cell_a.first, addr))
+ {
+ result.add(addr, function(cell_a.second, cell_b.second));
+ }
+ }
+ }
+ return result;
+}
+
+TensorSpec perform_generic_join(const TensorSpec &a, const TensorSpec &b, join_fun_t function) {
+ Stash stash;
+ const auto &factory = SimpleValueBuilderFactory::get();
+ auto lhs = value_from_spec(a, factory);
+ auto rhs = value_from_spec(b, factory);
+ auto my_op = GenericJoin::make_instruction(lhs->type(), rhs->type(), function, factory, stash);
+ InterpretedFunction::EvalSingle single(my_op);
+ return spec_from_value(single.eval(std::vector<Value::CREF>({*lhs,*rhs})));
+}
+
+TEST(GenericJoinTest, dense_join_plan_can_be_created) {
+ auto lhs = ValueType::from_spec("tensor(a{},b[6],c[5],e[3],f[2],g{})");
+ auto rhs = ValueType::from_spec("tensor(a{},b[6],c[5],d[4],h{})");
+ auto plan = DenseJoinPlan(lhs, rhs);
+ std::vector<size_t> expect_loop = {30,4,6};
+ std::vector<size_t> expect_lhs_stride = {6,0,1};
+ std::vector<size_t> expect_rhs_stride = {4,1,0};
+ EXPECT_EQ(plan.lhs_size, 180);
+ EXPECT_EQ(plan.rhs_size, 120);
+ EXPECT_EQ(plan.out_size, 720);
+ EXPECT_EQ(plan.loop_cnt, expect_loop);
+ EXPECT_EQ(plan.lhs_stride, expect_lhs_stride);
+ EXPECT_EQ(plan.rhs_stride, expect_rhs_stride);
+}
+
+TEST(GenericJoinTest, sparse_join_plan_can_be_created) {
+ auto lhs = ValueType::from_spec("tensor(a{},b[6],c[5],e[3],f[2],g{})");
+ auto rhs = ValueType::from_spec("tensor(b[6],c[5],d[4],g{},h{})");
+ auto plan = SparseJoinPlan(lhs, rhs);
+ using SRC = SparseJoinPlan::Source;
+ std::vector<SRC> expect_sources = {SRC::LHS,SRC::BOTH,SRC::RHS};
+ std::vector<size_t> expect_lhs_overlap = {1};
+ std::vector<size_t> expect_rhs_overlap = {0};
+ EXPECT_EQ(plan.sources, expect_sources);
+ EXPECT_EQ(plan.lhs_overlap, expect_lhs_overlap);
+ EXPECT_EQ(plan.rhs_overlap, expect_rhs_overlap);
+}
+
+TEST(GenericJoinTest, dense_join_plan_can_be_executed) {
+ auto plan = DenseJoinPlan(ValueType::from_spec("tensor(a[2])"),
+ ValueType::from_spec("tensor(b[3])"));
+ std::vector<int> a({1, 2});
+ std::vector<int> b({3, 4, 5});
+ std::vector<int> c(6, 0);
+ std::vector<int> expect = {3,4,5,6,8,10};
+ ASSERT_EQ(plan.out_size, 6);
+ int *dst = &c[0];
+ auto cell_join = [&](size_t a_idx, size_t b_idx) { *dst++ = (a[a_idx] * b[b_idx]); };
+ plan.execute(0, 0, cell_join);
+ EXPECT_EQ(c, expect);
+}
+
+TEST(GenericJoinTest, generic_join_works_for_simple_values) {
+ ASSERT_TRUE((join_layouts.size() % 2) == 0);
+ for (size_t i = 0; i < join_layouts.size(); i += 2) {
+ TensorSpec lhs = spec(join_layouts[i], Div16(N()));
+ TensorSpec rhs = spec(join_layouts[i + 1], Div16(N()));
+ for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = reference_join(lhs, rhs, fun);
+ auto actual = perform_generic_join(lhs, rhs, fun);
+ EXPECT_EQ(actual, expect);
+ }
+ }
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/instruction/generic_rename/CMakeLists.txt b/eval/src/tests/instruction/generic_rename/CMakeLists.txt
new file mode 100644
index 00000000000..98af0fe0212
--- /dev/null
+++ b/eval/src/tests/instruction/generic_rename/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_generic_rename_test_app TEST
+ SOURCES
+ generic_rename_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_generic_rename_test_app COMMAND eval_generic_rename_test_app)
diff --git a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
new file mode 100644
index 00000000000..f61899e4dda
--- /dev/null
+++ b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
@@ -0,0 +1,145 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/instruction/generic_rename.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::instruction;
+using namespace vespalib::eval::test;
+
+using vespalib::make_string_short::fmt;
+
+std::vector<Layout> rename_layouts = {
+ {x(3)},
+ {x(3),y(5)},
+ {x(3),y(5),z(7)},
+ float_cells({x(3),y(5),z(7)}),
+ {x({"a","b","c"})},
+ {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
+ float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
+ {x(3),y({"foo", "bar"}),z(7)},
+ {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
+ float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+};
+
+struct FromTo {
+ std::vector<vespalib::string> from;
+ std::vector<vespalib::string> to;
+};
+
+std::vector<FromTo> rename_from_to = {
+ { {"x"}, {"x_renamed"} },
+ { {"x"}, {"z_was_x"} },
+ { {"x", "y"}, {"y", "x"} },
+ { {"x", "z"}, {"z", "x"} },
+ { {"x", "y", "z"}, {"a", "b", "c"} },
+ { {"z"}, {"a"} },
+ { {"y"}, {"z_was_y"} },
+ { {"y"}, {"b"} }
+};
+
+
+TEST(GenericRenameTest, dense_rename_plan_can_be_created_and_executed) {
+ auto lhs = ValueType::from_spec("tensor(a[2],c[3],d{},e[5],g[7],h{})");
+ std::vector<vespalib::string> from({"a", "c", "e"});
+ std::vector<vespalib::string> to({"f", "a", "b"});
+ ValueType renamed = lhs.rename(from, to);
+ auto plan = DenseRenamePlan(lhs, renamed, from, to);
+ std::vector<size_t> expect_loop = {15,2,7};
+ std::vector<size_t> expect_stride = {7,105,1};
+ EXPECT_EQ(plan.subspace_size, 210);
+ EXPECT_EQ(plan.loop_cnt, expect_loop);
+ EXPECT_EQ(plan.stride, expect_stride);
+ std::vector<int> out;
+ int want[3][5][2][7];
+ size_t counter = 0;
+ for (size_t a = 0; a < 2; ++a) {
+ for (size_t c = 0; c < 3; ++c) {
+ for (size_t e = 0; e < 5; ++e) {
+ for (size_t g = 0; g < 7; ++g) {
+ want[c][e][a][g] = counter++;
+ }
+ }
+ }
+ }
+ std::vector<int> expect(210);
+ memcpy(&expect[0], &want[0], 210*sizeof(int));
+ auto move_cell = [&](size_t offset) { out.push_back(offset); };
+ plan.execute(0, move_cell);
+ EXPECT_EQ(out, expect);
+}
+
+TEST(GenericRenameTest, sparse_rename_plan_can_be_created) {
+ auto lhs = ValueType::from_spec("tensor(a{},c{},d[3],e{},g{},h[5])");
+ std::vector<vespalib::string> from({"a", "c", "e"});
+ std::vector<vespalib::string> to({"f", "a", "b"});
+ ValueType renamed = lhs.rename(from, to);
+ auto plan = SparseRenamePlan(lhs, renamed, from, to);
+ EXPECT_EQ(plan.mapped_dims, 4);
+ std::vector<size_t> expect = {2,0,1,3};
+ EXPECT_EQ(plan.output_dimensions, expect);
+}
+
+vespalib::string rename_dimension(const vespalib::string &name, const FromTo &ft) {
+ assert(ft.from.size() == ft.to.size());
+ for (size_t i = 0; i < ft.from.size(); ++i) {
+ if (name == ft.from[i]) {
+ return ft.to[i];
+ }
+ }
+ return name;
+}
+
+TensorSpec reference_rename(const TensorSpec &a, const FromTo &ft) {
+ ValueType res_type = ValueType::from_spec(a.type()).rename(ft.from, ft.to);
+ EXPECT_FALSE(res_type.is_error());
+ TensorSpec result(res_type.to_spec());
+ for (const auto &cell: a.cells()) {
+ TensorSpec::Address addr;
+ for (const auto &dim: cell.first) {
+ addr.insert_or_assign(rename_dimension(dim.first, ft), dim.second);
+ }
+ result.add(addr, cell.second);
+ }
+ return result;
+}
+
+TensorSpec perform_generic_rename(const TensorSpec &a, const ValueType &res_type,
+ const FromTo &ft, const ValueBuilderFactory &factory)
+{
+ Stash stash;
+ auto lhs = value_from_spec(a, factory);
+ auto my_op = GenericRename::make_instruction(lhs->type(), res_type, ft.from, ft.to, factory, stash);
+ InterpretedFunction::EvalSingle single(my_op);
+ return spec_from_value(single.eval(std::vector<Value::CREF>({*lhs})));
+}
+
+void test_generic_rename(const ValueBuilderFactory &factory) {
+ for (const auto & layout : rename_layouts) {
+ TensorSpec lhs = spec(layout, N());
+ ValueType lhs_type = ValueType::from_spec(lhs.type());
+ // printf("lhs_type: %s\n", lhs_type.to_spec().c_str());
+ for (const auto & from_to : rename_from_to) {
+ ValueType renamed_type = lhs_type.rename(from_to.from, from_to.to);
+ if (renamed_type.is_error()) continue;
+ // printf("type %s -> %s\n", lhs_type.to_spec().c_str(), renamed_type.to_spec().c_str());
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ auto expect = reference_rename(lhs, from_to);
+ auto actual = perform_generic_rename(lhs, renamed_type, from_to, factory);
+ EXPECT_EQ(actual, expect);
+ }
+ }
+}
+
+TEST(GenericRenameTest, generic_rename_works_for_simple_values) {
+ test_generic_rename(SimpleValueBuilderFactory::get());
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/tensor/direct_sparse_tensor_builder/direct_sparse_tensor_builder_test.cpp b/eval/src/tests/tensor/direct_sparse_tensor_builder/direct_sparse_tensor_builder_test.cpp
index 651451d81f1..e4640cf2c6a 100644
--- a/eval/src/tests/tensor/direct_sparse_tensor_builder/direct_sparse_tensor_builder_test.cpp
+++ b/eval/src/tests/tensor/direct_sparse_tensor_builder/direct_sparse_tensor_builder_test.cpp
@@ -54,7 +54,7 @@ TEST("require that tensor can be constructed")
Tensor::UP tensor = buildTensor();
const SparseTensor &sparseTensor = dynamic_cast<const SparseTensor &>(*tensor);
const ValueType &type = sparseTensor.type();
- const SparseTensor::Cells &cells = sparseTensor.cells();
+ const SparseTensor::Cells &cells = sparseTensor.my_cells();
EXPECT_EQUAL(2u, cells.size());
assertCellValue(10, TensorAddress({{"a","1"},{"b","2"}}), type, cells);
assertCellValue(20, TensorAddress({{"c","3"},{"d","4"}}), type, cells);
diff --git a/eval/src/tests/tensor/instruction_benchmark/.gitignore b/eval/src/tests/tensor/instruction_benchmark/.gitignore
new file mode 100644
index 00000000000..31b087883e0
--- /dev/null
+++ b/eval/src/tests/tensor/instruction_benchmark/.gitignore
@@ -0,0 +1 @@
+/eval_instruction_benchmark_app
diff --git a/eval/src/tests/tensor/instruction_benchmark/CMakeLists.txt b/eval/src/tests/tensor/instruction_benchmark/CMakeLists.txt
new file mode 100644
index 00000000000..d2384eaf129
--- /dev/null
+++ b/eval/src/tests/tensor/instruction_benchmark/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_instruction_benchmark_app TEST
+ SOURCES
+ instruction_benchmark.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
diff --git a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
new file mode 100644
index 00000000000..9db7bbae4e8
--- /dev/null
+++ b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
@@ -0,0 +1,321 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+// Microbenchmark exploring performance differences between
+// interpreted function instructions.
+
+// This benchmark was initially written to measure the difference in
+// performance between (old) instructions using the TensorEngine
+// immediate API and (new) instructions using the Value API
+// directly. Note that all previous optimizations for dense tensors
+// are trivially transformed to use the Value API, and thus only the
+// generic cases need to be compared. Specifically; we want to make
+// sure join performance for sparse tensors with full dimensional
+// overlap does not suffer too much. Also, we want to showcase an
+// improvement in generic dense join and possibly also in sparse join
+// with partial dimensional overlap. Benchmarks are done using float
+// cells since this is what gives best overall performance in
+// production. Also, we use the multiply operation since it is the
+// most optimized operation across all implementations. When
+// benchmarking different implementations against each other, a smoke
+// test is performed by verifying that all implementations produce the
+// same result.
+
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/instruction/generic_join.h>
+#include <vespa/eval/eval/simple_tensor_engine.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/eval/tensor/mixed/packed_mixed_tensor_builder_factory.h>
+#include <vespa/vespalib/util/benchmark_timer.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <optional>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::tensor;
+using namespace vespalib::eval::instruction;
+using vespalib::make_string_short::fmt;
+
+using Instruction = InterpretedFunction::Instruction;
+using EvalSingle = InterpretedFunction::EvalSingle;
+
+template <typename T> using CREF = std::reference_wrapper<const T>;
+
+//-----------------------------------------------------------------------------
+
+struct Impl {
+ virtual const vespalib::string &name() const = 0;
+ virtual Value::UP create_value(const TensorSpec &spec) const = 0;
+ virtual TensorSpec create_spec(const Value &value) const = 0;
+ virtual Instruction create_join(const ValueType &lhs, const ValueType &rhs, operation::op2_t function, Stash &stash) const = 0;
+ virtual const TensorEngine &engine() const { return SimpleTensorEngine::ref(); } // engine used by EvalSingle
+ virtual ~Impl() {}
+};
+
+struct ValueImpl : Impl {
+ vespalib::string my_name;
+ const ValueBuilderFactory &my_factory;
+ ValueImpl(const vespalib::string &name_in, const ValueBuilderFactory &factory)
+ : my_name(name_in), my_factory(factory) {}
+ const vespalib::string &name() const override { return my_name; }
+ Value::UP create_value(const TensorSpec &spec) const override { return value_from_spec(spec, my_factory); }
+ TensorSpec create_spec(const Value &value) const override { return spec_from_value(value); }
+ Instruction create_join(const ValueType &lhs, const ValueType &rhs, operation::op2_t function, Stash &stash) const override {
+ return GenericJoin::make_instruction(lhs, rhs, function, my_factory, stash);
+ }
+};
+
+struct EngineImpl : Impl {
+ vespalib::string my_name;
+ const TensorEngine &my_engine;
+ EngineImpl(const vespalib::string &name_in, const TensorEngine &engine_in)
+ : my_name(name_in), my_engine(engine_in) {}
+ const vespalib::string &name() const override { return my_name; }
+ Value::UP create_value(const TensorSpec &spec) const override { return my_engine.from_spec(spec); }
+ TensorSpec create_spec(const Value &value) const override { return my_engine.to_spec(value); }
+ Instruction create_join(const ValueType &lhs, const ValueType &rhs, operation::op2_t function, Stash &stash) const override {
+ // create a complete tensor function joining two parameters, but only compile the join instruction itself
+ const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
+ const auto &rhs_node = tensor_function::inject(rhs, 1, stash);
+ const auto &join_node = tensor_function::join(lhs_node, rhs_node, function, stash);
+ return join_node.compile_self(my_engine, stash);
+ }
+ const TensorEngine &engine() const override { return my_engine; }
+};
+
+//-----------------------------------------------------------------------------
+
+EngineImpl simple_tensor_engine_impl(" [SimpleTensorEngine]", SimpleTensorEngine::ref());
+EngineImpl default_tensor_engine_impl("[DefaultTensorEngine]", DefaultTensorEngine::ref());
+ValueImpl simple_value_impl(" [SimpleValue]", SimpleValueBuilderFactory::get());
+ValueImpl packed_mixed_tensor_impl(" [PackedMixedTensor]", PackedMixedTensorBuilderFactory::get());
+
+double budget = 5.0;
+std::vector<CREF<Impl>> impl_list = {simple_tensor_engine_impl,
+ default_tensor_engine_impl,
+ simple_value_impl,
+ packed_mixed_tensor_impl};
+
+//-----------------------------------------------------------------------------
+
+struct EvalOp {
+ using UP = std::unique_ptr<EvalOp>;
+ const Impl &impl;
+ std::vector<Value::UP> values;
+ std::vector<Value::CREF> stack;
+ EvalSingle single;
+ EvalOp(const EvalOp &) = delete;
+ EvalOp &operator=(const EvalOp &) = delete;
+ EvalOp(Instruction op, const std::vector<CREF<TensorSpec>> &stack_spec, const Impl &impl_in)
+ : impl(impl_in), values(), stack(), single(impl.engine(), op)
+ {
+ for (const TensorSpec &spec: stack_spec) {
+ values.push_back(impl.create_value(spec));
+ }
+ for (const auto &value: values) {
+ stack.push_back(*value.get());
+ }
+ }
+ TensorSpec result() { return impl.create_spec(single.eval(stack)); }
+ double estimate_cost_us() {
+ auto actual = [&](){ single.eval(stack); };
+ return BenchmarkTimer::benchmark(actual, budget) * 1000.0 * 1000.0;
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+void benchmark(const vespalib::string &desc, const std::vector<EvalOp::UP> &list) {
+ fprintf(stderr, "--------------------------------------------------------\n");
+ fprintf(stderr, "Benchmark Case: [%s]\n", desc.c_str());
+ std::optional<TensorSpec> expect = std::nullopt;
+ for (const auto &eval: list) {
+ if (expect.has_value()) {
+ ASSERT_EQ(eval->result(), expect.value());
+ } else {
+ expect = eval->result();
+ }
+ }
+ for (const auto &eval: list) {
+ fprintf(stderr, " %s: %10.3f us\n", eval->impl.name().c_str(), eval->estimate_cost_us());
+ }
+ fprintf(stderr, "--------------------------------------------------------\n");
+}
+
+//-----------------------------------------------------------------------------
+
+void benchmark_join(const vespalib::string &desc, const TensorSpec &lhs,
+ const TensorSpec &rhs, operation::op2_t function)
+{
+ Stash stash;
+ ValueType lhs_type = ValueType::from_spec(lhs.type());
+ ValueType rhs_type = ValueType::from_spec(rhs.type());
+ ValueType res_type = ValueType::join(lhs_type, rhs_type);
+ ASSERT_FALSE(lhs_type.is_error());
+ ASSERT_FALSE(rhs_type.is_error());
+ ASSERT_FALSE(res_type.is_error());
+ std::vector<EvalOp::UP> list;
+ for (const Impl &impl: impl_list) {
+ auto op = impl.create_join(lhs_type, rhs_type, function, stash);
+ std::vector<CREF<TensorSpec>> stack_spec({lhs, rhs});
+ list.push_back(std::make_unique<EvalOp>(op, stack_spec, impl));
+ }
+ benchmark(desc, list);
+}
+
+//-----------------------------------------------------------------------------
+
+struct D {
+ vespalib::string name;
+ bool mapped;
+ size_t size;
+ size_t stride;
+ static D map(const vespalib::string &name_in, size_t size_in, size_t stride_in) { return D{name_in, true, size_in, stride_in}; }
+ static D idx(const vespalib::string &name_in, size_t size_in) { return D{name_in, false, size_in, 1}; }
+ operator ValueType::Dimension() const {
+ if (mapped) {
+ return ValueType::Dimension(name);
+ } else {
+ return ValueType::Dimension(name, size);
+ }
+ }
+ TensorSpec::Label operator()(size_t idx) const {
+ if (mapped) {
+ return TensorSpec::Label(fmt("label_%zu", idx));
+ } else {
+ return TensorSpec::Label(idx);
+ }
+ }
+};
+
+void add_cells(TensorSpec &spec, double &seq, TensorSpec::Address addr) {
+ spec.add(addr, seq);
+ seq += 1.0;
+}
+
+template <typename ...Ds> void add_cells(TensorSpec &spec, double &seq, TensorSpec::Address addr, const D &d, const Ds &...ds) {
+ for (size_t i = 0, idx = 0; i < d.size; ++i, idx += d.stride) {
+ addr.insert_or_assign(d.name, d(idx));
+ add_cells(spec, seq, addr, ds...);
+ }
+}
+
+template <typename ...Ds> TensorSpec make_spec(double seq, const Ds &...ds) {
+ TensorSpec spec(ValueType::tensor_type({ds...}, ValueType::CellType::FLOAT).to_spec());
+ add_cells(spec, seq, TensorSpec::Address(), ds...);
+ return spec;
+}
+
+TensorSpec make_vector(const D &d1, double seq) { return make_spec(seq, d1); }
+TensorSpec make_cube(const D &d1, const D &d2, const D &d3, double seq) { return make_spec(seq, d1, d2, d3); }
+
+//-----------------------------------------------------------------------------
+
+TEST(MakeInputTest, print_some_test_input) {
+ auto number = make_spec(5.0);
+ auto sparse = make_vector(D::map("x", 5, 3), 1.0);
+ auto dense = make_vector(D::idx("x", 5), 10.0);
+ auto mixed = make_cube(D::map("x", 3, 7), D::idx("y", 2), D::idx("z", 2), 100.0);
+ fprintf(stderr, "--------------------------------------------------------\n");
+ fprintf(stderr, "simple number: %s\n", number.to_string().c_str());
+ fprintf(stderr, "sparse vector: %s\n", sparse.to_string().c_str());
+ fprintf(stderr, "dense vector: %s\n", dense.to_string().c_str());
+ fprintf(stderr, "mixed cube: %s\n", mixed.to_string().c_str());
+ fprintf(stderr, "--------------------------------------------------------\n");
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(NumberJoin, plain_op2) {
+ auto lhs = make_spec(2.0);
+ auto rhs = make_spec(3.0);
+ benchmark_join("simple numbers multiply", lhs, rhs, operation::Mul::f);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(DenseJoin, small_vectors) {
+ auto lhs = make_vector(D::idx("x", 10), 1.0);
+ auto rhs = make_vector(D::idx("x", 10), 2.0);
+ benchmark_join("small dense vector multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(DenseJoin, full_overlap) {
+ auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 1.0);
+ auto rhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 2.0);
+ benchmark_join("dense full overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(DenseJoin, partial_overlap) {
+ auto lhs = make_cube(D::idx("a", 8), D::idx("c", 8), D::idx("d", 8), 1.0);
+ auto rhs = make_cube(D::idx("b", 8), D::idx("c", 8), D::idx("d", 8), 2.0);
+ benchmark_join("dense partial overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(DenseJoin, no_overlap) {
+ auto lhs = make_cube(D::idx("a", 4), D::idx("e", 4), D::idx("f", 4), 1.0);
+ auto rhs = make_cube(D::idx("b", 4), D::idx("c", 4), D::idx("d", 4), 2.0);
+ benchmark_join("dense no overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(SparseJoin, small_vectors) {
+ auto lhs = make_vector(D::map("x", 10, 1), 1.0);
+ auto rhs = make_vector(D::map("x", 10, 2), 2.0);
+ benchmark_join("small sparse vector multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(SparseJoin, full_overlap) {
+ auto lhs = make_cube(D::map("a", 16, 1), D::map("b", 16, 1), D::map("c", 16, 1), 1.0);
+ auto rhs = make_cube(D::map("a", 16, 2), D::map("b", 16, 2), D::map("c", 16, 2), 2.0);
+ benchmark_join("sparse full overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(SparseJoin, full_overlap_big_vs_small) {
+ auto lhs = make_cube(D::map("a", 16, 1), D::map("b", 16, 1), D::map("c", 16, 1), 1.0);
+ auto rhs = make_cube(D::map("a", 2, 1), D::map("b", 2, 1), D::map("c", 2, 1), 2.0);
+ benchmark_join("sparse full overlap big vs small multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(SparseJoin, partial_overlap) {
+ auto lhs = make_cube(D::map("a", 8, 1), D::map("c", 8, 1), D::map("d", 8, 1), 1.0);
+ auto rhs = make_cube(D::map("b", 8, 2), D::map("c", 8, 2), D::map("d", 8, 2), 2.0);
+ benchmark_join("sparse partial overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(SparseJoin, no_overlap) {
+ auto lhs = make_cube(D::map("a", 4, 1), D::map("e", 4, 1), D::map("f", 4, 1), 1.0);
+ auto rhs = make_cube(D::map("b", 4, 1), D::map("c", 4, 1), D::map("d", 4, 1), 2.0);
+ benchmark_join("sparse no overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(MixedJoin, full_overlap) {
+ auto lhs = make_cube(D::map("a", 16, 1), D::map("b", 16, 1), D::idx("c", 16), 1.0);
+ auto rhs = make_cube(D::map("a", 16, 2), D::map("b", 16, 2), D::idx("c", 16), 2.0);
+ benchmark_join("mixed full overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(MixedJoin, partial_sparse_overlap) {
+ auto lhs = make_cube(D::map("a", 8, 1), D::map("c", 8, 1), D::idx("d", 8), 1.0);
+ auto rhs = make_cube(D::map("b", 8, 2), D::map("c", 8, 2), D::idx("d", 8), 2.0);
+ benchmark_join("mixed partial sparse overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+TEST(MixedJoin, no_overlap) {
+ auto lhs = make_cube(D::map("a", 4, 1), D::map("e", 4, 1), D::idx("f", 4), 1.0);
+ auto rhs = make_cube(D::map("b", 4, 1), D::map("c", 4, 1), D::idx("d", 4), 2.0);
+ benchmark_join("mixed no overlap multiply", lhs, rhs, operation::Mul::f);
+}
+
+//-----------------------------------------------------------------------------
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/tensor/packed_mappings/CMakeLists.txt b/eval/src/tests/tensor/packed_mappings/CMakeLists.txt
new file mode 100644
index 00000000000..2d11755a0c5
--- /dev/null
+++ b/eval/src/tests/tensor/packed_mappings/CMakeLists.txt
@@ -0,0 +1,19 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+vespa_add_executable(eval_packed_mappings_test_app TEST
+ SOURCES
+ packed_mappings_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_packed_mappings_test_app COMMAND eval_packed_mappings_test_app)
+
+vespa_add_executable(eval_packed_mixed_test_app TEST
+ SOURCES
+ packed_mixed_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_packed_mixed_test_app COMMAND eval_packed_mixed_test_app)
diff --git a/eval/src/tests/tensor/packed_mappings/packed_mappings_test.cpp b/eval/src/tests/tensor/packed_mappings/packed_mappings_test.cpp
new file mode 100644
index 00000000000..c8814372bf5
--- /dev/null
+++ b/eval/src/tests/tensor/packed_mappings/packed_mappings_test.cpp
@@ -0,0 +1,226 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/tensor/mixed/packed_labels.h>
+#include <vespa/eval/tensor/mixed/packed_mappings.h>
+#include <vespa/eval/tensor/mixed/packed_mappings_builder.h>
+#include <vespa/eval/tensor/mixed/packed_mixed_tensor.h>
+#include <vespa/eval/tensor/mixed/packed_mixed_tensor_builder.h>
+#include <vespa/eval/tensor/mixed/packed_mixed_tensor_builder_factory.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <set>
+
+using namespace vespalib::eval;
+using namespace vespalib::eval::packed_mixed_tensor;
+
+namespace {
+
+uint32_t random_range(uint32_t from, uint32_t to) {
+ assert(from + 1 < to);
+ int unif = rand() % (to - from);
+ return from + unif;
+}
+
+const char *mixed_tensor_types[] = {
+ "tensor<float>(x{})",
+ "tensor<float>(a{},b{},c{},d{},e{},f{})",
+ "tensor<float>(x{},y{})",
+ "tensor<float>(x{},z[3])",
+ "tensor<float>(w[5],x{},y{},z[3])"
+};
+
+const char *float_tensor_types[] = {
+ "tensor<float>(x{})",
+ "tensor<float>(x{},y{})",
+ "tensor<float>(x{},z[3])",
+ "tensor<float>(w[5],x{},y{},z[3])",
+ "tensor<float>(z[2])",
+ "tensor<float>()"
+};
+
+ vespalib::string label1(""),
+ label2("foo"),
+ label3("bar");
+ vespalib::string label4("foobar"),
+ label5("barfoo"),
+ label6("other");
+ vespalib::string label7("long text number one"),
+ label8("long text number two"),
+ label9("long text number three");
+
+std::vector<vespalib::stringref>
+generate_random_address(uint32_t dims)
+{
+ std::vector<vespalib::stringref> foo(dims, label1);
+ for (auto & ref : foo) {
+ size_t pct = random_range(0, 100);
+ if (pct < 5) { ref = label1; }
+ else if (pct < 30) { ref = label2; }
+ else if (pct < 55) { ref = label3; }
+ else if (pct < 65) { ref = label4; }
+ else if (pct < 75) { ref = label5; }
+ else if (pct < 85) { ref = label6; }
+ else if (pct < 90) { ref = label7; }
+ else if (pct < 95) { ref = label8; }
+ else { ref = label9; }
+ }
+ return foo;
+}
+
+} // namespace <unnamed>
+
+class MappingsBuilderTest : public ::testing::Test {
+public:
+ std::unique_ptr<PackedMappingsBuilder> builder;
+ std::unique_ptr<PackedMappings> built;
+
+ MappingsBuilderTest() = default;
+
+ virtual ~MappingsBuilderTest() = default;
+
+ void build_and_compare() {
+ ASSERT_TRUE(builder);
+ built = builder->build_mappings();
+ ASSERT_TRUE(built);
+ EXPECT_EQ(builder->num_mapped_dims(), built->num_mapped_dims());
+ EXPECT_EQ(builder->size(), built->size());
+ for (size_t idx = 0; idx < built->size(); ++idx) {
+ std::vector<vespalib::stringref> got(builder->num_mapped_dims());
+ built->fill_address_by_sortid(idx, got);
+ printf("Got address:");
+ for (auto ref : got) {
+ printf(" '%s'", ref.data());
+ }
+ uint32_t subspace = built->subspace_of_address(got);
+ uint32_t original = builder->add_mapping_for(got);
+ printf(" -> %u\n", original);
+ EXPECT_EQ(subspace, original);
+ }
+ }
+};
+
+TEST_F(MappingsBuilderTest, empty_mapping)
+{
+ for (uint32_t dims : { 0, 1, 2, 3 }) {
+ builder = std::make_unique<PackedMappingsBuilder>(dims);
+ build_and_compare();
+ }
+}
+
+TEST_F(MappingsBuilderTest, just_one)
+{
+ vespalib::string label("foobar");
+ for (uint32_t dims : { 0, 1, 2, 3, 7 }) {
+ builder = std::make_unique<PackedMappingsBuilder>(dims);
+ std::vector<vespalib::stringref> foo(dims, label);
+ uint32_t idx = builder->add_mapping_for(foo);
+ EXPECT_EQ(idx, 0);
+ build_and_compare();
+ }
+}
+
+TEST_F(MappingsBuilderTest, some_random)
+{
+ for (uint32_t dims : { 1, 2, 5 }) {
+ builder = std::make_unique<PackedMappingsBuilder>(dims);
+ uint32_t cnt = random_range(dims*5, dims*20);
+ printf("Generate %u addresses for %u dims\n", cnt, dims);
+ for (uint32_t i = 0; i < cnt; ++i) {
+ auto foo = generate_random_address(dims);
+ uint32_t idx = builder->add_mapping_for(foo);
+ EXPECT_LE(idx, i);
+ }
+ build_and_compare();
+ }
+}
+
+class MixedBuilderTest : public ::testing::Test {
+public:
+ std::unique_ptr<PackedMixedTensorBuilder<float>> builder;
+ std::unique_ptr<Value> built;
+
+ MixedBuilderTest() = default;
+
+ virtual ~MixedBuilderTest() = default;
+
+ size_t expected_value = 0;
+
+ void build_and_compare(size_t expect_size) {
+ built.reset(nullptr);
+ EXPECT_FALSE(built);
+ ASSERT_TRUE(builder);
+ built = builder->build(std::move(builder));
+ EXPECT_FALSE(builder);
+ ASSERT_TRUE(built);
+ EXPECT_EQ(built->index().size(), expect_size);
+ auto cells = built->cells().typify<float>();
+ for (float f : cells) {
+ float expect = ++expected_value;
+ EXPECT_EQ(f, expect);
+ }
+ }
+};
+
+TEST_F(MixedBuilderTest, empty_mapping)
+{
+ for (auto type_spec : mixed_tensor_types) {
+ ValueType type = ValueType::from_spec(type_spec);
+ size_t dims = type.count_mapped_dimensions();
+ size_t dsss = type.dense_subspace_size();
+ EXPECT_GT(dims, 0);
+ EXPECT_GT(dsss, 0);
+ builder = std::make_unique<PackedMixedTensorBuilder<float>>(type, dims, dsss, 3);
+ build_and_compare(0);
+ }
+}
+
+TEST_F(MixedBuilderTest, just_one)
+{
+ size_t counter = 0;
+ for (auto type_spec : float_tensor_types) {
+ ValueType type = ValueType::from_spec(type_spec);
+ size_t dims = type.count_mapped_dimensions();
+ size_t dsss = type.dense_subspace_size();
+ EXPECT_GT(dsss, 0);
+ builder = std::make_unique<PackedMixedTensorBuilder<float>>(type, dims, dsss, 3);
+ auto address = generate_random_address(dims);
+ auto ref = builder->add_subspace(address);
+ EXPECT_EQ(ref.size(), dsss);
+ for (size_t i = 0; i < ref.size(); ++i) {
+ ref[i] = ++counter;
+ }
+ build_and_compare(1);
+ }
+}
+
+TEST_F(MixedBuilderTest, some_random)
+{
+ size_t counter = 0;
+ for (auto type_spec : mixed_tensor_types) {
+ ValueType type = ValueType::from_spec(type_spec);
+ uint32_t dims = type.count_mapped_dimensions();
+ uint32_t dsss = type.dense_subspace_size();
+ EXPECT_GT(dims, 0);
+ EXPECT_GT(dsss, 0);
+ builder = std::make_unique<PackedMixedTensorBuilder<float>>(type, dims, dsss, 3);
+
+ uint32_t cnt = random_range(dims*5, dims*20);
+ printf("MixBuild: generate %u addresses for %u dims\n", cnt, dims);
+ std::set<std::vector<vespalib::stringref>> seen;
+ for (uint32_t i = 0; i < cnt; ++i) {
+ auto address = generate_random_address(dims);
+ if (seen.insert(address).second) {
+ auto ref = builder->add_subspace(address);
+ EXPECT_EQ(ref.size(), dsss);
+ for (size_t j = 0; j < ref.size(); ++j) {
+ ref[j] = ++counter;
+ }
+ }
+ }
+ printf("MixBuild: generated %zu unique addresses\n", seen.size());
+ build_and_compare(seen.size());
+ }
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/tensor/packed_mappings/packed_mixed_test.cpp b/eval/src/tests/tensor/packed_mappings/packed_mixed_test.cpp
new file mode 100644
index 00000000000..bc1efdaba1d
--- /dev/null
+++ b/eval/src/tests/tensor/packed_mappings/packed_mixed_test.cpp
@@ -0,0 +1,153 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/tensor/mixed/packed_mixed_tensor_builder_factory.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+
+std::vector<Layout> layouts = {
+ {},
+ {x(3)},
+ {x(3),y(5)},
+ {x(3),y(5),z(7)},
+ float_cells({x(3),y(5),z(7)}),
+ {x({"a","b","c"})},
+ {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
+ float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
+ {x(3),y({"foo", "bar"}),z(7)},
+ {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
+ float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+};
+
+TEST(PackedMixedTest, packed_mixed_tensors_can_be_converted_from_and_to_tensor_spec) {
+ for (const auto &layout: layouts) {
+ TensorSpec expect = spec(layout, N());
+ std::unique_ptr<Value> value = value_from_spec(expect, PackedMixedTensorBuilderFactory::get());
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
+}
+
+TEST(PackedMixedTest, packed_mixed_tensors_can_be_built_and_inspected) {
+ ValueType type = ValueType::from_spec("tensor<float>(x{},y[2],z{})");
+ const auto & factory = PackedMixedTensorBuilderFactory::get();
+ std::unique_ptr<ValueBuilder<float>> builder = factory.create_value_builder<float>(type);
+ float seq = 0.0;
+ for (vespalib::string x: {"a", "b", "c"}) {
+ for (vespalib::string y: {"aa", "bb"}) {
+ auto subspace = builder->add_subspace({x, y});
+ EXPECT_EQ(subspace.size(), 2);
+ subspace[0] = seq + 1.0;
+ subspace[1] = seq + 5.0;
+ seq += 10.0;
+ }
+ seq += 100.0;
+ }
+ std::unique_ptr<Value> value = builder->build(std::move(builder));
+ EXPECT_EQ(value->index().size(), 6);
+ auto view = value->index().create_view({0});
+ vespalib::stringref query = "b";
+ vespalib::stringref label;
+ size_t subspace;
+ view->lookup({&query});
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "aa");
+ EXPECT_EQ(subspace, 2);
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "bb");
+ EXPECT_EQ(subspace, 3);
+ EXPECT_FALSE(view->next_result({&label}, subspace));
+
+ query = "c";
+ view->lookup({&query});
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "aa");
+ EXPECT_EQ(subspace, 4);
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "bb");
+ EXPECT_EQ(subspace, 5);
+ EXPECT_FALSE(view->next_result({&label}, subspace));
+
+ query = "notpresent";
+ view->lookup({&query});
+ EXPECT_FALSE(view->next_result({&label}, subspace));
+
+ view = value->index().create_view({1});
+ query = "aa";
+ view->lookup({&query});
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "a");
+ EXPECT_EQ(subspace, 0);
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "b");
+ EXPECT_EQ(subspace, 2);
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "c");
+ EXPECT_EQ(subspace, 4);
+ EXPECT_FALSE(view->next_result({&label}, subspace));
+
+ query = "bb";
+ view->lookup({&query});
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "a");
+ EXPECT_EQ(subspace, 1);
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "b");
+ EXPECT_EQ(subspace, 3);
+ EXPECT_TRUE(view->next_result({&label}, subspace));
+ EXPECT_EQ(label, "c");
+ EXPECT_EQ(subspace, 5);
+ EXPECT_FALSE(view->next_result({&label}, subspace));
+
+ query = "notpresent";
+ view->lookup({&query});
+ EXPECT_FALSE(view->next_result({&label}, subspace));
+
+ view = value->index().create_view({0,1});
+ vespalib::stringref query_x = "b";
+ vespalib::stringref query_y = "bb";
+ view->lookup({&query_x, &query_y});
+ EXPECT_TRUE(view->next_result({}, subspace));
+ EXPECT_EQ(subspace, 3);
+ EXPECT_FALSE(view->next_result({}, subspace));
+
+ view = value->index().create_view({});
+ vespalib::stringref label_x;
+ vespalib::stringref label_y;
+ view->lookup({});
+
+ const std::vector<vespalib::stringref*> out({&label_x, &label_y});
+ EXPECT_TRUE(view->next_result(out, subspace));
+ EXPECT_EQ(label_x, "a");
+ EXPECT_EQ(label_y, "aa");
+ EXPECT_EQ(subspace, 0);
+ EXPECT_TRUE(view->next_result(out, subspace));
+ EXPECT_EQ(label_x, "a");
+ EXPECT_EQ(label_y, "bb");
+ EXPECT_EQ(subspace, 1);
+ EXPECT_TRUE(view->next_result(out, subspace));
+ EXPECT_EQ(label_x, "b");
+ EXPECT_EQ(label_y, "aa");
+ EXPECT_EQ(subspace, 2);
+ EXPECT_TRUE(view->next_result(out, subspace));
+ EXPECT_EQ(label_x, "b");
+ EXPECT_EQ(label_y, "bb");
+ EXPECT_EQ(subspace, 3);
+ EXPECT_TRUE(view->next_result(out, subspace));
+ EXPECT_EQ(label_x, "c");
+ EXPECT_EQ(label_y, "aa");
+ EXPECT_EQ(subspace, 4);
+ EXPECT_TRUE(view->next_result(out, subspace));
+ EXPECT_EQ(label_x, "c");
+ EXPECT_EQ(label_y, "bb");
+ EXPECT_EQ(subspace, 5);
+ EXPECT_FALSE(view->next_result(out, subspace));
+}
+
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp b/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
index d1491e4f758..e1009969b43 100644
--- a/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
+++ b/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
@@ -10,6 +10,7 @@
#include <vespa/vespalib/objects/hexdump.h>
#include <ostream>
#include <vespa/eval/tensor/dense/dense_tensor_view.h>
+#include <vespa/eval/eval/value_codec.h>
using namespace vespalib::tensor;
using vespalib::eval::TensorSpec;
@@ -47,6 +48,24 @@ void verify_cells_only(const ExpBuffer &exp, const TensorSpec &spec) {
ASSERT_EQUAL(i, cells.size());
}
+TensorSpec verify_new_value_serialized(const ExpBuffer &exp, const TensorSpec &spec) {
+ const auto &factory = vespalib::eval::SimpleValueBuilderFactory::get();
+ auto new_value = vespalib::eval::value_from_spec(spec, factory);
+ auto new_value_spec = vespalib::eval::spec_from_value(*new_value);
+ nbostream actual;
+ vespalib::eval::encode_value(*new_value, actual);
+ ASSERT_EQUAL(exp, actual);
+ auto new_decoded = vespalib::eval::decode_value(actual, factory);
+ auto new_decoded_spec = vespalib::eval::spec_from_value(*new_decoded);
+ EXPECT_EQUAL(0u, actual.size());
+ EXPECT_EQUAL(new_value_spec, new_decoded_spec);
+ if (new_value->type().is_dense()) {
+ TEST_DO(verify_cells_only<float>(exp, new_value_spec));
+ TEST_DO(verify_cells_only<double>(exp, new_value_spec));
+ }
+ return new_decoded_spec;
+}
+
void verify_serialized(const ExpBuffer &exp, const TensorSpec &spec) {
auto &engine = DefaultTensorEngine::ref();
auto value = engine.from_spec(spec);
@@ -62,6 +81,8 @@ void verify_serialized(const ExpBuffer &exp, const TensorSpec &spec) {
TEST_DO(verify_cells_only<float>(exp, value_spec));
TEST_DO(verify_cells_only<double>(exp, value_spec));
}
+ auto new_value_spec = verify_new_value_serialized(exp, spec);
+ EXPECT_EQUAL(value_spec, new_value_spec);
}
//-----------------------------------------------------------------------------