Diffstat (limited to 'eval/src')
-rw-r--r--  eval/src/tests/eval/compile_cache/compile_cache_test.cpp |   2
-rw-r--r--  eval/src/tests/eval/fast_value/fast_value_test.cpp |  40
-rw-r--r--  eval/src/tests/eval/gen_spec/CMakeLists.txt |   9
-rw-r--r--  eval/src/tests/eval/gen_spec/gen_spec_test.cpp | 196
-rw-r--r--  eval/src/tests/eval/reference_operations/reference_operations_test.cpp |   2
-rw-r--r--  eval/src/tests/eval/simple_value/simple_value_test.cpp | 115
-rw-r--r--  eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp |  20
-rw-r--r--  eval/src/tests/eval/value_codec/value_codec_test.cpp |  54
-rw-r--r--  eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp |  12
-rw-r--r--  eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp |   4
-rw-r--r--  eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp |  16
-rw-r--r--  eval/src/tests/instruction/generic_concat/generic_concat_test.cpp | 111
-rw-r--r--  eval/src/tests/instruction/generic_create/generic_create_test.cpp |  46
-rw-r--r--  eval/src/tests/instruction/generic_join/generic_join_test.cpp |  74
-rw-r--r--  eval/src/tests/instruction/generic_map/generic_map_test.cpp |  46
-rw-r--r--  eval/src/tests/instruction/generic_merge/generic_merge_test.cpp |  54
-rw-r--r--  eval/src/tests/instruction/generic_peek/generic_peek_test.cpp |  38
-rw-r--r--  eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp |  61
-rw-r--r--  eval/src/tests/instruction/generic_rename/generic_rename_test.cpp |  51
-rw-r--r--  eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp |  28
-rw-r--r--  eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp |  71
-rw-r--r--  eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp |  18
-rw-r--r--  eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp |  44
-rw-r--r--  eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp |  20
-rw-r--r--  eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp |  10
-rw-r--r--  eval/src/tests/instruction/sparse_dot_product_function/CMakeLists.txt |   9
-rw-r--r--  eval/src/tests/instruction/sparse_dot_product_function/sparse_dot_product_function_test.cpp |  85
-rw-r--r--  eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp |  51
-rw-r--r--  eval/src/tests/streamed/value/streamed_value_test.cpp | 115
-rw-r--r--  eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp | 229
-rw-r--r--  eval/src/vespa/eval/eval/optimize_tensor_function.cpp |   8
-rw-r--r--  eval/src/vespa/eval/eval/test/CMakeLists.txt |   1
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_fixture.cpp |  22
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_fixture.h |   5
-rw-r--r--  eval/src/vespa/eval/eval/test/gen_spec.cpp |  63
-rw-r--r--  eval/src/vespa/eval/eval/test/gen_spec.h | 109
-rw-r--r--  eval/src/vespa/eval/eval/test/param_variants.h |  23
-rw-r--r--  eval/src/vespa/eval/instruction/CMakeLists.txt |   1
-rw-r--r--  eval/src/vespa/eval/instruction/generic_join.cpp |  11
-rw-r--r--  eval/src/vespa/eval/instruction/generic_join.h |  10
-rw-r--r--  eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp | 111
-rw-r--r--  eval/src/vespa/eval/instruction/sparse_dot_product_function.h |  23
42 files changed, 1315 insertions(+), 703 deletions(-)
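
The diff below migrates the eval test suites from the old tensor_model.hpp helpers (Layout, spec(), N(), Div16(), float_cells()) to the GenSpec builder declared in gen_spec.h. A minimal before/after sketch of the pattern, assuming only the GenSpec calls exercised by this diff:

    #include <vespa/eval/eval/test/gen_spec.h>
    using namespace vespalib::eval;
    using namespace vespalib::eval::test;

    // old: layout + sequence, with separate float_cells(...) variants
    //   TensorSpec expect = spec({x(3), y(5)}, N());
    // new: one fluent layout; cell type selected per test iteration
    GenSpec layout = GenSpec().idx("x", 3).idx("y", 5);   // tensor(x[3],y[5])
    TensorSpec d = layout.cpy().cells_double().gen();     // cells 1.0, 2.0, ...
    TensorSpec f = layout.cpy().cells_float().gen();      // same layout, float cells

Instead of listing float layouts explicitly, each test now generates both cell types from a single layout.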
diff --git a/eval/src/tests/eval/compile_cache/compile_cache_test.cpp b/eval/src/tests/eval/compile_cache/compile_cache_test.cpp
index 5dea89b3a63..a0c71e2f756 100644
--- a/eval/src/tests/eval/compile_cache/compile_cache_test.cpp
+++ b/eval/src/tests/eval/compile_cache/compile_cache_test.cpp
@@ -1,5 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/testkit/time_bomb.h>
#include <vespa/eval/eval/llvm/compile_cache.h>
#include <vespa/eval/eval/key_gen.h>
#include <vespa/eval/eval/test/eval_spec.h>
@@ -7,7 +8,6 @@
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <vespa/vespalib/util/blockingthreadstackexecutor.h>
#include <vespa/vespalib/util/stringfmt.h>
-#include <thread>
#include <set>
using namespace vespalib;
diff --git a/eval/src/tests/eval/fast_value/fast_value_test.cpp b/eval/src/tests/eval/fast_value/fast_value_test.cpp
index 279f17a1ead..9d29d8de660 100644
--- a/eval/src/tests/eval/fast_value/fast_value_test.cpp
+++ b/eval/src/tests/eval/fast_value/fast_value_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.hpp>
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/value_codec.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib;
@@ -142,29 +142,31 @@ TEST(FastValueBuilderTest, mixed_add_subspace_robustness) {
}
}
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec(); }
+
+const std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
TEST(FastValueBuilderFactoryTest, fast_values_can_be_copied) {
auto factory = FastValueBuilderFactory::get();
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, factory);
- std::unique_ptr<Value> copy = factory.copy(*value);
- TensorSpec actual = spec_from_value(*copy);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ std::unique_ptr<Value> value = value_from_spec(expect, factory);
+ std::unique_ptr<Value> copy = factory.copy(*value);
+ TensorSpec actual = spec_from_value(*copy);
+ EXPECT_EQ(actual, expect);
+ }
}
}
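
A note on the cpy() calls above (an inference from usage, not spelled out in this diff): the layout objects are shared const data, while GenSpec mutators like cells_float() appear to chain on the object itself, so each iteration takes a value copy before changing the cell type. The loop shape, as a sketch:

    // assumption: mutators chain in place; cpy() returns an independent copy
    for (const GenSpec &layout : layouts) {
        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
                                   layout.cpy().cells_double().gen() })
        {
            // ... round-trip expect through the Value implementation under test
        }
    }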
diff --git a/eval/src/tests/eval/gen_spec/CMakeLists.txt b/eval/src/tests/eval/gen_spec/CMakeLists.txt
new file mode 100644
index 00000000000..3613554f0a0
--- /dev/null
+++ b/eval/src/tests/eval/gen_spec/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_gen_spec_test_app TEST
+ SOURCES
+ gen_spec_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_gen_spec_test_app COMMAND eval_gen_spec_test_app)
diff --git a/eval/src/tests/eval/gen_spec/gen_spec_test.cpp b/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
new file mode 100644
index 00000000000..0d1a4744e42
--- /dev/null
+++ b/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
@@ -0,0 +1,196 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/test/gen_spec.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+
+//-----------------------------------------------------------------------------
+
+TEST(DimSpec, indexed_dimension) {
+ ValueType::Dimension ref("foo", 10);
+ DimSpec idx("foo", 10);
+ EXPECT_EQ(idx.type(), ref);
+ EXPECT_TRUE(ref.is_indexed());
+ EXPECT_EQ(idx.name(), "foo");
+ EXPECT_EQ(idx.size(), 10);
+ EXPECT_EQ(idx.label(3), TensorSpec::Label(size_t(3)));
+}
+
+TEST(DimSpec, mapped_dimension) {
+ ValueType::Dimension ref("foo");
+ DimSpec map("foo", {"a", "b", "c", "d"});
+ EXPECT_EQ(map.type(), ref);
+ EXPECT_TRUE(ref.is_mapped());
+ EXPECT_EQ(map.name(), "foo");
+ EXPECT_EQ(map.size(), 4);
+ EXPECT_EQ(map.label(2), TensorSpec::Label("c"));
+}
+
+TEST(DimSpec, simple_dictionary_creation) {
+ auto dict = DimSpec::make_dict(5, 1, "");
+ std::vector<vespalib::string> expect = {"0", "1", "2", "3", "4"};
+ EXPECT_EQ(dict, expect);
+}
+
+TEST(DimSpec, advanced_dictionary_creation) {
+ auto dict = DimSpec::make_dict(5, 3, "str_");
+ std::vector<vespalib::string> expect = {"str_0", "str_3", "str_6", "str_9", "str_12"};
+ EXPECT_EQ(dict, expect);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(GenSpec, default_spec) {
+ GenSpec spec;
+ EXPECT_TRUE(spec.dims().empty());
+ EXPECT_EQ(spec.cells(), CellType::DOUBLE);
+ auto seq = spec.seq();
+ for (size_t i = 0; i < 4096; ++i) {
+ EXPECT_EQ(seq(i), (i + 1.0));
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+TensorSpec scalar_1 = TensorSpec("double").add({}, 1.0);
+TensorSpec scalar_5 = TensorSpec("double").add({}, 5.0);
+
+TEST(GenSpec, scalar_double) {
+ EXPECT_EQ(GenSpec().gen(), scalar_1);
+ EXPECT_EQ(GenSpec().seq_bias(5.0).gen(), scalar_5);
+}
+
+TEST(GenSpec, not_scalar_float_just_yet) {
+ EXPECT_EQ(GenSpec().cells_float().gen(), scalar_1);
+ EXPECT_EQ(GenSpec().cells_float().seq_bias(5.0).gen(), scalar_5);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(Seq, seq_n) {
+ GenSpec::seq_t seq = GenSpec().seq_n().seq();
+ for (size_t i = 0; i < 4096; ++i) {
+ EXPECT_EQ(seq(i), (i + 1.0));
+ }
+}
+
+TEST(Seq, seq_bias) {
+ GenSpec::seq_t seq = GenSpec().seq_bias(13.0).seq();
+ for (size_t i = 0; i < 4096; ++i) {
+ EXPECT_EQ(seq(i), (i + 13.0));
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+GenSpec flt() { return GenSpec().cells_float(); }
+GenSpec dbl() { return GenSpec().cells_double(); }
+
+TEST(GenSpec, value_type) {
+ EXPECT_EQ(dbl().type().to_spec(), "double");
+ EXPECT_EQ(flt().type().to_spec(), "double"); // NB: a scalar stays double even with cells_float()
+ EXPECT_EQ(dbl().idx("x", 10).type().to_spec(), "tensor(x[10])");
+ EXPECT_EQ(flt().idx("x", 10).type().to_spec(), "tensor<float>(x[10])");
+ EXPECT_EQ(dbl().map("y", {}).type().to_spec(), "tensor(y{})");
+ EXPECT_EQ(flt().map("y", {}).type().to_spec(), "tensor<float>(y{})");
+ EXPECT_EQ(dbl().idx("x", 10).map("y", {}).type().to_spec(), "tensor(x[10],y{})");
+ EXPECT_EQ(flt().idx("x", 10).map("y", {}).type().to_spec(), "tensor<float>(x[10],y{})");
+ EXPECT_EQ(dbl().map("y", 3, 1).idx("x", 10).type().to_spec(), "tensor(x[10],y{})");
+ EXPECT_EQ(flt().map("y", 3, 1, "str").idx("x", 10).type().to_spec(), "tensor<float>(x[10],y{})");
+}
+
+//-----------------------------------------------------------------------------
+
+TensorSpec basic_vector = TensorSpec("tensor(a[5])")
+ .add({{"a", 0}}, 1.0)
+ .add({{"a", 1}}, 2.0)
+ .add({{"a", 2}}, 3.0)
+ .add({{"a", 3}}, 4.0)
+ .add({{"a", 4}}, 5.0);
+
+TensorSpec float_vector = TensorSpec("tensor<float>(a[5])")
+ .add({{"a", 0}}, 1.0)
+ .add({{"a", 1}}, 2.0)
+ .add({{"a", 2}}, 3.0)
+ .add({{"a", 3}}, 4.0)
+ .add({{"a", 4}}, 5.0);
+
+TensorSpec custom_vector = TensorSpec("tensor(a[5])")
+ .add({{"a", 0}}, 5.0)
+ .add({{"a", 1}}, 4.0)
+ .add({{"a", 2}}, 3.0)
+ .add({{"a", 3}}, 2.0)
+ .add({{"a", 4}}, 1.0);
+
+TEST(GenSpec, generating_basic_vector) {
+ EXPECT_EQ(GenSpec().idx("a", 5).gen(), basic_vector);
+}
+
+TEST(GenSpec, generating_float_vector) {
+ EXPECT_EQ(GenSpec().idx("a", 5).cells_float().gen(), float_vector);
+}
+
+TEST(GenSpec, generating_custom_vector) {
+ GenSpec::seq_t my_seq = [](size_t idx) noexcept { return (5.0 - idx); };
+ EXPECT_EQ(GenSpec().idx("a", 5).seq(my_seq).gen(), custom_vector);
+}
+
+//-----------------------------------------------------------------------------
+
+TensorSpec basic_map = TensorSpec("tensor(a{})")
+ .add({{"a", "0"}}, 1.0)
+ .add({{"a", "1"}}, 2.0)
+ .add({{"a", "2"}}, 3.0);
+
+TensorSpec custom_map = TensorSpec("tensor(a{})")
+ .add({{"a", "s0"}}, 1.0)
+ .add({{"a", "s5"}}, 2.0)
+ .add({{"a", "s10"}}, 3.0);
+
+TEST(GenSpec, generating_basic_map) {
+ EXPECT_EQ(GenSpec().map("a", 3).gen(), basic_map);
+ EXPECT_EQ(GenSpec().map("a", 3, 1).gen(), basic_map);
+ EXPECT_EQ(GenSpec().map("a", 3, 1, "").gen(), basic_map);
+ EXPECT_EQ(GenSpec().map("a", {"0", "1", "2"}).gen(), basic_map);
+}
+
+TEST(GenSpec, generating_custom_map) {
+ EXPECT_EQ(GenSpec().map("a", 3, 5, "s").gen(), custom_map);
+ EXPECT_EQ(GenSpec().map("a", {"s0", "s5", "s10"}).gen(), custom_map);
+}
+
+//-----------------------------------------------------------------------------
+
+TensorSpec basic_mixed = TensorSpec("tensor(a{},b[1],c{},d[3])")
+ .add({{"a", "0"},{"b", 0},{"c", "0"},{"d", 0}}, 1.0)
+ .add({{"a", "0"},{"b", 0},{"c", "0"},{"d", 1}}, 2.0)
+ .add({{"a", "0"},{"b", 0},{"c", "0"},{"d", 2}}, 3.0)
+ .add({{"a", "1"},{"b", 0},{"c", "0"},{"d", 0}}, 4.0)
+ .add({{"a", "1"},{"b", 0},{"c", "0"},{"d", 1}}, 5.0)
+ .add({{"a", "1"},{"b", 0},{"c", "0"},{"d", 2}}, 6.0)
+ .add({{"a", "2"},{"b", 0},{"c", "0"},{"d", 0}}, 7.0)
+ .add({{"a", "2"},{"b", 0},{"c", "0"},{"d", 1}}, 8.0)
+ .add({{"a", "2"},{"b", 0},{"c", "0"},{"d", 2}}, 9.0);
+
+TensorSpec inverted_mixed = TensorSpec("tensor(a{},b[1],c{},d[3])")
+ .add({{"a", "0"},{"b", 0},{"c", "0"},{"d", 0}}, 1.0)
+ .add({{"a", "1"},{"b", 0},{"c", "0"},{"d", 0}}, 2.0)
+ .add({{"a", "2"},{"b", 0},{"c", "0"},{"d", 0}}, 3.0)
+ .add({{"a", "0"},{"b", 0},{"c", "0"},{"d", 1}}, 4.0)
+ .add({{"a", "1"},{"b", 0},{"c", "0"},{"d", 1}}, 5.0)
+ .add({{"a", "2"},{"b", 0},{"c", "0"},{"d", 1}}, 6.0)
+ .add({{"a", "0"},{"b", 0},{"c", "0"},{"d", 2}}, 7.0)
+ .add({{"a", "1"},{"b", 0},{"c", "0"},{"d", 2}}, 8.0)
+ .add({{"a", "2"},{"b", 0},{"c", "0"},{"d", 2}}, 9.0);
+
+TEST(GenSpec, generating_basic_mixed) {
+ EXPECT_EQ(GenSpec().map("a", 3).idx("b", 1).map("c", 1).idx("d", 3).gen(), basic_mixed);
+}
+
+TEST(GenSpec, generating_inverted_mixed) {
+ EXPECT_EQ(GenSpec().idx("d", 3).map("c", 1).idx("b", 1).map("a", 3).gen(), inverted_mixed);
+}
+
+//-----------------------------------------------------------------------------
+
+GTEST_MAIN_RUN_ALL_TESTS()
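
For reference, the two dictionary tests above pin down the label scheme of DimSpec::make_dict(size, stride, prefix): label i is the prefix followed by i * stride rendered as a decimal string. A sketch of that invariant (formula inferred from the expected vectors, not from documentation):

    auto dict = DimSpec::make_dict(5, 3, "str_");
    std::vector<vespalib::string> expect = {"str_0", "str_3", "str_6", "str_9", "str_12"};
    assert(dict == expect); // dict[i] == prefix + to_string(i * stride)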
diff --git a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
index 0495923018e..3fcca5e34d8 100644
--- a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
+++ b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
@@ -1,13 +1,11 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
#include <vespa/vespalib/gtest/gtest.h>
#include <iostream>
using namespace vespalib;
using namespace vespalib::eval;
-using namespace vespalib::eval::test;
TensorSpec dense_2d_some_cells(bool square) {
return TensorSpec("tensor(a[3],d[5])")
diff --git a/eval/src/tests/eval/simple_value/simple_value_test.cpp b/eval/src/tests/eval/simple_value/simple_value_test.cpp
index 3a653b75172..c1301bf6b1a 100644
--- a/eval/src/tests/eval/simple_value/simple_value_test.cpp
+++ b/eval/src/tests/eval/simple_value/simple_value_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/instruction/generic_join.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -23,45 +23,36 @@ using Handle = SharedStringRepo::Handle;
vespalib::string as_str(string_id label) { return Handle::string_from_id(label); }
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec(); }
+
+const std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
-std::vector<Layout> join_layouts = {
- {}, {},
- {x(5)}, {x(5)},
- {x(5)}, {y(5)},
- {x(5)}, {x(5),y(5)},
- {y(3)}, {x(2),z(3)},
- {x(3),y(5)}, {y(5),z(7)},
- float_cells({x(3),y(5)}), {y(5),z(7)},
- {x(3),y(5)}, float_cells({y(5),z(7)}),
- float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {y({"foo","bar","baz"})},
- {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
- {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
- float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})})
+const std::vector<GenSpec> join_layouts = {
+ G(), G(),
+ G().idx("x", 5), G().idx("x", 5),
+ G().idx("x", 5), G().idx("y", 5),
+ G().idx("x", 5), G().idx("x", 5).idx("y", 5),
+ G().idx("y", 3), G().idx("x", 2).idx("z", 3),
+ G().idx("x", 3).idx("y", 5), G().idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
+ G().map("x", {"a","b","c"}), G().map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}), G().map("y", {"foo","bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5), G().idx("y", 5).map("z", {"i","j","k","l"})
};
TensorSpec simple_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_t function) {
@@ -76,20 +67,26 @@ TensorSpec simple_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_
TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
- TensorSpec actual = spec_from_value(*value);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
}
}
TEST(SimpleValueTest, simple_values_can_be_copied) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
- std::unique_ptr<Value> copy = SimpleValueBuilderFactory::get().copy(*value);
- TensorSpec actual = spec_from_value(*copy);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
+ std::unique_ptr<Value> copy = SimpleValueBuilderFactory::get().copy(*value);
+ TensorSpec actual = spec_from_value(*copy);
+ EXPECT_EQ(actual, expect);
+ }
}
}
@@ -126,16 +123,26 @@ TEST(SimpleValueTest, simple_value_can_be_built_and_inspected) {
EXPECT_EQ(result["bb"], 3);
}
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
+
TEST(SimpleValueTest, new_generic_join_works_for_simple_values) {
ASSERT_TRUE((join_layouts.size() % 2) == 0);
for (size_t i = 0; i < join_layouts.size(); i += 2) {
- TensorSpec lhs = spec(join_layouts[i], Div16(N()));
- TensorSpec rhs = spec(join_layouts[i + 1], Div16(N()));
- for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto expect = ReferenceOperations::join(lhs, rhs, fun);
- auto actual = simple_value_join(lhs, rhs, fun);
- EXPECT_EQ(actual, expect);
+ const auto l = join_layouts[i].cpy().seq(N_16ths);
+ const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
+ for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+ l.cpy().cells_double().gen() })
+ {
+ for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+ r.cpy().cells_double().gen() })
+ {
+ for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = ReferenceOperations::join(lhs, rhs, fun);
+ auto actual = simple_value_join(lhs, rhs, fun);
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
}
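
The N_16ths sequence introduced here is presumably about numeric exactness: (i + 1) / 16.0 is a small dyadic rational, representable without rounding as both float and double, so join results (including Div) can be compared with EXPECT_EQ across cell types. A quick illustration of the assumption:

    GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
    double d = N_16ths(5);             // 6/16 = 0.375, exact in binary
    float  f = static_cast<float>(d);  // narrows without rounding
    assert(static_cast<double>(f) == d);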
diff --git a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
index 23bd16cb721..dd21b663fa9 100644
--- a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
+++ b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
@@ -8,7 +8,7 @@
#include <vespa/eval/instruction/dense_cell_range_function.h>
#include <vespa/eval/instruction/dense_lambda_peek_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/eval/eval/tensor_nodes.h>
@@ -25,15 +25,15 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("a", spec(1))
- .add("b", spec(2))
- .add("x3", spec({x(3)}, N()))
- .add("x3f", spec(float_cells({x(3)}), N()))
- .add("x3m", spec({x({"0", "1", "2"})}, N()))
- .add("x3y5", spec({x(3), y(5)}, N()))
- .add("x3y5f", spec(float_cells({x(3), y(5)}), N()))
- .add("x15", spec({x(15)}, N()))
- .add("x15f", spec(float_cells({x(15)}), N()));
+ .add("a", GenSpec().seq_bias(1).gen())
+ .add("b", GenSpec().seq_bias(2).gen())
+ .add("x3", GenSpec().idx("x", 3).gen())
+ .add("x3f", GenSpec().idx("x", 3).cells_float().gen())
+ .add("x3m", GenSpec().map("x", 3).gen())
+ .add("x3y5", GenSpec().idx("x", 3).idx("y", 5).gen())
+ .add("x3y5f", GenSpec().idx("x", 3).idx("y", 5).cells_float().gen())
+ .add("x15", GenSpec().idx("x", 15).gen())
+ .add("x15f", GenSpec().idx("x", 15).cells_float().gen());
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/eval/value_codec/value_codec_test.cpp b/eval/src/tests/eval/value_codec/value_codec_test.cpp
index 2b03cffe730..110b58c27de 100644
--- a/eval/src/tests/eval/value_codec/value_codec_test.cpp
+++ b/eval/src/tests/eval/value_codec/value_codec_test.cpp
@@ -2,7 +2,7 @@
#include <iostream>
#include <vespa/eval/eval/simple_value.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <vespa/vespalib/data/memory.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -15,28 +15,30 @@ using namespace vespalib::eval::test;
const ValueBuilderFactory &factory = SimpleValueBuilderFactory::get();
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec(); }
+
+const std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
TEST(ValueCodecTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, factory);
- TensorSpec actual = spec_from_value(*value);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ std::unique_ptr<Value> value = value_from_spec(expect, factory);
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
}
}
@@ -66,8 +68,8 @@ TEST(ValueCodecTest, simple_values_can_be_built_using_tensor_spec) {
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
Value::UP full_tensor = value_from_spec(full_spec, factory);
- EXPECT_EQUAL(full_spec, spec_from_value(*tensor));
- EXPECT_EQUAL(full_spec, spec_from_value(*full_tensor));
+ EXPECT_EQ(full_spec, spec_from_value(*tensor));
+ EXPECT_EQ(full_spec, spec_from_value(*full_tensor));
};
//-----------------------------------------------------------------------------
@@ -333,11 +335,11 @@ TEST(ValueCodecTest, bad_sparse_tensors_are_caught) {
bad.encode_default(data_default);
bad.encode_with_double(data_double);
bad.encode_with_float(data_float);
- EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
"serialized input claims 12345678 blocks of size 1*8, but only");
- EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
"serialized input claims 12345678 blocks of size 1*8, but only");
- EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
"serialized input claims 12345678 blocks of size 1*4, but only");
}
@@ -386,11 +388,11 @@ TEST(ValueCodecTest, bad_dense_tensors_are_caught) {
bad.encode_default(data_default);
bad.encode_with_double(data_double);
bad.encode_with_float(data_float);
- EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
"serialized input claims 1 blocks of size 60000*8, but only");
- EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
"serialized input claims 1 blocks of size 60000*8, but only");
- EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
"serialized input claims 1 blocks of size 60000*4, but only");
}
diff --git a/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp b/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp
index 35195522adc..d2dccfde2fd 100644
--- a/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp
+++ b/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -20,11 +20,11 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("x5", spec({x(5)}, N()))
- .add("x5f", spec(float_cells({x(5)}), N()))
- .add("x5y1", spec({x(5),y(1)}, N()))
- .add("y1z1", spec({y(1),z(1)}, N()))
- .add("x_m", spec({x({"a"})}, N()));
+ .add("x5", GenSpec().idx("x", 5).gen())
+ .add("x5f", GenSpec().idx("x", 5).cells_float().gen())
+ .add("x5y1", GenSpec().idx("x", 5).idx("y", 1).gen())
+ .add("y1z1", GenSpec().idx("y", 5).idx("z", 1).gen())
+ .add("x_m", GenSpec().map("x", {"a"}).gen());
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
index 988ca79a04a..5dcdbc5bab8 100644
--- a/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
+++ b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/eval/value_codec.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
using namespace vespalib::eval::tensor_function;
using namespace vespalib::eval::test;
@@ -33,7 +33,7 @@ struct Fixture {
std::vector<TensorFunction::Child::CREF> children;
InterpretedFunction::State state;
Fixture()
- : my_value(value_from_spec(spec({x(10)}, N()), prod_factory)),
+ : my_value(value_from_spec(GenSpec().idx("x", 10).gen(), prod_factory)),
new_type(ValueType::from_spec("tensor(x[5],y[2])")),
mock_child(my_value->type()),
my_fun(new_type, mock_child),
diff --git a/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp b/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp
index dc90a5e54a1..e915a396ae7 100644
--- a/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp
+++ b/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp
@@ -4,7 +4,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -19,13 +19,13 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("x5", spec({x(5)}, N()))
- .add("x5f", spec(float_cells({x(5)}), N()))
- .add("x_m", spec({x({"a", "b", "c"})}, N()))
- .add("xy_mm", spec({x({"a", "b", "c"}),y({"d","e"})}, N()))
- .add("x5y3z_m", spec({x(5),y(3),z({"a","b"})}, N()))
- .add("x5yz_m", spec({x(5),y({"a","b"}),z({"d","e"})}, N()))
- .add("x5y3", spec({x(5),y(3)}, N()));
+ .add("x5", GenSpec().idx("x", 5).gen())
+ .add("x5f", GenSpec().idx("x", 5).cells_float().gen())
+ .add("x_m", GenSpec().map("x", {"a", "b", "c"}).gen())
+ .add("xy_mm", GenSpec().map("x", {"a", "b", "c"}).map("y", {"d","e"}).gen())
+ .add("x5y3z_m", GenSpec().idx("x", 5).idx("y", 3).map("z", {"a","b"}).gen())
+ .add("x5yz_m", GenSpec().idx("x", 5).map("y", {"a","b"}).map("z", {"d","e"}).gen())
+ .add("x5y3", GenSpec().idx("x", 5).idx("y", 3).gen());
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
index bc8ea84744f..17e012b8e33 100644
--- a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
+++ b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
@@ -7,7 +7,7 @@
#include <vespa/eval/instruction/generic_concat.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -18,53 +18,48 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> concat_layouts = {
- {}, {},
- {}, {y(5)},
- float_cells({y(5)}), {},
- {}, float_cells({y(5)}),
- {y(5)}, {},
- {y(2)}, {y(3)},
- {y(2)}, {x(3)},
- {x(2)}, {z(3)},
- {x(2),y(3)}, {x(2),y(3)},
- {x(2),y(3)}, {x(2),y(4)},
- {y(3),z(5)}, {y(3),z(5)},
- {y(3),z(5)}, {y(4),z(5)},
- {x(2),y(3),z(5)}, {x(2),y(3),z(5)},
- {x(2),y(3),z(5)}, {x(2),y(4),z(5)},
- {x(2),y(3),z({"a","b"})}, {x(2),y(3),z({"b","c"})},
- {x(2),y(3),z({"a","b"})}, {x(2),y(4),z({"b","c"})},
- {y(5)}, {y(2),x(5)},
- {x(3)}, {y(2),z(3)},
- {y(2)}, {y(3),x(5),z(2)},
- {y(2),x(5),z(2)}, {y(3),x(5),z(2)},
- {y(3),x(5)}, {x(5),z(7)},
- float_cells({y(3),x(5)}), {x(5),z(7)},
- float_cells({y(3),x(5)}), {},
- {y(3),x(5)}, float_cells({x(5),z(7)}),
- float_cells({y(3),x(5)}), float_cells({x(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {x({"b","c","d"})},
- float_cells({x({"a","b","c"})}), {x({"b","c","d"})},
- {x({"a","b","c"})}, float_cells({x({"b","c","d"})}),
- float_cells({x({"a","b","c"})}), float_cells({z({"foo","bar","baz"})}),
- {x({"a","b","c"})}, {x({"a","b","c"}),z({"foo","bar","baz"})},
- {x({"a","b"}),z({"foo","bar","baz"})}, {x({"a","b","c"}),z({"foo","bar"})},
- {x({"a","b","c"}),y(3)}, {y(2)},
- {x({"a","b","c"}),y(3)}, {z(5)},
- {x({"a","b","c"}),y(3)}, {y(2),z(5)},
- {x({"a","b","c"}),y(3)}, {y(2)},
- {x({"a","b","c"}),y(3),z(5)}, {z(5)},
- {y(2)}, {x({"a","b","c"}),y(3)},
- {z(5)}, {x({"a","b","c"}),y(3)},
- {y(2),z(5)}, {x({"a","b","c"}),y(3)},
- {y(2)}, {x({"a","b","c"}),y(3)},
- {z(5)}, {x({"a","b","c"}),y(3),z(5)},
- {y(2),z(5)}, {x({"a","b","c"}),y(3),z(5)},
- {y(2),x({"a","b","c"})}, {y(3),x({"b","c","d"})},
- {y(2),x({"a","b"})}, {y(3),z({"c","d"})}
+GenSpec G() { return GenSpec(); }
+
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
+
+const std::vector<GenSpec> concat_layouts = {
+ G(), G(),
+ G(), G().idx("y", 5),
+ G().idx("y", 5), G(),
+ G().idx("y", 2), G().idx("y", 3),
+ G().idx("y", 2), G().idx("x", 3),
+ G().idx("x", 2), G().idx("z", 3),
+ G().idx("x", 2).idx("y", 3), G().idx("x", 2).idx("y", 3),
+ G().idx("x", 2).idx("y", 3), G().idx("x", 2).idx("y", 4),
+ G().idx("y", 3).idx("z", 5), G().idx("y", 3).idx("z", 5),
+ G().idx("y", 3).idx("z", 5), G().idx("y", 4).idx("z", 5),
+ G().idx("x", 2).idx("y", 3).idx("z", 5), G().idx("x", 2).idx("y", 3).idx("z", 5),
+ G().idx("x", 2).idx("y", 3).idx("z", 5), G().idx("x", 2).idx("y", 4).idx("z", 5),
+ G().idx("x", 2).idx("y", 3).map("z", {"a","b"}), G().idx("x", 2).idx("y", 3).map("z", {"b","c"}),
+ G().idx("x", 2).idx("y", 3).map("z", {"a","b"}), G().idx("x", 2).idx("y", 4).map("z", {"b","c"}),
+ G().idx("y", 5), G().idx("x", 5).idx("y", 2),
+ G().idx("x", 3), G().idx("y", 2).idx("z", 3),
+ G().idx("y", 2), G().idx("x", 5).idx("y", 3).idx("z", 2),
+ G().idx("x", 5).idx("y", 2).idx("z", 2), G().idx("x", 5).idx("y", 3).idx("z", 2),
+ G().idx("x", 5).idx("y", 3), G().idx("x", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"b","c","d"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("z", {"foo","bar","baz"}),
+ G().map("x", {"a","b"}).map("z", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("z", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).idx("y", 3), G().idx("y", 2),
+ G().map("x", {"a","b","c"}).idx("y", 3), G().idx("z", 5),
+ G().map("x", {"a","b","c"}).idx("y", 3), G().idx("y", 2).idx("z", 5),
+ G().map("x", {"a","b","c"}).idx("y", 3), G().idx("y", 2),
+ G().map("x", {"a","b","c"}).idx("y", 3).idx("z", 5), G().idx("z", 5),
+ G().idx("y", 2), G().map("x", {"a","b","c"}).idx("y", 3),
+ G().idx("z", 5), G().map("x", {"a","b","c"}).idx("y", 3),
+ G().idx("y", 2).idx("z", 5), G().map("x", {"a","b","c"}).idx("y", 3),
+ G().idx("y", 2), G().map("x", {"a","b","c"}).idx("y", 3),
+ G().idx("z", 5), G().map("x", {"a","b","c"}).idx("y", 3).idx("z", 5),
+ G().idx("y", 2).idx("z", 5), G().map("x", {"a","b","c"}).idx("y", 3).idx("z", 5),
+ G().map("x", {"a","b","c"}).idx("y", 2), G().map("x", {"b","c","d"}).idx("y", 3),
+ G().map("x", {"a","b"}).idx("y", 2), G().idx("y", 3).map("z", {"c","d"})
};
TensorSpec perform_generic_concat(const TensorSpec &a, const TensorSpec &b,
@@ -81,12 +76,20 @@ TensorSpec perform_generic_concat(const TensorSpec &a, const TensorSpec &b,
void test_generic_concat_with(const ValueBuilderFactory &factory) {
ASSERT_TRUE((concat_layouts.size() % 2) == 0);
for (size_t i = 0; i < concat_layouts.size(); i += 2) {
- const TensorSpec lhs = spec(concat_layouts[i], N());
- const TensorSpec rhs = spec(concat_layouts[i + 1], Div16(N()));
- SCOPED_TRACE(fmt("\n===\nin LHS: %s\nin RHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto actual = perform_generic_concat(lhs, rhs, "y", factory);
- auto expect = ReferenceOperations::concat(lhs, rhs, "y");
- EXPECT_EQ(actual, expect);
+ const auto l = concat_layouts[i];
+ const auto r = concat_layouts[i+1].cpy().seq(N_16ths);
+ for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+ l.cpy().cells_double().gen() })
+ {
+ for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+ r.cpy().cells_double().gen() })
+ {
+ SCOPED_TRACE(fmt("\n===\nin LHS: %s\nin RHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto actual = perform_generic_concat(lhs, rhs, "y", factory);
+ auto expect = ReferenceOperations::concat(lhs, rhs, "y");
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
diff --git a/eval/src/tests/instruction/generic_create/generic_create_test.cpp b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
index 00af75e4d83..fcf4618d592 100644
--- a/eval/src/tests/instruction/generic_create/generic_create_test.cpp
+++ b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
@@ -6,7 +6,7 @@
#include <vespa/eval/instruction/generic_create.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <stdlib.h>
@@ -19,18 +19,17 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> create_layouts = {
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec(); }
+
+const std::vector<GenSpec> create_layouts = {
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
TensorSpec remove_each(const TensorSpec &a, size_t n) {
@@ -91,16 +90,19 @@ TensorSpec perform_generic_create(const TensorSpec &a, const ValueBuilderFactory
}
void test_generic_create_with(const ValueBuilderFactory &factory) {
- for (const auto & layout : create_layouts) {
- TensorSpec full = spec(layout, N());
- auto actual = perform_generic_create(full, factory);
- auto expect = reference_create(full).normalize();
- EXPECT_EQ(actual, expect);
- for (size_t n : {2, 3, 4, 5}) {
- TensorSpec partial = remove_each(full, n);
- actual = perform_generic_create(partial, factory);
- expect = reference_create(partial).normalize();
+ for (const auto &layout : create_layouts) {
+ for (TensorSpec full : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ auto actual = perform_generic_create(full, factory);
+ auto expect = reference_create(full).normalize();
EXPECT_EQ(actual, expect);
+ for (size_t n : {2, 3, 4, 5}) {
+ TensorSpec partial = remove_each(full, n);
+ actual = perform_generic_create(partial, factory);
+ expect = reference_create(partial).normalize();
+ EXPECT_EQ(actual, expect);
+ }
}
}
}
diff --git a/eval/src/tests/instruction/generic_join/generic_join_test.cpp b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
index 8eca3cad763..f724cdf1024 100644
--- a/eval/src/tests/instruction/generic_join/generic_join_test.cpp
+++ b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
@@ -6,7 +6,7 @@
#include <vespa/eval/instruction/generic_join.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -17,33 +17,25 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> join_layouts = {
- {}, {},
- {x(5)}, {x(5)},
- {x(5)}, {y(5)},
- {x(5)}, {x(5),y(5)},
- {y(3)}, {x(2),z(3)},
- {x(3),y(5)}, {y(5),z(7)},
- float_cells({x(3),y(5)}), {y(5),z(7)},
- {x(3),y(5)}, float_cells({y(5),z(7)}),
- float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {y({"foo","bar","baz"})},
- {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
- {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
- float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})}),
- {x({"a","b","c"}),y(5)}, float_cells({y(5)}),
- {y(5)}, float_cells({x({"a","b","c"}),y(5)}),
- {x({}),y(5)}, float_cells({y(5)})
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
+
+GenSpec G() { return GenSpec().seq(N_16ths); }
+
+const std::vector<GenSpec> join_layouts = {
+ G(), G(),
+ G().idx("x", 5), G().idx("x", 5),
+ G().idx("x", 5), G().idx("y", 5),
+ G().idx("x", 5), G().idx("x", 5).idx("y", 5),
+ G().idx("y", 3), G().idx("x", 2).idx("z", 3),
+ G().idx("x", 3).idx("y", 5), G().idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
+ G().map("x", {"a","b","c"}), G().map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}), G().map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5), G().idx("y", 5).map("z", {"i","j","k","l"})
};
bool join_address(const TensorSpec::Address &a, const TensorSpec::Address &b, TensorSpec::Address &addr) {
@@ -113,15 +105,23 @@ TEST(GenericJoinTest, dense_join_plan_can_be_executed) {
TEST(GenericJoinTest, generic_join_works_for_simple_and_fast_values) {
ASSERT_TRUE((join_layouts.size() % 2) == 0);
for (size_t i = 0; i < join_layouts.size(); i += 2) {
- TensorSpec lhs = spec(join_layouts[i], Div16(N()));
- TensorSpec rhs = spec(join_layouts[i + 1], Div16(N()));
- for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto expect = ReferenceOperations::join(lhs, rhs, fun);
- auto simple = perform_generic_join(lhs, rhs, fun, SimpleValueBuilderFactory::get());
- auto fast = perform_generic_join(lhs, rhs, fun, FastValueBuilderFactory::get());
- EXPECT_EQ(simple, expect);
- EXPECT_EQ(fast, expect);
+ const auto &l = join_layouts[i];
+ const auto &r = join_layouts[i+1];
+ for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+ l.cpy().cells_double().gen() })
+ {
+ for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+ r.cpy().cells_double().gen() })
+ {
+ for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = ReferenceOperations::join(lhs, rhs, fun);
+ auto simple = perform_generic_join(lhs, rhs, fun, SimpleValueBuilderFactory::get());
+ auto fast = perform_generic_join(lhs, rhs, fun, FastValueBuilderFactory::get());
+ EXPECT_EQ(simple, expect);
+ EXPECT_EQ(fast, expect);
+ }
+ }
}
}
}
diff --git a/eval/src/tests/instruction/generic_map/generic_map_test.cpp b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
index 687b6aa60ac..8e39fa68072 100644
--- a/eval/src/tests/instruction/generic_map/generic_map_test.cpp
+++ b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
@@ -6,7 +6,7 @@
#include <vespa/eval/instruction/generic_map.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -17,18 +17,20 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> map_layouts = {
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
+
+GenSpec G() { return GenSpec().seq(N_16ths); }
+
+const std::vector<GenSpec> map_layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
TensorSpec perform_generic_map(const TensorSpec &a, map_fun_t func, const ValueBuilderFactory &factory)
@@ -40,14 +42,16 @@ TensorSpec perform_generic_map(const TensorSpec &a, map_fun_t func, const ValueB
}
void test_generic_map_with(const ValueBuilderFactory &factory) {
- for (const auto & layout : map_layouts) {
- TensorSpec lhs = spec(layout, Div16(N()));
- ValueType lhs_type = ValueType::from_spec(lhs.type());
- for (auto func : {operation::Floor::f, operation::Fabs::f, operation::Square::f, operation::Inv::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
- auto expect = ReferenceOperations::map(lhs, func);
- auto actual = perform_generic_map(lhs, func, factory);
- EXPECT_EQ(actual, expect);
+ for (const auto &layout : map_layouts) {
+ for (TensorSpec lhs : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ for (auto func : {operation::Floor::f, operation::Fabs::f, operation::Square::f, operation::Inv::f}) {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ auto expect = ReferenceOperations::map(lhs, func);
+ auto actual = perform_generic_map(lhs, func, factory);
+ EXPECT_EQ(actual, expect);
+ }
}
}
}
diff --git a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
index 60a27e6f6e9..d5f7bc071f6 100644
--- a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
+++ b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
@@ -6,7 +6,7 @@
#include <vespa/eval/instruction/generic_merge.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <optional>
@@ -18,20 +18,22 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> merge_layouts = {
- {}, {},
- {x(5)}, {x(5)},
- {x(3),y(5)}, {x(3),y(5)},
- float_cells({x(3),y(5)}), {x(3),y(5)},
- {x(3),y(5)}, float_cells({x(3),y(5)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"c","d","e"})},
- {x({"a","c","e"})}, {x({"b","c","d"})},
- {x({"b","c","d"})}, {x({"a","c","e"})},
- {x({"a","b","c"})}, {x({"c","d"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"b","c"}),y({"any","foo","bar"})},
- {x(3),y({"foo", "bar"})}, {x(3),y({"baz", "bar"})},
- {x({"a","b","c"}),y(5)}, {x({"b","c","d"}),y(5)}
+GenSpec G() { return GenSpec(); }
+
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
+
+const std::vector<GenSpec> merge_layouts = {
+ G(), G(),
+ G().idx("x", 5), G().idx("x", 5),
+ G().idx("x", 3).idx("y", 5), G().idx("x", 3).idx("y", 5),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"c","d","e"}),
+ G().map("x", {"a","c","e"}), G().map("x", {"b","c","d"}),
+ G().map("x", {"b","c","d"}), G().map("x", {"a","c","e"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"c","d"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"b","c"}).map("y", {"any","foo","bar"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}), G().idx("x", 3).map("y", {"baz", "bar"}),
+ G().map("x", {"a","b","c"}).idx("y", 5), G().map("x", {"b","c","d"}).idx("y", 5)
};
TensorSpec perform_generic_merge(const TensorSpec &a, const TensorSpec &b, join_fun_t fun, const ValueBuilderFactory &factory) {
@@ -46,13 +48,21 @@ TensorSpec perform_generic_merge(const TensorSpec &a, const TensorSpec &b, join_
void test_generic_merge_with(const ValueBuilderFactory &factory) {
ASSERT_TRUE((merge_layouts.size() % 2) == 0);
for (size_t i = 0; i < merge_layouts.size(); i += 2) {
- TensorSpec lhs = spec(merge_layouts[i], N());
- TensorSpec rhs = spec(merge_layouts[i + 1], Div16(N()));
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- for (auto fun: {operation::Add::f, operation::Mul::f, operation::Sub::f, operation::Max::f}) {
- auto expect = ReferenceOperations::merge(lhs, rhs, fun);
- auto actual = perform_generic_merge(lhs, rhs, fun, factory);
- EXPECT_EQ(actual, expect);
+ const auto l = merge_layouts[i];
+ const auto r = merge_layouts[i+1].cpy().seq(N_16ths);
+ for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+ l.cpy().cells_double().gen() })
+ {
+ for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+ r.cpy().cells_double().gen() })
+ {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ for (auto fun: {operation::Add::f, operation::Mul::f, operation::Sub::f, operation::Max::f}) {
+ auto expect = ReferenceOperations::merge(lhs, rhs, fun);
+ auto actual = perform_generic_merge(lhs, rhs, fun, factory);
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
}
diff --git a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
index 6841215038a..c80e8a1296b 100644
--- a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
+++ b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
@@ -7,7 +7,7 @@
#include <vespa/eval/instruction/generic_peek.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/overload.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -22,17 +22,16 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> peek_layouts = {
- {x(4)},
- {x(4),y(5)},
- {x(4),y(5),z(3)},
- float_cells({x(4),y(5),z(3)}),
- {x({"-1","0","2"})},
- {x({"-1","0","2"}),y({"-2","0","1"}),z({"-2","-1","0","1","2"})},
- float_cells({x({"-1","0","2"}),y({"-2","0","1"})}),
- {x(4),y({"-2","0","1"}),z(3)},
- {x({"-1","0","2"}),y(5),z({"-2","-1","0","1","2"})},
- float_cells({x({"-1","0","2"}),y(5),z({"-2","-1","0","1","2"})})
+GenSpec G() { return GenSpec(); }
+
+const std::vector<GenSpec> peek_layouts = {
+ G().idx("x", 4),
+ G().idx("x", 4).idx("y", 5),
+ G().idx("x", 4).idx("y", 5).idx("z", 3),
+ G().map("x", {"-1","0","2"}),
+ G().map("x", {"-1","0","2"}).map("y", {"-2","0","1"}).map("z", {"-2","-1","0","1","2"}),
+ G().idx("x", 4).map("y", {"-2","0","1"}).idx("z", 3),
+ G().map("x", {"-1","0","2"}).idx("y", 5).map("z", {"-2","-1","0","1","2"})
};
using PeekSpec = GenericPeek::SpecMap;
@@ -194,12 +193,15 @@ void fill_dims_and_check(const TensorSpec &input,
}
void test_generic_peek_with(const ValueBuilderFactory &factory) {
- for (const auto & layout : peek_layouts) {
- TensorSpec input = spec(layout, N());
- ValueType input_type = ValueType::from_spec(input.type());
- const auto &dims = input_type.dimensions();
- PeekSpec spec;
- fill_dims_and_check(input, spec, dims, factory);
+ for (const auto &layout : peek_layouts) {
+ for (TensorSpec input : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ ValueType input_type = ValueType::from_spec(input.type());
+ const auto &dims = input_type.dimensions();
+ PeekSpec spec;
+ fill_dims_and_check(input, spec, dims, factory);
+ }
}
}
diff --git a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
index 9e2090fa968..2c5baf234c4 100644
--- a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
+++ b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
@@ -6,7 +6,7 @@
#include <vespa/eval/instruction/generic_reduce.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <optional>
@@ -18,22 +18,23 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({})},
- {x({}),y(10)},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})}),
- {x(3),y({}),z(7)}
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
+
+GenSpec G() { return GenSpec().seq(N_16ths); }
+
+const std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {}),
+ G().map("x", {}).idx("y", 10),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {}).idx("z", 7)
};
TensorSpec perform_generic_reduce(const TensorSpec &a, Aggr aggr, const std::vector<vespalib::string> &dims,
@@ -68,19 +69,23 @@ TEST(GenericReduceTest, sparse_reduce_plan_can_be_created) {
}
void test_generic_reduce_with(const ValueBuilderFactory &factory) {
- for (const Layout &layout: layouts) {
- TensorSpec input = spec(layout, Div16(N()));
- SCOPED_TRACE(fmt("tensor type: %s, num_cells: %zu", input.type().c_str(), input.cells().size()));
- for (Aggr aggr: {Aggr::SUM, Aggr::AVG, Aggr::MIN, Aggr::MAX}) {
- SCOPED_TRACE(fmt("aggregator: %s", AggrNames::name_of(aggr)->c_str()));
- for (const Domain &domain: layout) {
- auto expect = ReferenceOperations::reduce(input, aggr, {domain.dimension}).normalize();
- auto actual = perform_generic_reduce(input, aggr, {domain.dimension}, factory);
+ for (const auto &layout: layouts) {
+ for (TensorSpec input : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ SCOPED_TRACE(fmt("tensor type: %s, num_cells: %zu", input.type().c_str(), input.cells().size()));
+ for (Aggr aggr: {Aggr::SUM, Aggr::AVG, Aggr::MIN, Aggr::MAX}) {
+ SCOPED_TRACE(fmt("aggregator: %s", AggrNames::name_of(aggr)->c_str()));
+ auto t = layout.type();
+ for (const auto & dim: t.dimensions()) {
+ auto expect = ReferenceOperations::reduce(input, aggr, {dim.name}).normalize();
+ auto actual = perform_generic_reduce(input, aggr, {dim.name}, factory);
+ EXPECT_EQ(actual, expect);
+ }
+ auto expect = ReferenceOperations::reduce(input, aggr, {}).normalize();
+ auto actual = perform_generic_reduce(input, aggr, {}, factory);
EXPECT_EQ(actual, expect);
}
- auto expect = ReferenceOperations::reduce(input, aggr, {}).normalize();
- auto actual = perform_generic_reduce(input, aggr, {}, factory);
- EXPECT_EQ(actual, expect);
}
}
}
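GenSpec::seq_t replaces the old Div16(N()) sequence wrapper: a callable mapping the linear cell index to a cell value. A small sketch, assuming gen() fills cells in index order as the old spec(layout, Div16(N())) did:

    GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
    auto spec = GenSpec().seq(N_16ths).idx("x", 3).gen();
    // cells become 1/16, 2/16, 3/16 for x=0,1,2 -- small magnitudes that
    // stay well-behaved in float cells when aggregated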
diff --git a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
index 20d155822b5..f0c2241202e 100644
--- a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
+++ b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/eval/value_codec.h>
#include <vespa/eval/instruction/generic_rename.h>
#include <vespa/eval/eval/interpreted_function.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/reference_operations.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -17,18 +17,17 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> rename_layouts = {
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec(); }
+
+const std::vector<GenSpec> rename_layouts = {
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
struct FromTo {
@@ -110,18 +109,20 @@ TensorSpec perform_generic_rename(const TensorSpec &a,
}
void test_generic_rename_with(const ValueBuilderFactory &factory) {
- for (const auto & layout : rename_layouts) {
- TensorSpec lhs = spec(layout, N());
- ValueType lhs_type = ValueType::from_spec(lhs.type());
- // printf("lhs_type: %s\n", lhs_type.to_spec().c_str());
- for (const auto & from_to : rename_from_to) {
- ValueType renamed_type = lhs_type.rename(from_to.from, from_to.to);
- if (renamed_type.is_error()) continue;
- // printf("type %s -> %s\n", lhs_type.to_spec().c_str(), renamed_type.to_spec().c_str());
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
- auto expect = ReferenceOperations::rename(lhs, from_to.from, from_to.to);
- auto actual = perform_generic_rename(lhs, from_to, factory);
- EXPECT_EQ(actual, expect);
+ for (const auto &layout : rename_layouts) {
+ for (TensorSpec lhs : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ ValueType lhs_type = ValueType::from_spec(lhs.type());
+ for (const auto & from_to : rename_from_to) {
+ ValueType renamed_type = lhs_type.rename(from_to.from, from_to.to);
+ if (renamed_type.is_error()) continue;
+ // printf("type %s -> %s\n", lhs_type.to_spec().c_str(), renamed_type.to_spec().c_str());
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ auto expect = ReferenceOperations::rename(lhs, from_to.from, from_to.to);
+ auto actual = perform_generic_rename(lhs, from_to, factory);
+ EXPECT_EQ(actual, expect);
+ }
}
}
}
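The is_error() guard is what lets the test sweep arbitrary from/to combinations: an invalid rename (unknown dimension, or a collision with an existing one) yields an error type instead of throwing. A sketch, assuming the from/to arguments are dimension-name lists:

    ValueType t = ValueType::from_spec("tensor(x[3],y[5])");
    ValueType r = t.rename({"x"}, {"y"}); // would collide with the existing y
    if (r.is_error()) {
        // skipped, exactly like the continue in test_generic_rename_with
    }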
diff --git a/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp b/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
index a3fbb3ed529..e6a256a493b 100644
--- a/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
+++ b/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
@@ -4,8 +4,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
-#include <vespa/eval/eval/test/param_variants.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/instruction/join_with_number_function.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -36,13 +35,12 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
auto repo = EvalFixture::ParamRepo()
- .add("a", spec(1.5))
- .add("number", spec(2.5))
- .add("dense", spec({y(5)}, N()))
- .add_matrix("x", 3, "y", 5);
-
- add_variants(repo, "mixed", {x({"a"}),y(5),z({"d","e"})}, N());
- add_variants(repo, "sparse", {x({"a","b","c"}),z({"d","e","f"})}, N());
+ .add("a", GenSpec().seq_bias(1.5).gen())
+ .add("number", GenSpec().seq_bias(2.5).gen())
+ .add("dense", GenSpec().idx("y", 5).gen())
+ .add_variants("x3y5", GenSpec().idx("x", 3).idx("y", 5))
+ .add_variants("mixed", GenSpec().map("x", {"a"}).idx("y", 5).map("z", {"d","e"}))
+ .add_variants("sparse", GenSpec().map("x", {"a","b","c"}).map("z", {"d","e","f"}));
return repo;
}
@@ -81,22 +79,22 @@ void verify_not_optimized(const vespalib::string &expr) {
TEST("require that dense number join can be optimized") {
TEST_DO(verify_optimized("x3y5+a", Primary::LHS, false));
TEST_DO(verify_optimized("a+x3y5", Primary::RHS, false));
- TEST_DO(verify_optimized("x3y5f*a", Primary::LHS, false));
- TEST_DO(verify_optimized("a*x3y5f", Primary::RHS, false));
+ TEST_DO(verify_optimized("x3y5_f*a", Primary::LHS, false));
+ TEST_DO(verify_optimized("a*x3y5_f", Primary::RHS, false));
}
TEST("require that dense number join can be inplace") {
TEST_DO(verify_optimized("@x3y5*a", Primary::LHS, true));
TEST_DO(verify_optimized("a*@x3y5", Primary::RHS, true));
- TEST_DO(verify_optimized("@x3y5f+a", Primary::LHS, true));
- TEST_DO(verify_optimized("a+@x3y5f", Primary::RHS, true));
+ TEST_DO(verify_optimized("@x3y5_f+a", Primary::LHS, true));
+ TEST_DO(verify_optimized("a+@x3y5_f", Primary::RHS, true));
}
TEST("require that asymmetric operations work") {
TEST_DO(verify_optimized("x3y5/a", Primary::LHS, false));
TEST_DO(verify_optimized("a/x3y5", Primary::RHS, false));
- TEST_DO(verify_optimized("x3y5f-a", Primary::LHS, false));
- TEST_DO(verify_optimized("a-x3y5f", Primary::RHS, false));
+ TEST_DO(verify_optimized("x3y5_f-a", Primary::LHS, false));
+ TEST_DO(verify_optimized("a-x3y5_f", Primary::RHS, false));
}
TEST("require that sparse number join can be optimized") {
diff --git a/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp b/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp
index fbe71f3ed63..6b549b4d4d4 100644
--- a/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/instruction/dense_dot_product_function.h>
#include <vespa/eval/instruction/dense_matmul_function.h>
#include <vespa/eval/instruction/dense_multi_matmul_function.h>
@@ -22,34 +22,25 @@ using namespace vespalib::eval::test;
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
-struct MyVecSeq : Sequence {
- double bias;
- double operator[](size_t i) const override { return (i + bias); }
- MyVecSeq(double cellBias) : bias(cellBias) {}
-};
-
-std::function<double(size_t)> my_vec_gen(double cellBias) {
- return [=] (size_t i) noexcept { return i + cellBias; };
-}
//-----------------------------------------------------------------------------
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add_vector("x", 3, my_vec_gen(2.0))
- .add_vector("x", 3, my_vec_gen(13.25))
- .add_vector("y", 3, my_vec_gen(4.0))
- .add_vector("z", 3, my_vec_gen(0.25))
- .add_matrix("x", 3, "y", 1, my_vec_gen(5.0))
- .add_matrix("x", 1, "y", 3, my_vec_gen(6.0))
- .add_matrix("x", 3, "y", 3, my_vec_gen(1.5))
- .add_matrix("x", 3, "z", 3, my_vec_gen(2.5))
- .add_cube("x", 3, "y", 3, "z", 3, my_vec_gen(-4.0))
- .add("mix_x3zm", spec({x(3),z({"c","d"})}, MyVecSeq(0.5)))
- .add("mix_y3zm", spec({y(3),z({"c","d"})}, MyVecSeq(3.5)))
- .add("mix_x3zm_f", spec(float_cells({x(3),z({"c","d"})}), MyVecSeq(0.5)))
- .add("mix_y3zm_f", spec(float_cells({y(3),z({"c","d"})}), MyVecSeq(3.5)))
- .add("mix_x3y3zm", spec({x(3),y(3),z({"c","d"})}, MyVecSeq(0.0)))
+ .add_variants("x3", GenSpec().idx("x", 3).seq_bias(2.0))
+ .add_variants("x3$2", GenSpec().idx("x", 3).seq_bias(13.25))
+ .add_variants("y3", GenSpec().idx("y", 3).seq_bias(4.0))
+ .add_variants("z3", GenSpec().idx("z", 3).seq_bias(0.25))
+ .add_variants("x3y3", GenSpec().idx("x", 3).idx("y", 3).seq_bias(5.0))
+ .add_variants("x1y3", GenSpec().idx("x", 1).idx("y", 3).seq_bias(6.0))
+ .add_variants("x3y1", GenSpec().idx("x", 3).idx("y", 1).seq_bias(1.5))
+ .add_variants("x3z3", GenSpec().idx("x", 3).idx("z", 3).seq_bias(2.5))
+ .add_variants("x3y3z3", GenSpec().idx("x", 3).idx("y", 3).idx("z", 3).seq_bias(-4.0))
+ .add("mix_x3zm", GenSpec().idx("x", 3).map("z", {"c","d"}).seq_bias(0.5).gen())
+ .add("mix_y3zm", GenSpec().idx("y", 3).map("z", {"c","d"}).seq_bias(3.5).gen())
+ .add("mix_x3zm_f", GenSpec().idx("x", 3).map("z", {"c","d"}).cells_float().seq_bias(0.5).gen())
+ .add("mix_y3zm_f", GenSpec().idx("y", 3).map("z", {"c","d"}).cells_float().seq_bias(3.5).gen())
+ .add("mix_x3y3zm", GenSpec().idx("x", 3).idx("y", 3).map("z", {"c","d"}).seq_bias(0.0).gen())
;
}
@@ -101,35 +92,35 @@ TEST(MixedInnerProduct, use_dense_optimizers_when_possible) {
TEST(MixedInnerProduct, trigger_optimizer_when_possible) {
assert_mixed_optimized("reduce(x3 * mix_x3zm,sum,x)");
- assert_mixed_optimized("reduce(x3f * mix_x3zm,sum,x)");
+ assert_mixed_optimized("reduce(x3_f * mix_x3zm,sum,x)");
assert_mixed_optimized("reduce(x3 * mix_x3zm_f,sum,x)");
- assert_mixed_optimized("reduce(x3f * mix_x3zm_f,sum,x)");
+ assert_mixed_optimized("reduce(x3_f * mix_x3zm_f,sum,x)");
assert_mixed_optimized("reduce(x3$2 * mix_x3zm,sum,x)");
- assert_mixed_optimized("reduce(x3f$2 * mix_x3zm,sum,x)");
+ assert_mixed_optimized("reduce(x3$2_f * mix_x3zm,sum,x)");
assert_mixed_optimized("reduce(y3 * mix_y3zm,sum,y)");
- assert_mixed_optimized("reduce(y3f * mix_y3zm,sum,y)");
+ assert_mixed_optimized("reduce(y3_f * mix_y3zm,sum,y)");
assert_mixed_optimized("reduce(y3 * mix_y3zm_f,sum,y)");
- assert_mixed_optimized("reduce(y3f * mix_y3zm_f,sum,y)");
+ assert_mixed_optimized("reduce(y3_f * mix_y3zm_f,sum,y)");
assert_mixed_optimized("reduce(x3y1 * mix_x3zm,sum,x)");
- assert_mixed_optimized("reduce(x3y1f * mix_x3zm,sum,x)");
+ assert_mixed_optimized("reduce(x3y1_f * mix_x3zm,sum,x)");
assert_mixed_optimized("reduce(x3y1 * mix_x3zm,sum,x,y)");
- assert_mixed_optimized("reduce(x3y1f * mix_x3zm,sum,x,y)");
+ assert_mixed_optimized("reduce(x3y1_f * mix_x3zm,sum,x,y)");
assert_mixed_optimized("reduce(x1y3 * mix_y3zm,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * mix_y3zm,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * mix_y3zm,sum,y)");
assert_mixed_optimized("reduce(x1y3 * x1y3,sum,y)");
- assert_mixed_optimized("reduce(x1y3 * x1y3f,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * x1y3,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * x1y3f,sum,y)");
+ assert_mixed_optimized("reduce(x1y3 * x1y3_f,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * x1y3,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * x1y3_f,sum,y)");
assert_mixed_optimized("reduce(x1y3 * mix_y3zm,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * mix_y3zm,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * mix_y3zm,sum,y)");
assert_mixed_optimized("reduce(mix_x3zm * x3,sum,x)");
- assert_mixed_optimized("reduce(mix_x3zm * x3f,sum,x)");
+ assert_mixed_optimized("reduce(mix_x3zm * x3_f,sum,x)");
assert_mixed_optimized("reduce(mix_x3zm * x3y1,sum,x)");
- assert_mixed_optimized("reduce(mix_x3zm * x3y1f,sum,x)");
+ assert_mixed_optimized("reduce(mix_x3zm * x3y1_f,sum,x)");
assert_mixed_optimized("reduce(mix_y3zm * y3,sum,y)");
- assert_mixed_optimized("reduce(mix_y3zm * y3f,sum,y)");
+ assert_mixed_optimized("reduce(mix_y3zm * y3_f,sum,y)");
assert_mixed_optimized("reduce(mix_y3zm * x1y3,sum,y)");
- assert_mixed_optimized("reduce(mix_y3zm * x1y3f,sum,y)");
+ assert_mixed_optimized("reduce(mix_y3zm * x1y3_f,sum,y)");
}
TEST(MixedInnerProduct, should_not_trigger_optimizer_for_other_cases) {
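seq_bias(b) takes over from the deleted MyVecSeq/my_vec_gen helpers, whose sequence was i + bias; under that reading the generated cells are a biased ramp:

    auto v = GenSpec().idx("x", 3).seq_bias(2.0).gen();
    // expected cells: 2.0, 3.0, 4.0 (i + bias for i = 0, 1, 2)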
diff --git a/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp b/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
index 3caebea7298..3a7d1368f03 100644
--- a/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/mixed_map_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib;
@@ -15,13 +15,11 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("a", spec(1.5))
- .add("b", spec(2.5))
- .add("sparse", spec({x({"a"})}, N()))
- .add("mixed", spec({x({"a"}),y(5)}, N()))
- .add_mutable("@sparse", spec({x({"a"})}, N()))
- .add_mutable("@mixed", spec({x({"a"}),y(5)}, N()))
- .add_matrix("x", 5, "y", 3);
+ .add("a", GenSpec().seq_bias(1.5).gen())
+ .add("b", GenSpec().seq_bias(2.5).gen())
+ .add_variants("sparse", GenSpec().map("x", {"a"}))
+ .add_variants("mixed", GenSpec().map("x", {"a"}).idx("y", 5))
+ .add_variants("x5y3", GenSpec().idx("x", 5).idx("y", 3));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -53,12 +51,12 @@ void verify_not_optimized(const vespalib::string &expr) {
TEST(MapTest, dense_map_is_optimized) {
verify_optimized("map(x5y3,f(x)(x+10))", false);
- verify_optimized("map(x5y3f,f(x)(x+10))", false);
+ verify_optimized("map(x5y3_f,f(x)(x+10))", false);
}
TEST(MapTest, simple_dense_map_can_be_inplace) {
verify_optimized("map(@x5y3,f(x)(x+10))", true);
- verify_optimized("map(@x5y3f,f(x)(x+10))", true);
+ verify_optimized("map(@x5y3_f,f(x)(x+10))", true);
}
TEST(MapTest, scalar_map_is_not_optimized) {
diff --git a/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp b/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
index 9c891adf179..105ae22e06e 100644
--- a/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
@@ -4,7 +4,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/mixed_simple_join_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -45,24 +45,24 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("a", spec(1.5))
- .add("b", spec(2.5))
- .add("sparse", spec({x({"a", "b", "c"})}, N()))
- .add("mixed", spec({x({"a", "b", "c"}),y(5),z(3)}, N()))
- .add("empty_mixed", spec({x({}),y(5),z(3)}, N()))
- .add_mutable("@mixed", spec({x({"a", "b", "c"}),y(5),z(3)}, N()))
- .add_cube("a", 1, "b", 1, "c", 1)
- .add_cube("x", 1, "y", 1, "z", 1)
- .add_cube("x", 3, "y", 5, "z", 3)
- .add_vector("z", 3)
- .add_dense({{"c", 5}, {"d", 1}})
- .add_dense({{"b", 1}, {"c", 5}})
- .add_matrix("x", 3, "y", 5, [](size_t idx) noexcept { return double((idx * 2) + 3); })
- .add_matrix("x", 3, "y", 5, [](size_t idx) noexcept { return double((idx * 3) + 2); })
- .add_vector("y", 5, [](size_t idx) noexcept { return double((idx * 2) + 3); })
- .add_vector("y", 5, [](size_t idx) noexcept { return double((idx * 3) + 2); })
- .add_matrix("y", 5, "z", 3, [](size_t idx) noexcept { return double((idx * 2) + 3); })
- .add_matrix("y", 5, "z", 3, [](size_t idx) noexcept { return double((idx * 3) + 2); });
+ .add("a", GenSpec().seq_bias(1.5).gen())
+ .add("b", GenSpec().seq_bias(2.5).gen())
+ .add("sparse", GenSpec().map("x", {"a", "b", "c"}).gen())
+ .add("mixed", GenSpec().map("x", {"a", "b", "c"}).idx("y", 5).idx("z", 3).gen())
+ .add("empty_mixed", GenSpec().map("x", {}).idx("y", 5).idx("z", 3).gen())
+ .add_mutable("@mixed", GenSpec().map("x", {"a", "b", "c"}).idx("y", 5).idx("z", 3).gen())
+ .add_variants("a1b1c1", GenSpec().idx("a", 1).idx("b", 1).idx("c", 1))
+ .add_variants("x1y1z1", GenSpec().idx("x", 1).idx("y", 1).idx("z", 1))
+ .add_variants("x3y5z3", GenSpec().idx("x", 3).idx("y", 5).idx("z", 3))
+ .add_variants("z3", GenSpec().idx("z", 3))
+ .add_variants("c5d1", GenSpec().idx("c", 5).idx("d", 1))
+ .add_variants("b1c5", GenSpec().idx("b", 1).idx("c", 5))
+ .add_variants("x3y5", GenSpec().idx("x", 3).idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 2) + 3); }))
+ .add_variants("x3y5$2", GenSpec().idx("x", 3).idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 3) + 2); }))
+ .add_variants("y5", GenSpec().idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 2) + 3); }))
+ .add_variants("y5$2", GenSpec().idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 3) + 2); }))
+ .add_variants("y5z3", GenSpec().idx("y", 5).idx("z", 3).seq([](size_t idx) noexcept { return double((idx * 2) + 3); }))
+ .add_variants("y5z3$2", GenSpec().idx("y", 5).idx("z", 3).seq([](size_t idx) noexcept { return double((idx * 3) + 2); }));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -149,12 +149,12 @@ vespalib::string adjust_param(const vespalib::string &str, bool float_cells, boo
if (mut_cells) {
result = "@" + result;
}
- if (float_cells) {
- result += "f";
- }
if (is_rhs) {
result += "$2";
}
+ if (float_cells) {
+ result += "_f";
+ }
return result;
}
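The reordering in adjust_param aligns generated names with the new variant scheme: the $2 copy marker now precedes the cell-type suffix. A hypothetical call, assuming the remaining parameters are mut_cells and is_rhs:

    auto name = adjust_param("x3y5", /*float_cells=*/true,
                             /*mut_cells=*/true, /*is_rhs=*/true);
    // name == "@x3y5$2_f" (previously "@x3y5f$2")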
diff --git a/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp b/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
index b4bf9ec5ef6..fe32a59bb78 100644
--- a/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
+++ b/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib::eval::operation;
@@ -16,11 +16,11 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("a", spec(1.5))
- .add("b", spec(2.5))
- .add("sparse", spec({x({"a","b"})}, N()))
- .add("mixed", spec({x({"a"}),y(5)}, N()))
- .add_matrix("x", 5, "y", 3);
+ .add("a", GenSpec().seq_bias(1.5).gen())
+ .add("b", GenSpec().seq_bias(2.5).gen())
+ .add("sparse", GenSpec().map("x", {"a","b"}).gen())
+ .add("mixed", GenSpec().map("x", {"a"}).idx("y", 5).gen())
+ .add_variants("x5y3", GenSpec().idx("x", 5).idx("y", 3));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -55,9 +55,9 @@ TEST(PowAsMapTest, squared_dense_tensor_is_optimized) {
verify_optimized("pow(x5y3,2.0)", Square::f);
verify_optimized("join(x5y3,2.0,f(x,y)(x^y))", Square::f);
verify_optimized("join(x5y3,2.0,f(x,y)(pow(x,y)))", Square::f);
- verify_optimized("join(x5y3f,2.0,f(x,y)(pow(x,y)))", Square::f);
+ verify_optimized("join(x5y3_f,2.0,f(x,y)(pow(x,y)))", Square::f);
verify_optimized("join(@x5y3,2.0,f(x,y)(pow(x,y)))", Square::f, true);
- verify_optimized("join(@x5y3f,2.0,f(x,y)(pow(x,y)))", Square::f, true);
+ verify_optimized("join(@x5y3_f,2.0,f(x,y)(pow(x,y)))", Square::f, true);
}
TEST(PowAsMapTest, cubed_dense_tensor_is_optimized) {
@@ -65,9 +65,9 @@ TEST(PowAsMapTest, cubed_dense_tensor_is_optimized) {
verify_optimized("pow(x5y3,3.0)", Cube::f);
verify_optimized("join(x5y3,3.0,f(x,y)(x^y))", Cube::f);
verify_optimized("join(x5y3,3.0,f(x,y)(pow(x,y)))", Cube::f);
- verify_optimized("join(x5y3f,3.0,f(x,y)(pow(x,y)))", Cube::f);
+ verify_optimized("join(x5y3_f,3.0,f(x,y)(pow(x,y)))", Cube::f);
verify_optimized("join(@x5y3,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
- verify_optimized("join(@x5y3f,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
+ verify_optimized("join(@x5y3_f,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
}
TEST(PowAsMapTest, hypercubed_dense_tensor_is_not_optimized) {
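For reference, the optimization under test rewrites a power with a constant exponent into a plain map, so both spellings below reach the same optimized form; only the parameter suffixes changed in this commit:

    verify_optimized("pow(x5y3,2.0)", Square::f);                 // becomes map(x5y3, f(x)(x*x))
    verify_optimized("join(x5y3,3.0,f(x,y)(pow(x,y)))", Cube::f); // becomes map(x5y3, f(x)(x*x*x))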
diff --git a/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp b/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp
index 4de7e85074d..794725a8257 100644
--- a/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp
+++ b/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp
@@ -4,7 +4,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -19,10 +19,10 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("x1y5z1", spec({x(1),y(5),z(1)}, N()))
- .add("x1y5z1f", spec(float_cells({x(1),y(5),z(1)}), N()))
- .add("x1y1z1", spec({x(1),y(1),z(1)}, N()))
- .add("x1y5z_m", spec({x(1),y(5),z({"a"})}, N()));
+ .add("x1y5z1", GenSpec().idx("x", 1).idx("y", 5).idx("z", 1).gen())
+ .add("x1y5z1f", GenSpec().idx("x", 1).idx("y", 5).idx("z", 1).cells_float().gen())
+ .add("x1y1z1", GenSpec().idx("x", 1).idx("y", 1).idx("z", 1).gen())
+ .add("x1y5z_m", GenSpec().idx("x", 1).idx("y", 5).map("z", {"a"}).gen());
}
EvalFixture::ParamRepo param_repo = make_params();
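Each GenSpec call derives its tensor type from the accumulated dimensions, so the one-to-one replacement keeps the parameter names descriptive. For example:

    auto t = GenSpec().idx("x", 1).idx("y", 5).map("z", {"a"}).gen();
    // presumably a TensorSpec of type "tensor(x[1],y[5],z{})", matching the
    // old spec({x(1),y(5),z({"a"})}, N()) form it replaces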
diff --git a/eval/src/tests/instruction/sparse_dot_product_function/CMakeLists.txt b/eval/src/tests/instruction/sparse_dot_product_function/CMakeLists.txt
new file mode 100644
index 00000000000..076f1d79796
--- /dev/null
+++ b/eval/src/tests/instruction/sparse_dot_product_function/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_sparse_dot_product_function_test_app TEST
+ SOURCES
+ sparse_dot_product_function_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_sparse_dot_product_function_test_app COMMAND eval_sparse_dot_product_function_test_app)
diff --git a/eval/src/tests/instruction/sparse_dot_product_function/sparse_dot_product_function_test.cpp b/eval/src/tests/instruction/sparse_dot_product_function/sparse_dot_product_function_test.cpp
new file mode 100644
index 00000000000..65eab2778aa
--- /dev/null
+++ b/eval/src/tests/instruction/sparse_dot_product_function/sparse_dot_product_function_test.cpp
@@ -0,0 +1,85 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/instruction/sparse_dot_product_function.h>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/eval/eval/test/gen_spec.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+const ValueBuilderFactory &test_factory = SimpleValueBuilderFactory::get();
+
+//-----------------------------------------------------------------------------
+
+EvalFixture::ParamRepo make_params() {
+ return EvalFixture::ParamRepo()
+ .add("v1_x", GenSpec().map("x", 32, 1).seq_bias(3.0).gen())
+ .add("v1_x_f", GenSpec().map("x", 32, 1).seq_bias(3.0).cells_float().gen())
+ .add("v2_x", GenSpec().map("x", 16, 2).seq_bias(7.0).gen())
+ .add("v2_x_f", GenSpec().map("x", 16, 2).seq_bias(7.0).cells_float().gen())
+ .add("v3_y", GenSpec().map("y", 10, 1).gen())
+ .add("v4_xd", GenSpec().idx("x", 10).gen())
+ .add("m1_xy", GenSpec().map("x", 32, 1).map("y", 16, 2).seq_bias(3.0).gen())
+ .add("m2_xy", GenSpec().map("x", 16, 2).map("y", 32, 1).seq_bias(7.0).gen())
+ .add("m3_xym", GenSpec().map("x", 8, 1).idx("y", 5).gen());
+}
+EvalFixture::ParamRepo param_repo = make_params();
+
+void assert_optimized(const vespalib::string &expr) {
+ EvalFixture fast_fixture(prod_factory, expr, param_repo, true);
+ EvalFixture test_fixture(test_factory, expr, param_repo, true);
+ EvalFixture slow_fixture(prod_factory, expr, param_repo, false);
+ EXPECT_EQ(fast_fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(test_fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(slow_fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(fast_fixture.find_all<SparseDotProductFunction>().size(), 1u);
+ EXPECT_EQ(test_fixture.find_all<SparseDotProductFunction>().size(), 1u);
+ EXPECT_EQ(slow_fixture.find_all<SparseDotProductFunction>().size(), 0u);
+}
+
+void assert_not_optimized(const vespalib::string &expr) {
+ EvalFixture fast_fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQ(fast_fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(fast_fixture.find_all<SparseDotProductFunction>().size(), 0u);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(SparseDotProduct, expression_can_be_optimized)
+{
+ assert_optimized("reduce(v1_x*v2_x,sum,x)");
+ assert_optimized("reduce(v2_x*v1_x,sum)");
+ assert_optimized("reduce(v1_x*v2_x_f,sum)");
+ assert_optimized("reduce(v1_x_f*v2_x,sum)");
+ assert_optimized("reduce(v1_x_f*v2_x_f,sum)");
+}
+
+TEST(SparseDotProduct, multi_dimensional_expression_can_be_optimized)
+{
+ assert_optimized("reduce(m1_xy*m2_xy,sum,x,y)");
+ assert_optimized("reduce(m1_xy*m2_xy,sum)");
+}
+
+TEST(SparseDotProduct, embedded_dot_product_is_not_optimized)
+{
+ assert_not_optimized("reduce(m1_xy*v1_x,sum,x)");
+ assert_not_optimized("reduce(v1_x*m1_xy,sum,x)");
+}
+
+TEST(SparseDotProduct, similar_expressions_are_not_optimized)
+{
+ assert_not_optimized("reduce(m1_xy*v1_x,sum)");
+ assert_not_optimized("reduce(v1_x*v3_y,sum)");
+ assert_not_optimized("reduce(v2_x*v1_x,max)");
+ assert_not_optimized("reduce(v2_x+v1_x,sum)");
+ assert_not_optimized("reduce(v4_xd*v4_xd,sum)");
+ assert_not_optimized("reduce(m3_xym*m3_xym,sum)");
+}
+
+//-----------------------------------------------------------------------------
+
+GTEST_MAIN_RUN_ALL_TESTS()
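The three-argument map(name, size, stride) form generates numeric labels (0, stride, 2*stride, ...), mirroring the D::map helper deleted from the benchmark below. Under that assumption, the fixture's vectors overlap only partially:

    auto v1 = GenSpec().map("x", 32, 1).gen(); // labels "0","1",...,"31"
    auto v2 = GenSpec().map("x", 16, 2).gen(); // labels "0","2",...,"30"
    // reduce(v1*v2,sum,x) only touches the 16 even labels, exercising the
    // partial-overlap path of the sparse dot product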
diff --git a/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp b/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
index 4b89f30d879..1013c98b424 100644
--- a/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
+++ b/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/instruction/sum_max_dot_product_function.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -13,12 +13,6 @@ using namespace vespalib::eval::test;
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
-struct MyVecSeq : Sequence {
- double bias;
- double operator[](size_t i) const override { return (i + bias); }
- MyVecSeq(double cellBias) : bias(cellBias) {}
-};
-
//-----------------------------------------------------------------------------
vespalib::string main_expr = "reduce(reduce(reduce(a*b,sum,z),max,y),sum,x)";
@@ -34,7 +28,7 @@ void assert_optimized(const TensorSpec &a, const TensorSpec &b, size_t dp_size)
auto info = fast_fixture.find_all<SumMaxDotProductFunction>();
ASSERT_EQ(info.size(), 1u);
EXPECT_TRUE(info[0]->result_is_mutable());
- EXPECT_EQUAL(info[0]->dp_size(), dp_size);
+ EXPECT_EQ(info[0]->dp_size(), dp_size);
}
void assert_not_optimized(const TensorSpec &a, const TensorSpec &b, const vespalib::string &expr = main_expr) {
@@ -51,10 +45,23 @@ void assert_not_optimized(const TensorSpec &a, const TensorSpec &b, const vespal
//-----------------------------------------------------------------------------
-auto query = spec(float_cells({x({"0", "1", "2"}),z(5)}), MyVecSeq(0.5));
-auto document = spec(float_cells({y({"0", "1", "2", "3", "4", "5"}),z(5)}), MyVecSeq(2.5));
-auto empty_query = spec(float_cells({x({}),z(5)}), MyVecSeq(0.5));
-auto empty_document = spec(float_cells({y({}),z(5)}), MyVecSeq(2.5));
+GenSpec QueGen(size_t x_size, size_t z_size) { return GenSpec().cells_float().map("x", x_size).idx("z", z_size).seq_bias(0.5); }
+
+GenSpec DocGen(size_t y_size, size_t z_size) { return GenSpec().cells_float().map("y", y_size).idx("z", z_size).seq_bias(2.5); }
+
+GenSpec Que() { return QueGen(3, 5); }
+GenSpec Doc() { return DocGen(6, 5); }
+
+GenSpec QueEmptyX() { return QueGen(0, 5); }
+GenSpec DocEmptyY() { return DocGen(0, 5); }
+
+GenSpec QueTrivialZ() { return QueGen(3, 1); }
+GenSpec DocTrivialZ() { return DocGen(6, 1); }
+
+auto query = Que().gen();
+auto document = Doc().gen();
+auto empty_query = QueEmptyX().gen();
+auto empty_document = DocEmptyY().gen();
TEST(SumMaxDotProduct, expressions_can_be_optimized)
{
@@ -66,24 +73,24 @@ TEST(SumMaxDotProduct, expressions_can_be_optimized)
}
TEST(SumMaxDotProduct, double_cells_are_not_optimized) {
- auto double_query = spec({x({"0", "1", "2"}),z(5)}, MyVecSeq(0.5));
- auto double_document = spec({y({"0", "1", "2", "3", "4", "5"}),z(5)}, MyVecSeq(2.5));
+ auto double_query = Que().cells_double().gen();
+ auto double_document = Doc().cells_double().gen();
assert_not_optimized(query, double_document);
assert_not_optimized(double_query, document);
assert_not_optimized(double_query, double_document);
}
TEST(SumMaxDotProduct, trivial_dot_product_is_not_optimized) {
- auto trivial_query = spec(float_cells({x({"0", "1", "2"}),z(1)}), MyVecSeq(0.5));
- auto trivial_document = spec(float_cells({y({"0", "1", "2", "3", "4", "5"}),z(1)}), MyVecSeq(2.5));
+ auto trivial_query = QueTrivialZ().gen();
+ auto trivial_document = DocTrivialZ().gen();
assert_not_optimized(trivial_query, trivial_document);
}
TEST(SumMaxDotProduct, additional_dimensions_are_not_optimized) {
- auto extra_sparse_query = spec(float_cells({Domain("a", {"0"}),x({"0", "1", "2"}),z(5)}), MyVecSeq(0.5));
- auto extra_dense_query = spec(float_cells({Domain("a", 1),x({"0", "1", "2"}),z(5)}), MyVecSeq(0.5));
- auto extra_sparse_document = spec(float_cells({Domain("a", {"0"}),y({"0", "1", "2", "3", "4", "5"}),z(5)}), MyVecSeq(2.5));
- auto extra_dense_document = spec(float_cells({Domain("a", 1),y({"0", "1", "2", "3", "4", "5"}),z(5)}), MyVecSeq(2.5));
+ auto extra_sparse_query = Que().map("a", 1).gen();
+ auto extra_dense_query = Que().idx("a", 1).gen();
+ auto extra_sparse_document = Doc().map("a", 1).gen();
+ auto extra_dense_document = Doc().idx("a", 1).gen();
vespalib::string extra_sum_expr = "reduce(reduce(reduce(a*b,sum,z),max,y),sum,a,x)";
vespalib::string extra_max_expr = "reduce(reduce(reduce(a*b,sum,z),max,a,y),sum,x)";
assert_not_optimized(extra_sparse_query, document);
@@ -97,8 +104,8 @@ TEST(SumMaxDotProduct, additional_dimensions_are_not_optimized) {
}
TEST(SumMaxDotProduct, more_dense_variants_are_not_optimized) {
- auto dense_query = spec(float_cells({x(3),z(5)}), MyVecSeq(0.5));
- auto dense_document = spec(float_cells({y(5),z(5)}), MyVecSeq(2.5));
+ auto dense_query = GenSpec().cells_float().idx("x", 3).idx("z", 5).seq_bias(0.5).gen();
+ auto dense_document = GenSpec().cells_float().idx("y", 5).idx("z", 5).seq_bias(2.5).gen();
assert_not_optimized(dense_query, document);
assert_not_optimized(query, dense_document);
assert_not_optimized(dense_query, dense_document);
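The QueGen/DocGen helpers centralize what the removed MyVecSeq specs spelled out per test: float cells, one sparse dimension, an indexed z, and a fixed bias. Assuming the two-argument map(name, size) generates numeric labels starting at "0":

    auto q = QueGen(3, 5).gen(); // presumably tensor<float>(x{},z[5]), labels "0".."2"
    auto d = DocGen(6, 5).gen(); // presumably tensor<float>(y{},z[5]), labels "0".."5"
    // main_expr then sums over x the per-x maximum (over y) of the z dot products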
diff --git a/eval/src/tests/streamed/value/streamed_value_test.cpp b/eval/src/tests/streamed/value/streamed_value_test.cpp
index 2f91c3b9390..7aaa8cdebbc 100644
--- a/eval/src/tests/streamed/value/streamed_value_test.cpp
+++ b/eval/src/tests/streamed/value/streamed_value_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/eval/value_codec.h>
#include <vespa/eval/instruction/generic_join.h>
#include <vespa/eval/eval/interpreted_function.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -23,45 +23,36 @@ using Handle = SharedStringRepo::Handle;
vespalib::string as_str(string_id label) { return Handle::string_from_id(label); }
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec(); }
+
+const std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
-std::vector<Layout> join_layouts = {
- {}, {},
- {x(5)}, {x(5)},
- {x(5)}, {y(5)},
- {x(5)}, {x(5),y(5)},
- {y(3)}, {x(2),z(3)},
- {x(3),y(5)}, {y(5),z(7)},
- float_cells({x(3),y(5)}), {y(5),z(7)},
- {x(3),y(5)}, float_cells({y(5),z(7)}),
- float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {y({"foo","bar","baz"})},
- {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
- {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
- float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})})
+const std::vector<GenSpec> join_layouts = {
+ G(), G(),
+ G().idx("x", 5), G().idx("x", 5),
+ G().idx("x", 5), G().idx("y", 5),
+ G().idx("x", 5), G().idx("x", 5).idx("y", 5),
+ G().idx("y", 3), G().idx("x", 2).idx("z", 3),
+ G().idx("x", 3).idx("y", 5), G().idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
+ G().map("x", {"a","b","c"}), G().map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}), G().map("y", {"foo","bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5), G().idx("y", 5).map("z", {"i","j","k","l"})
};
TensorSpec streamed_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_t function) {
@@ -76,20 +67,26 @@ TensorSpec streamed_value_join(const TensorSpec &a, const TensorSpec &b, join_fu
TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
- TensorSpec actual = spec_from_value(*value);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
}
}
TEST(StreamedValueTest, streamed_values_can_be_copied) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
- std::unique_ptr<Value> copy = StreamedValueBuilderFactory::get().copy(*value);
- TensorSpec actual = spec_from_value(*copy);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+ layout.cpy().cells_double().gen() })
+ {
+ std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
+ std::unique_ptr<Value> copy = StreamedValueBuilderFactory::get().copy(*value);
+ TensorSpec actual = spec_from_value(*copy);
+ EXPECT_EQ(actual, expect);
+ }
}
}
@@ -126,16 +123,26 @@ TEST(StreamedValueTest, streamed_value_can_be_built_and_inspected) {
EXPECT_EQ(result["bb"], 3);
}
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 1.0) / 16.0; };
+
TEST(StreamedValueTest, new_generic_join_works_for_streamed_values) {
ASSERT_TRUE((join_layouts.size() % 2) == 0);
for (size_t i = 0; i < join_layouts.size(); i += 2) {
- TensorSpec lhs = spec(join_layouts[i], Div16(N()));
- TensorSpec rhs = spec(join_layouts[i + 1], Div16(N()));
- for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Max::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto expect = ReferenceOperations::join(lhs, rhs, fun);
- auto actual = streamed_value_join(lhs, rhs, fun);
- EXPECT_EQ(actual, expect);
+ const auto l = join_layouts[i].cpy().seq(N_16ths);
+ const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
+ for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+ l.cpy().cells_double().gen() })
+ {
+ for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+ r.cpy().cells_double().gen() })
+ {
+ for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Max::f}) {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = ReferenceOperations::join(lhs, rhs, fun);
+ auto actual = streamed_value_join(lhs, rhs, fun);
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
}
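One consequence of sweeping both cell types on both sides is that the mixed pairings (float x double) appear, which the old float_cells layout pairs only partially covered. Under the usual cell-type promotion rule (my understanding of the eval library; not stated in this diff), only float+float stays float:

    auto lhs = GenSpec().idx("x", 2).cells_float().gen();
    auto rhs = GenSpec().idx("x", 2).cells_double().gen();
    auto joined = ReferenceOperations::join(lhs, rhs, operation::Mul::f);
    // joined is expected to carry double cells; the test passes as long as
    // streamed_value_join promotes the same way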
diff --git a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
index 3345d7dc8ee..000794aca7d 100644
--- a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
+++ b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
@@ -43,6 +43,7 @@
#include <vespa/vespalib/io/fileutil.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/data/smart_buffer.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <optional>
#include <algorithm>
@@ -60,51 +61,7 @@ template <typename T> using CREF = std::reference_wrapper<const T>;
//-----------------------------------------------------------------------------
-struct D {
- vespalib::string name;
- bool mapped;
- size_t size;
- size_t stride;
- static D map(const vespalib::string &name_in, size_t size_in, size_t stride_in) { return D{name_in, true, size_in, stride_in}; }
- static D idx(const vespalib::string &name_in, size_t size_in) { return D{name_in, false, size_in, 1}; }
- operator ValueType::Dimension() const {
- if (mapped) {
- return ValueType::Dimension(name);
- } else {
- return ValueType::Dimension(name, size);
- }
- }
- TensorSpec::Label operator()(size_t idx) const {
- if (mapped) {
- // need plain number as string for dynamic sparse peek
- return TensorSpec::Label(fmt("%zu", idx));
- } else {
- return TensorSpec::Label(idx);
- }
- }
-};
-
-void add_cells(TensorSpec &spec, double &seq, TensorSpec::Address addr) {
- spec.add(addr, seq);
- seq += 1.0;
-}
-
-template <typename ...Ds> void add_cells(TensorSpec &spec, double &seq, TensorSpec::Address addr, const D &d, const Ds &...ds) {
- for (size_t i = 0, idx = 0; i < d.size; ++i, idx += d.stride) {
- addr.insert_or_assign(d.name, d(idx));
- add_cells(spec, seq, addr, ds...);
- }
-}
-
-template <typename ...Ds> TensorSpec make_spec(double seq, const Ds &...ds) {
- TensorSpec spec(ValueType::tensor_type({ds...}, CellType::FLOAT).to_spec());
- add_cells(spec, seq, TensorSpec::Address(), ds...);
- return spec;
-}
-
-TensorSpec make_vector(const D &d1, double seq) { return make_spec(seq, d1); }
-TensorSpec make_matrix(const D &d1, const D &d2, double seq) { return make_spec(seq, d1, d2); }
-TensorSpec make_cube(const D &d1, const D &d2, const D &d3, double seq) { return make_spec(seq, d1, d2, d3); }
+test::GenSpec GS(double bias) { return test::GenSpec().cells_float().seq_bias(bias); }
//-----------------------------------------------------------------------------
@@ -609,7 +566,7 @@ void benchmark_tensor_create(const vespalib::string &desc, const TensorSpec &pro
ASSERT_FALSE(proto_type.is_error());
std::vector<CREF<TensorSpec>> stack_spec;
for (const auto &cell: proto.cells()) {
- stack_spec.emplace_back(stash.create<TensorSpec>(make_spec(cell.second)));
+ stack_spec.emplace_back(stash.create<TensorSpec>(GS(cell.second).gen()));
}
std::vector<EvalOp::UP> list;
for (const Impl &impl: impl_list) {
@@ -645,7 +602,7 @@ void benchmark_tensor_peek(const vespalib::string &desc, const TensorSpec &lhs,
stack_spec.emplace_back(lhs);
if (peek_spec.is_dynamic) {
for (const auto &entry: peek_spec.spec) {
- stack_spec.emplace_back(stash.create<TensorSpec>(make_spec(double(entry.second))));
+ stack_spec.emplace_back(stash.create<TensorSpec>(GS(double(entry.second)).gen()));
}
}
std::vector<EvalOp::UP> list;
@@ -660,10 +617,10 @@ void benchmark_tensor_peek(const vespalib::string &desc, const TensorSpec &lhs,
//-----------------------------------------------------------------------------
TEST(MakeInputTest, print_some_test_input) {
- auto number = make_spec(5.0);
- auto sparse = make_vector(D::map("x", 5, 3), 1.0);
- auto dense = make_vector(D::idx("x", 5), 10.0);
- auto mixed = make_cube(D::map("x", 3, 7), D::idx("y", 2), D::idx("z", 2), 100.0);
+ auto number = GS(5.0).gen();
+ auto sparse = GS(1.0).map("x", 5, 3).gen();
+ auto dense = GS(10.0).idx("x", 5).gen();
+ auto mixed = GS(100.0).map("x", 3, 7).idx("y", 2).idx("z", 2).gen();
fprintf(stderr, "--------------------------------------------------------\n");
fprintf(stderr, "simple number: %s\n", number.to_string().c_str());
fprintf(stderr, "sparse vector: %s\n", sparse.to_string().c_str());
@@ -728,197 +685,197 @@ void benchmark_encode_decode(const vespalib::string &desc, const TensorSpec &pro
// relevant for the overall performance of the tensor implementation.
TEST(EncodeDecodeBench, encode_decode_dense) {
- auto proto = make_matrix(D::idx("a", 64), D::idx("b", 64), 1.0);
+ auto proto = GS(1.0).idx("a", 64).idx("b", 64).gen();
benchmark_encode_decode("dense tensor", proto);
}
TEST(EncodeDecodeBench, encode_decode_sparse) {
- auto proto = make_matrix(D::map("a", 64, 1), D::map("b", 64, 1), 1.0);
+ auto proto = GS(1.0).map("a", 64, 1).map("b", 64, 1).gen();
benchmark_encode_decode("sparse tensor", proto);
}
TEST(EncodeDecodeBench, encode_decode_mixed) {
- auto proto = make_matrix(D::map("a", 64, 1), D::idx("b", 64), 1.0);
+ auto proto = GS(1.0).map("a", 64, 1).idx("b", 64).gen();
benchmark_encode_decode("mixed tensor", proto);
}
//-----------------------------------------------------------------------------
TEST(DenseConcat, small_vectors) {
- auto lhs = make_vector(D::idx("x", 10), 1.0);
- auto rhs = make_vector(D::idx("x", 10), 2.0);
+ auto lhs = GS(1.0).idx("x", 10).gen();
+ auto rhs = GS(2.0).idx("x", 10).gen();
benchmark_concat("small dense vector append concat", lhs, rhs, "x");
}
TEST(DenseConcat, cross_vectors) {
- auto lhs = make_vector(D::idx("x", 10), 1.0);
- auto rhs = make_vector(D::idx("x", 10), 2.0);
+ auto lhs = GS(1.0).idx("x", 10).gen();
+ auto rhs = GS(2.0).idx("x", 10).gen();
benchmark_concat("small dense vector cross concat", lhs, rhs, "y");
}
TEST(DenseConcat, cube_and_vector) {
- auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 1.0);
- auto rhs = make_vector(D::idx("a", 16), 42.0);
+ auto lhs = GS(1.0).idx("a", 16).idx("b", 16).idx("c", 16).gen();
+ auto rhs = GS(42.0).idx("a", 16).gen();
benchmark_concat("cube vs vector concat", lhs, rhs, "a");
}
TEST(SparseConcat, small_vectors) {
- auto lhs = make_vector(D::map("x", 10, 1), 1.0);
- auto rhs = make_vector(D::map("x", 10, 2), 2.0);
+ auto lhs = GS(1.0).map("x", 10, 1).gen();
+ auto rhs = GS(2.0).map("x", 10, 2).gen();
benchmark_concat("small sparse concat", lhs, rhs, "y");
}
TEST(MixedConcat, mixed_vs_dense) {
- auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::map("c", 16, 1), 1.0);
- auto rhs = make_matrix(D::idx("a", 16), D::idx("b", 16), 2.0);
+ auto lhs = GS(1.0).idx("a", 16).idx("b", 16).map("c", 16, 1).gen();
+ auto rhs = GS(2.0).idx("a", 16).idx("b", 16).gen();
benchmark_concat("mixed dense concat a", lhs, rhs, "a");
}
TEST(MixedConcat, large_mixed_a) {
- auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::map("c", 16, 1), 1.0);
- auto rhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::map("c", 16, 2), 2.0);
+ auto lhs = GS(1.0).idx("a", 16).idx("b", 16).map("c", 16, 1).gen();
+ auto rhs = GS(2.0).idx("a", 16).idx("b", 16).map("c", 16, 2).gen();
benchmark_concat("mixed append concat a", lhs, rhs, "a");
}
TEST(MixedConcat, large_mixed_b) {
- auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::map("c", 16, 1), 1.0);
- auto rhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::map("c", 16, 2), 2.0);
+ auto lhs = GS(1.0).idx("a", 16).idx("b", 16).map("c", 16, 1).gen();
+ auto rhs = GS(2.0).idx("a", 16).idx("b", 16).map("c", 16, 2).gen();
benchmark_concat("mixed append concat b", lhs, rhs, "b");
}
//-----------------------------------------------------------------------------
TEST(NumberJoin, plain_op2) {
- auto lhs = make_spec(2.0);
- auto rhs = make_spec(3.0);
+ auto lhs = GS(2.0).gen();
+ auto rhs = GS(3.0).gen();
benchmark_join("simple numbers multiply", lhs, rhs, operation::Mul::f);
}
//-----------------------------------------------------------------------------
TEST(DenseJoin, small_vectors) {
- auto lhs = make_vector(D::idx("x", 10), 1.0);
- auto rhs = make_vector(D::idx("x", 10), 2.0);
+ auto lhs = GS(1.0).idx("x", 10).gen();
+ auto rhs = GS(2.0).idx("x", 10).gen();
benchmark_join("small dense vector multiply", lhs, rhs, operation::Mul::f);
}
TEST(DenseJoin, full_overlap) {
- auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 1.0);
- auto rhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 2.0);
+ auto lhs = GS(1.0).idx("a", 16).idx("b", 16).idx("c", 16).gen();
+ auto rhs = GS(2.0).idx("a", 16).idx("b", 16).idx("c", 16).gen();
benchmark_join("dense full overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(DenseJoin, partial_overlap) {
- auto lhs = make_cube(D::idx("a", 8), D::idx("c", 8), D::idx("d", 8), 1.0);
- auto rhs = make_cube(D::idx("b", 8), D::idx("c", 8), D::idx("d", 8), 2.0);
+ auto lhs = GS(1.0).idx("a", 8).idx("c", 8).idx("d", 8).gen();
+ auto rhs = GS(2.0).idx("b", 8).idx("c", 8).idx("d", 8).gen();
benchmark_join("dense partial overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(DenseJoin, subset_overlap) {
- auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 1.0);
- auto rhs_inner = make_matrix(D::idx("b", 16), D::idx("c", 16), 2.0);
- auto rhs_outer = make_matrix(D::idx("a", 16), D::idx("b", 16), 3.0);
+ auto lhs = GS(1.0).idx("a", 16).idx("b", 16).idx("c", 16).gen();
+ auto rhs_inner = GS(2.0).idx("b", 16).idx("c", 16).gen();
+ auto rhs_outer = GS(3.0).idx("a", 16).idx("b", 16).gen();
benchmark_join("dense subset overlap inner multiply", lhs, rhs_inner, operation::Mul::f);
benchmark_join("dense subset overlap outer multiply", lhs, rhs_outer, operation::Mul::f);
}
TEST(DenseJoin, no_overlap) {
- auto lhs = make_cube(D::idx("a", 4), D::idx("e", 4), D::idx("f", 4), 1.0);
- auto rhs = make_cube(D::idx("b", 4), D::idx("c", 4), D::idx("d", 4), 2.0);
+ auto lhs = GS(1.0).idx("a", 4).idx("e", 4).idx("f", 4).gen();
+ auto rhs = GS(2.0).idx("b", 4).idx("c", 4).idx("d", 4).gen();
benchmark_join("dense no overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(DenseJoin, simple_expand) {
- auto lhs = make_cube(D::idx("a", 5), D::idx("b", 4), D::idx("c", 4), 1.0);
- auto rhs = make_cube(D::idx("d", 4), D::idx("e", 4), D::idx("f", 5), 2.0);
+ auto lhs = GS(1.0).idx("a", 5).idx("b", 4).idx("c", 4).gen();
+ auto rhs = GS(2.0).idx("d", 4).idx("e", 4).idx("f", 5).gen();
benchmark_join("dense simple expand multiply", lhs, rhs, operation::Mul::f);
}
TEST(DenseJoin, multiply_by_number) {
- auto lhs = make_spec(3.0);
- auto rhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 2.0);
+ auto lhs = GS(3.0).gen();
+ auto rhs = GS(2.0).idx("a", 16).idx("b", 16).idx("c", 16).gen();
benchmark_join("dense cube multiply by number", lhs, rhs, operation::Mul::f);
}
//-----------------------------------------------------------------------------
TEST(SparseJoin, small_vectors) {
- auto lhs = make_vector(D::map("x", 10, 1), 1.0);
- auto rhs = make_vector(D::map("x", 10, 2), 2.0);
+ auto lhs = GS(1.0).map("x", 10, 1).gen();
+ auto rhs = GS(2.0).map("x", 10, 2).gen();
benchmark_join("small sparse vector multiply", lhs, rhs, operation::Mul::f);
}
TEST(SparseJoin, large_vectors) {
- auto lhs = make_vector(D::map("x", 1800, 1), 1.0);
- auto rhs = make_vector(D::map("x", 1000, 2), 2.0);
+ auto lhs = GS(1.0).map("x", 1800, 1).gen();
+ auto rhs = GS(2.0).map("x", 1000, 2).gen();
benchmark_join("large sparse vector multiply", lhs, rhs, operation::Mul::f);
}
TEST(SparseJoin, full_overlap) {
- auto lhs = make_cube(D::map("a", 16, 1), D::map("b", 16, 1), D::map("c", 16, 1), 1.0);
- auto rhs = make_cube(D::map("a", 16, 2), D::map("b", 16, 2), D::map("c", 16, 2), 2.0);
+ auto lhs = GS(1.0).map("a", 16, 1).map("b", 16, 1).map("c", 16, 1).gen();
+ auto rhs = GS(2.0).map("a", 16, 2).map("b", 16, 2).map("c", 16, 2).gen();
benchmark_join("sparse full overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(SparseJoin, full_overlap_big_vs_small) {
- auto lhs = make_cube(D::map("a", 16, 1), D::map("b", 16, 1), D::map("c", 16, 1), 1.0);
- auto rhs = make_cube(D::map("a", 2, 1), D::map("b", 2, 1), D::map("c", 2, 1), 2.0);
+ auto lhs = GS(1.0).map("a", 16, 1).map("b", 16, 1).map("c", 16, 1).gen();
+ auto rhs = GS(2.0).map("a", 2, 1).map("b", 2, 1).map("c", 2, 1).gen();
benchmark_join("sparse full overlap big vs small multiply", lhs, rhs, operation::Mul::f);
}
TEST(SparseJoin, partial_overlap) {
- auto lhs = make_cube(D::map("a", 8, 1), D::map("c", 8, 1), D::map("d", 8, 1), 1.0);
- auto rhs = make_cube(D::map("b", 8, 2), D::map("c", 8, 2), D::map("d", 8, 2), 2.0);
+ auto lhs = GS(1.0).map("a", 8, 1).map("c", 8, 1).map("d", 8, 1).gen();
+ auto rhs = GS(2.0).map("b", 8, 2).map("c", 8, 2).map("d", 8, 2).gen();
benchmark_join("sparse partial overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(SparseJoin, no_overlap) {
- auto lhs = make_cube(D::map("a", 4, 1), D::map("e", 4, 1), D::map("f", 4, 1), 1.0);
- auto rhs = make_cube(D::map("b", 4, 1), D::map("c", 4, 1), D::map("d", 4, 1), 2.0);
+ auto lhs = GS(1.0).map("a", 4, 1).map("e", 4, 1).map("f", 4, 1).gen();
+ auto rhs = GS(2.0).map("b", 4, 1).map("c", 4, 1).map("d", 4, 1).gen();
benchmark_join("sparse no overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(SparseJoin, multiply_by_number) {
- auto lhs = make_spec(3.0);
- auto rhs = make_cube(D::map("a", 16, 2), D::map("b", 16, 2), D::map("c", 16, 2), 2.0);
+ auto lhs = GS(3.0).gen();
+ auto rhs = GS(2.0).map("a", 16, 2).map("b", 16, 2).map("c", 16, 2).gen();
benchmark_join("sparse multiply by number", lhs, rhs, operation::Mul::f);
}
//-----------------------------------------------------------------------------
TEST(MixedJoin, full_overlap) {
- auto lhs = make_cube(D::map("a", 16, 1), D::map("b", 16, 1), D::idx("c", 16), 1.0);
- auto rhs = make_cube(D::map("a", 16, 2), D::map("b", 16, 2), D::idx("c", 16), 2.0);
+ auto lhs = GS(1.0).map("a", 16, 1).map("b", 16, 1).idx("c", 16).gen();
+ auto rhs = GS(2.0).map("a", 16, 2).map("b", 16, 2).idx("c", 16).gen();
benchmark_join("mixed full overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(MixedJoin, partial_sparse_overlap) {
- auto lhs = make_cube(D::map("a", 8, 1), D::map("c", 8, 1), D::idx("d", 8), 1.0);
- auto rhs = make_cube(D::map("b", 8, 2), D::map("c", 8, 2), D::idx("d", 8), 2.0);
+ auto lhs = GS(1.0).map("a", 8, 1).map("c", 8, 1).idx("d", 8).gen();
+ auto rhs = GS(2.0).map("b", 8, 2).map("c", 8, 2).idx("d", 8).gen();
benchmark_join("mixed partial sparse overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(MixedJoin, no_overlap) {
- auto lhs = make_cube(D::map("a", 4, 1), D::map("e", 4, 1), D::idx("f", 4), 1.0);
- auto rhs = make_cube(D::map("b", 4, 1), D::map("c", 4, 1), D::idx("d", 4), 2.0);
+ auto lhs = GS(1.0).map("a", 4, 1).map("e", 4, 1).idx("f", 4).gen();
+ auto rhs = GS(2.0).map("b", 4, 1).map("c", 4, 1).idx("d", 4).gen();
benchmark_join("mixed no overlap multiply", lhs, rhs, operation::Mul::f);
}
TEST(MixedJoin, multiply_by_number) {
- auto lhs = make_spec(3.0);
- auto rhs = make_cube(D::map("a", 16, 2), D::map("b", 16, 2), D::idx("c", 16), 2.0);
+ auto lhs = GS(3.0).gen();
+ auto rhs = GS(2.0).map("a", 16, 2).map("b", 16, 2).idx("c", 16).gen();
benchmark_join("mixed multiply by number", lhs, rhs, operation::Mul::f);
}
//-----------------------------------------------------------------------------
TEST(ReduceBench, number_reduce) {
- auto lhs = make_spec(1.0);
+ auto lhs = GS(1.0).gen();
benchmark_reduce("number reduce", lhs, Aggr::SUM, {});
}
TEST(ReduceBench, dense_reduce) {
- auto lhs = make_cube(D::idx("a", 16), D::idx("b", 16), D::idx("c", 16), 1.0);
+ auto lhs = GS(1.0).idx("a", 16).idx("b", 16).idx("c", 16).gen();
benchmark_reduce("dense reduce inner", lhs, Aggr::SUM, {"c"});
benchmark_reduce("dense reduce middle", lhs, Aggr::SUM, {"b"});
benchmark_reduce("dense reduce outer", lhs, Aggr::SUM, {"a"});
@@ -929,7 +886,7 @@ TEST(ReduceBench, dense_reduce) {
}
TEST(ReduceBench, sparse_reduce) {
- auto lhs = make_cube(D::map("a", 16, 1), D::map("b", 16, 1), D::map("c", 16, 1), 1.0);
+ auto lhs = GS(1.0).map("a", 16, 1).map("b", 16, 1).map("c", 16, 1).gen();
benchmark_reduce("sparse reduce inner", lhs, Aggr::SUM, {"c"});
benchmark_reduce("sparse reduce middle", lhs, Aggr::SUM, {"b"});
benchmark_reduce("sparse reduce outer", lhs, Aggr::SUM, {"a"});
@@ -940,8 +897,8 @@ TEST(ReduceBench, sparse_reduce) {
}
TEST(ReduceBench, mixed_reduce) {
- auto lhs = make_spec(1.0, D::map("a", 4, 1), D::map("b", 4, 1), D::map("c", 4, 1),
- D::idx("d", 4), D::idx("e", 4), D::idx("f", 4));
+ auto lhs = GS(1.0).map("a", 4, 1).map("b", 4, 1).map("c", 4, 1)
+ .idx("d", 4).idx("e", 4).idx("f", 4).gen();
benchmark_reduce("mixed reduce middle dense", lhs, Aggr::SUM, {"e"});
benchmark_reduce("mixed reduce middle sparse", lhs, Aggr::SUM, {"b"});
benchmark_reduce("mixed reduce middle sparse/dense", lhs, Aggr::SUM, {"b", "e"});
@@ -953,87 +910,87 @@ TEST(ReduceBench, mixed_reduce) {
//-----------------------------------------------------------------------------
TEST(RenameBench, dense_rename) {
- auto lhs = make_matrix(D::idx("a", 64), D::idx("b", 64), 1.0);
+ auto lhs = GS(1.0).idx("a", 64).idx("b", 64).gen();
benchmark_rename("dense transpose", lhs, {"a", "b"}, {"b", "a"});
}
TEST(RenameBench, sparse_rename) {
- auto lhs = make_matrix(D::map("a", 64, 1), D::map("b", 64, 1), 1.0);
+ auto lhs = GS(1.0).map("a", 64, 1).map("b", 64, 1).gen();
benchmark_rename("sparse transpose", lhs, {"a", "b"}, {"b", "a"});
}
TEST(RenameBench, mixed_rename) {
- auto lhs = make_spec(1.0, D::map("a", 8, 1), D::map("b", 8, 1), D::idx("c", 8), D::idx("d", 8));
+ auto lhs = GS(1.0).map("a", 8, 1).map("b", 8, 1).idx("c", 8).idx("d", 8).gen();
benchmark_rename("mixed multi-transpose", lhs, {"a", "b", "c", "d"}, {"b", "a", "d", "c"});
}
//-----------------------------------------------------------------------------
TEST(MergeBench, dense_merge) {
- auto lhs = make_matrix(D::idx("a", 64), D::idx("b", 64), 1.0);
- auto rhs = make_matrix(D::idx("a", 64), D::idx("b", 64), 2.0);
+ auto lhs = GS(1.0).idx("a", 64).idx("b", 64).gen();
+ auto rhs = GS(2.0).idx("a", 64).idx("b", 64).gen();
benchmark_merge("dense merge", lhs, rhs, operation::Max::f);
}
TEST(MergeBench, sparse_merge_big_small) {
- auto lhs = make_matrix(D::map("a", 64, 1), D::map("b", 64, 1), 1.0);
- auto rhs = make_matrix(D::map("a", 8, 1), D::map("b", 8, 1), 2.0);
+ auto lhs = GS(1.0).map("a", 64, 1).map("b", 64, 1).gen();
+ auto rhs = GS(2.0).map("a", 8, 1).map("b", 8, 1).gen();
benchmark_merge("sparse merge big vs small", lhs, rhs, operation::Max::f);
}
TEST(MergeBench, sparse_merge_minimal_overlap) {
- auto lhs = make_matrix(D::map("a", 64, 11), D::map("b", 32, 11), 1.0);
- auto rhs = make_matrix(D::map("a", 32, 13), D::map("b", 64, 13), 2.0);
+ auto lhs = GS(1.0).map("a", 64, 11).map("b", 32, 11).gen();
+ auto rhs = GS(2.0).map("a", 32, 13).map("b", 64, 13).gen();
benchmark_merge("sparse merge minimal overlap", lhs, rhs, operation::Max::f);
}
TEST(MergeBench, mixed_merge) {
- auto lhs = make_matrix(D::map("a", 64, 1), D::idx("b", 64), 1.0);
- auto rhs = make_matrix(D::map("a", 64, 2), D::idx("b", 64), 2.0);
+ auto lhs = GS(1.0).map("a", 64, 1).idx("b", 64).gen();
+ auto rhs = GS(2.0).map("a", 64, 2).idx("b", 64).gen();
benchmark_merge("mixed merge", lhs, rhs, operation::Max::f);
}
//-----------------------------------------------------------------------------
TEST(MapBench, number_map) {
- auto lhs = make_spec(1.75);
+ auto lhs = GS(1.75).gen();
benchmark_map("number map", lhs, operation::Floor::f);
}
TEST(MapBench, dense_map) {
- auto lhs = make_matrix(D::idx("a", 64), D::idx("b", 64), 1.75);
+ auto lhs = GS(1.75).idx("a", 64).idx("b", 64).gen();
benchmark_map("dense map", lhs, operation::Floor::f);
}
TEST(MapBench, sparse_map_small) {
- auto lhs = make_matrix(D::map("a", 4, 1), D::map("b", 4, 1), 1.75);
+ auto lhs = GS(1.75).map("a", 4, 1).map("b", 4, 1).gen();
benchmark_map("sparse map small", lhs, operation::Floor::f);
}
TEST(MapBench, sparse_map_big) {
- auto lhs = make_matrix(D::map("a", 64, 1), D::map("b", 64, 1), 1.75);
+ auto lhs = GS(1.75).map("a", 64, 1).map("b", 64, 1).gen();
benchmark_map("sparse map big", lhs, operation::Floor::f);
}
TEST(MapBench, mixed_map) {
- auto lhs = make_matrix(D::map("a", 64, 1), D::idx("b", 64), 1.75);
+ auto lhs = GS(1.75).map("a", 64, 1).idx("b", 64).gen();
benchmark_map("mixed map", lhs, operation::Floor::f);
}
//-----------------------------------------------------------------------------
TEST(TensorCreateBench, create_dense) {
- auto proto = make_matrix(D::idx("a", 32), D::idx("b", 32), 1.0);
+ auto proto = GS(1.0).idx("a", 32).idx("b", 32).gen();
benchmark_tensor_create("dense tensor create", proto);
}
TEST(TensorCreateBench, create_sparse) {
- auto proto = make_matrix(D::map("a", 32, 1), D::map("b", 32, 1), 1.0);
+ auto proto = GS(1.0).map("a", 32, 1).map("b", 32, 1).gen();
benchmark_tensor_create("sparse tensor create", proto);
}
TEST(TensorCreateBench, create_mixed) {
- auto proto = make_matrix(D::map("a", 32, 1), D::idx("b", 32), 1.0);
+ auto proto = GS(1.0).map("a", 32, 1).idx("b", 32).gen();
benchmark_tensor_create("mixed tensor create", proto);
}
@@ -1041,7 +998,7 @@ TEST(TensorCreateBench, create_mixed) {
TEST(TensorLambdaBench, simple_lambda) {
auto type = ValueType::from_spec("tensor<float>(a[64],b[64])");
- auto p0 = make_spec(3.5);
+ auto p0 = GS(3.5).gen();
auto function = Function::parse({"a", "b", "p0"}, "(a*64+b)*p0");
ASSERT_FALSE(function->has_error());
benchmark_tensor_lambda("simple tensor lambda", type, p0, *function);
@@ -1049,7 +1006,7 @@ TEST(TensorLambdaBench, simple_lambda) {
TEST(TensorLambdaBench, complex_lambda) {
auto type = ValueType::from_spec("tensor<float>(a[64],b[64])");
- auto p0 = make_vector(D::idx("x", 3), 1.0);
+ auto p0 = GS(1.0).idx("x", 3).gen();
auto function = Function::parse({"a", "b", "p0"}, "(a*64+b)*reduce(p0,sum)");
ASSERT_FALSE(function->has_error());
benchmark_tensor_lambda("complex tensor lambda", type, p0, *function);
@@ -1058,7 +1015,7 @@ TEST(TensorLambdaBench, complex_lambda) {
//-----------------------------------------------------------------------------
TEST(TensorPeekBench, dense_peek) {
- auto lhs = make_matrix(D::idx("a", 64), D::idx("b", 64), 1.0);
+ auto lhs = GS(1.0).idx("a", 64).idx("b", 64).gen();
benchmark_tensor_peek("dense peek cell verbatim", lhs, verbatim_peek().add("a", 1).add("b", 2));
benchmark_tensor_peek("dense peek cell dynamic", lhs, dynamic_peek().add("a", 1).add("b", 2));
benchmark_tensor_peek("dense peek vector verbatim", lhs, verbatim_peek().add("a", 1));
@@ -1066,7 +1023,7 @@ TEST(TensorPeekBench, dense_peek) {
}
TEST(TensorPeekBench, sparse_peek) {
- auto lhs = make_matrix(D::map("a", 64, 1), D::map("b", 64, 1), 1.0);
+ auto lhs = GS(1.0).map("a", 64, 1).map("b", 64, 1).gen();
benchmark_tensor_peek("sparse peek cell verbatim", lhs, verbatim_peek().add("a", 1).add("b", 2));
benchmark_tensor_peek("sparse peek cell dynamic", lhs, dynamic_peek().add("a", 1).add("b", 2));
benchmark_tensor_peek("sparse peek vector verbatim", lhs, verbatim_peek().add("a", 1));
@@ -1074,7 +1031,7 @@ TEST(TensorPeekBench, sparse_peek) {
}
TEST(TensorPeekBench, mixed_peek) {
- auto lhs = make_spec(1.0, D::map("a", 8, 1), D::map("b", 8, 1), D::idx("c", 8), D::idx("d", 8));
+ auto lhs = GS(1.0).map("a", 8, 1).map("b", 8, 1).idx("c", 8).idx("d", 8).gen();
benchmark_tensor_peek("mixed peek cell verbatim", lhs, verbatim_peek().add("a", 1).add("b", 2).add("c", 3).add("d", 4));
benchmark_tensor_peek("mixed peek cell dynamic", lhs, dynamic_peek().add("a", 1).add("b", 2).add("c", 3).add("d", 4));
benchmark_tensor_peek("mixed peek dense verbatim", lhs, verbatim_peek().add("a", 1).add("b", 2));
diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
index 2e8c89f88fc..25612b8d5fd 100644
--- a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
@@ -5,6 +5,7 @@
#include "simple_value.h"
#include <vespa/eval/instruction/dense_dot_product_function.h>
+#include <vespa/eval/instruction/sparse_dot_product_function.h>
#include <vespa/eval/instruction/mixed_inner_product_function.h>
#include <vespa/eval/instruction/sum_max_dot_product_function.h>
#include <vespa/eval/instruction/dense_xw_product_function.h>
@@ -31,11 +32,7 @@ namespace vespalib::eval {
namespace {
-const TensorFunction &optimize_for_factory(const ValueBuilderFactory &factory, const TensorFunction &expr, Stash &stash) {
- if (&factory == &SimpleValueBuilderFactory::get()) {
- // never optimize simple value evaluation
- return expr;
- }
+const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const TensorFunction &expr, Stash &stash) {
using Child = TensorFunction::Child;
Child root(expr);
{
@@ -47,6 +44,7 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &factory, c
const Child &child = nodes.back().get();
child.set(SumMaxDotProductFunction::optimize(child.get(), stash));
child.set(DenseDotProductFunction::optimize(child.get(), stash));
+ child.set(SparseDotProductFunction::optimize(child.get(), stash));
child.set(DenseXWProductFunction::optimize(child.get(), stash));
child.set(DenseMatMulFunction::optimize(child.get(), stash));
child.set(DenseMultiMatMulFunction::optimize(child.get(), stash));
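Every pass in this chain follows the same contract (SparseDotProductFunction's
implementation further down is a complete example): inspect the node, return a
stash-allocated replacement on a match, and return the input unchanged
otherwise. A hypothetical additional pass would slot in the same way:

   child.set(MyNewFunction::optimize(child.get(), stash)); // MyNewFunction is hypothetical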
diff --git a/eval/src/vespa/eval/eval/test/CMakeLists.txt b/eval/src/vespa/eval/eval/test/CMakeLists.txt
index 2e9b50da5e6..e82b85d1890 100644
--- a/eval/src/vespa/eval/eval/test/CMakeLists.txt
+++ b/eval/src/vespa/eval/eval/test/CMakeLists.txt
@@ -3,6 +3,7 @@ vespa_add_library(eval_eval_test OBJECT
SOURCES
eval_fixture.cpp
eval_spec.cpp
+ gen_spec.cpp
reference_evaluation.cpp
reference_operations.cpp
tensor_conformance.cpp
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.cpp b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
index 58d8905baf3..966954b9026 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
@@ -28,7 +28,10 @@ NodeTypes get_types(const Function &function, const ParamRepo &param_repo) {
std::vector<ValueType> param_types;
for (size_t i = 0; i < function.num_params(); ++i) {
auto pos = param_repo.map.find(function.param_name(i));
- ASSERT_TRUE(pos != param_repo.map.end());
+ if (pos == param_repo.map.end()) {
+ TEST_STATE(fmt("param name: '%s'", function.param_name(i).data()).c_str());
+ ASSERT_TRUE(pos != param_repo.map.end());
+ }
param_types.push_back(ValueType::from_spec(pos->second.value.type()));
ASSERT_TRUE(!param_types.back().is_error());
}
@@ -181,6 +184,23 @@ EvalFixture::ParamRepo::add_dense(const std::vector<std::pair<vespalib::string,
return *this;
}
+// produce 4 variants: {double,float} cells x {const,mutable}
+EvalFixture::ParamRepo &
+EvalFixture::ParamRepo::add_variants(const vespalib::string &name_base,
+ const GenSpec &spec)
+{
+ auto name_f = name_base + "_f";
+ auto name_m = "@" + name_base;
+ auto name_m_f = "@" + name_base + "_f";
+ auto dbl_ts = spec.cpy().cells_double().gen();
+ auto flt_ts = spec.cpy().cells_float().gen();
+ add(name_base, dbl_ts);
+ add(name_f, flt_ts);
+ add_mutable(name_m, dbl_ts);
+ add_mutable(name_m_f, flt_ts);
+ return *this;
+}
+
void
EvalFixture::detect_param_tampering(const ParamRepo &param_repo, bool allow_mutable) const
{
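A usage sketch for the new helper, grounded in the naming scheme above (the
GenSpec argument is illustrative): a single call registers all four spellings
of a parameter.

   EvalFixture::ParamRepo param_repo;
   param_repo.add_variants("x5", GenSpec().map("x", 5, 1));
   // registered names: "x5" (double), "x5_f" (float),
   //                   "@x5" (mutable double), "@x5_f" (mutable float)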
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.h b/eval/src/vespa/eval/eval/test/eval_fixture.h
index dc49cf7e4dc..44adaca3298 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.h
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.h
@@ -10,6 +10,7 @@
#include <vespa/vespalib/util/stash.h>
#include <set>
#include <functional>
+#include "gen_spec.h"
namespace vespalib::eval::test {
@@ -40,6 +41,10 @@ public:
ParamRepo &add_matrix(const char *d1, size_t s1, const char *d2, size_t s2, gen_fun_t gen = gen_N);
ParamRepo &add_cube(const char *d1, size_t s1, const char *d2, size_t s2, const char *d3, size_t s3, gen_fun_t gen = gen_N);
ParamRepo &add_dense(const std::vector<std::pair<vespalib::string, size_t> > &dims, gen_fun_t gen = gen_N);
+
+ // produce 4 variants: {double,float} cells x {const,mutable}
+ ParamRepo &add_variants(const vespalib::string &name_base,
+ const GenSpec &spec);
~ParamRepo() {}
};
diff --git a/eval/src/vespa/eval/eval/test/gen_spec.cpp b/eval/src/vespa/eval/eval/test/gen_spec.cpp
new file mode 100644
index 00000000000..c20e9005318
--- /dev/null
+++ b/eval/src/vespa/eval/eval/test/gen_spec.cpp
@@ -0,0 +1,63 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "gen_spec.h"
+#include <vespa/eval/eval/string_stuff.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+using vespalib::make_string_short::fmt;
+
+namespace vespalib::eval::test {
+
+DimSpec::~DimSpec() = default;
+
+std::vector<vespalib::string>
+DimSpec::make_dict(size_t size, size_t stride, const vespalib::string &prefix)
+{
+ std::vector<vespalib::string> dict;
+ for (size_t i = 0; i < size; ++i) {
+ dict.push_back(fmt("%s%zu", prefix.c_str(), i * stride));
+ }
+ return dict;
+}
+
+GenSpec::GenSpec(GenSpec &&other) = default;
+GenSpec::GenSpec(const GenSpec &other) = default;
+GenSpec &GenSpec::operator=(GenSpec &&other) = default;
+GenSpec &GenSpec::operator=(const GenSpec &other) = default;
+
+GenSpec::~GenSpec() = default;
+
+ValueType
+GenSpec::type() const
+{
+ std::vector<ValueType::Dimension> dim_types;
+ for (const auto &dim: _dims) {
+ dim_types.push_back(dim.type());
+ }
+ auto type = ValueType::tensor_type(dim_types, _cells);
+ assert(!type.is_error());
+ return type;
+}
+
+TensorSpec
+GenSpec::gen() const
+{
+ size_t idx = 0;
+ TensorSpec::Address addr;
+ TensorSpec result(type().to_spec());
+ std::function<void(size_t)> add_cells = [&](size_t dim_idx) {
+ if (dim_idx == _dims.size()) {
+ result.add(addr, _seq(idx++));
+ } else {
+ const auto &dim = _dims[dim_idx];
+ for (size_t i = 0; i < dim.size(); ++i) {
+ addr.insert_or_assign(dim.name(), dim.label(i));
+ add_cells(dim_idx + 1);
+ }
+ }
+ };
+ add_cells(0);
+ return result;
+}
+
+} // namespace
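gen() expands the dimensions recursively from left to right, so the most
recently added dimension varies fastest and the sequence function is applied in
that cell order. A worked example using only the API from this patch:

   auto ts = GenSpec().map("x", 2, 1).idx("y", 2).gen();
   // type tensor(x{},y[2]); cells in generation order (default seq is idx + 1):
   //   {x:"0",y:0}:1  {x:"0",y:1}:2  {x:"1",y:0}:3  {x:"1",y:1}:4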
diff --git a/eval/src/vespa/eval/eval/test/gen_spec.h b/eval/src/vespa/eval/eval/test/gen_spec.h
new file mode 100644
index 00000000000..36bbd554125
--- /dev/null
+++ b/eval/src/vespa/eval/eval/test/gen_spec.h
@@ -0,0 +1,109 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/value_type.h>
+#include <functional>
+#include <cassert>
+
+namespace vespalib::eval::test {
+
+/**
+ * Type and labels for a single dimension of a TensorSpec to be
+ * generated. Dimensions are specified independently of each other for
+ * simplicity. All dense subspaces will be padded during conversion to
+ * actual values, which means that indexed dimensions are inherently
+ * independent already. Using different label sets for the same mapped
+ * dimension in different tensors makes it possible to exercise any
+ * desired degree of partial overlap.
+ **/
+class DimSpec
+{
+private:
+ vespalib::string _name;
+ size_t _size;
+ std::vector<vespalib::string> _dict;
+public:
+ DimSpec(const vespalib::string &name, size_t size) noexcept
+ : _name(name), _size(size), _dict()
+ {
+ assert(_size);
+ }
+ DimSpec(const vespalib::string &name, std::vector<vespalib::string> dict) noexcept
+ : _name(name), _size(), _dict(std::move(dict))
+ {
+ assert(!_size);
+ }
+ ~DimSpec();
+ static std::vector<vespalib::string> make_dict(size_t size, size_t stride, const vespalib::string &prefix);
+ ValueType::Dimension type() const {
+ return _size ? ValueType::Dimension{_name, uint32_t(_size)} : ValueType::Dimension{_name};
+ }
+ const vespalib::string &name() const { return _name; }
+ size_t size() const {
+ return _size ? _size : _dict.size();
+ }
+ TensorSpec::Label label(size_t idx) const {
+ assert(idx < size());
+ return _size ? TensorSpec::Label{idx} : TensorSpec::Label{_dict[idx]};
+ }
+};
+
+/**
+ * Specification defining how to generate a TensorSpec. Typically used
+ * to generate complex values for testing and benchmarking.
+ **/
+class GenSpec
+{
+public:
+ using seq_t = std::function<double(size_t)>;
+private:
+ std::vector<DimSpec> _dims;
+ CellType _cells;
+ seq_t _seq;
+
+ static double default_seq(size_t idx) { return (idx + 1.0); }
+public:
+ GenSpec() : _dims(), _cells(CellType::DOUBLE), _seq(default_seq) {}
+ GenSpec(GenSpec &&other);
+ GenSpec(const GenSpec &other);
+ GenSpec &operator=(GenSpec &&other);
+ GenSpec &operator=(const GenSpec &other);
+ ~GenSpec();
+ std::vector<DimSpec> dims() const { return _dims; }
+ CellType cells() const { return _cells; }
+ seq_t seq() const { return _seq; }
+ GenSpec cpy() const { return *this; }
+ GenSpec &idx(const vespalib::string &name, size_t size) {
+ _dims.emplace_back(name, size);
+ return *this;
+ }
+ GenSpec &map(const vespalib::string &name, size_t size, size_t stride = 1, const vespalib::string &prefix = "") {
+ _dims.emplace_back(name, DimSpec::make_dict(size, stride, prefix));
+ return *this;
+ }
+ GenSpec &map(const vespalib::string &name, std::vector<vespalib::string> dict) {
+ _dims.emplace_back(name, std::move(dict));
+ return *this;
+ }
+ GenSpec &cells(CellType cell_type) {
+ _cells = cell_type;
+ return *this;
+ }
+ GenSpec &cells_double() { return cells(CellType::DOUBLE); }
+ GenSpec &cells_float() { return cells(CellType::FLOAT); }
+ GenSpec &seq(seq_t seq_in) {
+ _seq = seq_in;
+ return *this;
+ }
+ GenSpec &seq_n() { return seq(default_seq); }
+ GenSpec &seq_bias(double bias) {
+ seq_t fun = [bias](size_t idx) noexcept { return (idx + bias); };
+ return seq(fun);
+ }
+ ValueType type() const;
+ TensorSpec gen() const;
+};
+
+} // namespace
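Putting the builder together (everything below uses only the interface above):

   GenSpec spec = GenSpec()
       .map("a", 4, 2)   // mapped dimension with labels "0","2","4","6"
       .idx("b", 3)      // indexed dimension of size 3
       .cells_float()    // resulting type: tensor<float>(a{},b[3])
       .seq_bias(0.5);   // cell values 0.5, 1.5, ..., 11.5
   TensorSpec ts = spec.gen();
   ValueType  vt = spec.type(); // the type alone, without generating cells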
diff --git a/eval/src/vespa/eval/eval/test/param_variants.h b/eval/src/vespa/eval/eval/test/param_variants.h
deleted file mode 100644
index 41a43ebca08..00000000000
--- a/eval/src/vespa/eval/eval/test/param_variants.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#include "eval_fixture.h"
-#include "tensor_model.hpp"
-
-namespace vespalib::eval::test {
-
-// for testing of optimizers / tensor functions
-// we produce the same param three times:
-// as-is, with float cells, and tagged as mutable.
-void add_variants(EvalFixture::ParamRepo &repo,
- const vespalib::string &name_base,
- const Layout &base_layout,
- const Sequence &seq)
-{
- auto name_f = name_base + "_f";
- auto name_m = "@" + name_base;
- auto name_m_f = "@" + name_base + "_f";
- repo.add(name_base, spec(base_layout, seq));
- repo.add(name_f, spec(float_cells(base_layout), seq));
- repo.add_mutable(name_m, spec(base_layout, seq));
- repo.add_mutable(name_m_f, spec(float_cells(base_layout), seq));
-}
-
-} // namespace
diff --git a/eval/src/vespa/eval/instruction/CMakeLists.txt b/eval/src/vespa/eval/instruction/CMakeLists.txt
index 58d5290f5d9..cac69d23640 100644
--- a/eval/src/vespa/eval/instruction/CMakeLists.txt
+++ b/eval/src/vespa/eval/instruction/CMakeLists.txt
@@ -32,6 +32,7 @@ vespa_add_library(eval_instruction OBJECT
pow_as_map_optimizer.cpp
remove_trivial_dimension_optimizer.cpp
replace_type_function.cpp
+ sparse_dot_product_function.cpp
sum_max_dot_product_function.cpp
vector_from_doubles_function.cpp
)
diff --git a/eval/src/vespa/eval/instruction/generic_join.cpp b/eval/src/vespa/eval/instruction/generic_join.cpp
index abe29b8228c..6d6f86b7c4d 100644
--- a/eval/src/vespa/eval/instruction/generic_join.cpp
+++ b/eval/src/vespa/eval/instruction/generic_join.cpp
@@ -308,6 +308,17 @@ SparseJoinPlan::SparseJoinPlan(const ValueType &lhs_type, const ValueType &rhs_t
[](const auto &a, const auto &b){ return (a.name < b.name); });
}
+SparseJoinPlan::SparseJoinPlan(size_t num_mapped_dims)
+ : sources(num_mapped_dims, Source::BOTH), lhs_overlap(), rhs_overlap()
+{
+ lhs_overlap.reserve(num_mapped_dims);
+ rhs_overlap.reserve(num_mapped_dims);
+ for (size_t i = 0; i < num_mapped_dims; ++i) {
+ lhs_overlap.push_back(i);
+ rhs_overlap.push_back(i);
+ }
+}
+
bool
SparseJoinPlan::should_forward_lhs_index() const
{
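The new single-argument constructor builds the degenerate full-overlap plan
directly instead of deriving it from two value types: every mapped dimension is
sourced from BOTH operands and both overlap lists are simply 0..n-1. For two
mapped dimensions:

   SparseJoinPlan plan(2);
   // plan.sources     == {Source::BOTH, Source::BOTH}
   // plan.lhs_overlap == {0, 1}
   // plan.rhs_overlap == {0, 1}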
diff --git a/eval/src/vespa/eval/instruction/generic_join.h b/eval/src/vespa/eval/instruction/generic_join.h
index 1fcfcf416cc..026a2938971 100644
--- a/eval/src/vespa/eval/instruction/generic_join.h
+++ b/eval/src/vespa/eval/instruction/generic_join.h
@@ -58,6 +58,7 @@ struct SparseJoinPlan {
bool should_forward_lhs_index() const;
bool should_forward_rhs_index() const;
SparseJoinPlan(const ValueType &lhs_type, const ValueType &rhs_type);
+ SparseJoinPlan(size_t num_mapped_dims); // full overlap plan
~SparseJoinPlan();
};
@@ -70,15 +71,14 @@ struct SparseJoinState {
const Value::Index &first_index;
const Value::Index &second_index;
const std::vector<size_t> &second_view_dims;
- std::vector<string_id> full_address;
- std::vector<string_id*> first_address;
- std::vector<const string_id*> address_overlap;
- std::vector<string_id*> second_only_address;
+ std::vector<string_id> full_address;
+ std::vector<string_id*> first_address;
+ std::vector<const string_id*> address_overlap;
+ std::vector<string_id*> second_only_address;
size_t lhs_subspace;
size_t rhs_subspace;
size_t &first_subspace;
size_t &second_subspace;
-
SparseJoinState(const SparseJoinPlan &plan, const Value::Index &lhs, const Value::Index &rhs);
~SparseJoinState();
};
diff --git a/eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp b/eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp
new file mode 100644
index 00000000000..93ae2856372
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp
@@ -0,0 +1,111 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "sparse_dot_product_function.h"
+#include "generic_join.h"
+#include "detect_type.h"
+#include <vespa/eval/eval/fast_value.hpp>
+
+namespace vespalib::eval {
+
+using namespace tensor_function;
+using namespace operation;
+using namespace instruction;
+
+namespace {
+
+template <typename SCT, typename BCT>
+double my_fast_sparse_dot_product(const FastValueIndex &small_idx, const FastValueIndex &big_idx,
+ const SCT *small_cells, const BCT *big_cells)
+{
+ double result = 0.0;
+ small_idx.map.each_map_entry([&](auto small_subspace, auto hash) {
+ auto small_addr = small_idx.map.get_addr(small_subspace);
+ auto big_subspace = big_idx.map.lookup(small_addr, hash);
+ if (big_subspace != FastAddrMap::npos()) {
+ result += (small_cells[small_subspace] * big_cells[big_subspace]);
+ }
+ });
+ return result;
+}
+
+template <typename LCT, typename RCT>
+void my_sparse_dot_product_op(InterpretedFunction::State &state, uint64_t num_mapped_dims) {
+ const auto &lhs_idx = state.peek(1).index();
+ const auto &rhs_idx = state.peek(0).index();
+ const LCT *lhs_cells = state.peek(1).cells().typify<LCT>().cbegin();
+ const RCT *rhs_cells = state.peek(0).cells().typify<RCT>().cbegin();
+ if (auto indexes = detect_type<FastValueIndex>(lhs_idx, rhs_idx)) {
+#if __has_cpp_attribute(likely)
+ [[likely]];
+#endif
+ const auto &lhs_fast = indexes.get<0>();
+ const auto &rhs_fast = indexes.get<1>();
+ double result = (rhs_fast.map.size() < lhs_fast.map.size())
+ ? my_fast_sparse_dot_product(rhs_fast, lhs_fast, rhs_cells, lhs_cells)
+ : my_fast_sparse_dot_product(lhs_fast, rhs_fast, lhs_cells, rhs_cells);
+ state.pop_pop_push(state.stash.create<ScalarValue<double>>(result));
+ } else {
+#if __has_cpp_attribute(unlikely)
+ [[unlikely]];
+#endif
+ double result = 0.0;
+ SparseJoinPlan plan(num_mapped_dims);
+ SparseJoinState sparse(plan, lhs_idx, rhs_idx);
+ auto outer = sparse.first_index.create_view({});
+ auto inner = sparse.second_index.create_view(sparse.second_view_dims);
+ outer->lookup({});
+ while (outer->next_result(sparse.first_address, sparse.first_subspace)) {
+ inner->lookup(sparse.address_overlap);
+ if (inner->next_result(sparse.second_only_address, sparse.second_subspace)) {
+ result += (lhs_cells[sparse.lhs_subspace] * rhs_cells[sparse.rhs_subspace]);
+ }
+ }
+ state.pop_pop_push(state.stash.create<ScalarValue<double>>(result));
+ }
+}
+
+struct MyGetFun {
+ template <typename LCT, typename RCT>
+ static auto invoke() { return my_sparse_dot_product_op<LCT,RCT>; }
+};
+
+} // namespace <unnamed>
+
+SparseDotProductFunction::SparseDotProductFunction(const TensorFunction &lhs_in,
+ const TensorFunction &rhs_in)
+ : tensor_function::Op2(ValueType::make_type(CellType::DOUBLE, {}), lhs_in, rhs_in)
+{
+}
+
+InterpretedFunction::Instruction
+SparseDotProductFunction::compile_self(const ValueBuilderFactory &, Stash &) const
+{
+ auto op = typify_invoke<2,TypifyCellType,MyGetFun>(lhs().result_type().cell_type(), rhs().result_type().cell_type());
+ return InterpretedFunction::Instruction(op, lhs().result_type().count_mapped_dimensions());
+}
+
+bool
+SparseDotProductFunction::compatible_types(const ValueType &res, const ValueType &lhs, const ValueType &rhs)
+{
+ return (res.is_scalar() && (res.cell_type() == CellType::DOUBLE) &&
+ lhs.is_sparse() && (rhs.dimensions() == lhs.dimensions()));
+}
+
+const TensorFunction &
+SparseDotProductFunction::optimize(const TensorFunction &expr, Stash &stash)
+{
+ auto reduce = as<Reduce>(expr);
+ if (reduce && (reduce->aggr() == Aggr::SUM)) {
+ auto join = as<Join>(reduce->child());
+ if (join && (join->function() == Mul::f)) {
+ const TensorFunction &lhs = join->lhs();
+ const TensorFunction &rhs = join->rhs();
+ if (compatible_types(expr.result_type(), lhs.result_type(), rhs.result_type())) {
+ return stash.create<SparseDotProductFunction>(lhs, rhs);
+ }
+ }
+ }
+ return expr;
+}
+
+} // namespace
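Reading optimize() together with compatible_types(): the rewrite fires for a
full sum-reduction over a cellwise multiply whose result is a double scalar,
where the lhs is purely sparse and both operands share the exact same
dimensions. Examples of what would and would not match:

   reduce(a * b, sum)     // a, b: tensor(x{})     -> SparseDotProductFunction
   reduce(a * b, sum)     // a, b: tensor(x{},y{}) -> SparseDotProductFunction
   reduce(a * b, sum, x)  // a, b: tensor(x{},y{}) -> result tensor(y{}),
                          //   not a scalar, so the expression is left untouched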
diff --git a/eval/src/vespa/eval/instruction/sparse_dot_product_function.h b/eval/src/vespa/eval/instruction/sparse_dot_product_function.h
new file mode 100644
index 00000000000..ccc7a61f5e8
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/sparse_dot_product_function.h
@@ -0,0 +1,23 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/tensor_function.h>
+
+namespace vespalib::eval {
+
+/**
+ * Tensor function for a dot product between two sparse tensors.
+ */
+class SparseDotProductFunction : public tensor_function::Op2
+{
+public:
+ SparseDotProductFunction(const TensorFunction &lhs_in,
+ const TensorFunction &rhs_in);
+ InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &factory, Stash &stash) const override;
+ bool result_is_mutable() const override { return true; }
+ static bool compatible_types(const ValueType &res, const ValueType &lhs, const ValueType &rhs);
+ static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash);
+};
+
+} // namespace