summary | refs | log | tree | commit | diff | stats
path: root/eval
diff options
context:
space:
mode:
author	Håvard Pettersen <3535158+havardpe@users.noreply.github.com>	2021-02-02 13:05:04 +0100
committer	GitHub <noreply@github.com>	2021-02-02 13:05:04 +0100
commit	8e2aeae47fcf3b21d13fef8794dbac1b27728e62 (patch)
tree	8260cd151706db132e3f90e53b357e630cb53d85 /eval
parent	d2b40aa63fb94282177771b0949a5a83e82bfb85 (diff)
parent	add5dcb82527b280208a9526a0dd21ba3f01e271 (diff)
Merge pull request #16330 from vespa-engine/arnej/use-genspec-in-unit-tests-2
Arnej/use genspec in unit tests 2
Diffstat (limited to 'eval')
-rw-r--r--	eval/src/tests/eval/fast_value/fast_value_test.cpp	| 38
-rw-r--r--	eval/src/tests/eval/reference_operations/reference_operations_test.cpp	| 2
-rw-r--r--	eval/src/tests/eval/simple_value/simple_value_test.cpp	| 107
-rw-r--r--	eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp	| 18
-rw-r--r--	eval/src/tests/eval/value_codec/value_codec_test.cpp	| 52
-rw-r--r--	eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp	| 12
-rw-r--r--	eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp	| 4
-rw-r--r--	eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp	| 16
-rw-r--r--	eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp	| 26
-rw-r--r--	eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp	| 71
-rw-r--r--	eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp	| 14
-rw-r--r--	eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp	| 42
-rw-r--r--	eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp	| 18
-rw-r--r--	eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp	| 10
-rw-r--r--	eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp	| 51
-rw-r--r--	eval/src/tests/streamed/value/streamed_value_test.cpp	| 107
-rw-r--r--	eval/src/vespa/eval/eval/test/eval_fixture.cpp	| 17
-rw-r--r--	eval/src/vespa/eval/eval/test/eval_fixture.h	| 5
-rw-r--r--	eval/src/vespa/eval/eval/test/param_variants.h	| 23
19 files changed, 318 insertions, 315 deletions
diff --git a/eval/src/tests/eval/fast_value/fast_value_test.cpp b/eval/src/tests/eval/fast_value/fast_value_test.cpp
index 279f17a1ead..6cf43511977 100644
--- a/eval/src/tests/eval/fast_value/fast_value_test.cpp
+++ b/eval/src/tests/eval/fast_value/fast_value_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.hpp>
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/value_codec.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib;
@@ -142,29 +142,29 @@ TEST(FastValueBuilderTest, mixed_add_subspace_robustness) {
}
}
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec().cells_float(); }
+
+std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
TEST(FastValueBuilderFactoryTest, fast_values_can_be_copied) {
auto factory = FastValueBuilderFactory::get();
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, factory);
- std::unique_ptr<Value> copy = factory.copy(*value);
- TensorSpec actual = spec_from_value(*copy);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+ std::unique_ptr<Value> value = value_from_spec(expect, factory);
+ std::unique_ptr<Value> copy = factory.copy(*value);
+ TensorSpec actual = spec_from_value(*copy);
+ EXPECT_EQ(actual, expect);
+ }
}
}
diff --git a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
index 0495923018e..3fcca5e34d8 100644
--- a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
+++ b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
@@ -1,13 +1,11 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
#include <vespa/vespalib/gtest/gtest.h>
#include <iostream>
using namespace vespalib;
using namespace vespalib::eval;
-using namespace vespalib::eval::test;
TensorSpec dense_2d_some_cells(bool square) {
return TensorSpec("tensor(a[3],d[5])")
diff --git a/eval/src/tests/eval/simple_value/simple_value_test.cpp b/eval/src/tests/eval/simple_value/simple_value_test.cpp
index 3a653b75172..e8abb646482 100644
--- a/eval/src/tests/eval/simple_value/simple_value_test.cpp
+++ b/eval/src/tests/eval/simple_value/simple_value_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/instruction/generic_join.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/reference_operations.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -23,45 +23,36 @@ using Handle = SharedStringRepo::Handle;
vespalib::string as_str(string_id label) { return Handle::string_from_id(label); }
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec().cells_float(); }
+
+std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
-std::vector<Layout> join_layouts = {
- {}, {},
- {x(5)}, {x(5)},
- {x(5)}, {y(5)},
- {x(5)}, {x(5),y(5)},
- {y(3)}, {x(2),z(3)},
- {x(3),y(5)}, {y(5),z(7)},
- float_cells({x(3),y(5)}), {y(5),z(7)},
- {x(3),y(5)}, float_cells({y(5),z(7)}),
- float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {y({"foo","bar","baz"})},
- {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
- {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
- float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})})
+std::vector<GenSpec> join_layouts = {
+ G(), G(),
+ G().idx("x", 5), G().idx("x", 5),
+ G().idx("x", 5), G().idx("y", 5),
+ G().idx("x", 5), G().idx("x", 5).idx("y", 5),
+ G().idx("y", 3), G().idx("x", 2).idx("z", 3),
+ G().idx("x", 3).idx("y", 5), G().idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
+ G().map("x", {"a","b","c"}), G().map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}), G().map("y", {"foo","bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5), G().idx("y", 5).map("z", {"i","j","k","l"})
+
};
TensorSpec simple_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_t function) {
@@ -76,20 +67,22 @@ TensorSpec simple_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_
TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
- TensorSpec actual = spec_from_value(*value);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+ std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
}
}
TEST(SimpleValueTest, simple_values_can_be_copied) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
- std::unique_ptr<Value> copy = SimpleValueBuilderFactory::get().copy(*value);
- TensorSpec actual = spec_from_value(*copy);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+ std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
+ std::unique_ptr<Value> copy = SimpleValueBuilderFactory::get().copy(*value);
+ TensorSpec actual = spec_from_value(*copy);
+ EXPECT_EQ(actual, expect);
+ }
}
}
@@ -126,16 +119,22 @@ TEST(SimpleValueTest, simple_value_can_be_built_and_inspected) {
EXPECT_EQ(result["bb"], 3);
}
+GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
+
TEST(SimpleValueTest, new_generic_join_works_for_simple_values) {
ASSERT_TRUE((join_layouts.size() % 2) == 0);
for (size_t i = 0; i < join_layouts.size(); i += 2) {
- TensorSpec lhs = spec(join_layouts[i], Div16(N()));
- TensorSpec rhs = spec(join_layouts[i + 1], Div16(N()));
- for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto expect = ReferenceOperations::join(lhs, rhs, fun);
- auto actual = simple_value_join(lhs, rhs, fun);
- EXPECT_EQ(actual, expect);
+ const auto l = join_layouts[i].seq(N_16ths);
+ const auto r = join_layouts[i + 1].seq(N_16ths);
+ for (TensorSpec lhs : { l.gen(), l.cpy().cells_double().gen() }) {
+ for (TensorSpec rhs : { r.gen(), r.cpy().cells_double().gen() }) {
+ for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = ReferenceOperations::join(lhs, rhs, fun);
+ auto actual = simple_value_join(lhs, rhs, fun);
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
}
diff --git a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
index 23bd16cb721..18198a75f7d 100644
--- a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
+++ b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
@@ -8,7 +8,7 @@
#include <vespa/eval/instruction/dense_cell_range_function.h>
#include <vespa/eval/instruction/dense_lambda_peek_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/eval/eval/tensor_nodes.h>
@@ -23,17 +23,19 @@ using namespace vespalib::eval::tensor_function;
const ValueBuilderFactory &simple_factory = SimpleValueBuilderFactory::get();
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
+
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
.add("a", spec(1))
.add("b", spec(2))
- .add("x3", spec({x(3)}, N()))
- .add("x3f", spec(float_cells({x(3)}), N()))
- .add("x3m", spec({x({"0", "1", "2"})}, N()))
- .add("x3y5", spec({x(3), y(5)}, N()))
- .add("x3y5f", spec(float_cells({x(3), y(5)}), N()))
- .add("x15", spec({x(15)}, N()))
- .add("x15f", spec(float_cells({x(15)}), N()));
+ .add("x3", GenSpec().idx("x", 3).gen())
+ .add("x3f", GenSpec().idx("x", 3).cells_float().gen())
+ .add("x3m", GenSpec().map("x", 3).gen())
+ .add("x3y5", GenSpec().idx("x", 3).idx("y", 5).gen())
+ .add("x3y5f", GenSpec().idx("x", 3).idx("y", 5).cells_float().gen())
+ .add("x15", GenSpec().idx("x", 15).gen())
+ .add("x15f", GenSpec().idx("x", 15).cells_float().gen());
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/eval/value_codec/value_codec_test.cpp b/eval/src/tests/eval/value_codec/value_codec_test.cpp
index 2b03cffe730..acce0f5667f 100644
--- a/eval/src/tests/eval/value_codec/value_codec_test.cpp
+++ b/eval/src/tests/eval/value_codec/value_codec_test.cpp
@@ -2,7 +2,7 @@
#include <iostream>
#include <vespa/eval/eval/simple_value.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <vespa/vespalib/data/memory.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -15,28 +15,28 @@ using namespace vespalib::eval::test;
const ValueBuilderFactory &factory = SimpleValueBuilderFactory::get();
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec().cells_float(); }
+
+std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
TEST(ValueCodecTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, factory);
- TensorSpec actual = spec_from_value(*value);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+ std::unique_ptr<Value> value = value_from_spec(expect, factory);
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
}
}
@@ -66,8 +66,8 @@ TEST(ValueCodecTest, simple_values_can_be_built_using_tensor_spec) {
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
Value::UP full_tensor = value_from_spec(full_spec, factory);
- EXPECT_EQUAL(full_spec, spec_from_value(*tensor));
- EXPECT_EQUAL(full_spec, spec_from_value(*full_tensor));
+ EXPECT_EQ(full_spec, spec_from_value(*tensor));
+ EXPECT_EQ(full_spec, spec_from_value(*full_tensor));
};
//-----------------------------------------------------------------------------
@@ -333,11 +333,11 @@ TEST(ValueCodecTest, bad_sparse_tensors_are_caught) {
bad.encode_default(data_default);
bad.encode_with_double(data_double);
bad.encode_with_float(data_float);
- EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
"serialized input claims 12345678 blocks of size 1*8, but only");
- EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
"serialized input claims 12345678 blocks of size 1*8, but only");
- EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
"serialized input claims 12345678 blocks of size 1*4, but only");
}
@@ -386,11 +386,11 @@ TEST(ValueCodecTest, bad_dense_tensors_are_caught) {
bad.encode_default(data_default);
bad.encode_with_double(data_double);
bad.encode_with_float(data_float);
- EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_default, factory), vespalib::IllegalStateException,
"serialized input claims 1 blocks of size 60000*8, but only");
- EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_double, factory), vespalib::IllegalStateException,
"serialized input claims 1 blocks of size 60000*8, but only");
- EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
+ VESPA_EXPECT_EXCEPTION(decode_value(data_float, factory), vespalib::IllegalStateException,
"serialized input claims 1 blocks of size 60000*4, but only");
}
diff --git a/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp b/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp
index 35195522adc..d2dccfde2fd 100644
--- a/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp
+++ b/eval/src/tests/instruction/add_trivial_dimension_optimizer/add_trivial_dimension_optimizer_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -20,11 +20,11 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("x5", spec({x(5)}, N()))
- .add("x5f", spec(float_cells({x(5)}), N()))
- .add("x5y1", spec({x(5),y(1)}, N()))
- .add("y1z1", spec({y(1),z(1)}, N()))
- .add("x_m", spec({x({"a"})}, N()));
+ .add("x5", GenSpec().idx("x", 5).gen())
+ .add("x5f", GenSpec().idx("x", 5).cells_float().gen())
+ .add("x5y1", GenSpec().idx("x", 5).idx("y", 1).gen())
+ .add("y1z1", GenSpec().idx("y", 5).idx("z", 1).gen())
+ .add("x_m", GenSpec().map("x", {"a"}).gen());
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
index 988ca79a04a..5dcdbc5bab8 100644
--- a/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
+++ b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/eval/value_codec.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
using namespace vespalib::eval::tensor_function;
using namespace vespalib::eval::test;
@@ -33,7 +33,7 @@ struct Fixture {
std::vector<TensorFunction::Child::CREF> children;
InterpretedFunction::State state;
Fixture()
- : my_value(value_from_spec(spec({x(10)}, N()), prod_factory)),
+ : my_value(value_from_spec(GenSpec().idx("x", 10).gen(), prod_factory)),
new_type(ValueType::from_spec("tensor(x[5],y[2])")),
mock_child(my_value->type()),
my_fun(new_type, mock_child),
diff --git a/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp b/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp
index dc90a5e54a1..e915a396ae7 100644
--- a/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp
+++ b/eval/src/tests/instruction/fast_rename_optimizer/fast_rename_optimizer_test.cpp
@@ -4,7 +4,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -19,13 +19,13 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("x5", spec({x(5)}, N()))
- .add("x5f", spec(float_cells({x(5)}), N()))
- .add("x_m", spec({x({"a", "b", "c"})}, N()))
- .add("xy_mm", spec({x({"a", "b", "c"}),y({"d","e"})}, N()))
- .add("x5y3z_m", spec({x(5),y(3),z({"a","b"})}, N()))
- .add("x5yz_m", spec({x(5),y({"a","b"}),z({"d","e"})}, N()))
- .add("x5y3", spec({x(5),y(3)}, N()));
+ .add("x5", GenSpec().idx("x", 5).gen())
+ .add("x5f", GenSpec().idx("x", 5).cells_float().gen())
+ .add("x_m", GenSpec().map("x", {"a", "b", "c"}).gen())
+ .add("xy_mm", GenSpec().map("x", {"a", "b", "c"}).map("y", {"d","e"}).gen())
+ .add("x5y3z_m", GenSpec().idx("x", 5).idx("y", 3).map("z", {"a","b"}).gen())
+ .add("x5yz_m", GenSpec().idx("x", 5).map("y", {"a","b"}).map("z", {"d","e"}).gen())
+ .add("x5y3", GenSpec().idx("x", 5).idx("y", 3).gen());
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp b/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
index a3fbb3ed529..2d943aa569e 100644
--- a/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
+++ b/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
@@ -4,8 +4,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
-#include <vespa/eval/eval/test/param_variants.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/instruction/join_with_number_function.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -34,15 +33,16 @@ std::ostream &operator<<(std::ostream &os, Primary primary)
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
+
EvalFixture::ParamRepo make_params() {
auto repo = EvalFixture::ParamRepo()
.add("a", spec(1.5))
.add("number", spec(2.5))
- .add("dense", spec({y(5)}, N()))
- .add_matrix("x", 3, "y", 5);
-
- add_variants(repo, "mixed", {x({"a"}),y(5),z({"d","e"})}, N());
- add_variants(repo, "sparse", {x({"a","b","c"}),z({"d","e","f"})}, N());
+ .add("dense", GenSpec().idx("y", 5).gen())
+ .add_variants("x3y5", GenSpec().idx("x", 3).idx("y", 5))
+ .add_variants("mixed", GenSpec().map("x", {"a"}).idx("y", 5).map("z", {"d","e"}))
+ .add_variants("sparse", GenSpec().map("x", {"a","b","c"}).map("z", {"d","e","f"}));
return repo;
}
@@ -81,22 +81,22 @@ void verify_not_optimized(const vespalib::string &expr) {
TEST("require that dense number join can be optimized") {
TEST_DO(verify_optimized("x3y5+a", Primary::LHS, false));
TEST_DO(verify_optimized("a+x3y5", Primary::RHS, false));
- TEST_DO(verify_optimized("x3y5f*a", Primary::LHS, false));
- TEST_DO(verify_optimized("a*x3y5f", Primary::RHS, false));
+ TEST_DO(verify_optimized("x3y5_f*a", Primary::LHS, false));
+ TEST_DO(verify_optimized("a*x3y5_f", Primary::RHS, false));
}
TEST("require that dense number join can be inplace") {
TEST_DO(verify_optimized("@x3y5*a", Primary::LHS, true));
TEST_DO(verify_optimized("a*@x3y5", Primary::RHS, true));
- TEST_DO(verify_optimized("@x3y5f+a", Primary::LHS, true));
- TEST_DO(verify_optimized("a+@x3y5f", Primary::RHS, true));
+ TEST_DO(verify_optimized("@x3y5_f+a", Primary::LHS, true));
+ TEST_DO(verify_optimized("a+@x3y5_f", Primary::RHS, true));
}
TEST("require that asymmetric operations work") {
TEST_DO(verify_optimized("x3y5/a", Primary::LHS, false));
TEST_DO(verify_optimized("a/x3y5", Primary::RHS, false));
- TEST_DO(verify_optimized("x3y5f-a", Primary::LHS, false));
- TEST_DO(verify_optimized("a-x3y5f", Primary::RHS, false));
+ TEST_DO(verify_optimized("x3y5_f-a", Primary::LHS, false));
+ TEST_DO(verify_optimized("a-x3y5_f", Primary::RHS, false));
}
TEST("require that sparse number join can be optimized") {
diff --git a/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp b/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp
index fbe71f3ed63..6b549b4d4d4 100644
--- a/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_inner_product_function/mixed_inner_product_function_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/instruction/dense_dot_product_function.h>
#include <vespa/eval/instruction/dense_matmul_function.h>
#include <vespa/eval/instruction/dense_multi_matmul_function.h>
@@ -22,34 +22,25 @@ using namespace vespalib::eval::test;
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
-struct MyVecSeq : Sequence {
- double bias;
- double operator[](size_t i) const override { return (i + bias); }
- MyVecSeq(double cellBias) : bias(cellBias) {}
-};
-
-std::function<double(size_t)> my_vec_gen(double cellBias) {
- return [=] (size_t i) noexcept { return i + cellBias; };
-}
//-----------------------------------------------------------------------------
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add_vector("x", 3, my_vec_gen(2.0))
- .add_vector("x", 3, my_vec_gen(13.25))
- .add_vector("y", 3, my_vec_gen(4.0))
- .add_vector("z", 3, my_vec_gen(0.25))
- .add_matrix("x", 3, "y", 1, my_vec_gen(5.0))
- .add_matrix("x", 1, "y", 3, my_vec_gen(6.0))
- .add_matrix("x", 3, "y", 3, my_vec_gen(1.5))
- .add_matrix("x", 3, "z", 3, my_vec_gen(2.5))
- .add_cube("x", 3, "y", 3, "z", 3, my_vec_gen(-4.0))
- .add("mix_x3zm", spec({x(3),z({"c","d"})}, MyVecSeq(0.5)))
- .add("mix_y3zm", spec({y(3),z({"c","d"})}, MyVecSeq(3.5)))
- .add("mix_x3zm_f", spec(float_cells({x(3),z({"c","d"})}), MyVecSeq(0.5)))
- .add("mix_y3zm_f", spec(float_cells({y(3),z({"c","d"})}), MyVecSeq(3.5)))
- .add("mix_x3y3zm", spec({x(3),y(3),z({"c","d"})}, MyVecSeq(0.0)))
+ .add_variants("x3", GenSpec().idx("x", 3).seq_bias(2.0))
+ .add_variants("x3$2", GenSpec().idx("x", 3).seq_bias(13.25))
+ .add_variants("y3", GenSpec().idx("y", 3).seq_bias(4.0))
+ .add_variants("z3", GenSpec().idx("z", 3).seq_bias(0.25))
+ .add_variants("x3y3", GenSpec().idx("x", 3).idx("y", 3).seq_bias(5.0))
+ .add_variants("x1y3", GenSpec().idx("x", 1).idx("y", 3).seq_bias(6.0))
+ .add_variants("x3y1", GenSpec().idx("x", 3).idx("y", 1).seq_bias(1.5))
+ .add_variants("x3z3", GenSpec().idx("x", 3).idx("z", 3).seq_bias(2.5))
+ .add_variants("x3y3z3", GenSpec().idx("x", 3).idx("y", 3).idx("z", 3).seq_bias(-4.0))
+ .add("mix_x3zm", GenSpec().idx("x", 3).map("z", {"c","d"}).seq_bias(0.5).gen())
+ .add("mix_y3zm", GenSpec().idx("y", 3).map("z", {"c","d"}).seq_bias(3.5).gen())
+ .add("mix_x3zm_f", GenSpec().idx("x", 3).map("z", {"c","d"}).cells_float().seq_bias(0.5).gen())
+ .add("mix_y3zm_f", GenSpec().idx("y", 3).map("z", {"c","d"}).cells_float().seq_bias(3.5).gen())
+ .add("mix_x3y3zm", GenSpec().idx("x", 3).idx("y", 3).map("z", {"c","d"}).seq_bias(0.0).gen())
;
}
@@ -101,35 +92,35 @@ TEST(MixedInnerProduct, use_dense_optimizers_when_possible) {
TEST(MixedInnerProduct, trigger_optimizer_when_possible) {
assert_mixed_optimized("reduce(x3 * mix_x3zm,sum,x)");
- assert_mixed_optimized("reduce(x3f * mix_x3zm,sum,x)");
+ assert_mixed_optimized("reduce(x3_f * mix_x3zm,sum,x)");
assert_mixed_optimized("reduce(x3 * mix_x3zm_f,sum,x)");
- assert_mixed_optimized("reduce(x3f * mix_x3zm_f,sum,x)");
+ assert_mixed_optimized("reduce(x3_f * mix_x3zm_f,sum,x)");
assert_mixed_optimized("reduce(x3$2 * mix_x3zm,sum,x)");
- assert_mixed_optimized("reduce(x3f$2 * mix_x3zm,sum,x)");
+ assert_mixed_optimized("reduce(x3$2_f * mix_x3zm,sum,x)");
assert_mixed_optimized("reduce(y3 * mix_y3zm,sum,y)");
- assert_mixed_optimized("reduce(y3f * mix_y3zm,sum,y)");
+ assert_mixed_optimized("reduce(y3_f * mix_y3zm,sum,y)");
assert_mixed_optimized("reduce(y3 * mix_y3zm_f,sum,y)");
- assert_mixed_optimized("reduce(y3f * mix_y3zm_f,sum,y)");
+ assert_mixed_optimized("reduce(y3_f * mix_y3zm_f,sum,y)");
assert_mixed_optimized("reduce(x3y1 * mix_x3zm,sum,x)");
- assert_mixed_optimized("reduce(x3y1f * mix_x3zm,sum,x)");
+ assert_mixed_optimized("reduce(x3y1_f * mix_x3zm,sum,x)");
assert_mixed_optimized("reduce(x3y1 * mix_x3zm,sum,x,y)");
- assert_mixed_optimized("reduce(x3y1f * mix_x3zm,sum,x,y)");
+ assert_mixed_optimized("reduce(x3y1_f * mix_x3zm,sum,x,y)");
assert_mixed_optimized("reduce(x1y3 * mix_y3zm,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * mix_y3zm,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * mix_y3zm,sum,y)");
assert_mixed_optimized("reduce(x1y3 * x1y3,sum,y)");
- assert_mixed_optimized("reduce(x1y3 * x1y3f,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * x1y3,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * x1y3f,sum,y)");
+ assert_mixed_optimized("reduce(x1y3 * x1y3_f,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * x1y3,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * x1y3_f,sum,y)");
assert_mixed_optimized("reduce(x1y3 * mix_y3zm,sum,y)");
- assert_mixed_optimized("reduce(x1y3f * mix_y3zm,sum,y)");
+ assert_mixed_optimized("reduce(x1y3_f * mix_y3zm,sum,y)");
assert_mixed_optimized("reduce(mix_x3zm * x3,sum,x)");
- assert_mixed_optimized("reduce(mix_x3zm * x3f,sum,x)");
+ assert_mixed_optimized("reduce(mix_x3zm * x3_f,sum,x)");
assert_mixed_optimized("reduce(mix_x3zm * x3y1,sum,x)");
- assert_mixed_optimized("reduce(mix_x3zm * x3y1f,sum,x)");
+ assert_mixed_optimized("reduce(mix_x3zm * x3y1_f,sum,x)");
assert_mixed_optimized("reduce(mix_y3zm * y3,sum,y)");
- assert_mixed_optimized("reduce(mix_y3zm * y3f,sum,y)");
+ assert_mixed_optimized("reduce(mix_y3zm * y3_f,sum,y)");
assert_mixed_optimized("reduce(mix_y3zm * x1y3,sum,y)");
- assert_mixed_optimized("reduce(mix_y3zm * x1y3f,sum,y)");
+ assert_mixed_optimized("reduce(mix_y3zm * x1y3_f,sum,y)");
}
TEST(MixedInnerProduct, should_not_trigger_optimizer_for_other_cases) {
diff --git a/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp b/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
index 3caebea7298..45e885fac33 100644
--- a/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/mixed_map_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib;
@@ -13,14 +13,18 @@ using namespace vespalib::eval::tensor_function;
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
+TensorSpec sparse_spec = GenSpec().map("x", {"a"}).gen();
+TensorSpec mixed_spec = GenSpec().map("x", {"a"}).idx("y", 5).gen();
+
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
.add("a", spec(1.5))
.add("b", spec(2.5))
- .add("sparse", spec({x({"a"})}, N()))
- .add("mixed", spec({x({"a"}),y(5)}, N()))
- .add_mutable("@sparse", spec({x({"a"})}, N()))
- .add_mutable("@mixed", spec({x({"a"}),y(5)}, N()))
+ .add("sparse", sparse_spec)
+ .add("mixed", mixed_spec)
+ .add_mutable("@sparse", sparse_spec)
+ .add_mutable("@mixed", mixed_spec)
.add_matrix("x", 5, "y", 3);
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp b/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
index 9c891adf179..02e13fcbef3 100644
--- a/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
@@ -4,7 +4,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/mixed_simple_join_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -43,26 +43,28 @@ std::ostream &operator<<(std::ostream &os, Overlap overlap)
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
+
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
.add("a", spec(1.5))
.add("b", spec(2.5))
- .add("sparse", spec({x({"a", "b", "c"})}, N()))
- .add("mixed", spec({x({"a", "b", "c"}),y(5),z(3)}, N()))
- .add("empty_mixed", spec({x({}),y(5),z(3)}, N()))
- .add_mutable("@mixed", spec({x({"a", "b", "c"}),y(5),z(3)}, N()))
- .add_cube("a", 1, "b", 1, "c", 1)
- .add_cube("x", 1, "y", 1, "z", 1)
- .add_cube("x", 3, "y", 5, "z", 3)
- .add_vector("z", 3)
- .add_dense({{"c", 5}, {"d", 1}})
- .add_dense({{"b", 1}, {"c", 5}})
- .add_matrix("x", 3, "y", 5, [](size_t idx) noexcept { return double((idx * 2) + 3); })
- .add_matrix("x", 3, "y", 5, [](size_t idx) noexcept { return double((idx * 3) + 2); })
- .add_vector("y", 5, [](size_t idx) noexcept { return double((idx * 2) + 3); })
- .add_vector("y", 5, [](size_t idx) noexcept { return double((idx * 3) + 2); })
- .add_matrix("y", 5, "z", 3, [](size_t idx) noexcept { return double((idx * 2) + 3); })
- .add_matrix("y", 5, "z", 3, [](size_t idx) noexcept { return double((idx * 3) + 2); });
+ .add("sparse", GenSpec().map("x", {"a", "b", "c"}).gen())
+ .add("mixed", GenSpec().map("x", {"a", "b", "c"}).idx("y", 5).idx("z", 3).gen())
+ .add("empty_mixed", GenSpec().map("x", {}).idx("y", 5).idx("z", 3).gen())
+ .add_mutable("@mixed", GenSpec().map("x", {"a", "b", "c"}).idx("y", 5).idx("z", 3).gen())
+ .add_variants("a1b1c1", GenSpec().idx("a", 1).idx("b", 1).idx("c", 1))
+ .add_variants("x1y1z1", GenSpec().idx("x", 1).idx("y", 1).idx("z", 1))
+ .add_variants("x3y5z3", GenSpec().idx("x", 3).idx("y", 5).idx("z", 3))
+ .add_variants("z3", GenSpec().idx("z", 3))
+ .add_variants("c5d1", GenSpec().idx("c", 5).idx("d", 1))
+ .add_variants("b1c5", GenSpec().idx("b", 1).idx("c", 5))
+ .add_variants("x3y5", GenSpec().idx("x", 3).idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 2) + 3); }))
+ .add_variants("x3y5$2", GenSpec().idx("x", 3).idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 3) + 2); }))
+ .add_variants("y5", GenSpec().idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 2) + 3); }))
+ .add_variants("y5$2", GenSpec().idx("y", 5).seq([](size_t idx) noexcept { return double((idx * 3) + 2); }))
+ .add_variants("y5z3", GenSpec().idx("y", 5).idx("z", 3).seq([](size_t idx) noexcept { return double((idx * 2) + 3); }))
+ .add_variants("y5z3$2", GenSpec().idx("y", 5).idx("z", 3).seq([](size_t idx) noexcept { return double((idx * 3) + 2); }));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -149,12 +151,12 @@ vespalib::string adjust_param(const vespalib::string &str, bool float_cells, boo
if (mut_cells) {
result = "@" + result;
}
- if (float_cells) {
- result += "f";
- }
if (is_rhs) {
result += "$2";
}
+ if (float_cells) {
+ result += "_f";
+ }
return result;
}
diff --git a/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp b/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
index b4bf9ec5ef6..cceb18bfea6 100644
--- a/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
+++ b/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib::eval::operation;
@@ -14,13 +14,15 @@ using namespace vespalib::eval;
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
+
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
.add("a", spec(1.5))
.add("b", spec(2.5))
- .add("sparse", spec({x({"a","b"})}, N()))
- .add("mixed", spec({x({"a"}),y(5)}, N()))
- .add_matrix("x", 5, "y", 3);
+ .add("sparse", GenSpec().map("x", {"a","b"}).gen())
+ .add("mixed", GenSpec().map("x", {"a"}).idx("y", 5).gen())
+ .add_variants("x5y3", GenSpec().idx("x", 5).idx("y", 3));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -55,9 +57,9 @@ TEST(PowAsMapTest, squared_dense_tensor_is_optimized) {
verify_optimized("pow(x5y3,2.0)", Square::f);
verify_optimized("join(x5y3,2.0,f(x,y)(x^y))", Square::f);
verify_optimized("join(x5y3,2.0,f(x,y)(pow(x,y)))", Square::f);
- verify_optimized("join(x5y3f,2.0,f(x,y)(pow(x,y)))", Square::f);
+ verify_optimized("join(x5y3_f,2.0,f(x,y)(pow(x,y)))", Square::f);
verify_optimized("join(@x5y3,2.0,f(x,y)(pow(x,y)))", Square::f, true);
- verify_optimized("join(@x5y3f,2.0,f(x,y)(pow(x,y)))", Square::f, true);
+ verify_optimized("join(@x5y3_f,2.0,f(x,y)(pow(x,y)))", Square::f, true);
}
TEST(PowAsMapTest, cubed_dense_tensor_is_optimized) {
@@ -65,9 +67,9 @@ TEST(PowAsMapTest, cubed_dense_tensor_is_optimized) {
verify_optimized("pow(x5y3,3.0)", Cube::f);
verify_optimized("join(x5y3,3.0,f(x,y)(x^y))", Cube::f);
verify_optimized("join(x5y3,3.0,f(x,y)(pow(x,y)))", Cube::f);
- verify_optimized("join(x5y3f,3.0,f(x,y)(pow(x,y)))", Cube::f);
+ verify_optimized("join(x5y3_f,3.0,f(x,y)(pow(x,y)))", Cube::f);
verify_optimized("join(@x5y3,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
- verify_optimized("join(@x5y3f,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
+ verify_optimized("join(@x5y3_f,3.0,f(x,y)(pow(x,y)))", Cube::f, true);
}
TEST(PowAsMapTest, hypercubed_dense_tensor_is_not_optimized) {
diff --git a/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp b/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp
index 4de7e85074d..794725a8257 100644
--- a/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp
+++ b/eval/src/tests/instruction/remove_trivial_dimension_optimizer/remove_trivial_dimension_optimizer_test.cpp
@@ -4,7 +4,7 @@
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/instruction/replace_type_function.h>
#include <vespa/eval/instruction/fast_rename_optimizer.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/test/eval_fixture.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -19,10 +19,10 @@ const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("x1y5z1", spec({x(1),y(5),z(1)}, N()))
- .add("x1y5z1f", spec(float_cells({x(1),y(5),z(1)}), N()))
- .add("x1y1z1", spec({x(1),y(1),z(1)}, N()))
- .add("x1y5z_m", spec({x(1),y(5),z({"a"})}, N()));
+ .add("x1y5z1", GenSpec().idx("x", 1).idx("y", 5).idx("z", 1).gen())
+ .add("x1y5z1f", GenSpec().idx("x", 1).idx("y", 5).idx("z", 1).cells_float().gen())
+ .add("x1y1z1", GenSpec().idx("x", 1).idx("y", 1).idx("z", 1).gen())
+ .add("x1y5z_m", GenSpec().idx("x", 1).idx("y", 5).map("z", {"a"}).gen());
}
EvalFixture::ParamRepo param_repo = make_params();
diff --git a/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp b/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
index 4b89f30d879..616649e914b 100644
--- a/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
+++ b/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/test/eval_fixture.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/instruction/sum_max_dot_product_function.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -13,12 +13,6 @@ using namespace vespalib::eval::test;
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
-struct MyVecSeq : Sequence {
- double bias;
- double operator[](size_t i) const override { return (i + bias); }
- MyVecSeq(double cellBias) : bias(cellBias) {}
-};
-
//-----------------------------------------------------------------------------
vespalib::string main_expr = "reduce(reduce(reduce(a*b,sum,z),max,y),sum,x)";
@@ -34,7 +28,7 @@ void assert_optimized(const TensorSpec &a, const TensorSpec &b, size_t dp_size)
auto info = fast_fixture.find_all<SumMaxDotProductFunction>();
ASSERT_EQ(info.size(), 1u);
EXPECT_TRUE(info[0]->result_is_mutable());
- EXPECT_EQUAL(info[0]->dp_size(), dp_size);
+ EXPECT_EQ(info[0]->dp_size(), dp_size);
}
void assert_not_optimized(const TensorSpec &a, const TensorSpec &b, const vespalib::string &expr = main_expr) {
@@ -51,10 +45,23 @@ void assert_not_optimized(const TensorSpec &a, const TensorSpec &b, const vespal
//-----------------------------------------------------------------------------
-auto query = spec(float_cells({x({"0", "1", "2"}),z(5)}), MyVecSeq(0.5));
-auto document = spec(float_cells({y({"0", "1", "2", "3", "4", "5"}),z(5)}), MyVecSeq(2.5));
-auto empty_query = spec(float_cells({x({}),z(5)}), MyVecSeq(0.5));
-auto empty_document = spec(float_cells({y({}),z(5)}), MyVecSeq(2.5));
+GenSpec QueGen(size_t x_size, size_t z_size) { return GenSpec().cells_float().map("x", x_size).idx("z", z_size).seq_bias(0.5); }
+
+GenSpec DocGen(size_t y_size, size_t z_size) { return GenSpec().cells_float().map("y", y_size).idx("z", z_size).seq_bias(2.5); }
+
+GenSpec Que() { return QueGen(3, 5); }
+GenSpec Doc() { return DocGen(6, 5); }
+
+GenSpec QueX0() { return QueGen(0, 5); }
+GenSpec DocX0() { return DocGen(0, 5); }
+
+GenSpec QueZ1() { return QueGen(3, 1); }
+GenSpec DocZ1() { return DocGen(6, 1); }
+
+auto query = Que().gen();
+auto document = Doc().gen();
+auto empty_query = QueX0().gen();
+auto empty_document = DocX0().gen();
TEST(SumMaxDotProduct, expressions_can_be_optimized)
{
@@ -66,24 +73,24 @@ TEST(SumMaxDotProduct, expressions_can_be_optimized)
}
TEST(SumMaxDotProduct, double_cells_are_not_optimized) {
- auto double_query = spec({x({"0", "1", "2"}),z(5)}, MyVecSeq(0.5));
- auto double_document = spec({y({"0", "1", "2", "3", "4", "5"}),z(5)}, MyVecSeq(2.5));
+ auto double_query = Que().cells_double().gen();
+ auto double_document = Doc().cells_double().gen();
assert_not_optimized(query, double_document);
assert_not_optimized(double_query, document);
assert_not_optimized(double_query, double_document);
}
TEST(SumMaxDotProduct, trivial_dot_product_is_not_optimized) {
- auto trivial_query = spec(float_cells({x({"0", "1", "2"}),z(1)}), MyVecSeq(0.5));
- auto trivial_document = spec(float_cells({y({"0", "1", "2", "3", "4", "5"}),z(1)}), MyVecSeq(2.5));
+ auto trivial_query = QueZ1().gen();
+ auto trivial_document = DocZ1().gen();
assert_not_optimized(trivial_query, trivial_document);
}
TEST(SumMaxDotProduct, additional_dimensions_are_not_optimized) {
- auto extra_sparse_query = spec(float_cells({Domain("a", {"0"}),x({"0", "1", "2"}),z(5)}), MyVecSeq(0.5));
- auto extra_dense_query = spec(float_cells({Domain("a", 1),x({"0", "1", "2"}),z(5)}), MyVecSeq(0.5));
- auto extra_sparse_document = spec(float_cells({Domain("a", {"0"}),y({"0", "1", "2", "3", "4", "5"}),z(5)}), MyVecSeq(2.5));
- auto extra_dense_document = spec(float_cells({Domain("a", 1),y({"0", "1", "2", "3", "4", "5"}),z(5)}), MyVecSeq(2.5));
+ auto extra_sparse_query = Que().map("a", 1).gen();
+ auto extra_dense_query = Que().idx("a", 1).gen();
+ auto extra_sparse_document = Doc().map("a", 1).gen();
+ auto extra_dense_document = Doc().idx("a", 1).gen();
vespalib::string extra_sum_expr = "reduce(reduce(reduce(a*b,sum,z),max,y),sum,a,x)";
vespalib::string extra_max_expr = "reduce(reduce(reduce(a*b,sum,z),max,a,y),sum,x)";
assert_not_optimized(extra_sparse_query, document);
@@ -97,8 +104,8 @@ TEST(SumMaxDotProduct, additional_dimensions_are_not_optimized) {
}
TEST(SumMaxDotProduct, more_dense_variants_are_not_optimized) {
- auto dense_query = spec(float_cells({x(3),z(5)}), MyVecSeq(0.5));
- auto dense_document = spec(float_cells({y(5),z(5)}), MyVecSeq(2.5));
+ auto dense_query = GenSpec().cells_float().idx("x", 3).idx("z", 5).seq_bias(0.5).gen();
+ auto dense_document = GenSpec().cells_float().idx("y", 5).idx("z", 5).seq_bias(2.5).gen();
assert_not_optimized(dense_query, document);
assert_not_optimized(query, dense_document);
assert_not_optimized(dense_query, dense_document);
diff --git a/eval/src/tests/streamed/value/streamed_value_test.cpp b/eval/src/tests/streamed/value/streamed_value_test.cpp
index 2f91c3b9390..51098427295 100644
--- a/eval/src/tests/streamed/value/streamed_value_test.cpp
+++ b/eval/src/tests/streamed/value/streamed_value_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/eval/eval/value_codec.h>
#include <vespa/eval/instruction/generic_join.h>
#include <vespa/eval/eval/interpreted_function.h>
-#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -23,45 +23,36 @@ using Handle = SharedStringRepo::Handle;
vespalib::string as_str(string_id label) { return Handle::string_from_id(label); }
-std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
+GenSpec G() { return GenSpec().cells_float(); }
+
+std::vector<GenSpec> layouts = {
+ G(),
+ G().idx("x", 3),
+ G().idx("x", 3).idx("y", 5),
+ G().idx("x", 3).idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b","c"}).map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
};
-std::vector<Layout> join_layouts = {
- {}, {},
- {x(5)}, {x(5)},
- {x(5)}, {y(5)},
- {x(5)}, {x(5),y(5)},
- {y(3)}, {x(2),z(3)},
- {x(3),y(5)}, {y(5),z(7)},
- float_cells({x(3),y(5)}), {y(5),z(7)},
- {x(3),y(5)}, float_cells({y(5),z(7)}),
- float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {y({"foo","bar","baz"})},
- {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
- {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
- float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})})
+std::vector<GenSpec> join_layouts = {
+ G(), G(),
+ G().idx("x", 5), G().idx("x", 5),
+ G().idx("x", 5), G().idx("y", 5),
+ G().idx("x", 5), G().idx("x", 5).idx("y", 5),
+ G().idx("y", 3), G().idx("x", 2).idx("z", 3),
+ G().idx("x", 3).idx("y", 5), G().idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
+ G().map("x", {"a","b","c"}), G().map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}), G().map("y", {"foo","bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5), G().idx("y", 5).map("z", {"i","j","k","l"})
+
};
TensorSpec streamed_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_t function) {
@@ -76,20 +67,22 @@ TensorSpec streamed_value_join(const TensorSpec &a, const TensorSpec &b, join_fu
TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
- TensorSpec actual = spec_from_value(*value);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+ std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
+ TensorSpec actual = spec_from_value(*value);
+ EXPECT_EQ(actual, expect);
+ }
}
}
TEST(StreamedValueTest, streamed_values_can_be_copied) {
for (const auto &layout: layouts) {
- TensorSpec expect = spec(layout, N());
- std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
- std::unique_ptr<Value> copy = StreamedValueBuilderFactory::get().copy(*value);
- TensorSpec actual = spec_from_value(*copy);
- EXPECT_EQ(actual, expect);
+ for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+ std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
+ std::unique_ptr<Value> copy = StreamedValueBuilderFactory::get().copy(*value);
+ TensorSpec actual = spec_from_value(*copy);
+ EXPECT_EQ(actual, expect);
+ }
}
}
@@ -126,16 +119,22 @@ TEST(StreamedValueTest, streamed_value_can_be_built_and_inspected) {
EXPECT_EQ(result["bb"], 3);
}
+GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
+
TEST(StreamedValueTest, new_generic_join_works_for_streamed_values) {
ASSERT_TRUE((join_layouts.size() % 2) == 0);
for (size_t i = 0; i < join_layouts.size(); i += 2) {
- TensorSpec lhs = spec(join_layouts[i], Div16(N()));
- TensorSpec rhs = spec(join_layouts[i + 1], Div16(N()));
- for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Max::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto expect = ReferenceOperations::join(lhs, rhs, fun);
- auto actual = streamed_value_join(lhs, rhs, fun);
- EXPECT_EQ(actual, expect);
+ const auto l = join_layouts[i].seq(N_16ths);
+ const auto r = join_layouts[i + 1].seq(N_16ths);
+ for (TensorSpec lhs : { l.gen(), l.cpy().cells_double().gen() }) {
+ for (TensorSpec rhs : { r.gen(), r.cpy().cells_double().gen() }) {
+ for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Max::f}) {
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = ReferenceOperations::join(lhs, rhs, fun);
+ auto actual = streamed_value_join(lhs, rhs, fun);
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
}
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.cpp b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
index 9b1789fbeea..966954b9026 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
@@ -184,6 +184,23 @@ EvalFixture::ParamRepo::add_dense(const std::vector<std::pair<vespalib::string,
return *this;
}
+// produce 4 variants: float/double * mutable/const
+EvalFixture::ParamRepo &
+EvalFixture::ParamRepo::add_variants(const vespalib::string &name_base,
+ const GenSpec &spec)
+{
+ auto name_f = name_base + "_f";
+ auto name_m = "@" + name_base;
+ auto name_m_f = "@" + name_base + "_f";
+ auto dbl_ts = spec.cpy().cells_double().gen();
+ auto flt_ts = spec.cpy().cells_float().gen();
+ add(name_base, dbl_ts);
+ add(name_f, flt_ts);
+ add_mutable(name_m, dbl_ts);
+ add_mutable(name_m_f, flt_ts);
+ return *this;
+}
+
void
EvalFixture::detect_param_tampering(const ParamRepo &param_repo, bool allow_mutable) const
{
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.h b/eval/src/vespa/eval/eval/test/eval_fixture.h
index dc49cf7e4dc..44adaca3298 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.h
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.h
@@ -10,6 +10,7 @@
#include <vespa/vespalib/util/stash.h>
#include <set>
#include <functional>
+#include "gen_spec.h"
namespace vespalib::eval::test {
@@ -40,6 +41,10 @@ public:
ParamRepo &add_matrix(const char *d1, size_t s1, const char *d2, size_t s2, gen_fun_t gen = gen_N);
ParamRepo &add_cube(const char *d1, size_t s1, const char *d2, size_t s2, const char *d3, size_t s3, gen_fun_t gen = gen_N);
ParamRepo &add_dense(const std::vector<std::pair<vespalib::string, size_t> > &dims, gen_fun_t gen = gen_N);
+
+ // produce 4 variants: float/double * mutable/const
+ ParamRepo &add_variants(const vespalib::string &name_base,
+ const GenSpec &spec);
~ParamRepo() {}
};
diff --git a/eval/src/vespa/eval/eval/test/param_variants.h b/eval/src/vespa/eval/eval/test/param_variants.h
deleted file mode 100644
index 41a43ebca08..00000000000
--- a/eval/src/vespa/eval/eval/test/param_variants.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#include "eval_fixture.h"
-#include "tensor_model.hpp"
-
-namespace vespalib::eval::test {
-
-// for testing of optimizers / tensor functions
-// we produce the same param three times:
-// as-is, with float cells, and tagged as mutable.
-void add_variants(EvalFixture::ParamRepo &repo,
- const vespalib::string &name_base,
- const Layout &base_layout,
- const Sequence &seq)
-{
- auto name_f = name_base + "_f";
- auto name_m = "@" + name_base;
- auto name_m_f = "@" + name_base + "_f";
- repo.add(name_base, spec(base_layout, seq));
- repo.add(name_f, spec(float_cells(base_layout), seq));
- repo.add_mutable(name_m, spec(base_layout, seq));
- repo.add_mutable(name_m_f, spec(float_cells(base_layout), seq));
-}
-
-} // namespace