author     Håvard Pettersen <havardpe@oath.com>   2019-04-10 09:50:06 +0000
committer  Håvard Pettersen <havardpe@oath.com>   2019-04-25 12:24:03 +0000
commit     974a0f2d3c588e35165177ac556e08cfdcc0e26f (patch)
tree       46551c364a6cd1ee3cf88292f30537870f2956b4 /eval/src/tests
parent     b186ca34ab5df612a463334772788752e1deec3c (diff)
avoid abstract value types
remove basic value type ANY
remove concept of tensors with unknown dimensions
disallow value types with unbound indexed dimensions
remove predicates talking about abstract types
type of unknown values is now ERROR (was ANY)
require that overlapping indexed dimensions are of equal size
type unification now requires types to be equal ('if' expressions)
creating a tensor type without dimensions now gives a double type
make rank feature setup fail on invalid types (query/attribute)
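
The tightened semantics can be observed directly through the ValueType API exercised by the updated tests. A minimal standalone sketch (the include path and main() harness are assumptions; the calls mirror those used in value_type_test.cpp below):

#include <vespa/eval/eval/value_type.h>  // assumed include path
#include <cassert>

using vespalib::eval::ValueType;

int main() {
    // 'any' is no longer a recognized type spec; it now parses as ERROR
    assert(ValueType::from_spec("any").is_error());
    // unbound indexed dimensions such as z[] are disallowed
    assert(ValueType::from_spec("tensor(z[])").is_error());
    // a tensor type without dimensions is now the double type
    assert(ValueType::from_spec("tensor()").is_double());
    // overlapping indexed dimensions must have equal size when joined
    ValueType x3 = ValueType::from_spec("tensor(x[3])");
    ValueType x5 = ValueType::from_spec("tensor(x[5])");
    assert(ValueType::join(x3, x5).is_error());
    assert(ValueType::join(x5, x5) == x5);
}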
Diffstat (limited to 'eval/src/tests')
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp | 84
-rw-r--r--  eval/src/tests/eval/tensor_function/tensor_function_test.cpp | 28
-rw-r--r--  eval/src/tests/eval/value_type/value_type_test.cpp | 261
-rw-r--r--  eval/src/tests/tensor/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp | 5
-rw-r--r--  eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp | 56
-rw-r--r--  eval/src/tests/tensor/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp | 5
-rw-r--r--  eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp | 10
-rw-r--r--  eval/src/tests/tensor/dense_inplace_map_function/dense_inplace_map_function_test.cpp | 5
-rw-r--r--  eval/src/tests/tensor/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp | 3
-rw-r--r--  eval/src/tests/tensor/dense_xw_product_function/dense_xw_product_function_test.cpp | 17
-rw-r--r--  eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp | 54
11 files changed, 120 insertions, 408 deletions
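
'if' expressions now use strict type unification: as the value_type_test.cpp changes below show, ValueType::either yields the shared type only when both alternatives are equal, and ERROR otherwise (previously tensor(a[2]) and tensor(a[3]) unified to the abstract tensor(a[])). A hedged sketch of that rule, again assuming a standalone harness:

#include <vespa/eval/eval/value_type.h>  // assumed include path
#include <cassert>

using vespalib::eval::ValueType;

int main() {
    ValueType a2 = ValueType::from_spec("tensor(a[2])");
    ValueType a3 = ValueType::from_spec("tensor(a[3])");
    assert(ValueType::either(a2, a2) == a2);      // equal types unify
    assert(ValueType::either(a2, a3).is_error()); // unequal types -> ERROR
}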
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index ce01d3f78c0..c18470887b2 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -74,47 +74,31 @@ TEST("require that leaf constants have appropriate type") {
}
TEST("require that input parameters preserve their type") {
- TEST_DO(verify("any", "any"));
TEST_DO(verify("error", "error"));
TEST_DO(verify("double", "double"));
- TEST_DO(verify("tensor", "tensor"));
- TEST_DO(verify("tensor(x{},y[10],z[])", "tensor(x{},y[10],z[])"));
+ TEST_DO(verify("tensor", "double"));
+ TEST_DO(verify("tensor(x{},y[10],z[5])", "tensor(x{},y[10],z[5])"));
}
TEST("require that if resolves to the appropriate type") {
TEST_DO(verify("if(error,1,2)", "error"));
TEST_DO(verify("if(1,error,2)", "error"));
TEST_DO(verify("if(1,2,error)", "error"));
- TEST_DO(verify("if(any,1,2)", "double"));
TEST_DO(verify("if(double,1,2)", "double"));
- TEST_DO(verify("if(tensor,1,2)", "double"));
- TEST_DO(verify("if(double,tensor,tensor)", "tensor"));
- TEST_DO(verify("if(double,any,any)", "any"));
- TEST_DO(verify("if(double,tensor(a[2]),tensor(a[2]))", "tensor(a[2])"));
- TEST_DO(verify("if(double,tensor(a[2]),tensor(a[3]))", "tensor(a[])"));
- TEST_DO(verify("if(double,tensor(a[2]),tensor(a[]))", "tensor(a[])"));
- TEST_DO(verify("if(double,tensor(a[2]),tensor(a{}))", "tensor"));
+ TEST_DO(verify("if(tensor(x[10]),1,2)", "double"));
TEST_DO(verify("if(double,tensor(a{}),tensor(a{}))", "tensor(a{})"));
- TEST_DO(verify("if(double,tensor(a{}),tensor(b{}))", "tensor"));
- TEST_DO(verify("if(double,tensor(a{}),tensor)", "tensor"));
- TEST_DO(verify("if(double,tensor,tensor(a{}))", "tensor"));
- TEST_DO(verify("if(double,tensor,any)", "any"));
- TEST_DO(verify("if(double,any,tensor)", "any"));
- TEST_DO(verify("if(double,tensor,double)", "any"));
- TEST_DO(verify("if(double,double,tensor)", "any"));
- TEST_DO(verify("if(double,double,any)", "any"));
- TEST_DO(verify("if(double,any,double)", "any"));
+ TEST_DO(verify("if(double,tensor(a[2]),tensor(a[2]))", "tensor(a[2])"));
+ TEST_DO(verify("if(double,tensor(a[2]),tensor(a[3]))", "error"));
+ TEST_DO(verify("if(double,tensor(a[2]),tensor(a{}))", "error"));
+ TEST_DO(verify("if(double,tensor(a{}),tensor(b{}))", "error"));
+ TEST_DO(verify("if(double,tensor(a{}),double)", "error"));
}
TEST("require that reduce resolves correct type") {
TEST_DO(verify("reduce(error,sum)", "error"));
- TEST_DO(verify("reduce(tensor,sum)", "double"));
TEST_DO(verify("reduce(tensor(x{}),sum)", "double"));
TEST_DO(verify("reduce(double,sum)", "double"));
- TEST_DO(verify("reduce(any,sum)", "any"));
TEST_DO(verify("reduce(error,sum,x)", "error"));
- TEST_DO(verify("reduce(tensor,sum,x)", "any"));
- TEST_DO(verify("reduce(any,sum,x)", "any"));
TEST_DO(verify("reduce(double,sum,x)", "error"));
TEST_DO(verify("reduce(tensor(x{},y{},z{}),sum,y)", "tensor(x{},z{})"));
TEST_DO(verify("reduce(tensor(x{},y{},z{}),sum,x,z)", "tensor(y{})"));
@@ -125,18 +109,16 @@ TEST("require that reduce resolves correct type") {
TEST("require that rename resolves correct type") {
TEST_DO(verify("rename(error,x,y)", "error"));
- TEST_DO(verify("rename(tensor,x,y)", "any"));
TEST_DO(verify("rename(double,x,y)", "error"));
- TEST_DO(verify("rename(any,x,y)", "any"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),a,b)", "error"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),x,y)", "error"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),x,x)", "tensor(x{},y[],z[5])"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),x,w)", "tensor(w{},y[],z[5])"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),y,w)", "tensor(x{},w[],z[5])"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),z,w)", "tensor(x{},y[],w[5])"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),(x,y,z),(z,y,x))", "tensor(z{},y[],x[5])"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),(x,z),(z,x))", "tensor(z{},y[],x[5])"));
- TEST_DO(verify("rename(tensor(x{},y[],z[5]),(x,y,z),(a,b,c))", "tensor(a{},b[],c[5])"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),a,b)", "error"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),x,y)", "error"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),x,x)", "tensor(x{},y[1],z[5])"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),x,w)", "tensor(w{},y[1],z[5])"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),y,w)", "tensor(x{},w[1],z[5])"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),z,w)", "tensor(x{},y[1],w[5])"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),(x,y,z),(z,y,x))", "tensor(z{},y[1],x[5])"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),(x,z),(z,x))", "tensor(z{},y[1],x[5])"));
+ TEST_DO(verify("rename(tensor(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor(a{},b[1],c[5])"));
}
vespalib::string strfmt(const char *pattern, const char *a) {
@@ -149,37 +131,24 @@ vespalib::string strfmt(const char *pattern, const char *a, const char *b) {
void verify_op1(const char *pattern) {
TEST_DO(verify(strfmt(pattern, "error"), "error"));
- TEST_DO(verify(strfmt(pattern, "any"), "any"));
TEST_DO(verify(strfmt(pattern, "double"), "double"));
- TEST_DO(verify(strfmt(pattern, "tensor"), "tensor"));
- TEST_DO(verify(strfmt(pattern, "tensor(x{},y[10],z[])"), "tensor(x{},y[10],z[])"));
+ TEST_DO(verify(strfmt(pattern, "tensor(x{},y[10],z[1])"), "tensor(x{},y[10],z[1])"));
}
void verify_op2(const char *pattern) {
TEST_DO(verify(strfmt(pattern, "error", "error"), "error"));
- TEST_DO(verify(strfmt(pattern, "any", "error"), "error"));
- TEST_DO(verify(strfmt(pattern, "error", "any"), "error"));
TEST_DO(verify(strfmt(pattern, "double", "error"), "error"));
TEST_DO(verify(strfmt(pattern, "error", "double"), "error"));
- TEST_DO(verify(strfmt(pattern, "tensor", "error"), "error"));
- TEST_DO(verify(strfmt(pattern, "error", "tensor"), "error"));
- TEST_DO(verify(strfmt(pattern, "any", "any"), "any"));
- TEST_DO(verify(strfmt(pattern, "any", "double"), "any"));
- TEST_DO(verify(strfmt(pattern, "double", "any"), "any"));
- TEST_DO(verify(strfmt(pattern, "any", "tensor"), "any"));
- TEST_DO(verify(strfmt(pattern, "tensor", "any"), "any"));
+ TEST_DO(verify(strfmt(pattern, "tensor(x{})", "error"), "error"));
+ TEST_DO(verify(strfmt(pattern, "error", "tensor(x{})"), "error"));
TEST_DO(verify(strfmt(pattern, "double", "double"), "double"));
- TEST_DO(verify(strfmt(pattern, "tensor", "double"), "tensor"));
- TEST_DO(verify(strfmt(pattern, "double", "tensor"), "tensor"));
TEST_DO(verify(strfmt(pattern, "tensor(x{})", "double"), "tensor(x{})"));
TEST_DO(verify(strfmt(pattern, "double", "tensor(x{})"), "tensor(x{})"));
- TEST_DO(verify(strfmt(pattern, "tensor", "tensor"), "any"));
TEST_DO(verify(strfmt(pattern, "tensor(x{})", "tensor(x{})"), "tensor(x{})"));
TEST_DO(verify(strfmt(pattern, "tensor(x{})", "tensor(y{})"), "tensor(x{},y{})"));
- TEST_DO(verify(strfmt(pattern, "tensor(x[3])", "tensor(x[5])"), "tensor(x[3])"));
- TEST_DO(verify(strfmt(pattern, "tensor(x[])", "tensor(x[5])"), "tensor(x[])"));
- TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor(x[3])"), "tensor(x[3])"));
- TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor(x[])"), "tensor(x[])"));
+ TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor(x[3])", "tensor(x[5])"), "error"));
+ TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor(x[3])"), "error"));
TEST_DO(verify(strfmt(pattern, "tensor(x{})", "tensor(x[5])"), "error"));
}
@@ -249,7 +218,8 @@ TEST("require that lambda tensor resolves correct type") {
TEST("require that tensor concat resolves correct type") {
TEST_DO(verify("concat(double,double,x)", "tensor(x[2])"));
TEST_DO(verify("concat(tensor(x[2]),tensor(x[3]),x)", "tensor(x[5])"));
- TEST_DO(verify("concat(tensor(x[2]),tensor(x[3]),y)", "tensor(x[2],y[2])"));
+ TEST_DO(verify("concat(tensor(x[2]),tensor(x[2]),y)", "tensor(x[2],y[2])"));
+ TEST_DO(verify("concat(tensor(x[2]),tensor(x[3]),y)", "error"));
TEST_DO(verify("concat(tensor(x[2]),tensor(x{}),x)", "error"));
TEST_DO(verify("concat(tensor(x[2]),tensor(y{}),x)", "tensor(x[3],y{})"));
}
@@ -258,7 +228,7 @@ TEST("require that double only expressions can be detected") {
Function plain_fun = Function::parse("1+2");
Function complex_fun = Function::parse("reduce(a,sum)");
NodeTypes plain_types(plain_fun, {});
- NodeTypes complex_types(complex_fun, {ValueType::tensor_type({})});
+ NodeTypes complex_types(complex_fun, {ValueType::tensor_type({{"x"}})});
EXPECT_TRUE(plain_types.get_type(plain_fun.root()).is_double());
EXPECT_TRUE(complex_types.get_type(complex_fun.root()).is_double());
EXPECT_TRUE(plain_types.all_types_are_double());
@@ -269,7 +239,7 @@ TEST("require that empty type repo works as expected") {
NodeTypes types;
Function function = Function::parse("1+2");
EXPECT_FALSE(function.has_error());
- EXPECT_TRUE(types.get_type(function.root()).is_any());
+ EXPECT_TRUE(types.get_type(function.root()).is_error());
EXPECT_FALSE(types.all_types_are_double());
}
diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
index 23ea1e8c13a..741b756e46f 100644
--- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
+++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
@@ -261,11 +261,16 @@ TEST("require that if_node result is mutable only when both children produce mut
const Node &cond = inject(DoubleValue::double_type(), 0, stash);
const Node &a = inject(ValueType::from_spec("tensor(x[2])"), 0, stash);
const Node &b = inject(ValueType::from_spec("tensor(x[3])"), 0, stash);
+ const Node &c = inject(ValueType::from_spec("tensor(x[5])"), 0, stash);
const Node &tmp = concat(a, b, "x", stash); // will be mutable
- const Node &if_con_con = if_node(cond, a, b, stash);
- const Node &if_mut_con = if_node(cond, tmp, b, stash);
- const Node &if_con_mut = if_node(cond, a, tmp, stash);
+ const Node &if_con_con = if_node(cond, c, c, stash);
+ const Node &if_mut_con = if_node(cond, tmp, c, stash);
+ const Node &if_con_mut = if_node(cond, c, tmp, stash);
const Node &if_mut_mut = if_node(cond, tmp, tmp, stash);
+ EXPECT_EQUAL(if_con_con.result_type(), c.result_type());
+ EXPECT_EQUAL(if_con_mut.result_type(), c.result_type());
+ EXPECT_EQUAL(if_mut_con.result_type(), c.result_type());
+ EXPECT_EQUAL(if_mut_mut.result_type(), c.result_type());
EXPECT_TRUE(!if_con_con.result_is_mutable());
EXPECT_TRUE(!if_mut_con.result_is_mutable());
EXPECT_TRUE(!if_con_mut.result_is_mutable());
@@ -277,21 +282,12 @@ TEST("require that if_node gets expected result type") {
const Node &a = inject(DoubleValue::double_type(), 0, stash);
const Node &b = inject(ValueType::from_spec("tensor(x[2])"), 0, stash);
const Node &c = inject(ValueType::from_spec("tensor(x[3])"), 0, stash);
- const Node &d = inject(ValueType::from_spec("tensor(x[])"), 0, stash);
- const Node &e = inject(ValueType::from_spec("tensor(y[3])"), 0, stash);
- const Node &f = inject(ValueType::from_spec("double"), 0, stash);
- const Node &g = inject(ValueType::from_spec("error"), 0, stash);
+ const Node &d = inject(ValueType::from_spec("error"), 0, stash);
const Node &if_same = if_node(a, b, b, stash);
- const Node &if_similar = if_node(a, b, c, stash);
- const Node &if_subtype = if_node(a, b, d, stash);
- const Node &if_different = if_node(a, b, e, stash);
- const Node &if_different_types = if_node(a, b, f, stash);
- const Node &if_with_error = if_node(a, b, g, stash);
+ const Node &if_different = if_node(a, b, c, stash);
+ const Node &if_with_error = if_node(a, b, d, stash);
EXPECT_EQUAL(if_same.result_type(), ValueType::from_spec("tensor(x[2])"));
- EXPECT_EQUAL(if_similar.result_type(), ValueType::from_spec("tensor(x[])"));
- EXPECT_EQUAL(if_subtype.result_type(), ValueType::from_spec("tensor(x[])"));
- EXPECT_EQUAL(if_different.result_type(), ValueType::from_spec("tensor"));
- EXPECT_EQUAL(if_different_types.result_type(), ValueType::from_spec("any"));
+ EXPECT_EQUAL(if_different.result_type(), ValueType::from_spec("error"));
EXPECT_EQUAL(if_with_error.result_type(), ValueType::from_spec("error"));
}
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index f7db7816fad..a755eac965f 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -10,12 +10,6 @@ using namespace vespalib::eval;
const size_t npos = ValueType::Dimension::npos;
-TEST("require that ANY value type can be created") {
- ValueType t = ValueType::any_type();
- EXPECT_TRUE(t.type() == ValueType::Type::ANY);
- EXPECT_EQUAL(t.dimensions().size(), 0u);
-}
-
TEST("require that ERROR value type can be created") {
ValueType t = ValueType::error_type();
EXPECT_TRUE(t.type() == ValueType::Type::ERROR);
@@ -61,10 +55,9 @@ TEST("require that dimension names can be obtained") {
TEST("require that dimension index can be obtained") {
EXPECT_EQUAL(ValueType::error_type().dimension_index("x"), ValueType::Dimension::npos);
- EXPECT_EQUAL(ValueType::any_type().dimension_index("x"), ValueType::Dimension::npos);
EXPECT_EQUAL(ValueType::double_type().dimension_index("x"), ValueType::Dimension::npos);
EXPECT_EQUAL(ValueType::tensor_type({}).dimension_index("x"), ValueType::Dimension::npos);
- auto my_type = ValueType::tensor_type({{"y", 10}, {"x"}, {"z", 0}});
+ auto my_type = ValueType::tensor_type({{"y", 10}, {"x"}, {"z", 5}});
EXPECT_EQUAL(my_type.dimension_index("x"), 0u);
EXPECT_EQUAL(my_type.dimension_index("y"), 1u);
EXPECT_EQUAL(my_type.dimension_index("z"), 2u);
@@ -72,29 +65,30 @@ TEST("require that dimension index can be obtained") {
}
void verify_equal(const ValueType &a, const ValueType &b) {
- EXPECT_TRUE(a == b);
- EXPECT_TRUE(b == a);
+ EXPECT_EQUAL(a, b);
+ EXPECT_EQUAL(b, a);
EXPECT_FALSE(a != b);
EXPECT_FALSE(b != a);
+ EXPECT_EQUAL(a, ValueType::either(a, b));
+ EXPECT_EQUAL(a, ValueType::either(b, a));
}
-
+
void verify_not_equal(const ValueType &a, const ValueType &b) {
EXPECT_TRUE(a != b);
EXPECT_TRUE(b != a);
EXPECT_FALSE(a == b);
EXPECT_FALSE(b == a);
+ EXPECT_TRUE(ValueType::either(a, b).is_error());
+ EXPECT_TRUE(ValueType::either(b, a).is_error());
}
TEST("require that value types can be compared") {
TEST_DO(verify_equal(ValueType::error_type(), ValueType::error_type()));
- TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::any_type()));
TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::double_type()));
- TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::tensor_type({})));
- TEST_DO(verify_equal(ValueType::any_type(), ValueType::any_type()));
- TEST_DO(verify_not_equal(ValueType::any_type(), ValueType::double_type()));
- TEST_DO(verify_not_equal(ValueType::any_type(), ValueType::tensor_type({})));
+ TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::tensor_type({{"x"}})));
TEST_DO(verify_equal(ValueType::double_type(), ValueType::double_type()));
- TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::tensor_type({})));
+ TEST_DO(verify_equal(ValueType::double_type(), ValueType::tensor_type({})));
+ TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::tensor_type({{"x"}})));
TEST_DO(verify_equal(ValueType::tensor_type({{"x"}, {"y"}}), ValueType::tensor_type({{"y"}, {"x"}})));
TEST_DO(verify_not_equal(ValueType::tensor_type({{"x"}, {"y"}}), ValueType::tensor_type({{"x"}, {"y"}, {"z"}})));
TEST_DO(verify_equal(ValueType::tensor_type({{"x", 10}, {"y", 20}}), ValueType::tensor_type({{"y", 20}, {"x", 10}})));
@@ -103,67 +97,55 @@ TEST("require that value types can be compared") {
}
void verify_predicates(const ValueType &type,
- bool expect_any, bool expect_error, bool expect_double, bool expect_tensor,
- bool expect_maybe_tensor, bool expect_abstract, bool expect_unknown_dimensions)
+ bool expect_error, bool expect_double, bool expect_tensor,
+ bool expect_sparse, bool expect_dense)
{
- EXPECT_EQUAL(type.is_any(), expect_any);
EXPECT_EQUAL(type.is_error(), expect_error);
EXPECT_EQUAL(type.is_double(), expect_double);
EXPECT_EQUAL(type.is_tensor(), expect_tensor);
- EXPECT_EQUAL(type.maybe_tensor(), expect_maybe_tensor);
- EXPECT_EQUAL(type.is_abstract(), expect_abstract);
- EXPECT_EQUAL(type.unknown_dimensions(), expect_unknown_dimensions);
+ EXPECT_EQUAL(type.is_sparse(), expect_sparse);
+ EXPECT_EQUAL(type.is_dense(), expect_dense);
}
TEST("require that type-related predicate functions work as expected") {
- TEST_DO(verify_predicates(ValueType::any_type(),
- true, false, false, false,
- true, true, true));
- TEST_DO(verify_predicates(ValueType::error_type(),
- false, true, false, false,
- false, false, false));
- TEST_DO(verify_predicates(ValueType::double_type(),
- false, false, true, false,
- false, false, false));
- TEST_DO(verify_predicates(ValueType::tensor_type({}),
- false, false, false, true,
- true, true, true));
- TEST_DO(verify_predicates(ValueType::tensor_type({{"x"}}),
- false, false, false, true,
- true, false, false));
- TEST_DO(verify_predicates(ValueType::tensor_type({{"x", 0}}),
- false, false, false, true,
- true, true, false));
+ TEST_DO(verify_predicates(ValueType::error_type(), true, false, false, false, false));
+ TEST_DO(verify_predicates(ValueType::double_type(), false, true, false, false, false));
+ TEST_DO(verify_predicates(ValueType::tensor_type({}), false, true, false, false, false));
+ TEST_DO(verify_predicates(ValueType::tensor_type({{"x"}}), false, false, true, true, false));
+ TEST_DO(verify_predicates(ValueType::tensor_type({{"x"},{"y"}}), false, false, true, true, false));
+ TEST_DO(verify_predicates(ValueType::tensor_type({{"x", 5}}), false, false, true, false, true));
+ TEST_DO(verify_predicates(ValueType::tensor_type({{"x", 5},{"y", 10}}), false, false, true, false, true));
+ TEST_DO(verify_predicates(ValueType::tensor_type({{"x", 5}, {"y"}}), false, false, true, false, false));
}
TEST("require that dimension predicates work as expected") {
- ValueType type = ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 0}});
- ASSERT_EQUAL(3u, type.dimensions().size());
- EXPECT_TRUE(type.dimensions()[0].is_mapped());
- EXPECT_TRUE(!type.dimensions()[0].is_indexed());
- EXPECT_TRUE(!type.dimensions()[0].is_bound());
- EXPECT_TRUE(!type.dimensions()[1].is_mapped());
- EXPECT_TRUE(type.dimensions()[1].is_indexed());
- EXPECT_TRUE(type.dimensions()[1].is_bound());
- EXPECT_TRUE(!type.dimensions()[2].is_mapped());
- EXPECT_TRUE(type.dimensions()[2].is_indexed());
- EXPECT_TRUE(!type.dimensions()[2].is_bound());
+ ValueType::Dimension x("x");
+ ValueType::Dimension y("y", 10);
+ ValueType::Dimension z("z", 0);
+ EXPECT_TRUE(x.is_mapped());
+ EXPECT_TRUE(!x.is_indexed());
+ EXPECT_TRUE(!x.is_bound());
+ EXPECT_TRUE(!y.is_mapped());
+ EXPECT_TRUE(y.is_indexed());
+ EXPECT_TRUE(y.is_bound());
+ EXPECT_TRUE(!z.is_mapped());
+ EXPECT_TRUE(z.is_indexed());
+ EXPECT_TRUE(!z.is_bound());
+}
+
+TEST("require that use of unbound dimensions result in error types") {
+ EXPECT_TRUE(ValueType::tensor_type({{"x", 0}}).is_error());
}
TEST("require that duplicate dimension names result in error types") {
EXPECT_TRUE(ValueType::tensor_type({{"x"}, {"x"}}).is_error());
}
-TEST("require that removing dimensions from non-abstract non-tensor types gives error type") {
+TEST("require that removing dimensions from non-tensor types gives error type") {
EXPECT_TRUE(ValueType::error_type().reduce({"x"}).is_error());
EXPECT_TRUE(ValueType::double_type().reduce({"x"}).is_error());
}
-TEST("require that removing dimensions from abstract maybe-tensor types gives any type") {
- EXPECT_TRUE(ValueType::any_type().reduce({"x"}).is_any());
- EXPECT_TRUE(ValueType::tensor_type({}).reduce({"x"}).is_any());
-}
-
TEST("require that dimensions can be removed from tensor value types") {
ValueType type = ValueType::tensor_type({{"x", 10}, {"y", 20}, {"z", 30}});
EXPECT_EQUAL(ValueType::tensor_type({{"y", 20}, {"z", 30}}), type.reduce({"x"}));
@@ -187,30 +169,22 @@ TEST("require that removing all dimensions gives double type") {
EXPECT_EQUAL(ValueType::double_type(), type.reduce({"x", "y", "z"}));
}
-TEST("require that dimensions can be combined for tensor value types") {
+TEST("require that dimensions can be combined for value types") {
ValueType tensor_type_xy = ValueType::tensor_type({{"x"}, {"y"}});
ValueType tensor_type_yz = ValueType::tensor_type({{"y"}, {"z"}});
ValueType tensor_type_xyz = ValueType::tensor_type({{"x"}, {"y"}, {"z"}});
ValueType tensor_type_y = ValueType::tensor_type({{"y"}});
+ ValueType tensor_type_a10 = ValueType::tensor_type({{"a", 10}});
+ ValueType tensor_type_a10xyz = ValueType::tensor_type({{"a", 10}, {"x"}, {"y"}, {"z"}});
+ ValueType scalar = ValueType::double_type();
+ EXPECT_EQUAL(ValueType::join(scalar, scalar), scalar);
EXPECT_EQUAL(ValueType::join(tensor_type_xy, tensor_type_yz), tensor_type_xyz);
EXPECT_EQUAL(ValueType::join(tensor_type_yz, tensor_type_xy), tensor_type_xyz);
EXPECT_EQUAL(ValueType::join(tensor_type_y, tensor_type_y), tensor_type_y);
-}
-
-TEST("require that indexed dimensions combine to the minimal dimension size") {
- ValueType tensor_0 = ValueType::tensor_type({{"x", 0}});
- ValueType tensor_10 = ValueType::tensor_type({{"x", 10}});
- ValueType tensor_20 = ValueType::tensor_type({{"x", 20}});
- EXPECT_EQUAL(ValueType::join(tensor_10, tensor_0), tensor_0);
- EXPECT_EQUAL(ValueType::join(tensor_10, tensor_10), tensor_10);
- EXPECT_EQUAL(ValueType::join(tensor_10, tensor_20), tensor_10);
-}
-
-void verify_combinable(const ValueType &a, const ValueType &b) {
- EXPECT_TRUE(!ValueType::join(a, b).is_error());
- EXPECT_TRUE(!ValueType::join(b, a).is_error());
- EXPECT_TRUE(!ValueType::join(a, b).is_any());
- EXPECT_TRUE(!ValueType::join(b, a).is_any());
+ EXPECT_EQUAL(ValueType::join(scalar, tensor_type_y), tensor_type_y);
+ EXPECT_EQUAL(ValueType::join(tensor_type_a10, tensor_type_a10), tensor_type_a10);
+ EXPECT_EQUAL(ValueType::join(tensor_type_a10, scalar), tensor_type_a10);
+ EXPECT_EQUAL(ValueType::join(tensor_type_xyz, tensor_type_a10), tensor_type_a10xyz);
}
void verify_not_combinable(const ValueType &a, const ValueType &b) {
@@ -218,70 +192,49 @@ void verify_not_combinable(const ValueType &a, const ValueType &b) {
EXPECT_TRUE(ValueType::join(b, a).is_error());
}
-void verify_maybe_combinable(const ValueType &a, const ValueType &b) {
- EXPECT_TRUE(ValueType::join(a, b).is_any());
- EXPECT_TRUE(ValueType::join(b, a).is_any());
-}
-
TEST("require that mapped and indexed dimensions are not combinable") {
verify_not_combinable(ValueType::tensor_type({{"x", 10}}), ValueType::tensor_type({{"x"}}));
}
-TEST("require that dimension combining is only allowed (yes/no/maybe) for appropriate types") {
- std::vector<ValueType> types = { ValueType::any_type(), ValueType::error_type(), ValueType::double_type(),
- ValueType::tensor_type({}), ValueType::tensor_type({{"x"}}) };
- for (size_t a = 0; a < types.size(); ++a) {
- for (size_t b = a; b < types.size(); ++b) {
- TEST_STATE(vespalib::make_string("a='%s', b='%s'", types[a].to_spec().c_str(), types[b].to_spec().c_str()).c_str());
- if (types[a].is_error() || types[b].is_error()) {
- verify_not_combinable(types[a], types[b]);
- } else if (types[a].is_any() || types[b].is_any()) {
- verify_maybe_combinable(types[a], types[b]);
- } else if (types[a].is_double() || types[b].is_double()) {
- verify_combinable(types[a], types[b]);
- } else if (types[a].unknown_dimensions() || types[b].unknown_dimensions()) {
- verify_maybe_combinable(types[a], types[b]);
- } else {
- verify_combinable(types[a], types[b]);
- }
- }
- }
+TEST("require that indexed dimensions of different sizes are not combinable") {
+ verify_not_combinable(ValueType::tensor_type({{"x", 10}}), ValueType::tensor_type({{"x", 20}}));
+}
+
+TEST("require that error type combined with anything produces error type") {
+ verify_not_combinable(ValueType::error_type(), ValueType::error_type());
+ verify_not_combinable(ValueType::error_type(), ValueType::double_type());
+ verify_not_combinable(ValueType::error_type(), ValueType::tensor_type({{"x"}}));
+ verify_not_combinable(ValueType::error_type(), ValueType::tensor_type({{"x", 10}}));
}
TEST("require that value type can make spec") {
- EXPECT_EQUAL("any", ValueType::any_type().to_spec());
EXPECT_EQUAL("error", ValueType::error_type().to_spec());
EXPECT_EQUAL("double", ValueType::double_type().to_spec());
- EXPECT_EQUAL("tensor", ValueType::tensor_type({}).to_spec());
+ EXPECT_EQUAL("double", ValueType::tensor_type({}).to_spec());
EXPECT_EQUAL("tensor(x{})", ValueType::tensor_type({{"x"}}).to_spec());
EXPECT_EQUAL("tensor(y[10])", ValueType::tensor_type({{"y", 10}}).to_spec());
- EXPECT_EQUAL("tensor(z[])", ValueType::tensor_type({{"z", 0}}).to_spec());
- EXPECT_EQUAL("tensor(x{},y[10],z[])", ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 0}}).to_spec());
+ EXPECT_EQUAL("tensor(x{},y[10],z[5])", ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}).to_spec());
}
TEST("require that value type spec can be parsed") {
- EXPECT_EQUAL(ValueType::any_type(), ValueType::from_spec("any"));
EXPECT_EQUAL(ValueType::double_type(), ValueType::from_spec("double"));
EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec("tensor"));
EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec("tensor()"));
EXPECT_EQUAL(ValueType::tensor_type({{"x"}}), ValueType::from_spec("tensor(x{})"));
EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec("tensor(y[10])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"z", 0}}), ValueType::from_spec("tensor(z[])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 0}}), ValueType::from_spec("tensor(x{},y[10],z[])"));
+ EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}), ValueType::from_spec("tensor(x{},y[10],z[5])"));
EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec("tensor<double>(y[10])"));
EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec("tensor<float>(y[10])"));
}
TEST("require that value type spec can be parsed with extra whitespace") {
- EXPECT_EQUAL(ValueType::any_type(), ValueType::from_spec(" any "));
EXPECT_EQUAL(ValueType::double_type(), ValueType::from_spec(" double "));
EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec(" tensor "));
EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec(" tensor ( ) "));
EXPECT_EQUAL(ValueType::tensor_type({{"x"}}), ValueType::from_spec(" tensor ( x { } ) "));
EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec(" tensor ( y [ 10 ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"z", 0}}), ValueType::from_spec(" tensor ( z [ ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 0}}),
- ValueType::from_spec(" tensor ( x { } , y [ 10 ] , z [ ] ) "));
+ EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}),
+ ValueType::from_spec(" tensor ( x { } , y [ 10 ] , z [ 5 ] ) "));
EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec(" tensor < double > ( y [ 10 ] ) "));
EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec(" tensor < float > ( y [ 10 ] ) "));
}
@@ -290,6 +243,7 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("").is_error());
EXPECT_TRUE(ValueType::from_spec(" ").is_error());
EXPECT_TRUE(ValueType::from_spec("error").is_error());
+ EXPECT_TRUE(ValueType::from_spec("any").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor tensor").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(x{10})").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(x{},)").is_error());
@@ -304,6 +258,7 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x{})").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[10])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[])").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor(z[])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<float16>(x[10])").is_error());
}
@@ -327,15 +282,8 @@ ParseResult::ParseResult(const vespalib::string &spec_in)
ParseResult::~ParseResult() { }
TEST("require that we can parse a partial string into a type with the low-level API") {
- ParseResult result("tensor(a[]) , ");
- EXPECT_EQUAL(result.type, ValueType::tensor_type({{"a", 0}}));
- ASSERT_TRUE(result.after_inside());
- EXPECT_EQUAL(*result.after, ',');
-}
-
-TEST("require that we can parse an abstract tensor type from a partial string") {
- ParseResult result("tensor , ");
- EXPECT_EQUAL(result.type, ValueType::tensor_type({}));
+ ParseResult result("tensor(a[5]) , ");
+ EXPECT_EQUAL(result.type, ValueType::tensor_type({{"a", 5}}));
ASSERT_TRUE(result.after_inside());
EXPECT_EQUAL(*result.after, ',');
}
@@ -349,55 +297,28 @@ TEST("require that 'error' is the valid representation of the error type") {
EXPECT_TRUE(invalid.after == nullptr); // parse not ok
}
-TEST("require that a sparse type must be a tensor with dimensions that all are mapped") {
- EXPECT_TRUE(ValueType::from_spec("tensor(x{})").is_sparse());
- EXPECT_TRUE(ValueType::from_spec("tensor(x{},y{})").is_sparse());
- EXPECT_FALSE(ValueType::from_spec("tensor()").is_sparse());
- EXPECT_FALSE(ValueType::from_spec("tensor(x[])").is_sparse());
- EXPECT_FALSE(ValueType::from_spec("tensor(x{},y[])").is_sparse());
- EXPECT_FALSE(ValueType::from_spec("double").is_sparse());
- EXPECT_FALSE(ValueType::from_spec("any").is_sparse());
- EXPECT_FALSE(ValueType::from_spec("error").is_sparse());
-}
-
-TEST("require that a dense type must be a tensor with dimensions that all are indexed") {
- EXPECT_TRUE(ValueType::from_spec("tensor(x[])").is_dense());
- EXPECT_TRUE(ValueType::from_spec("tensor(x[],y[])").is_dense());
- EXPECT_FALSE(ValueType::from_spec("tensor()").is_dense());
- EXPECT_FALSE(ValueType::from_spec("tensor(x{})").is_dense());
- EXPECT_FALSE(ValueType::from_spec("tensor(x[],y{})").is_dense());
- EXPECT_FALSE(ValueType::from_spec("double").is_dense());
- EXPECT_FALSE(ValueType::from_spec("any").is_dense());
- EXPECT_FALSE(ValueType::from_spec("error").is_dense());
-}
-
TEST("require that tensor dimensions can be renamed") {
EXPECT_EQUAL(ValueType::from_spec("tensor(x{})").rename({"x"}, {"y"}),
ValueType::from_spec("tensor(y{})"));
- EXPECT_EQUAL(ValueType::from_spec("tensor(x{},y[])").rename({"x","y"}, {"y","x"}),
- ValueType::from_spec("tensor(y{},x[])"));
+ EXPECT_EQUAL(ValueType::from_spec("tensor(x{},y[5])").rename({"x","y"}, {"y","x"}),
+ ValueType::from_spec("tensor(y{},x[5])"));
EXPECT_EQUAL(ValueType::from_spec("tensor(x{})").rename({"x"}, {"x"}),
ValueType::from_spec("tensor(x{})"));
EXPECT_EQUAL(ValueType::from_spec("tensor(x{})").rename({}, {}), ValueType::error_type());
EXPECT_EQUAL(ValueType::double_type().rename({}, {}), ValueType::error_type());
EXPECT_EQUAL(ValueType::from_spec("tensor(x{},y{})").rename({"x"}, {"y","z"}), ValueType::error_type());
EXPECT_EQUAL(ValueType::from_spec("tensor(x{},y{})").rename({"x","y"}, {"z"}), ValueType::error_type());
- EXPECT_EQUAL(ValueType::tensor_type({}).rename({"x"}, {"y"}), ValueType::any_type());
- EXPECT_EQUAL(ValueType::any_type().rename({"x"}, {"y"}), ValueType::any_type());
EXPECT_EQUAL(ValueType::double_type().rename({"a"}, {"b"}), ValueType::error_type());
EXPECT_EQUAL(ValueType::error_type().rename({"a"}, {"b"}), ValueType::error_type());
}
TEST("require that types can be concatenated") {
ValueType error = ValueType::error_type();
- ValueType any = ValueType::any_type();
- ValueType tensor = ValueType::tensor_type({});
ValueType scalar = ValueType::double_type();
ValueType vx_2 = ValueType::from_spec("tensor(x[2])");
ValueType vx_m = ValueType::from_spec("tensor(x{})");
ValueType vx_3 = ValueType::from_spec("tensor(x[3])");
ValueType vx_5 = ValueType::from_spec("tensor(x[5])");
- ValueType vx_any = ValueType::from_spec("tensor(x[])");
ValueType vy_7 = ValueType::from_spec("tensor(y[7])");
ValueType mxy_22 = ValueType::from_spec("tensor(x[2],y[2])");
ValueType mxy_52 = ValueType::from_spec("tensor(x[5],y[2])");
@@ -407,30 +328,20 @@ TEST("require that types can be concatenated") {
EXPECT_EQUAL(ValueType::concat(error, vx_2, "x"), error);
EXPECT_EQUAL(ValueType::concat(vx_2, error, "x"), error);
- EXPECT_EQUAL(ValueType::concat(error, any, "x"), error);
- EXPECT_EQUAL(ValueType::concat(any, error, "x"), error);
EXPECT_EQUAL(ValueType::concat(vx_m, vx_2, "x"), error);
EXPECT_EQUAL(ValueType::concat(vx_2, vx_m, "x"), error);
EXPECT_EQUAL(ValueType::concat(vx_m, vx_m, "x"), error);
EXPECT_EQUAL(ValueType::concat(vx_m, scalar, "x"), error);
EXPECT_EQUAL(ValueType::concat(scalar, vx_m, "x"), error);
+ EXPECT_EQUAL(ValueType::concat(vx_2, vx_3, "y"), error);
EXPECT_EQUAL(ValueType::concat(vy_7, vx_m, "z"), cxyz_m72);
- EXPECT_EQUAL(ValueType::concat(tensor, vx_2, "x"), any);
- EXPECT_EQUAL(ValueType::concat(vx_2, tensor, "x"), any);
- EXPECT_EQUAL(ValueType::concat(any, vx_2, "x"), any);
- EXPECT_EQUAL(ValueType::concat(vx_2, any, "x"), any);
- EXPECT_EQUAL(ValueType::concat(any, tensor, "x"), any);
- EXPECT_EQUAL(ValueType::concat(tensor, any, "x"), any);
EXPECT_EQUAL(ValueType::concat(scalar, scalar, "x"), vx_2);
EXPECT_EQUAL(ValueType::concat(vx_2, scalar, "x"), vx_3);
EXPECT_EQUAL(ValueType::concat(scalar, vx_2, "x"), vx_3);
EXPECT_EQUAL(ValueType::concat(vx_2, vx_3, "x"), vx_5);
- EXPECT_EQUAL(ValueType::concat(vx_2, vx_any, "x"), vx_any);
- EXPECT_EQUAL(ValueType::concat(vx_any, vx_2, "x"), vx_any);
EXPECT_EQUAL(ValueType::concat(scalar, vx_2, "y"), mxy_22);
EXPECT_EQUAL(ValueType::concat(vx_2, scalar, "y"), mxy_22);
- EXPECT_EQUAL(ValueType::concat(vx_2, vx_3, "y"), mxy_22);
- EXPECT_EQUAL(ValueType::concat(vx_3, vx_2, "y"), mxy_22);
+ EXPECT_EQUAL(ValueType::concat(vx_2, vx_2, "y"), mxy_22);
EXPECT_EQUAL(ValueType::concat(mxy_22, vx_3, "x"), mxy_52);
EXPECT_EQUAL(ValueType::concat(vx_3, mxy_22, "x"), mxy_52);
EXPECT_EQUAL(ValueType::concat(mxy_22, vy_7, "y"), mxy_29);
@@ -438,34 +349,4 @@ TEST("require that types can be concatenated") {
EXPECT_EQUAL(ValueType::concat(vx_5, vy_7, "z"), cxyz_572);
}
-TEST("require that 'either' gives appropriate type") {
- ValueType error = ValueType::error_type();
- ValueType any = ValueType::any_type();
- ValueType tensor = ValueType::tensor_type({});
- ValueType scalar = ValueType::double_type();
- ValueType vx_2 = ValueType::from_spec("tensor(x[2])");
- ValueType vx_m = ValueType::from_spec("tensor(x{})");
- ValueType vx_3 = ValueType::from_spec("tensor(x[3])");
- ValueType vx_any = ValueType::from_spec("tensor(x[])");
- ValueType vy_2 = ValueType::from_spec("tensor(y[2])");
- ValueType mxy_22 = ValueType::from_spec("tensor(x[2],y[2])");
- ValueType mxy_23 = ValueType::from_spec("tensor(x[2],y[3])");
- ValueType mxy_32 = ValueType::from_spec("tensor(x[3],y[2])");
- ValueType mxy_any2 = ValueType::from_spec("tensor(x[],y[2])");
- ValueType mxy_2any = ValueType::from_spec("tensor(x[2],y[])");
-
- EXPECT_EQUAL(ValueType::either(vx_2, error), error);
- EXPECT_EQUAL(ValueType::either(error, vx_2), error);
- EXPECT_EQUAL(ValueType::either(vx_2, vx_2), vx_2);
- EXPECT_EQUAL(ValueType::either(vx_2, scalar), any);
- EXPECT_EQUAL(ValueType::either(scalar, vx_2), any);
- EXPECT_EQUAL(ValueType::either(vx_2, mxy_22), tensor);
- EXPECT_EQUAL(ValueType::either(tensor, vx_2), tensor);
- EXPECT_EQUAL(ValueType::either(vx_2, vy_2), tensor);
- EXPECT_EQUAL(ValueType::either(vx_2, vx_m), tensor);
- EXPECT_EQUAL(ValueType::either(vx_2, vx_3), vx_any);
- EXPECT_EQUAL(ValueType::either(mxy_22, mxy_23), mxy_2any);
- EXPECT_EQUAL(ValueType::either(mxy_32, mxy_22), mxy_any2);
-}
-
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp b/eval/src/tests/tensor/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp
index ce321b7c3c3..9b46fc3393a 100644
--- a/eval/src/tests/tensor/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp
+++ b/eval/src/tests/tensor/dense_add_dimension_optimizer/dense_add_dimension_optimizer_test.cpp
@@ -27,7 +27,6 @@ EvalFixture::ParamRepo make_params() {
.add("x5", spec({x(5)}, N()))
.add("x5y1", spec({x(5),y(1)}, N()))
.add("y1z1", spec({y(1),z(1)}, N()))
- .add("x5_u", spec({x(5)}, N()), "tensor(x[])")
.add("x_m", spec({x({"a"})}, N()));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -80,15 +79,11 @@ TEST("require that non-canonical dimension addition is not optimized") {
}
TEST("require that dimension addition with overlapping dimensions is not optimized") {
- TEST_DO(verify_not_optimized("x5*tensor(x[1],y[1])(1)"));
- TEST_DO(verify_not_optimized("tensor(x[1],y[1])(1)*x5"));
TEST_DO(verify_not_optimized("x5y1*tensor(y[1],z[1])(1)"));
TEST_DO(verify_not_optimized("tensor(y[1],z[1])(1)*x5y1"));
}
TEST("require that dimension addition with inappropriate dimensions is not optimized") {
- TEST_DO(verify_not_optimized("x5_u*tensor(y[1])(1)"));
- TEST_DO(verify_not_optimized("tensor(y[1])(1)*x5_u"));
TEST_DO(verify_not_optimized("x_m*tensor(y[1])(1)"));
TEST_DO(verify_not_optimized("tensor(y[1])(1)*x_m"));
}
diff --git a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
index 60830e4abd7..fae5db75618 100644
--- a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
+++ b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
@@ -62,11 +62,6 @@ TEST("require that basic dot product with equal sizes is correct") {
check_gen_with_result(2, 2, (3.0 * 5.0) + (4.0 * 6.0));
}
-TEST("require that basic dot product with un-equal sizes is correct") {
- check_gen_with_result(2, 3, (3.0 * 5.0) + (4.0 * 6.0));
- check_gen_with_result(3, 2, (3.0 * 5.0) + (4.0 * 6.0));
-}
-
//-----------------------------------------------------------------------------
void assertDotProduct(size_t numCells) {
@@ -98,18 +93,6 @@ TEST("require that dot product with equal sizes is correct") {
TEST_DO(assertDotProduct(1024 + 3));
}
-TEST("require that dot product with un-equal sizes is correct") {
- TEST_DO(assertDotProduct(8, 8 + 3));
- TEST_DO(assertDotProduct(8 + 3, 8));
- TEST_DO(assertDotProduct(16, 16 + 3));
- TEST_DO(assertDotProduct(32, 32 + 3));
- TEST_DO(assertDotProduct(64, 64 + 3));
- TEST_DO(assertDotProduct(128, 128 + 3));
- TEST_DO(assertDotProduct(256, 256 + 3));
- TEST_DO(assertDotProduct(512, 512 + 3));
- TEST_DO(assertDotProduct(1024, 1024 + 3));
-}
-
//-----------------------------------------------------------------------------
EvalFixture::ParamRepo make_params() {
@@ -120,14 +103,8 @@ EvalFixture::ParamRepo make_params() {
.add("v04_y3", spec({y(3)}, MyVecSeq(10)))
.add("v05_x5", spec({x(5)}, MyVecSeq(6.0)))
.add("v06_x5", spec({x(5)}, MyVecSeq(7.0)))
- .add("v07_x3_a", spec({x(3)}, MyVecSeq(8.0)), "any")
- .add("v08_x3_u", spec({x(3)}, MyVecSeq(9.0)), "tensor(x[])")
- .add("v09_x4_u", spec({x(4)}, MyVecSeq(3.0)), "tensor(x[])")
.add("m01_x3y3", spec({x(3),y(3)}, MyVecSeq(1.0)))
- .add("m02_x2y3", spec({x(2),y(3)}, MyVecSeq(2.0)))
- .add("m03_x3y2", spec({x(3),y(2)}, MyVecSeq(3.0)))
- .add("m04_xuy3", spec({x(3),y(3)}, MyVecSeq(4.0)), "tensor(x[],y[3])")
- .add("m05_x3yu", spec({x(3),y(3)}, MyVecSeq(5.0)), "tensor(x[3],y[])");
+ .add("m02_x3y3", spec({x(3),y(3)}, MyVecSeq(2.0)));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -146,11 +123,6 @@ void assertNotOptimized(const vespalib::string &expr) {
EXPECT_TRUE(info.empty());
}
-TEST("require that dot product is not optimized for unknown types") {
- TEST_DO(assertNotOptimized("reduce(v02_x3*v07_x3_a,sum)"));
- TEST_DO(assertNotOptimized("reduce(v07_x3_a*v03_x3,sum)"));
-}
-
TEST("require that dot product works with tensor function") {
TEST_DO(assertOptimized("reduce(v05_x5*v06_x5,sum)"));
TEST_DO(assertOptimized("reduce(v05_x5*v06_x5,sum,x)"));
@@ -162,18 +134,11 @@ TEST("require that dot product with compatible dimensions is optimized") {
TEST_DO(assertOptimized("reduce(v01_x1*v01_x1,sum)"));
TEST_DO(assertOptimized("reduce(v02_x3*v03_x3,sum)"));
TEST_DO(assertOptimized("reduce(v05_x5*v06_x5,sum)"));
-
- TEST_DO(assertOptimized("reduce(v02_x3*v06_x5,sum)"));
- TEST_DO(assertOptimized("reduce(v05_x5*v03_x3,sum)"));
- TEST_DO(assertOptimized("reduce(v08_x3_u*v05_x5,sum)"));
- TEST_DO(assertOptimized("reduce(v05_x5*v08_x3_u,sum)"));
}
TEST("require that dot product with incompatible dimensions is NOT optimized") {
TEST_DO(assertNotOptimized("reduce(v02_x3*v04_y3,sum)"));
TEST_DO(assertNotOptimized("reduce(v04_y3*v02_x3,sum)"));
- TEST_DO(assertNotOptimized("reduce(v08_x3_u*v04_y3,sum)"));
- TEST_DO(assertNotOptimized("reduce(v04_y3*v08_x3_u,sum)"));
TEST_DO(assertNotOptimized("reduce(v02_x3*m01_x3y3,sum)"));
TEST_DO(assertNotOptimized("reduce(m01_x3y3*v02_x3,sum)"));
}
@@ -188,11 +153,8 @@ TEST("require that expressions similar to dot product are not optimized") {
}
TEST("require that multi-dimensional dot product can be optimized") {
- TEST_DO(assertOptimized("reduce(m01_x3y3*m02_x2y3,sum)"));
- TEST_DO(assertOptimized("reduce(m02_x2y3*m01_x3y3,sum)"));
- TEST_DO(assertOptimized("reduce(m01_x3y3*m04_xuy3,sum)"));
- TEST_DO(assertOptimized("reduce(m04_xuy3*m01_x3y3,sum)"));
- TEST_DO(assertOptimized("reduce(m04_xuy3*m04_xuy3,sum)"));
+ TEST_DO(assertOptimized("reduce(m01_x3y3*m02_x3y3,sum)"));
+ TEST_DO(assertOptimized("reduce(m02_x3y3*m01_x3y3,sum)"));
}
TEST("require that result must be double to trigger optimization") {
@@ -201,14 +163,6 @@ TEST("require that result must be double to trigger optimization") {
TEST_DO(assertNotOptimized("reduce(m01_x3y3*m01_x3y3,sum,y)"));
}
-TEST("require that additional dimensions must have matching size") {
- TEST_DO(assertOptimized("reduce(m01_x3y3*m01_x3y3,sum)"));
- TEST_DO(assertNotOptimized("reduce(m01_x3y3*m03_x3y2,sum)"));
- TEST_DO(assertNotOptimized("reduce(m03_x3y2*m01_x3y3,sum)"));
- TEST_DO(assertNotOptimized("reduce(m01_x3y3*m05_x3yu,sum)"));
- TEST_DO(assertNotOptimized("reduce(m05_x3yu*m01_x3y3,sum)"));
-}
-
void verify_compatible(const vespalib::string &a, const vespalib::string &b) {
auto a_type = ValueType::from_spec(a);
auto b_type = ValueType::from_spec(b);
@@ -231,13 +185,9 @@ TEST("require that type compatibility test is appropriate") {
TEST_DO(verify_compatible("tensor(x[5])", "tensor(x[5])"));
TEST_DO(verify_not_compatible("tensor(x[5])", "tensor(y[5])"));
TEST_DO(verify_compatible("tensor(x[5])", "tensor(x[3])"));
- TEST_DO(verify_compatible("tensor(x[])", "tensor(x[3])"));
TEST_DO(verify_compatible("tensor(x[3],y[7],z[9])", "tensor(x[5],y[7],z[9])"));
- TEST_DO(verify_compatible("tensor(x[3],y[7],z[9])", "tensor(x[],y[7],z[9])"));
TEST_DO(verify_not_compatible("tensor(x[5],y[7],z[9])", "tensor(x[5],y[5],z[9])"));
- TEST_DO(verify_not_compatible("tensor(x[5],y[],z[9])", "tensor(x[5],y[7],z[9])"));
TEST_DO(verify_not_compatible("tensor(x[5],y[7],z[9])", "tensor(x[5],y[7],z[5])"));
- TEST_DO(verify_not_compatible("tensor(x[5],y[7],z[])", "tensor(x[5],y[7],z[9])"));
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/tests/tensor/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp b/eval/src/tests/tensor/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp
index c892ed02808..10b4c622a0a 100644
--- a/eval/src/tests/tensor/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp
+++ b/eval/src/tests/tensor/dense_fast_rename_optimizer/dense_fast_rename_optimizer_test.cpp
@@ -25,7 +25,6 @@ const TensorEngine &prod_engine = DefaultTensorEngine::ref();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
.add("x5", spec({x(5)}, N()))
- .add("x5_u", spec({x(5)}, N()), "tensor(x[])")
.add("x_m", spec({x({"a", "b", "c"})}, N()))
.add("x5y3", spec({x(5),y(3)}, N()));
}
@@ -64,10 +63,6 @@ TEST("require that transposing dense renames are not optimized") {
TEST_DO(verify_not_optimized("rename(x5y3,(y,x),(a,b))"));
}
-TEST("require that abstract dense renames are not optimized") {
- TEST_DO(verify_not_optimized("rename(x5_u,x,y)"));
-}
-
TEST("require that non-dense renames are not optimized") {
TEST_DO(verify_not_optimized("rename(x_m,x,y)"));
}
diff --git a/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp b/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp
index 3a5b27965d0..7ee603e1763 100644
--- a/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp
+++ b/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp
@@ -45,10 +45,8 @@ EvalFixture::ParamRepo make_params() {
.add_mutable("mut_x5_A", spec({x(5)}, seq))
.add_mutable("mut_x5_B", spec({x(5)}, seq))
.add_mutable("mut_x5_C", spec({x(5)}, seq))
- .add_mutable("mut_x4", spec({x(4)}, seq))
.add_mutable("mut_x5y3_A", spec({x(5),y(3)}, seq))
.add_mutable("mut_x5y3_B", spec({x(5),y(3)}, seq))
- .add_mutable("mut_x5_unbound", spec({x(5)}, seq), "tensor(x[])")
.add_mutable("mut_x_sparse", spec({x({"a", "b", "c"})}, seq));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -112,8 +110,6 @@ TEST("require that join(tensor,scalar) operations are not optimized") {
}
TEST("require that join with different tensor shapes are not optimized") {
- TEST_DO(verify_not_optimized("mut_x5_A-mut_x4"));
- TEST_DO(verify_not_optimized("mut_x4-mut_x5_A"));
TEST_DO(verify_not_optimized("mut_x5_A*mut_x5y3_B"));
}
@@ -124,12 +120,6 @@ TEST("require that inplace join operations can be chained") {
TEST_DO(verify_p2_optimized("con_x5_A-(con_x5_B-mut_x5_C)", 2));
}
-TEST("require that abstract tensors are not optimized") {
- TEST_DO(verify_not_optimized("mut_x5_unbound+mut_x5_A"));
- TEST_DO(verify_not_optimized("mut_x5_A+mut_x5_unbound"));
- TEST_DO(verify_not_optimized("mut_x5_unbound+mut_x5_unbound"));
-}
-
TEST("require that non-mutable tensors are not optimized") {
TEST_DO(verify_not_optimized("con_x5_A+con_x5_B"));
}
diff --git a/eval/src/tests/tensor/dense_inplace_map_function/dense_inplace_map_function_test.cpp b/eval/src/tests/tensor/dense_inplace_map_function/dense_inplace_map_function_test.cpp
index 77af747a066..a17b7e02eb8 100644
--- a/eval/src/tests/tensor/dense_inplace_map_function/dense_inplace_map_function_test.cpp
+++ b/eval/src/tests/tensor/dense_inplace_map_function/dense_inplace_map_function_test.cpp
@@ -27,7 +27,6 @@ EvalFixture::ParamRepo make_params() {
.add_mutable("_d", spec(5.0))
.add_mutable("_x5", spec({x(5)}, N()))
.add_mutable("_x5y3", spec({x(5),y(3)}, N()))
- .add_mutable("_x5_u", spec({x(5)}, N()), "tensor(x[])")
.add_mutable("_x_m", spec({x({"a", "b", "c"})}, N()));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -60,10 +59,6 @@ TEST("require that inplace map operations can be chained") {
TEST_DO(verify_optimized("map(map(_x5,f(x)(x+10)),f(x)(x-5))", 2));
}
-TEST("require that abstract tensors are not optimized") {
- TEST_DO(verify_not_optimized("map(_x5_u,f(x)(x+10))"));
-}
-
TEST("require that non-mutable tensors are not optimized") {
TEST_DO(verify_not_optimized("map(x5,f(x)(x+10))"));
}
diff --git a/eval/src/tests/tensor/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp b/eval/src/tests/tensor/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp
index b87393c2980..ac451d10b50 100644
--- a/eval/src/tests/tensor/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp
+++ b/eval/src/tests/tensor/dense_remove_dimension_optimizer/dense_remove_dimension_optimizer_test.cpp
@@ -26,7 +26,6 @@ EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
.add("x1y5z1", spec({x(1),y(5),z(1)}, N()))
.add("x1y1z1", spec({x(1),y(1),z(1)}, N()))
- .add("x1y5z1_u", spec({x(1),y(5),z(1)}, N()), "tensor(x[1],y[5],z[])")
.add("x1y5z_m", spec({x(1),y(5),z({"a"})}, N()));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -74,8 +73,6 @@ TEST("require that full reduce is not optimized") {
}
TEST("require that inappropriate tensor types cannot be optimized") {
- TEST_DO(verify_not_optimized("reduce(x1y5z1_u,sum,x)"));
- TEST_DO(verify_not_optimized("reduce(x1y5z1_u,sum,z)"));
TEST_DO(verify_not_optimized("reduce(x1y5z_m,sum,x)"));
TEST_DO(verify_not_optimized("reduce(x1y5z_m,sum,z)"));
}
diff --git a/eval/src/tests/tensor/dense_xw_product_function/dense_xw_product_function_test.cpp b/eval/src/tests/tensor/dense_xw_product_function/dense_xw_product_function_test.cpp
index f18e72b0d07..b55e223ab07 100644
--- a/eval/src/tests/tensor/dense_xw_product_function/dense_xw_product_function_test.cpp
+++ b/eval/src/tests/tensor/dense_xw_product_function/dense_xw_product_function_test.cpp
@@ -49,12 +49,7 @@ EvalFixture::ParamRepo make_params() {
.add("x8y5", spec({x(8),y(5)}, MyMatSeq()))
.add("y5z8", spec({y(5),z(8)}, MyMatSeq()))
.add("x5y16", spec({x(5),y(16)}, MyMatSeq()))
- .add("y16z5", spec({y(16),z(5)}, MyMatSeq()))
- .add("a_y3", spec({y(3)}, MyVecSeq()), "any")
- .add("y3_u", spec({y(3)}, MyVecSeq()), "tensor(y[])")
- .add("a_x2y3", spec({x(2),y(3)}, MyMatSeq()), "any")
- .add("x2_uy3", spec({x(2),y(3)}, MyMatSeq()), "tensor(x[],y[3])")
- .add("x2y3_u", spec({x(2),y(3)}, MyMatSeq()), "tensor(x[2],y[])");
+ .add("y16z5", spec({y(16),z(5)}, MyMatSeq()));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -91,14 +86,6 @@ TEST("require that xw product gives same results as reference join/reduce") {
TEST_DO(verify_optimized("reduce(y16*y16z5,sum,y)", 16, 5, false));
}
-TEST("require that xw product is not optimized for abstract types") {
- TEST_DO(verify_not_optimized("reduce(a_y3*x2y3,sum)"));
- TEST_DO(verify_not_optimized("reduce(y3*a_x2y3,sum)"));
- TEST_DO(verify_not_optimized("reduce(y3_u*x2y3,sum)"));
- TEST_DO(verify_not_optimized("reduce(y3*x2_uy3,sum)"));
- TEST_DO(verify_not_optimized("reduce(y3*x2y3_u,sum)"));
-}
-
TEST("require that various variants of xw product can be optimized") {
TEST_DO(verify_optimized("reduce(y3*x2y3,sum,y)", 3, 2, true));
TEST_DO(verify_optimized("reduce(x2y3*y3,sum,y)", 3, 2, true));
@@ -118,8 +105,6 @@ TEST("require that expressions similar to xw product are not optimized") {
}
TEST("require that xw products with incompatible dimensions are not optimized") {
- TEST_DO(verify_not_optimized("reduce(y3*x1y1,sum,y)"));
- TEST_DO(verify_not_optimized("reduce(y3*x8y5,sum,y)"));
TEST_DO(verify_not_optimized("reduce(y3*x2z3,sum,y)"));
TEST_DO(verify_not_optimized("reduce(y3*x2z3,sum,z)"));
}
diff --git a/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
index 124295a500d..60b17930f0b 100644
--- a/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
+++ b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
@@ -96,48 +96,6 @@ TEST("require that sparse tensors can be mapped to dense type") {
.add({{"x",1},{"y",2}}, 0)));
}
-TEST("require that sparse tensors can be mapped to abstract dense type") {
- TEST_DO(verify(TensorSpec("tensor(x{},y{})")
- .add({{"x","0"},{"y","0"}}, 1)
- .add({{"x","1"},{"y","0"}}, 3)
- .add({{"x","0"},{"y","1"}}, 5)
- .add({{"x","10"},{"y","1"}}, 7),
- "tensor(x[2],y[])",
- TensorSpec("tensor(x[2],y[2])")
- .add({{"x",0},{"y",0}}, 1)
- .add({{"x",0},{"y",1}}, 5)
- .add({{"x",1},{"y",0}}, 3)
- .add({{"x",1},{"y",1}}, 0)));
-
- TEST_DO(verify(TensorSpec("tensor(x{},y{})")
- .add({{"x","0"},{"y","0"}}, 1)
- .add({{"x","1"},{"y","0"}}, 3)
- .add({{"x","0"},{"y","1"}}, 5)
- .add({{"x","2"},{"y","0"}}, 7),
- "tensor(x[],y[])",
- TensorSpec("tensor(x[3],y[2])")
- .add({{"x",0},{"y",0}}, 1)
- .add({{"x",0},{"y",1}}, 5)
- .add({{"x",1},{"y",0}}, 3)
- .add({{"x",1},{"y",1}}, 0)
- .add({{"x",2},{"y",0}}, 7)
- .add({{"x",2},{"y",1}}, 0)));
-
- TEST_DO(verify(TensorSpec("tensor(x{},y{})")
- .add({{"x","0"},{"y","0"}}, 1)
- .add({{"x","1"},{"y","0"}}, 3)
- .add({{"x","0"},{"y","1"}}, 5)
- .add({{"x","10"},{"y","3"}}, 7),
- "tensor(x[],y[3])",
- TensorSpec("tensor(x[2],y[3])")
- .add({{"x",0},{"y",0}}, 1)
- .add({{"x",0},{"y",1}}, 5)
- .add({{"x",0},{"y",2}}, 0)
- .add({{"x",1},{"y",0}}, 3)
- .add({{"x",1},{"y",1}}, 0)
- .add({{"x",1},{"y",2}}, 0)));
-}
-
TEST("require that dense tensors can be mapped to sparse type") {
TEST_DO(verify(TensorSpec("tensor(x[2],y[2])")
.add({{"x",0},{"y",0}}, 1)
@@ -168,7 +126,7 @@ TEST("require that mixed tensors can be mapped to dense type") {
.add({{"x",0},{"y","1"}}, 3)
.add({{"x",1},{"y","0"}}, 5)
.add({{"x",1},{"y","1"}}, 7),
- "tensor(y[])",
+ "tensor(y[2])",
TensorSpec("tensor(y[2])")
.add({{"y",0}}, 6)
.add({{"y",1}}, 10)));
@@ -180,7 +138,7 @@ TEST("require that mixed tensors can be mapped to mixed type") {
.add({{"x",0},{"y","1"}}, 3)
.add({{"x",1},{"y","0"}}, 5)
.add({{"x",1},{"y","1"}}, 7),
- "tensor(x{},y[])",
+ "tensor(x{},y[2])",
TensorSpec("tensor(x{},y[2])")
.add({{"x","0"},{"y",0}}, 1)
.add({{"x","0"},{"y",1}}, 3)
@@ -194,7 +152,7 @@ TEST("require that dense tensors can be mapped to mixed type") {
.add({{"x",0},{"y",1}}, 3)
.add({{"x",1},{"y",0}}, 5)
.add({{"x",1},{"y",1}}, 7),
- "tensor(x{},y[])",
+ "tensor(x{},y[2])",
TensorSpec("tensor(x{},y[2])")
.add({{"x","0"},{"y",0}}, 1)
.add({{"x","0"},{"y",1}}, 3)
@@ -208,7 +166,7 @@ TEST("require that sparse tensors can be mapped to mixed type") {
.add({{"x","0"},{"y","1"}}, 3)
.add({{"x","1"},{"y","0"}}, 5)
.add({{"x","1"},{"y","1"}}, 7),
- "tensor(x[],y{})",
+ "tensor(x[2],y{})",
TensorSpec("tensor(x[2],y{})")
.add({{"x",0},{"y","0"}}, 1)
.add({{"x",0},{"y","1"}}, 3)
@@ -225,14 +183,14 @@ TEST("require that missing dimensions are added appropriately") {
TEST_DO(verify(TensorSpec("tensor(x[1])")
.add({{"x",0}}, 42),
- "tensor(x[1],y[],z[2])",
+ "tensor(x[1],y[1],z[2])",
TensorSpec("tensor(x[1],y[1],z[2])")
.add({{"x",0},{"y",0},{"z",0}}, 42)
.add({{"x",0},{"y",0},{"z",1}}, 0)));
TEST_DO(verify(TensorSpec("tensor(a{})")
.add({{"a","foo"}}, 42),
- "tensor(a{},b[],c{},d[2])",
+ "tensor(a{},b[1],c{},d[2])",
TensorSpec("tensor(a{},b[1],c{},d[2])")
.add({{"a","foo"},{"b",0},{"c",""},{"d",0}}, 42)
.add({{"a","foo"},{"b",0},{"c",""},{"d",1}}, 0)));