From 9b3a2d5fdbc00c304b1da6e7a15c57dcbdab2c8e Mon Sep 17 00:00:00 2001
From: Arne Juul
Date: Tue, 2 Feb 2021 15:27:14 +0000
Subject: followup on GenSpec after review

* avoid changing layouts in-place, call cpy() first
* do cells_float() vs cells_double() more equally
* minor cosmetic fixes
---
 eval/src/tests/eval/fast_value/fast_value_test.cpp |  8 ++++---
 .../tests/eval/simple_value/simple_value_test.cpp  | 26 ++++++++++++++--------
 .../eval/tensor_lambda/tensor_lambda_test.cpp      |  6 ++---
 .../tests/eval/value_codec/value_codec_test.cpp    |  8 ++++---
 .../generic_concat/generic_concat_test.cpp         | 16 ++++++++-----
 .../generic_create/generic_create_test.cpp         |  8 ++++---
 .../instruction/generic_join/generic_join_test.cpp | 12 ++++++----
 .../instruction/generic_map/generic_map_test.cpp   |  8 ++++---
 .../generic_merge/generic_merge_test.cpp           | 16 ++++++++-----
 .../instruction/generic_peek/generic_peek_test.cpp |  8 ++++---
 .../generic_reduce/generic_reduce_test.cpp         |  8 ++++---
 .../generic_rename/generic_rename_test.cpp         |  8 ++++---
 .../join_with_number_function_test.cpp             |  6 ++---
 .../mixed_map_function/mixed_map_function_test.cpp | 20 ++++++-----------
 .../mixed_simple_join_function_test.cpp            |  6 ++---
 .../pow_as_map_optimizer_test.cpp                  |  6 ++---
 .../sum_max_dot_product_function_test.cpp          | 16 ++++++-------
 .../tests/streamed/value/streamed_value_test.cpp   | 26 ++++++++++++++--------
 18 files changed, 120 insertions(+), 92 deletions(-)

(limited to 'eval')

diff --git a/eval/src/tests/eval/fast_value/fast_value_test.cpp b/eval/src/tests/eval/fast_value/fast_value_test.cpp
index 6cf43511977..9d29d8de660 100644
--- a/eval/src/tests/eval/fast_value/fast_value_test.cpp
+++ b/eval/src/tests/eval/fast_value/fast_value_test.cpp
@@ -142,9 +142,9 @@ TEST(FastValueBuilderTest, mixed_add_subspace_robustness) {
     }
 }
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
-std::vector<GenSpec> layouts = {
+const std::vector<GenSpec> layouts = {
     G(),
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
@@ -159,7 +159,9 @@ std::vector<GenSpec> layouts = {
 TEST(FastValueBuilderFactoryTest, fast_values_can_be_copied) {
     auto factory = FastValueBuilderFactory::get();
     for (const auto &layout: layouts) {
-        for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+                                   layout.cpy().cells_double().gen() })
+        {
             std::unique_ptr<Value> value = value_from_spec(expect, factory);
             std::unique_ptr<Value> copy = factory.copy(*value);
             TensorSpec actual = spec_from_value(*copy);
diff --git a/eval/src/tests/eval/simple_value/simple_value_test.cpp b/eval/src/tests/eval/simple_value/simple_value_test.cpp
index e8abb646482..202b2056e74 100644
--- a/eval/src/tests/eval/simple_value/simple_value_test.cpp
+++ b/eval/src/tests/eval/simple_value/simple_value_test.cpp
@@ -23,9 +23,9 @@ using Handle = SharedStringRepo::Handle;
 
 vespalib::string as_str(string_id label) { return Handle::string_from_id(label); }
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
-std::vector<GenSpec> layouts = {
+const std::vector<GenSpec> layouts = {
     G(),
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
@@ -37,7 +37,7 @@ std::vector<GenSpec> layouts = {
     G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
 };
 
-std::vector<GenSpec> join_layouts = {
+const std::vector<GenSpec> join_layouts = {
     G(),                                                 G(),
     G().idx("x", 5),                                     G().idx("x", 5),
     G().idx("x", 5),                                     G().idx("y", 5),
@@ -67,7 +67,9 @@ TensorSpec simple_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_
 
 TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
     for (const auto &layout: layouts) {
-        for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+                                   layout.cpy().cells_double().gen() })
+        {
             std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
             TensorSpec actual = spec_from_value(*value);
             EXPECT_EQ(actual, expect);
@@ -77,7 +79,9 @@ TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
 
 TEST(SimpleValueTest, simple_values_can_be_copied) {
     for (const auto &layout: layouts) {
-        for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+                                   layout.cpy().cells_double().gen() })
+        {
             std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
             std::unique_ptr<Value> copy = SimpleValueBuilderFactory::get().copy(*value);
             TensorSpec actual = spec_from_value(*copy);
@@ -124,10 +128,14 @@ GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
 TEST(SimpleValueTest, new_generic_join_works_for_simple_values) {
     ASSERT_TRUE((join_layouts.size() % 2) == 0);
     for (size_t i = 0; i < join_layouts.size(); i += 2) {
-        const auto l = join_layouts[i].seq(N_16ths);
-        const auto r = join_layouts[i + 1].seq(N_16ths);
-        for (TensorSpec lhs : { l.gen(), l.cpy().cells_double().gen() }) {
-            for (TensorSpec rhs : { r.gen(), r.cpy().cells_double().gen() }) {
+        const auto l = join_layouts[i].cpy().seq(N_16ths);
+        const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
+        for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+                                l.cpy().cells_double().gen() })
+        {
+            for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+                                    r.cpy().cells_double().gen() })
+            {
                 for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
                     SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
                     auto expect = ReferenceOperations::join(lhs, rhs, fun);
diff --git a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
index 18198a75f7d..dd21b663fa9 100644
--- a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
+++ b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
@@ -23,12 +23,10 @@ using namespace vespalib::eval::tensor_function;
 const ValueBuilderFactory &simple_factory = SimpleValueBuilderFactory::get();
 const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
 
-TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
-
 EvalFixture::ParamRepo make_params() {
     return EvalFixture::ParamRepo()
-        .add("a", spec(1))
-        .add("b", spec(2))
+        .add("a", GenSpec().seq_bias(1).gen())
+        .add("b", GenSpec().seq_bias(2).gen())
         .add("x3", GenSpec().idx("x", 3).gen())
         .add("x3f", GenSpec().idx("x", 3).cells_float().gen())
         .add("x3m", GenSpec().map("x", 3).gen())
diff --git a/eval/src/tests/eval/value_codec/value_codec_test.cpp b/eval/src/tests/eval/value_codec/value_codec_test.cpp
index acce0f5667f..110b58c27de 100644
--- a/eval/src/tests/eval/value_codec/value_codec_test.cpp
+++ b/eval/src/tests/eval/value_codec/value_codec_test.cpp
@@ -15,9 +15,9 @@ using namespace vespalib::eval::test;
 
 const ValueBuilderFactory &factory = SimpleValueBuilderFactory::get();
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
-std::vector<GenSpec> layouts = {
+const std::vector<GenSpec> layouts = {
     G(),
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
@@ -32,7 +32,9 @@ std::vector<GenSpec> layouts = {
 
 TEST(ValueCodecTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
     for (const auto &layout: layouts) {
-        for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+                                   layout.cpy().cells_double().gen() })
+        {
             std::unique_ptr<Value> value = value_from_spec(expect, factory);
             TensorSpec actual = spec_from_value(*value);
             EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
index 88bf7f26b11..2fe49a12a61 100644
--- a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
+++ b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
@@ -18,11 +18,11 @@ using namespace vespalib::eval::test;
 
 using vespalib::make_string_short::fmt;
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
 GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
 
-std::vector<GenSpec> concat_layouts = {
+const std::vector<GenSpec> concat_layouts = {
     G(),                                                 G(),
     G(),                                                 G().idx("y", 5),
     G().idx("y", 5),                                     G(),
@@ -76,10 +76,14 @@ TensorSpec perform_generic_concat(const TensorSpec &a, const TensorSpec &b,
 void test_generic_concat_with(const ValueBuilderFactory &factory) {
     ASSERT_TRUE((concat_layouts.size() % 2) == 0);
     for (size_t i = 0; i < concat_layouts.size(); i += 2) {
-        const auto &l = concat_layouts[i];
-        const auto &r = concat_layouts[i+1].seq(N_16ths);
-        for (TensorSpec lhs : { l.gen(), l.cpy().cells_double().gen() }) {
-            for (TensorSpec rhs : { r.gen(), r.cpy().cells_double().gen() }) {
+        const auto l = concat_layouts[i];
+        const auto r = concat_layouts[i+1].cpy().seq(N_16ths);
+        for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+                                l.cpy().cells_double().gen() })
+        {
+            for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+                                    r.cpy().cells_double().gen() })
+            {
                 SCOPED_TRACE(fmt("\n===\nin LHS: %s\nin RHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
                 auto actual = perform_generic_concat(lhs, rhs, "y", factory);
                 auto expect = ReferenceOperations::concat(lhs, rhs, "y");
diff --git a/eval/src/tests/instruction/generic_create/generic_create_test.cpp b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
index 2dce4509571..fcf4618d592 100644
--- a/eval/src/tests/instruction/generic_create/generic_create_test.cpp
+++ b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
@@ -19,9 +19,9 @@ using namespace vespalib::eval::test;
 
 using vespalib::make_string_short::fmt;
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
-std::vector<GenSpec> create_layouts = {
+const std::vector<GenSpec> create_layouts = {
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
     G().idx("x", 3).idx("y", 5).idx("z", 7),
@@ -91,7 +91,9 @@ TensorSpec perform_generic_create(const TensorSpec &a, const ValueBuilderFactory
 
 void test_generic_create_with(const ValueBuilderFactory &factory) {
     for (const auto &layout : create_layouts) {
-        for (TensorSpec full : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec full : { layout.cpy().cells_float().gen(),
+                                 layout.cpy().cells_double().gen() })
+        {
             auto actual = perform_generic_create(full, factory);
             auto expect = reference_create(full).normalize();
             EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_join/generic_join_test.cpp b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
index e8af8f8ad38..cce3dc352e7 100644
--- a/eval/src/tests/instruction/generic_join/generic_join_test.cpp
+++ b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
@@ -19,9 +19,9 @@ using vespalib::make_string_short::fmt;
 
 GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
 
-GenSpec G() { return GenSpec().cells_float().seq(N_16ths); }
+GenSpec G() { return GenSpec().seq(N_16ths); }
 
-std::vector<GenSpec> join_layouts = {
+const std::vector<GenSpec> join_layouts = {
     G(),                                                 G(),
     G().idx("x", 5),                                     G().idx("x", 5),
     G().idx("x", 5),                                     G().idx("y", 5),
@@ -107,8 +107,12 @@ TEST(GenericJoinTest, generic_join_works_for_simple_and_fast_values) {
     for (size_t i = 0; i < join_layouts.size(); i += 2) {
         const auto &l = join_layouts[i];
         const auto &r = join_layouts[i+1];
-        for (TensorSpec lhs : { l.gen(), l.cpy().cells_double().gen() }) {
-            for (TensorSpec rhs : { r.gen(), r.cpy().cells_double().gen() }) {
+        for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+                                l.cpy().cells_double().gen() })
+        {
+            for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+                                    r.cpy().cells_double().gen() })
+            {
                 for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
                     SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
                     auto expect = ReferenceOperations::join(lhs, rhs, fun);
diff --git a/eval/src/tests/instruction/generic_map/generic_map_test.cpp b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
index d8203ea135c..70541609542 100644
--- a/eval/src/tests/instruction/generic_map/generic_map_test.cpp
+++ b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
@@ -19,9 +19,9 @@ using vespalib::make_string_short::fmt;
 
 GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
 
-GenSpec G() { return GenSpec().cells_float().seq(N_16ths); }
+GenSpec G() { return GenSpec().seq(N_16ths); }
 
-std::vector<GenSpec> map_layouts = {
+const std::vector<GenSpec> map_layouts = {
     G(),
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
@@ -43,7 +43,9 @@ TensorSpec perform_generic_map(const TensorSpec &a, map_fun_t func, const ValueB
 
 void test_generic_map_with(const ValueBuilderFactory &factory) {
     for (const auto &layout : map_layouts) {
-        for (TensorSpec lhs : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec lhs : { layout.cpy().cells_float().gen(),
+                                layout.cpy().cells_double().gen() })
+        {
             for (auto func : {operation::Floor::f, operation::Fabs::f, operation::Square::f, operation::Inv::f}) {
                 SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
                 auto expect = ReferenceOperations::map(lhs, func);
diff --git a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
index 2554e018cf0..e944965a2c5 100644
--- a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
+++ b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
@@ -18,11 +18,11 @@ using namespace vespalib::eval::test;
 
 using vespalib::make_string_short::fmt;
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
 GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
 
-std::vector<GenSpec> merge_layouts = {
+const std::vector<GenSpec> merge_layouts = {
     G(),                                                 G(),
     G().idx("x", 5),                                     G().idx("x", 5),
     G().idx("x", 3).idx("y", 5),                         G().idx("x", 3).idx("y", 5),
@@ -48,10 +48,14 @@ TensorSpec perform_generic_merge(const TensorSpec &a, const TensorSpec &b, join_
 void test_generic_merge_with(const ValueBuilderFactory &factory) {
     ASSERT_TRUE((merge_layouts.size() % 2) == 0);
     for (size_t i = 0; i < merge_layouts.size(); i += 2) {
-        const auto &l = merge_layouts[i];
-        const auto &r = merge_layouts[i+1].seq(N_16ths);
-        for (TensorSpec lhs : { l.gen(), l.cpy().cells_double().gen() }) {
-            for (TensorSpec rhs : { r.gen(), r.cpy().cells_double().gen() }) {
+        const auto l = merge_layouts[i];
+        const auto r = merge_layouts[i+1].cpy().seq(N_16ths);
+        for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+                                l.cpy().cells_double().gen() })
+        {
+            for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+                                    r.cpy().cells_double().gen() })
+            {
                 SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
                 for (auto fun: {operation::Add::f, operation::Mul::f, operation::Sub::f, operation::Max::f}) {
                     auto expect = ReferenceOperations::merge(lhs, rhs, fun);
diff --git a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
index 092a91711ba..c80e8a1296b 100644
--- a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
+++ b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
@@ -22,9 +22,9 @@ using namespace vespalib::eval::test;
 
 using vespalib::make_string_short::fmt;
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
-std::vector<GenSpec> peek_layouts = {
+const std::vector<GenSpec> peek_layouts = {
     G().idx("x", 4),
    G().idx("x", 4).idx("y", 5),
     G().idx("x", 4).idx("y", 5).idx("z", 3),
@@ -194,7 +194,9 @@ void fill_dims_and_check(const TensorSpec &input,
 
 void test_generic_peek_with(const ValueBuilderFactory &factory) {
     for (const auto &layout : peek_layouts) {
-        for (TensorSpec input : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec input : { layout.cpy().cells_float().gen(),
+                                  layout.cpy().cells_double().gen() })
+        {
             ValueType input_type = ValueType::from_spec(input.type());
             const auto &dims = input_type.dimensions();
             PeekSpec spec;
diff --git a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
index 1e9ce85d7e1..56277cf4035 100644
--- a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
+++ b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
@@ -20,9 +20,9 @@ using vespalib::make_string_short::fmt;
 
 GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
 
-GenSpec G() { return GenSpec().cells_float().seq(N_16ths); }
+GenSpec G() { return GenSpec().seq(N_16ths); }
 
-std::vector<GenSpec> layouts = {
+const std::vector<GenSpec> layouts = {
     G(),
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
@@ -70,7 +70,9 @@ TEST(GenericReduceTest, sparse_reduce_plan_can_be_created) {
 
 void test_generic_reduce_with(const ValueBuilderFactory &factory) {
     for (const auto &layout: layouts) {
-        for (TensorSpec input : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec input : { layout.cpy().cells_float().gen(),
+                                  layout.cpy().cells_double().gen() })
+        {
             SCOPED_TRACE(fmt("tensor type: %s, num_cells: %zu", input.type().c_str(), input.cells().size()));
             for (Aggr aggr: {Aggr::SUM, Aggr::AVG, Aggr::MIN, Aggr::MAX}) {
                 SCOPED_TRACE(fmt("aggregator: %s", AggrNames::name_of(aggr)->c_str()));
diff --git a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
index 430e417e288..f0c2241202e 100644
--- a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
+++ b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
@@ -17,9 +17,9 @@ using namespace vespalib::eval::test;
 
 using vespalib::make_string_short::fmt;
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
-std::vector<GenSpec> rename_layouts = {
+const std::vector<GenSpec> rename_layouts = {
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
     G().idx("x", 3).idx("y", 5).idx("z", 7),
@@ -110,7 +110,9 @@ TensorSpec perform_generic_rename(const TensorSpec &a,
 
 void test_generic_rename_with(const ValueBuilderFactory &factory) {
     for (const auto &layout : rename_layouts) {
-        for (TensorSpec lhs : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec lhs : { layout.cpy().cells_float().gen(),
+                                layout.cpy().cells_double().gen() })
+        {
             ValueType lhs_type = ValueType::from_spec(lhs.type());
             for (const auto & from_to : rename_from_to) {
                 ValueType renamed_type = lhs_type.rename(from_to.from, from_to.to);
diff --git a/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp b/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
index 2d943aa569e..e6a256a493b 100644
--- a/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
+++ b/eval/src/tests/instruction/join_with_number/join_with_number_function_test.cpp
@@ -33,12 +33,10 @@ std::ostream &operator<<(std::ostream &os, Primary primary)
 
 const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
 
-TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
-
 EvalFixture::ParamRepo make_params() {
     auto repo = EvalFixture::ParamRepo()
-        .add("a", spec(1.5))
-        .add("number", spec(2.5))
+        .add("a", GenSpec().seq_bias(1.5).gen())
+        .add("number", GenSpec().seq_bias(2.5).gen())
         .add("dense", GenSpec().idx("y", 5).gen())
         .add_variants("x3y5", GenSpec().idx("x", 3).idx("y", 5))
        .add_variants("mixed", GenSpec().map("x", {"a"}).idx("y", 5).map("z", {"d","e"}))
diff --git a/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp b/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
index 45e885fac33..3a7d1368f03 100644
--- a/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_map_function/mixed_map_function_test.cpp
@@ -13,19 +13,13 @@ using namespace vespalib::eval::tensor_function;
 
 const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
 
-TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
-TensorSpec sparse_spec = GenSpec().map("x", {"a"}).gen();
-TensorSpec mixed_spec = GenSpec().map("x", {"a"}).idx("y", 5).gen();
-
 EvalFixture::ParamRepo make_params() {
     return EvalFixture::ParamRepo()
-        .add("a", spec(1.5))
-        .add("b", spec(2.5))
-        .add("sparse", sparse_spec)
-        .add("mixed", mixed_spec)
-        .add_mutable("@sparse", sparse_spec)
-        .add_mutable("@mixed", mixed_spec)
-        .add_matrix("x", 5, "y", 3);
+        .add("a", GenSpec().seq_bias(1.5).gen())
+        .add("b", GenSpec().seq_bias(2.5).gen())
+        .add_variants("sparse", GenSpec().map("x", {"a"}))
+        .add_variants("mixed", GenSpec().map("x", {"a"}).idx("y", 5))
+        .add_variants("x5y3", GenSpec().idx("x", 5).idx("y", 3));
 }
 
 EvalFixture::ParamRepo param_repo = make_params();
@@ -57,12 +51,12 @@ void verify_not_optimized(const vespalib::string &expr) {
 
 TEST(MapTest, dense_map_is_optimized) {
     verify_optimized("map(x5y3,f(x)(x+10))", false);
-    verify_optimized("map(x5y3f,f(x)(x+10))", false);
+    verify_optimized("map(x5y3_f,f(x)(x+10))", false);
 }
 
 TEST(MapTest, simple_dense_map_can_be_inplace) {
     verify_optimized("map(@x5y3,f(x)(x+10))", true);
-    verify_optimized("map(@x5y3f,f(x)(x+10))", true);
+    verify_optimized("map(@x5y3_f,f(x)(x+10))", true);
 }
 
 TEST(MapTest, scalar_map_is_not_optimized) {
diff --git a/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp b/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
index 02e13fcbef3..105ae22e06e 100644
--- a/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
+++ b/eval/src/tests/instruction/mixed_simple_join_function/mixed_simple_join_function_test.cpp
@@ -43,12 +43,10 @@ std::ostream &operator<<(std::ostream &os, Overlap overlap)
 
 const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
 
-TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
-
 EvalFixture::ParamRepo make_params() {
     return EvalFixture::ParamRepo()
-        .add("a", spec(1.5))
-        .add("b", spec(2.5))
+        .add("a", GenSpec().seq_bias(1.5).gen())
+        .add("b", GenSpec().seq_bias(2.5).gen())
         .add("sparse", GenSpec().map("x", {"a", "b", "c"}).gen())
         .add("mixed", GenSpec().map("x", {"a", "b", "c"}).idx("y", 5).idx("z", 3).gen())
         .add("empty_mixed", GenSpec().map("x", {}).idx("y", 5).idx("z", 3).gen())
diff --git a/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp b/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
index cceb18bfea6..fe32a59bb78 100644
--- a/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
+++ b/eval/src/tests/instruction/pow_as_map_optimizer/pow_as_map_optimizer_test.cpp
@@ -14,12 +14,10 @@ using namespace vespalib::eval;
 
 const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
 
-TensorSpec spec(double v) { return TensorSpec("double").add({}, v); }
-
 EvalFixture::ParamRepo make_params() {
     return EvalFixture::ParamRepo()
-        .add("a", spec(1.5))
-        .add("b", spec(2.5))
+        .add("a", GenSpec().seq_bias(1.5).gen())
+        .add("b", GenSpec().seq_bias(2.5).gen())
         .add("sparse", GenSpec().map("x", {"a","b"}).gen())
         .add("mixed", GenSpec().map("x", {"a"}).idx("y", 5).gen())
         .add_variants("x5y3", GenSpec().idx("x", 5).idx("y", 3));
diff --git a/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp b/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
index 616649e914b..1013c98b424 100644
--- a/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
+++ b/eval/src/tests/instruction/sum_max_dot_product_function/sum_max_dot_product_function_test.cpp
@@ -52,16 +52,16 @@ GenSpec DocGen(size_t y_size, size_t z_size) { return GenSpec().cells_float().ma
 
 GenSpec Que() { return QueGen(3, 5); }
 GenSpec Doc() { return DocGen(6, 5); }
 
-GenSpec QueX0() { return QueGen(0, 5); }
-GenSpec DocX0() { return DocGen(0, 5); }
+GenSpec QueEmptyX() { return QueGen(0, 5); }
+GenSpec DocEmptyX() { return DocGen(0, 5); }
 
-GenSpec QueZ1() { return QueGen(3, 1); }
-GenSpec DocZ1() { return DocGen(6, 1); }
+GenSpec QueTrivialZ() { return QueGen(3, 1); }
+GenSpec DocTrivialZ() { return DocGen(6, 1); }
 
 auto query = Que().gen();
 auto document = Doc().gen();
-auto empty_query = QueX0().gen();
-auto empty_document = DocX0().gen();
+auto empty_query = QueEmptyX().gen();
+auto empty_document = DocEmptyX().gen();
 
 TEST(SumMaxDotProduct, expressions_can_be_optimized) {
@@ -81,8 +81,8 @@ TEST(SumMaxDotProduct, double_cells_are_not_optimized) {
 }
 
 TEST(SumMaxDotProduct, trivial_dot_product_is_not_optimized) {
-    auto trivial_query = QueZ1().gen();
-    auto trivial_document = DocZ1().gen();
+    auto trivial_query = QueTrivialZ().gen();
+    auto trivial_document = DocTrivialZ().gen();
     assert_not_optimized(trivial_query, trivial_document);
 }
 
diff --git a/eval/src/tests/streamed/value/streamed_value_test.cpp b/eval/src/tests/streamed/value/streamed_value_test.cpp
index 51098427295..efa83351f28 100644
--- a/eval/src/tests/streamed/value/streamed_value_test.cpp
+++ b/eval/src/tests/streamed/value/streamed_value_test.cpp
@@ -23,9 +23,9 @@ using Handle = SharedStringRepo::Handle;
 
 vespalib::string as_str(string_id label) { return Handle::string_from_id(label); }
 
-GenSpec G() { return GenSpec().cells_float(); }
+GenSpec G() { return GenSpec(); }
 
-std::vector<GenSpec> layouts = {
+const std::vector<GenSpec> layouts = {
     G(),
     G().idx("x", 3),
     G().idx("x", 3).idx("y", 5),
@@ -37,7 +37,7 @@ std::vector<GenSpec> layouts = {
     G().map("x", {"a","b","c"}).idx("y", 5).map("z", {"i","j","k","l"})
 };
 
-std::vector<GenSpec> join_layouts = {
+const std::vector<GenSpec> join_layouts = {
     G(),                                                 G(),
     G().idx("x", 5),                                     G().idx("x", 5),
     G().idx("x", 5),                                     G().idx("y", 5),
@@ -67,7 +67,9 @@ TensorSpec streamed_value_join(const TensorSpec &a, const TensorSpec &b, join_fu
 
 TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec) {
     for (const auto &layout: layouts) {
-        for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+                                   layout.cpy().cells_double().gen() })
+        {
             std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
             TensorSpec actual = spec_from_value(*value);
             EXPECT_EQ(actual, expect);
@@ -77,7 +79,9 @@ TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec
 
 TEST(StreamedValueTest, streamed_values_can_be_copied) {
     for (const auto &layout: layouts) {
-        for (TensorSpec expect : { layout.gen(), layout.cpy().cells_double().gen() }) {
+        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
+                                   layout.cpy().cells_double().gen() })
+        {
             std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
             std::unique_ptr<Value> copy = StreamedValueBuilderFactory::get().copy(*value);
             TensorSpec actual = spec_from_value(*copy);
@@ -124,10 +128,14 @@ GenSpec::seq_t N_16ths = [] (size_t i) { return (i + 1.0) / 16.0; };
 TEST(StreamedValueTest, new_generic_join_works_for_streamed_values) {
     ASSERT_TRUE((join_layouts.size() % 2) == 0);
     for (size_t i = 0; i < join_layouts.size(); i += 2) {
-        const auto l = join_layouts[i].seq(N_16ths);
-        const auto r = join_layouts[i + 1].seq(N_16ths);
-        for (TensorSpec lhs : { l.gen(), l.cpy().cells_double().gen() }) {
-            for (TensorSpec rhs : { r.gen(), r.cpy().cells_double().gen() }) {
+        const auto l = join_layouts[i].cpy().seq(N_16ths);
+        const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
+        for (TensorSpec lhs : { l.cpy().cells_float().gen(),
+                                l.cpy().cells_double().gen() })
+        {
+            for (TensorSpec rhs : { r.cpy().cells_float().gen(),
+                                    r.cpy().cells_double().gen() })
+            {
                 for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Max::f}) {
                     SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
                     auto expect = ReferenceOperations::join(lhs, rhs, fun);
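Note: the hunks above all converge on the same loop shape described in the commit message. A condensed sketch of that pattern follows; it is a fragment meant to live inside one of the test files above (it assumes the headers, using-declarations, the shared `layouts` vector, and the GenSpec/TensorSpec helpers already present there), and the function name check_roundtrip_for_all_layouts is a hypothetical placeholder rather than code from the patch.

// Sketch only: shared layouts stay const, every use copies via cpy() before
// picking a cell type, and float/double variants are generated symmetrically.
void check_roundtrip_for_all_layouts() {
    for (const auto &layout : layouts) {
        for (TensorSpec expect : { layout.cpy().cells_float().gen(),
                                   layout.cpy().cells_double().gen() })
        {
            // build a Value from the spec and verify the round-trip
            std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
            EXPECT_EQ(spec_from_value(*value), expect);
        }
    }
}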