From b77dcb0c7b6b174b50a2ff48c938096e4bd8937c Mon Sep 17 00:00:00 2001 From: Håvard Pettersen Date: Mon, 5 Mar 2018 12:47:59 +0000 Subject: test result mutability of optimized operations also clean up vector from doubles test using eval fixture --- .../dense_dot_product_function_test.cpp | 3 +- .../dense_fast_rename_function_test.cpp | 29 +++- .../vector_from_doubles_function_test.cpp | 153 ++++----------------- 3 files changed, 54 insertions(+), 131 deletions(-) (limited to 'eval/src') diff --git a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp index fb48e445180..37f9602565d 100644 --- a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp +++ b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp @@ -131,7 +131,8 @@ void assertOptimized(const vespalib::string &expr) { EvalFixture fixture(prod_engine, expr, param_repo, true); EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo)); auto info = fixture.find_all(); - EXPECT_EQUAL(info.size(), 1u); + ASSERT_EQUAL(info.size(), 1u); + EXPECT_TRUE(info[0]->result_is_mutable()); } void assertNotOptimized(const vespalib::string &expr) { diff --git a/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp b/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp index fab16f1e276..45b38b48481 100644 --- a/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp +++ b/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp @@ -24,17 +24,19 @@ const TensorEngine &prod_engine = DefaultTensorEngine::ref(); EvalFixture::ParamRepo make_params() { return EvalFixture::ParamRepo() .add("x5", spec({x(5)}, N())) + .add_mutable("mut_x5", spec({x(5)}, N())) .add("x5_u", spec({x(5)}, N()), "tensor(x[])") .add("x_m", 
spec({x({"a", "b", "c"})}, N())) .add("x5y3", spec({x(5),y(3)}, N())); } EvalFixture::ParamRepo param_repo = make_params(); -void verify_optimized(const vespalib::string &expr) { - EvalFixture fixture(prod_engine, expr, param_repo, true); +void verify_optimized(const vespalib::string &expr, bool expect_mutable = false) { + EvalFixture fixture(prod_engine, expr, param_repo, true, true); EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo)); auto info = fixture.find_all(); - EXPECT_EQUAL(info.size(), 1u); + ASSERT_EQUAL(info.size(), 1u); + EXPECT_EQUAL(info[0]->result_is_mutable(), expect_mutable); } void verify_not_optimized(const vespalib::string &expr) { @@ -71,4 +73,25 @@ TEST("require that non-dense renames are not optimized") { TEST_DO(verify_not_optimized("rename(x_m,x,y)")); } +TEST("require that renaming a mutable result retains mutability") { + TEST_DO(verify_optimized("rename(mut_x5,x,y)", true)); +} + +TEST("require that child mutability changed under-the-hood is still reflected") { + Stash stash; + const Node &a = inject(ValueType::from_spec("tensor(x[2])"), 0, stash); + const Node &tmp = map(a, operation::Neg::f, stash); // will be mutable + DenseFastRenameFunction my_rename(ValueType::from_spec("tensor(y[2])"), a); + EXPECT_TRUE(!my_rename.result_is_mutable()); + { + std::vector list; + my_rename.push_children(list); + ASSERT_EQUAL(list.size(), 1u); + EXPECT_EQUAL(&(list[0].get().get()), &a); + const TensorFunction::Child &child = list[0]; + child.set(tmp); + } + EXPECT_TRUE(my_rename.result_is_mutable()); +} + TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp b/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp index 0ba9871d672..41d98122e0f 100644 --- a/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp +++ 
b/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp @@ -1,164 +1,63 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -#include -LOG_SETUP("dense_dot_product_function_test"); - #include #include -#include #include #include #include #include #include -#include -#include +#include +#include #include #include using namespace vespalib; using namespace vespalib::eval; +using namespace vespalib::eval::test; using namespace vespalib::tensor; using namespace vespalib::eval::tensor_function; -const TensorEngine &ref_engine = SimpleTensorEngine::ref(); const TensorEngine &prod_engine = DefaultTensorEngine::ref(); -//----------------------------------------------------------------------------- -// verify that optimize() works as expected - -template -bool treeContains(const TensorFunction &expr) { - using Child = TensorFunction::Child; - Child root(expr); - std::vector nodes({root}); - for (size_t i = 0; i < nodes.size(); ++i) { - nodes[i].get().get().push_children(nodes); - } - for (const Child &child : nodes) { - if (as(child.get())) { - return true; - } - } - return false; -} - -const TensorFunction &optimize_fun(const Function &fun, const NodeTypes &node_types, Stash &stash) { - const TensorFunction &plain_fun = make_tensor_function(prod_engine, fun.root(), node_types, stash); - return prod_engine.optimize(plain_fun, stash); +EvalFixture::ParamRepo make_params() { + return EvalFixture::ParamRepo() + .add("a", spec(1.0)) + .add("b", spec(2.0)) + .add("c", spec(3.0)) + .add("d", spec(4.0)) + .add("x5", spec({x(5)}, N())); } +EvalFixture::ParamRepo param_repo = make_params(); -std::vector extract_types(size_t n, const std::vector &input) { - std::vector vec; - for (const TensorSpec &spec : input) { - vec.push_back(ValueType::from_spec(spec.type())); - } - while (vec.size() < n) { - vec.push_back(ValueType::double_type()); +void verify(const vespalib::string &expr, 
size_t expect_optimized_cnt, size_t expect_not_optimized_cnt) { + EvalFixture fixture(prod_engine, expr, param_repo, true); + EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo)); + auto info = fixture.find_all(); + EXPECT_EQUAL(info.size(), expect_optimized_cnt); + for (size_t i = 0; i < info.size(); ++i) { + EXPECT_TRUE(info[i]->result_is_mutable()); } - return vec; + EXPECT_EQUAL(fixture.find_all().size(), expect_not_optimized_cnt); } -struct Context { - Stash stash; - Function function; - std::vector input; - std::vector input_types; - NodeTypes node_types; - const TensorFunction &optimized; - - Context(const vespalib::string &expr, std::vector in) - : stash(), - function(Function::parse(expr)), - input(in), - input_types(extract_types(function.num_params(), input)), - node_types(function, input_types), - optimized(optimize_fun(function, node_types, stash)) - { - EXPECT_EQUAL(actual(), expected()); - } - - ~Context() {} - - struct Params : LazyParams { - std::vector values; - Value &resolve(size_t idx, Stash &) const override { - return *values[idx]; - } - }; - - Params gen_params(const TensorEngine &engine) { - Params p; - for (const TensorSpec &spec : input) { - p.values.emplace_back(engine.from_spec(spec)); - } - while (p.values.size() < function.num_params()) { - double v = 1.0 + p.values.size(); - p.values.emplace_back(std::make_unique(v)); - } - return p; - } - - TensorSpec actual() { - const LazyParams ¶ms = gen_params(prod_engine); - InterpretedFunction prodIfun(prod_engine, optimized); - InterpretedFunction::Context prodIctx(prodIfun); - const Value &result = prodIfun.eval(prodIctx, params); - return prod_engine.to_spec(result); - } - - TensorSpec expected() { - const LazyParams ¶ms = gen_params(ref_engine); - InterpretedFunction refIfun(ref_engine, function, NodeTypes()); - InterpretedFunction::Context refIctx(refIfun); - const Value &result = refIfun.eval(refIctx, params); - return ref_engine.to_spec(result); - } - -}; - 
//----------------------------------------------------------------------------- -void verify_all_optimized(const vespalib::string &expr) { - Context context(expr, {}); - EXPECT_TRUE(treeContains(context.optimized)); - EXPECT_FALSE(treeContains(context.optimized)); -} - TEST("require that multiple concats are optimized") { - TEST_DO(verify_all_optimized("concat(a,b,x)")); - TEST_DO(verify_all_optimized("concat(a,concat(b,concat(c,d,x),x),x)")); - TEST_DO(verify_all_optimized("concat(concat(concat(a,b,x),c,x),d,x)")); - TEST_DO(verify_all_optimized("concat(concat(a,b,x),concat(c,d,x),x)")); -} - -//----------------------------------------------------------------------------- - -void verify_some_optimized(const vespalib::string &expr) { - Context context(expr, {}); - EXPECT_TRUE(treeContains(context.optimized)); - EXPECT_TRUE(treeContains(context.optimized)); + TEST_DO(verify("concat(a,b,x)", 1, 0)); + TEST_DO(verify("concat(a,concat(b,concat(c,d,x),x),x)", 1, 0)); + TEST_DO(verify("concat(concat(concat(a,b,x),c,x),d,x)", 1, 0)); + TEST_DO(verify("concat(concat(a,b,x),concat(c,d,x),x)", 1, 0)); } TEST("require that concat along different dimension is not optimized") { - TEST_DO(verify_some_optimized("concat(concat(a,b,x),concat(c,d,x),y)")); + TEST_DO(verify("concat(concat(a,b,x),concat(c,d,x),y)", 2, 1)); } -//----------------------------------------------------------------------------- - TEST("require that concat of vector and double is not optimized") { - TensorSpec vecspec = TensorSpec("tensor(x[3])") - .add({{"x", 0}}, 7.0) - .add({{"x", 1}}, 11.0) - .add({{"x", 2}}, 13.0); - TensorSpec dblspec = TensorSpec("double") - .add({}, 19.0); - Context context("concat(a,b,x)", {vecspec, dblspec}); - EXPECT_TRUE(treeContains(context.optimized)); - EXPECT_FALSE(treeContains(context.optimized)); + TEST_DO(verify("concat(a,x5,x)", 0, 1)); + TEST_DO(verify("concat(x5,b,x)", 0, 1)); } -//----------------------------------------------------------------------------- - 
TEST_MAIN() { TEST_RUN_ALL(); } -- cgit v1.2.3