diff options
author | Arne H Juul <arnej27959@users.noreply.github.com> | 2018-03-05 14:25:21 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2018-03-05 14:25:21 +0100 |
commit | 1081ee11b211b66a54a5983f7e5057e14190815d (patch) | |
tree | 3660f993cc5d2a252d64d5bb51390d76a06bcc02 | |
parent | 2723c52a4c6165fc68e0e2f0996f45e619b62426 (diff) | |
parent | b77dcb0c7b6b174b50a2ff48c938096e4bd8937c (diff) |
Merge pull request #5205 from vespa-engine/arnej/ensure-more-optimization
Arnej/ensure more optimization
14 files changed, 106 insertions, 149 deletions
diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp index 0308b24e742..41037fa06ef 100644 --- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp +++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp @@ -147,6 +147,7 @@ TEST("require that const_value works") { Value::UP my_const = ctx.make_tensor_matrix(); Value::UP expect = ctx.make_tensor_matrix(); const auto &fun = const_value(*my_const, ctx.stash); + EXPECT_TRUE(!fun.result_is_mutable()); EXPECT_EQUAL(expect->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect, ctx.eval(prog))); @@ -157,6 +158,7 @@ TEST("require that tensor injection works") { size_t a_id = ctx.add_tensor(ctx.make_tensor_matrix()); Value::UP expect = ctx.make_tensor_matrix(); const auto &fun = inject(ValueType::from_spec("tensor(x[2],y[2])"), a_id, ctx.stash); + EXPECT_TRUE(!fun.result_is_mutable()); EXPECT_EQUAL(expect->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect, ctx.eval(prog))); @@ -167,6 +169,7 @@ TEST("require that partial tensor reduction works") { size_t a_id = ctx.add_tensor(ctx.make_tensor_reduce_input()); Value::UP expect = ctx.make_tensor_reduce_y_output(); const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {"y"}, ctx.stash); + EXPECT_TRUE(fun.result_is_mutable()); EXPECT_EQUAL(expect->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect, ctx.eval(prog))); @@ -176,6 +179,7 @@ TEST("require that full tensor reduction works") { EvalCtx ctx(SimpleTensorEngine::ref()); size_t a_id = ctx.add_tensor(ctx.make_tensor_reduce_input()); const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {}, ctx.stash); + EXPECT_TRUE(fun.result_is_mutable()); EXPECT_EQUAL(ValueType::from_spec("double"), 
fun.result_type()); const auto &prog = ctx.compile(fun); const Value &result = ctx.eval(prog); @@ -188,6 +192,7 @@ TEST("require that tensor map works") { size_t a_id = ctx.add_tensor(ctx.make_tensor_map_input()); Value::UP expect = ctx.make_tensor_map_output(); const auto &fun = map(inject(ValueType::from_spec("tensor(x{},y{})"), a_id, ctx.stash), operation::Neg::f, ctx.stash); + EXPECT_TRUE(fun.result_is_mutable()); EXPECT_EQUAL(expect->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect, ctx.eval(prog))); @@ -201,6 +206,7 @@ TEST("require that tensor join works") { const auto &fun = join(inject(ValueType::from_spec("tensor(x{},y{})"), a_id, ctx.stash), inject(ValueType::from_spec("tensor(y{},z{})"), b_id, ctx.stash), operation::Mul::f, ctx.stash); + EXPECT_TRUE(fun.result_is_mutable()); EXPECT_EQUAL(expect->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect, ctx.eval(prog))); @@ -214,6 +220,7 @@ TEST("require that tensor concat works") { const auto &fun = concat(inject(ValueType::from_spec("tensor(x[2])"), a_id, ctx.stash), inject(ValueType::from_spec("tensor(x[2])"), b_id, ctx.stash), "y", ctx.stash); + EXPECT_TRUE(fun.result_is_mutable()); EXPECT_EQUAL(expect->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect, ctx.eval(prog))); @@ -225,6 +232,7 @@ TEST("require that tensor rename works") { Value::UP expect = ctx.make_tensor_matrix_renamed(); const auto &fun = rename(inject(ValueType::from_spec("tensor(x[2],y[2])"), a_id, ctx.stash), {"x"}, {"z"}, ctx.stash); + EXPECT_TRUE(fun.result_is_mutable()); EXPECT_EQUAL(expect->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect, ctx.eval(prog))); @@ -240,6 +248,7 @@ TEST("require that if_node works") { const auto &fun = if_node(inject(ValueType::double_type(), a_id, ctx.stash), inject(ValueType::from_spec("tensor(x[2])"), b_id, ctx.stash), 
inject(ValueType::from_spec("tensor(x[2])"), c_id, ctx.stash), ctx.stash); + EXPECT_TRUE(!fun.result_is_mutable()); EXPECT_EQUAL(expect_true->type(), fun.result_type()); const auto &prog = ctx.compile(fun); TEST_DO(verify_equal(*expect_true, ctx.eval(prog))); @@ -247,6 +256,22 @@ TEST("require that if_node works") { TEST_DO(verify_equal(*expect_false, ctx.eval(prog))); } +TEST("require that if_node result is mutable only when both children produce mutable results") { + Stash stash; + const Node &cond = inject(DoubleValue::double_type(), 0, stash); + const Node &a = inject(ValueType::from_spec("tensor(x[2])"), 0, stash); + const Node &b = inject(ValueType::from_spec("tensor(x[3])"), 0, stash); + const Node &tmp = concat(a, b, "x", stash); // will be mutable + const Node &if_con_con = if_node(cond, a, b, stash); + const Node &if_mut_con = if_node(cond, tmp, b, stash); + const Node &if_con_mut = if_node(cond, a, tmp, stash); + const Node &if_mut_mut = if_node(cond, tmp, tmp, stash); + EXPECT_TRUE(!if_con_con.result_is_mutable()); + EXPECT_TRUE(!if_mut_con.result_is_mutable()); + EXPECT_TRUE(!if_con_mut.result_is_mutable()); + EXPECT_TRUE(if_mut_mut.result_is_mutable()); +} + TEST("require that if_node gets expected result type") { Stash stash; const Node &a = inject(DoubleValue::double_type(), 0, stash); diff --git a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp index fb48e445180..37f9602565d 100644 --- a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp +++ b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp @@ -131,7 +131,8 @@ void assertOptimized(const vespalib::string &expr) { EvalFixture fixture(prod_engine, expr, param_repo, true); EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo)); auto info = fixture.find_all<DenseDotProductFunction>(); - 
EXPECT_EQUAL(info.size(), 1u); + ASSERT_EQUAL(info.size(), 1u); + EXPECT_TRUE(info[0]->result_is_mutable()); } void assertNotOptimized(const vespalib::string &expr) { diff --git a/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp b/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp index fab16f1e276..45b38b48481 100644 --- a/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp +++ b/eval/src/tests/tensor/dense_fast_rename_function/dense_fast_rename_function_test.cpp @@ -24,17 +24,19 @@ const TensorEngine &prod_engine = DefaultTensorEngine::ref(); EvalFixture::ParamRepo make_params() { return EvalFixture::ParamRepo() .add("x5", spec({x(5)}, N())) + .add_mutable("mut_x5", spec({x(5)}, N())) .add("x5_u", spec({x(5)}, N()), "tensor(x[])") .add("x_m", spec({x({"a", "b", "c"})}, N())) .add("x5y3", spec({x(5),y(3)}, N())); } EvalFixture::ParamRepo param_repo = make_params(); -void verify_optimized(const vespalib::string &expr) { - EvalFixture fixture(prod_engine, expr, param_repo, true); +void verify_optimized(const vespalib::string &expr, bool expect_mutable = false) { + EvalFixture fixture(prod_engine, expr, param_repo, true, true); EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo)); auto info = fixture.find_all<DenseFastRenameFunction>(); - EXPECT_EQUAL(info.size(), 1u); + ASSERT_EQUAL(info.size(), 1u); + EXPECT_EQUAL(info[0]->result_is_mutable(), expect_mutable); } void verify_not_optimized(const vespalib::string &expr) { @@ -71,4 +73,25 @@ TEST("require that non-dense renames are not optimized") { TEST_DO(verify_not_optimized("rename(x_m,x,y)")); } +TEST("require that renaming a mutable result retains mutability") { + TEST_DO(verify_optimized("rename(mut_x5,x,y)", true)); +} + +TEST("require that child mutability changed under-the-hood is still reflected") { + Stash stash; + const Node &a = inject(ValueType::from_spec("tensor(x[2])"), 0, stash); + 
const Node &tmp = map(a, operation::Neg::f, stash); // will be mutable + DenseFastRenameFunction my_rename(ValueType::from_spec("tensor(y[2])"), a); + EXPECT_TRUE(!my_rename.result_is_mutable()); + { + std::vector<TensorFunction::Child::CREF> list; + my_rename.push_children(list); + ASSERT_EQUAL(list.size(), 1u); + EXPECT_EQUAL(&(list[0].get().get()), &a); + const TensorFunction::Child &child = list[0]; + child.set(tmp); + } + EXPECT_TRUE(my_rename.result_is_mutable()); +} + TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp b/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp index 0ba9871d672..41d98122e0f 100644 --- a/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp +++ b/eval/src/tests/tensor/vector_from_doubles_function/vector_from_doubles_function_test.cpp @@ -1,164 +1,63 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include <vespa/log/log.h> -LOG_SETUP("dense_dot_product_function_test"); - #include <vespa/vespalib/testkit/test_kit.h> #include <vespa/eval/eval/tensor_function.h> -#include <vespa/eval/eval/operation.h> #include <vespa/eval/eval/simple_tensor.h> #include <vespa/eval/eval/simple_tensor_engine.h> #include <vespa/eval/tensor/default_tensor_engine.h> #include <vespa/eval/tensor/dense/vector_from_doubles_function.h> #include <vespa/eval/tensor/dense/dense_tensor.h> -#include <vespa/eval/tensor/dense/dense_tensor_builder.h> -#include <vespa/eval/tensor/dense/dense_tensor_view.h> +#include <vespa/eval/eval/test/tensor_model.hpp> +#include <vespa/eval/eval/test/eval_fixture.h> #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/util/stash.h> using namespace vespalib; using namespace vespalib::eval; +using namespace vespalib::eval::test; using namespace vespalib::tensor; using namespace vespalib::eval::tensor_function; -const TensorEngine &ref_engine = SimpleTensorEngine::ref(); const TensorEngine &prod_engine = DefaultTensorEngine::ref(); -//----------------------------------------------------------------------------- -// verify that optimize() works as expected - -template<typename OPT> -bool treeContains(const TensorFunction &expr) { - using Child = TensorFunction::Child; - Child root(expr); - std::vector<Child::CREF> nodes({root}); - for (size_t i = 0; i < nodes.size(); ++i) { - nodes[i].get().get().push_children(nodes); - } - for (const Child &child : nodes) { - if (as<OPT>(child.get())) { - return true; - } - } - return false; -} - -const TensorFunction &optimize_fun(const Function &fun, const NodeTypes &node_types, Stash &stash) { - const TensorFunction &plain_fun = make_tensor_function(prod_engine, fun.root(), node_types, stash); - return prod_engine.optimize(plain_fun, stash); +EvalFixture::ParamRepo make_params() { + return EvalFixture::ParamRepo() + .add("a", spec(1.0)) + .add("b", spec(2.0)) + .add("c", spec(3.0)) + .add("d", spec(4.0)) + 
.add("x5", spec({x(5)}, N())); } +EvalFixture::ParamRepo param_repo = make_params(); -std::vector<ValueType> extract_types(size_t n, const std::vector<TensorSpec> &input) { - std::vector<ValueType> vec; - for (const TensorSpec &spec : input) { - vec.push_back(ValueType::from_spec(spec.type())); - } - while (vec.size() < n) { - vec.push_back(ValueType::double_type()); +void verify(const vespalib::string &expr, size_t expect_optimized_cnt, size_t expect_not_optimized_cnt) { + EvalFixture fixture(prod_engine, expr, param_repo, true); + EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo)); + auto info = fixture.find_all<VectorFromDoublesFunction>(); + EXPECT_EQUAL(info.size(), expect_optimized_cnt); + for (size_t i = 0; i < info.size(); ++i) { + EXPECT_TRUE(info[i]->result_is_mutable()); } - return vec; + EXPECT_EQUAL(fixture.find_all<Concat>().size(), expect_not_optimized_cnt); } -struct Context { - Stash stash; - Function function; - std::vector<TensorSpec> input; - std::vector<ValueType> input_types; - NodeTypes node_types; - const TensorFunction &optimized; - - Context(const vespalib::string &expr, std::vector<TensorSpec> in) - : stash(), - function(Function::parse(expr)), - input(in), - input_types(extract_types(function.num_params(), input)), - node_types(function, input_types), - optimized(optimize_fun(function, node_types, stash)) - { - EXPECT_EQUAL(actual(), expected()); - } - - ~Context() {} - - struct Params : LazyParams { - std::vector<Value::UP> values; - Value &resolve(size_t idx, Stash &) const override { - return *values[idx]; - } - }; - - Params gen_params(const TensorEngine &engine) { - Params p; - for (const TensorSpec &spec : input) { - p.values.emplace_back(engine.from_spec(spec)); - } - while (p.values.size() < function.num_params()) { - double v = 1.0 + p.values.size(); - p.values.emplace_back(std::make_unique<DoubleValue>(v)); - } - return p; - } - - TensorSpec actual() { - const LazyParams &params = gen_params(prod_engine); - 
InterpretedFunction prodIfun(prod_engine, optimized); - InterpretedFunction::Context prodIctx(prodIfun); - const Value &result = prodIfun.eval(prodIctx, params); - return prod_engine.to_spec(result); - } - - TensorSpec expected() { - const LazyParams &params = gen_params(ref_engine); - InterpretedFunction refIfun(ref_engine, function, NodeTypes()); - InterpretedFunction::Context refIctx(refIfun); - const Value &result = refIfun.eval(refIctx, params); - return ref_engine.to_spec(result); - } - -}; - //----------------------------------------------------------------------------- -void verify_all_optimized(const vespalib::string &expr) { - Context context(expr, {}); - EXPECT_TRUE(treeContains<VectorFromDoublesFunction>(context.optimized)); - EXPECT_FALSE(treeContains<eval::tensor_function::Concat>(context.optimized)); -} - TEST("require that multiple concats are optimized") { - TEST_DO(verify_all_optimized("concat(a,b,x)")); - TEST_DO(verify_all_optimized("concat(a,concat(b,concat(c,d,x),x),x)")); - TEST_DO(verify_all_optimized("concat(concat(concat(a,b,x),c,x),d,x)")); - TEST_DO(verify_all_optimized("concat(concat(a,b,x),concat(c,d,x),x)")); -} - -//----------------------------------------------------------------------------- - -void verify_some_optimized(const vespalib::string &expr) { - Context context(expr, {}); - EXPECT_TRUE(treeContains<VectorFromDoublesFunction>(context.optimized)); - EXPECT_TRUE(treeContains<eval::tensor_function::Concat>(context.optimized)); + TEST_DO(verify("concat(a,b,x)", 1, 0)); + TEST_DO(verify("concat(a,concat(b,concat(c,d,x),x),x)", 1, 0)); + TEST_DO(verify("concat(concat(concat(a,b,x),c,x),d,x)", 1, 0)); + TEST_DO(verify("concat(concat(a,b,x),concat(c,d,x),x)", 1, 0)); } TEST("require that concat along different dimension is not optimized") { - TEST_DO(verify_some_optimized("concat(concat(a,b,x),concat(c,d,x),y)")); + TEST_DO(verify("concat(concat(a,b,x),concat(c,d,x),y)", 2, 1)); } 
-//----------------------------------------------------------------------------- - TEST("require that concat of vector and double is not optimized") { - TensorSpec vecspec = TensorSpec("tensor(x[3])") - .add({{"x", 0}}, 7.0) - .add({{"x", 1}}, 11.0) - .add({{"x", 2}}, 13.0); - TensorSpec dblspec = TensorSpec("double") - .add({}, 19.0); - Context context("concat(a,b,x)", {vecspec, dblspec}); - EXPECT_TRUE(treeContains<eval::tensor_function::Concat>(context.optimized)); - EXPECT_FALSE(treeContains<VectorFromDoublesFunction>(context.optimized)); + TEST_DO(verify("concat(a,x5,x)", 0, 1)); + TEST_DO(verify("concat(x5,b,x)", 0, 1)); } -//----------------------------------------------------------------------------- - TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/eval/src/vespa/eval/eval/tensor_function.h b/eval/src/vespa/eval/eval/tensor_function.h index 2c3e9d21e0b..4e989d251c1 100644 --- a/eval/src/vespa/eval/eval/tensor_function.h +++ b/eval/src/vespa/eval/eval/tensor_function.h @@ -69,7 +69,7 @@ struct TensorFunction void set(const TensorFunction &child) const { ptr = &child; } }; virtual const ValueType &result_type() const = 0; - virtual bool result_is_mutable() const { return false; } + virtual bool result_is_mutable() const = 0; /** * Push references to all children (NB: implementation must use @@ -162,6 +162,7 @@ private: const Value &_value; public: ConstValue(const Value &value_in) : Leaf(value_in.type()), _value(value_in) {} + bool result_is_mutable() const override { return false; } InterpretedFunction::Instruction compile_self(Stash &stash) const final override; }; @@ -175,6 +176,7 @@ public: Inject(const ValueType &result_type_in, size_t param_idx_in) : Leaf(result_type_in), _param_idx(param_idx_in) {} size_t param_idx() const { return _param_idx; } + bool result_is_mutable() const override { return false; } InterpretedFunction::Instruction compile_self(Stash &stash) const final override; }; @@ -193,6 +195,7 @@ public: : Op1(result_type_in, child_in), 
_aggr(aggr_in), _dimensions(dimensions_in) {} Aggr aggr() const { return _aggr; } const std::vector<vespalib::string> &dimensions() const { return _dimensions; } + bool result_is_mutable() const override { return true; } InterpretedFunction::Instruction compile_self(Stash &stash) const final override; }; @@ -208,7 +211,8 @@ public: map_fun_t function_in) : Op1(result_type_in, child_in), _function(function_in) {} map_fun_t function() const { return _function; } - InterpretedFunction::Instruction compile_self(Stash &stash) const final override; + bool result_is_mutable() const override { return true; } + InterpretedFunction::Instruction compile_self(Stash &stash) const override; }; //----------------------------------------------------------------------------- @@ -224,7 +228,8 @@ public: join_fun_t function_in) : Op2(result_type_in, lhs_in, rhs_in), _function(function_in) {} join_fun_t function() const { return _function; } - InterpretedFunction::Instruction compile_self(Stash &stash) const final override; + bool result_is_mutable() const override { return true; } + InterpretedFunction::Instruction compile_self(Stash &stash) const override; }; //----------------------------------------------------------------------------- @@ -240,6 +245,7 @@ public: const vespalib::string &dimension_in) : Op2(result_type_in, lhs_in, rhs_in), _dimension(dimension_in) {} const vespalib::string &dimension() const { return _dimension; } + bool result_is_mutable() const override { return true; } InterpretedFunction::Instruction compile_self(Stash &stash) const final override; }; @@ -258,6 +264,7 @@ public: : Op1(result_type_in, child_in), _from(from_in), _to(to_in) {} const std::vector<vespalib::string> &from() const { return _from; } const std::vector<vespalib::string> &to() const { return _to; } + bool result_is_mutable() const override { return true; } InterpretedFunction::Instruction compile_self(Stash &stash) const final override; }; @@ -279,6 +286,10 @@ public: const TensorFunction 
&true_child() const { return _true_child.get(); } const TensorFunction &false_child() const { return _false_child.get(); } void push_children(std::vector<Child::CREF> &children) const final override; + bool result_is_mutable() const override { + return (true_child().result_is_mutable() && + false_child().result_is_mutable()); + } InterpretedFunction::Instruction compile_self(Stash &stash) const final override; }; diff --git a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp index 7985b59da56..ae217935fd9 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp +++ b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp @@ -24,7 +24,7 @@ CellsRef getCellsRef(const eval::Value &value) { return denseTensor.cellsRef(); } -void my_op(eval::InterpretedFunction::State &state, uint64_t param) { +void my_dot_product_op(eval::InterpretedFunction::State &state, uint64_t param) { auto *hw_accelerator = (hwaccelrated::IAccelrated *)(param); DenseTensorView::CellsRef lhsCells = getCellsRef(state.peek(1)); DenseTensorView::CellsRef rhsCells = getCellsRef(state.peek(0)); @@ -56,7 +56,7 @@ DenseDotProductFunction::DenseDotProductFunction(const eval::TensorFunction &lhs eval::InterpretedFunction::Instruction DenseDotProductFunction::compile_self(Stash &) const { - return eval::InterpretedFunction::Instruction(my_op, (uint64_t)(_hwAccelerator.get())); + return eval::InterpretedFunction::Instruction(my_dot_product_op, (uint64_t)(_hwAccelerator.get())); } const TensorFunction & diff --git a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h index eec7448f041..46b04a446d4 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h +++ b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h @@ -19,6 +19,7 @@ public: DenseDotProductFunction(const eval::TensorFunction &lhs_in, const 
eval::TensorFunction &rhs_in); eval::InterpretedFunction::Instruction compile_self(Stash &stash) const override; + bool result_is_mutable() const override { return true; } static const eval::TensorFunction &optimize(const eval::TensorFunction &expr, Stash &stash); }; diff --git a/eval/src/vespa/eval/tensor/dense/dense_fast_rename_function.h b/eval/src/vespa/eval/tensor/dense/dense_fast_rename_function.h index e7de8e95ff0..1ca61d52915 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_fast_rename_function.h +++ b/eval/src/vespa/eval/tensor/dense/dense_fast_rename_function.h @@ -17,6 +17,7 @@ public: const eval::TensorFunction &child); ~DenseFastRenameFunction(); eval::InterpretedFunction::Instruction compile_self(Stash &stash) const override; + bool result_is_mutable() const override { return child().result_is_mutable(); } static const eval::TensorFunction &optimize(const eval::TensorFunction &expr, Stash &stash); }; diff --git a/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.cpp index 53a5fe9bb27..78a407062c6 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.cpp +++ b/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.cpp @@ -51,8 +51,7 @@ DenseInplaceJoinFunction::DenseInplaceJoinFunction(const ValueType &result_type, const TensorFunction &rhs, join_fun_t function_in, bool write_left_in) - : eval::tensor_function::Op2(result_type, lhs, rhs), - _function(function_in), + : eval::tensor_function::Join(result_type, lhs, rhs, function_in), _write_left(write_left_in) { } @@ -65,7 +64,7 @@ eval::InterpretedFunction::Instruction DenseInplaceJoinFunction::compile_self(Stash &) const { auto op = _write_left ? 
my_inplace_join_op<true> : my_inplace_join_op<false>; - return eval::InterpretedFunction::Instruction(op, (uint64_t)_function); + return eval::InterpretedFunction::Instruction(op, (uint64_t)function()); } const TensorFunction & diff --git a/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.h b/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.h index de2cdae3778..d31ce99dda4 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.h +++ b/eval/src/vespa/eval/tensor/dense/dense_inplace_join_function.h @@ -9,12 +9,11 @@ namespace vespalib::tensor { /** * Tensor function for inplace join operation on mutable dense tensors. **/ -class DenseInplaceJoinFunction : public eval::tensor_function::Op2 +class DenseInplaceJoinFunction : public eval::tensor_function::Join { public: using join_fun_t = ::vespalib::eval::tensor_function::join_fun_t; private: - join_fun_t _function; bool _write_left; public: DenseInplaceJoinFunction(const eval::ValueType &result_type, @@ -23,7 +22,6 @@ public: join_fun_t function_in, bool write_left_in); ~DenseInplaceJoinFunction(); - join_fun_t function() const { return _function; } bool write_left() const { return _write_left; } bool result_is_mutable() const override { return true; } eval::InterpretedFunction::Instruction compile_self(Stash &stash) const override; diff --git a/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.cpp index 162bdb2ebfe..a0aba25f342 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.cpp +++ b/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.cpp @@ -38,8 +38,7 @@ bool isConcreteDenseTensor(const ValueType &type) { DenseInplaceMapFunction::DenseInplaceMapFunction(const eval::ValueType &result_type, const eval::TensorFunction &child, map_fun_t function_in) - : eval::tensor_function::Op1(result_type, child), - _function(function_in) + : eval::tensor_function::Map(result_type, 
child, function_in) { } @@ -50,7 +49,7 @@ DenseInplaceMapFunction::~DenseInplaceMapFunction() eval::InterpretedFunction::Instruction DenseInplaceMapFunction::compile_self(Stash &) const { - return eval::InterpretedFunction::Instruction(my_inplace_map_op, (uint64_t)_function); + return eval::InterpretedFunction::Instruction(my_inplace_map_op, (uint64_t)function()); } const TensorFunction & diff --git a/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.h b/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.h index f02f83edae1..bfa5760aa3c 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.h +++ b/eval/src/vespa/eval/tensor/dense/dense_inplace_map_function.h @@ -9,7 +9,7 @@ namespace vespalib::tensor { /** * Tensor function for inplace map operation on mutable dense tensors. **/ -class DenseInplaceMapFunction : public eval::tensor_function::Op1 +class DenseInplaceMapFunction : public eval::tensor_function::Map { public: using map_fun_t = ::vespalib::eval::tensor_function::map_fun_t; @@ -20,7 +20,6 @@ public: const eval::TensorFunction &child, map_fun_t function_in); ~DenseInplaceMapFunction(); - map_fun_t function() const { return _function; } bool result_is_mutable() const override { return true; } eval::InterpretedFunction::Instruction compile_self(Stash &stash) const override; static const eval::TensorFunction &optimize(const eval::TensorFunction &expr, Stash &stash); diff --git a/eval/src/vespa/eval/tensor/dense/dense_xw_product_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_xw_product_function.cpp index fa07028ef27..24b1d8abfcf 100644 --- a/eval/src/vespa/eval/tensor/dense/dense_xw_product_function.cpp +++ b/eval/src/vespa/eval/tensor/dense/dense_xw_product_function.cpp @@ -58,7 +58,7 @@ void transposedProduct(const DenseXWProductFunction::Self &self, } template <bool commonDimensionInnermost> -void my_op(eval::InterpretedFunction::State &state, uint64_t param) { +void 
my_xw_product_op(eval::InterpretedFunction::State &state, uint64_t param) { DenseXWProductFunction::Self *self = (DenseXWProductFunction::Self *)(param); CellsRef vectorCells = getCellsRef(state.peek(1)); @@ -129,7 +129,7 @@ eval::InterpretedFunction::Instruction DenseXWProductFunction::compile_self(Stash &stash) const { Self &self = stash.create<Self>(result_type(), _vectorSize, _resultSize); - auto op = _commonDimensionInnermost ? my_op<true> : my_op<false>; + auto op = _commonDimensionInnermost ? my_xw_product_op<true> : my_xw_product_op<false>; return eval::InterpretedFunction::Instruction(op, (uint64_t)(&self)); } diff --git a/eval/src/vespa/eval/tensor/dense/vector_from_doubles_function.h b/eval/src/vespa/eval/tensor/dense/vector_from_doubles_function.h index 417c60c2aca..378c9026f84 100644 --- a/eval/src/vespa/eval/tensor/dense/vector_from_doubles_function.h +++ b/eval/src/vespa/eval/tensor/dense/vector_from_doubles_function.h @@ -31,6 +31,7 @@ public: } size_t size() const { return _self.resultSize; } eval::InterpretedFunction::Instruction compile_self(Stash &stash) const override; + bool result_is_mutable() const override { return true; } static const eval::TensorFunction &optimize(const eval::TensorFunction &expr, Stash &stash); }; |