diff options
author | Håvard Pettersen <havardpe@gmail.com> | 2017-10-27 10:30:09 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2017-10-27 10:30:09 +0200 |
commit | 41e8d2a568bce389469dbfcf4668d0f0bc72cc59 (patch) | |
tree | cfefb4d7477b3578d4eff5976779eef89db74bfc /eval/src | |
parent | efbf5553d9aac346f70d7d2502249f48ec8b5e38 (diff) | |
parent | 51a6686b3b0dd58ccf352aac41feacd797efddc1 (diff) |
Merge pull request #3904 from vespa-engine/havardpe/tensor-cleanup
Havardpe/tensor cleanup
Diffstat (limited to 'eval/src')
17 files changed, 207 insertions, 292 deletions
diff --git a/eval/src/tests/eval/compiled_function/compiled_function_test.cpp b/eval/src/tests/eval/compiled_function/compiled_function_test.cpp index 357baa4d5c5..b887c6e45f9 100644 --- a/eval/src/tests/eval/compiled_function/compiled_function_test.cpp +++ b/eval/src/tests/eval/compiled_function/compiled_function_test.cpp @@ -49,7 +49,6 @@ TEST("require that lazy parameter passing works") { //----------------------------------------------------------------------------- std::vector<vespalib::string> unsupported = { - "sum(", "map(", "join(", "reduce(", diff --git a/eval/src/tests/eval/function/function_test.cpp b/eval/src/tests/eval/function/function_test.cpp index 621f68ffc62..eae58b04178 100644 --- a/eval/src/tests/eval/function/function_test.cpp +++ b/eval/src/tests/eval/function/function_test.cpp @@ -750,15 +750,9 @@ TEST("require that missing value gives parse error") { //----------------------------------------------------------------------------- -TEST("require that tensor sum can be parsed") { - EXPECT_EQUAL("sum(a)", Function::parse("sum(a)").dump()); - EXPECT_EQUAL("sum(a)", Function::parse(" sum ( a ) ").dump()); - EXPECT_EQUAL("sum(a,dim)", Function::parse("sum(a,dim)").dump()); - EXPECT_EQUAL("sum(a,dim)", Function::parse(" sum ( a , dim ) ").dump()); -} - TEST("require that tensor operations can be nested") { - EXPECT_EQUAL("sum(sum(sum(a)),dim)", Function::parse("sum(sum(sum(a)),dim)").dump()); + EXPECT_EQUAL("reduce(reduce(reduce(a,sum),sum),sum,dim)", + Function::parse("reduce(reduce(reduce(a,sum),sum),sum,dim)").dump()); } //----------------------------------------------------------------------------- @@ -795,6 +789,7 @@ TEST("require that tensor reduce can be parsed") { EXPECT_EQUAL("reduce(x,sum,a,b)", Function::parse({"x"}, "reduce(x,sum,a,b)").dump()); EXPECT_EQUAL("reduce(x,sum,a,b,c)", Function::parse({"x"}, "reduce(x,sum,a,b,c)").dump()); EXPECT_EQUAL("reduce(x,sum,a,b,c)", Function::parse({"x"}, " reduce ( x , sum , a , b , c ) ").dump()); 
+ EXPECT_EQUAL("reduce(x,sum)", Function::parse({"x"}, "reduce(x,sum)").dump()); EXPECT_EQUAL("reduce(x,avg)", Function::parse({"x"}, "reduce(x,avg)").dump()); EXPECT_EQUAL("reduce(x,avg)", Function::parse({"x"}, "reduce( x , avg )").dump()); EXPECT_EQUAL("reduce(x,count)", Function::parse({"x"}, "reduce(x,count)").dump()); @@ -803,11 +798,6 @@ TEST("require that tensor reduce can be parsed") { EXPECT_EQUAL("reduce(x,max)", Function::parse({"x"}, "reduce(x,max)").dump()); } -TEST("require that tensor reduce is mapped to tensor sum for all dimensions/single dimension") { - EXPECT_EQUAL("sum(x)", Function::parse({"x"}, "reduce(x,sum)").dump()); - EXPECT_EQUAL("sum(x,d)", Function::parse({"x"}, "reduce(x,sum,d)").dump()); -} - TEST("require that tensor reduce with unknown aggregator fails") { verify_error("reduce(x,bogus)", "[reduce(x,bogus]...[unknown aggregator: 'bogus']...[)]"); } diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp index 4497c5f5e70..0e548a3b82e 100644 --- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp +++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp @@ -5,6 +5,7 @@ #include <vespa/eval/eval/interpreted_function.h> #include <vespa/eval/eval/test/eval_spec.h> #include <vespa/eval/eval/basic_nodes.h> +#include <vespa/eval/tensor/default_tensor_engine.h> #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/util/stash.h> #include <vespa/vespalib/test/insertion_operators.h> @@ -12,6 +13,7 @@ using namespace vespalib::eval; using vespalib::Stash; +using vespalib::tensor::DefaultTensorEngine; //----------------------------------------------------------------------------- @@ -20,6 +22,7 @@ struct MyEvalTest : test::EvalSpec::EvalTest { size_t fail_cnt = 0; bool print_pass = false; bool print_fail = false; + virtual void next_expression(const std::vector<vespalib::string> ¶m_names, 
const vespalib::string &expression) override { @@ -35,6 +38,7 @@ struct MyEvalTest : test::EvalSpec::EvalTest { ++fail_cnt; } } + virtual void handle_case(const std::vector<vespalib::string> ¶m_names, const std::vector<double> ¶m_values, const vespalib::string &expression, @@ -45,23 +49,34 @@ struct MyEvalTest : test::EvalSpec::EvalTest { bool is_supported = true; bool has_issues = InterpretedFunction::detect_issues(function); if (is_supported && !has_issues) { - InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); - ASSERT_EQUAL(ifun.num_params(), param_values.size()); - InterpretedFunction::Context ictx(ifun); + vespalib::string desc = as_string(param_names, param_values, expression); InterpretedFunction::SimpleParams params(param_values); - const Value &result_value = ifun.eval(ictx, params); - double result = result_value.as_double(); - if (result_value.is_double() && is_same(expected_result, result)) { - print_pass && fprintf(stderr, "verifying: %s -> %g ... PASS\n", - as_string(param_names, param_values, expression).c_str(), - expected_result); - ++pass_cnt; - } else { - print_fail && fprintf(stderr, "verifying: %s -> %g ... 
FAIL: got %g\n", - as_string(param_names, param_values, expression).c_str(), - expected_result, result); - ++fail_cnt; - } + verify_result(SimpleTensorEngine::ref(), function, "[simple] "+desc, params, expected_result); + verify_result(DefaultTensorEngine::ref(), function, " [prod] "+desc, params, expected_result); + } + } + + void verify_result(const TensorEngine& engine, + const Function &function, + const vespalib::string &description, + const InterpretedFunction::SimpleParams ¶ms, + double expected_result) + { + InterpretedFunction ifun(engine, function, NodeTypes()); + ASSERT_EQUAL(ifun.num_params(), params.params.size()); + InterpretedFunction::Context ictx(ifun); + const Value &result_value = ifun.eval(ictx, params); + double result = result_value.as_double(); + if (result_value.is_double() && is_same(expected_result, result)) { + print_pass && fprintf(stderr, "verifying: %s -> %g ... PASS\n", + description.c_str(), + expected_result); + ++pass_cnt; + } else { + print_fail && fprintf(stderr, "verifying: %s -> %g ... 
FAIL: got %g\n", + description.c_str(), + expected_result, result); + ++fail_cnt; } } }; @@ -114,6 +129,10 @@ TEST("require that interpreted function instructions have expected size") { EXPECT_EQUAL(sizeof(InterpretedFunction::Instruction), 16u); } +TEST("require that function pointers can be passed as instruction parameters") { + EXPECT_EQUAL(sizeof(&operation::Add::f), sizeof(uint64_t)); +} + TEST("require that basic addition works") { Function function = Function::parse("a+10"); InterpretedFunction interpreted(SimpleTensorEngine::ref(), function, NodeTypes()); @@ -128,7 +147,7 @@ TEST("require that basic addition works") { TEST("require that dot product like expression is not optimized for unknown types") { const TensorEngine &engine = SimpleTensorEngine::ref(); - Function function = Function::parse("sum(a*b)"); + Function function = Function::parse("reduce(a*b,sum)"); DoubleValue a(2.0); DoubleValue b(3.0); double expect = (2.0 * 3.0); @@ -143,7 +162,7 @@ TEST("require that dot product like expression is not optimized for unknown type TEST("require that dot product works with tensor function") { const TensorEngine &engine = SimpleTensorEngine::ref(); - Function function = Function::parse("sum(a*b)"); + Function function = Function::parse("reduce(a*b,sum)"); auto a = TensorSpec("tensor(x[3])") .add({{"x", 0}}, 5.0) .add({{"x", 1}}, 3.0) @@ -167,7 +186,7 @@ TEST("require that dot product works with tensor function") { TEST("require that matrix multiplication works with tensor function") { const TensorEngine &engine = SimpleTensorEngine::ref(); - Function function = Function::parse("sum(a*b,y)"); + Function function = Function::parse("reduce(a*b,sum,y)"); auto a = TensorSpec("tensor(x[2],y[2])") .add({{"x", 0},{"y", 0}}, 1.0) .add({{"x", 0},{"y", 1}}, 2.0) diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp index 555c2bd2ba4..f6afa543402 100644 --- a/eval/src/tests/eval/node_types/node_types_test.cpp 
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp @@ -129,23 +129,6 @@ TEST("require that set membership resolves to double unless error") { TEST_DO(verify("any in [tensor,error,any]", "error")); } -TEST("require that sum resolves correct type") { - TEST_DO(verify("sum(error)", "error")); - TEST_DO(verify("sum(tensor)", "double")); - TEST_DO(verify("sum(double)", "double")); - TEST_DO(verify("sum(any)", "any")); -} - -TEST("require that dimension sum resolves correct type") { - TEST_DO(verify("sum(error,x)", "error")); - TEST_DO(verify("sum(tensor,x)", "any")); - TEST_DO(verify("sum(any,x)", "any")); - TEST_DO(verify("sum(double,x)", "error")); - TEST_DO(verify("sum(tensor(x{},y{},z{}),y)", "tensor(x{},z{})")); - TEST_DO(verify("sum(tensor(x{},y{},z{}),w)", "error")); - TEST_DO(verify("sum(tensor(x{}),x)", "double")); -} - TEST("require that reduce resolves correct type") { TEST_DO(verify("reduce(error,sum)", "error")); TEST_DO(verify("reduce(tensor,sum)", "double")); @@ -291,7 +274,7 @@ TEST("require that tensor concat resolves correct type") { TEST("require that double only expressions can be detected") { Function plain_fun = Function::parse("1+2"); - Function complex_fun = Function::parse("sum(a)"); + Function complex_fun = Function::parse("reduce(a,sum)"); NodeTypes plain_types(plain_fun, {}); NodeTypes complex_types(complex_fun, {ValueType::tensor_type({})}); EXPECT_TRUE(plain_types.get_type(plain_fun.root()).is_double()); diff --git a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp index 735e9c9ab34..3daaa3f79b3 100644 --- a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp +++ b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp @@ -18,10 +18,10 @@ using namespace vespalib::tensor; //----------------------------------------------------------------------------- -const vespalib::string dot_product_match_expr = 
"sum(query*document)"; -const vespalib::string dot_product_multiply_expr = "sum(query*document)"; -const vespalib::string model_match_expr = "sum((query*document)*model)"; -const vespalib::string matrix_product_expr = "sum(sum((query+document)*model,x))"; +const vespalib::string dot_product_match_expr = "reduce(query*document,sum)"; +const vespalib::string dot_product_multiply_expr = "reduce(query*document,sum)"; +const vespalib::string model_match_expr = "reduce((query*document)*model,sum)"; +const vespalib::string matrix_product_expr = "reduce(reduce((query+document)*model,sum,x),sum)"; //----------------------------------------------------------------------------- diff --git a/eval/src/vespa/eval/eval/function.cpp b/eval/src/vespa/eval/eval/function.cpp index cb3d157c06f..58d61d33ea6 100644 --- a/eval/src/vespa/eval/eval/function.cpp +++ b/eval/src/vespa/eval/eval/function.cpp @@ -587,13 +587,7 @@ void parse_tensor_reduce(ParseContext &ctx) { return; } auto dimensions = get_ident_list(ctx, false); - if ((*maybe_aggr == Aggr::SUM) && dimensions.empty()) { - ctx.push_expression(std::make_unique<nodes::TensorSum>(std::move(child))); - } else if ((*maybe_aggr == Aggr::SUM) && (dimensions.size() == 1)) { - ctx.push_expression(std::make_unique<nodes::TensorSum>(std::move(child), dimensions[0])); - } else { - ctx.push_expression(std::make_unique<nodes::TensorReduce>(std::move(child), *maybe_aggr, std::move(dimensions))); - } + ctx.push_expression(std::make_unique<nodes::TensorReduce>(std::move(child), *maybe_aggr, std::move(dimensions))); } void parse_tensor_rename(ParseContext &ctx) { @@ -648,20 +642,6 @@ void parse_tensor_concat(ParseContext &ctx) { ctx.push_expression(std::make_unique<nodes::TensorConcat>(std::move(lhs), std::move(rhs), dimension)); } -// to be replaced with more generic 'reduce' -void parse_tensor_sum(ParseContext &ctx) { - parse_expression(ctx); - Node_UP child = ctx.pop_expression(); - if (ctx.get() == ',') { - ctx.next(); - vespalib::string 
dimension = get_ident(ctx, false); - ctx.skip_spaces(); - ctx.push_expression(Node_UP(new nodes::TensorSum(std::move(child), dimension))); - } else { - ctx.push_expression(Node_UP(new nodes::TensorSum(std::move(child)))); - } -} - bool try_parse_call(ParseContext &ctx, const vespalib::string &name) { ctx.skip_spaces(); if (ctx.get() == '(') { @@ -686,8 +666,6 @@ bool try_parse_call(ParseContext &ctx, const vespalib::string &name) { parse_tensor_lambda(ctx); } else if (name == "concat") { parse_tensor_concat(ctx); - } else if (name == "sum") { - parse_tensor_sum(ctx); } else { ctx.fail(make_string("unknown function: '%s'", name.c_str())); return false; diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp index e6e11dbd7c3..09cc2d57e80 100644 --- a/eval/src/vespa/eval/eval/interpreted_function.cpp +++ b/eval/src/vespa/eval/eval/interpreted_function.cpp @@ -18,6 +18,8 @@ namespace { using namespace nodes; using State = InterpretedFunction::State; using Instruction = InterpretedFunction::Instruction; +using map_fun_t = double (*)(double); +using join_fun_t = double (*)(double, double); //----------------------------------------------------------------------------- @@ -32,6 +34,13 @@ const T &unwrap_param(uint64_t param) { return *((const T *)param); } //----------------------------------------------------------------------------- +uint64_t to_param(map_fun_t value) { return (uint64_t)value; } +uint64_t to_param(join_fun_t value) { return (uint64_t)value; } +map_fun_t to_map_fun(uint64_t param) { return (map_fun_t)param; } +join_fun_t to_join_fun(uint64_t param) { return (join_fun_t)param; } + +//----------------------------------------------------------------------------- + void op_load_const(State &state, uint64_t param) { state.stack.push_back(unwrap_param<Value>(param)); } @@ -46,18 +55,6 @@ void op_load_let(State &state, uint64_t param) { 
//----------------------------------------------------------------------------- -template <typename OP1> -void op_unary(State &state, uint64_t) { - state.replace(1, state.engine.map(state.peek(0), OP1::f, state.stash)); -} - -template <typename OP2> -void op_binary(State &state, uint64_t) { - state.replace(2, state.engine.join(state.peek(1), state.peek(0), OP2::f, state.stash)); -} - -//----------------------------------------------------------------------------- - void op_skip(State &state, uint64_t param) { state.program_offset += param; } @@ -101,25 +98,12 @@ void op_not_member(State &state, uint64_t) { //----------------------------------------------------------------------------- -void op_tensor_sum(State &state, uint64_t) { - state.replace(1, state.engine.reduce(state.peek(0), Aggr::SUM, {}, state.stash)); -} - -void op_tensor_sum_dimension(State &state, uint64_t param) { - const vespalib::string &dimension = unwrap_param<vespalib::string>(param); - state.replace(1, state.engine.reduce(state.peek(0), Aggr::SUM, {dimension}, state.stash)); -} - -//----------------------------------------------------------------------------- - void op_tensor_map(State &state, uint64_t param) { - const CompiledFunction &cfun = unwrap_param<CompiledFunction>(param); - state.replace(1, state.engine.map(state.peek(0), cfun.get_function<1>(), state.stash)); + state.replace(1, state.engine.map(state.peek(0), to_map_fun(param), state.stash)); } void op_tensor_join(State &state, uint64_t param) { - const CompiledFunction &cfun = unwrap_param<CompiledFunction>(param); - state.replace(2, state.engine.join(state.peek(1), state.peek(0), cfun.get_function<2>(), state.stash)); + state.replace(2, state.engine.join(state.peek(1), state.peek(0), to_join_fun(param), state.stash)); } using ReduceParams = std::pair<Aggr,std::vector<vespalib::string>>; @@ -227,10 +211,27 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { 
//------------------------------------------------------------------------- - void visit(const Number&node) override { - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<DoubleValue>(node.value()))); + void make_const_op(const Node &node, const Value &value) { + (void) node; + program.emplace_back(op_load_const, wrap_param<Value>(value)); + } + + void make_map_op(const Node &node, map_fun_t function) { + (void) node; + program.emplace_back(op_tensor_map, to_param(function)); + } + + void make_join_op(const Node &node, join_fun_t function) { + (void) node; + program.emplace_back(op_tensor_join, to_param(function)); + } + + //------------------------------------------------------------------------- + + void visit(const Number &node) override { + make_const_op(node, stash.create<DoubleValue>(node.value())); } - void visit(const Symbol&node) override { + void visit(const Symbol &node) override { if (node.id() >= 0) { // param value program.emplace_back(op_load_param, node.id()); } else { // let binding @@ -238,19 +239,19 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { program.emplace_back(op_load_let, let_offset); } } - void visit(const String&node) override { - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<DoubleValue>(node.hash()))); + void visit(const String &node) override { + make_const_op(node, stash.create<DoubleValue>(node.hash())); } - void visit(const Array&node) override { - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<DoubleValue>(node.size()))); + void visit(const Array &node) override { + make_const_op(node, stash.create<DoubleValue>(node.size())); } - void visit(const Neg &) override { - program.emplace_back(op_unary<operation::Neg>); + void visit(const Neg &node) override { + make_map_op(node, operation::Neg::f); } - void visit(const Not &) override { - program.emplace_back(op_unary<operation::Not>); + void visit(const Not &node) override { + make_map_op(node, 
operation::Not::f); } - void visit(const If&node) override { + void visit(const If &node) override { node.cond().traverse(*this); size_t after_cond = program.size(); program.emplace_back(op_skip_if_false); @@ -261,58 +262,48 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { program[after_cond].update_param(after_true - after_cond); program[after_true].update_param(program.size() - after_true - 1); } - void visit(const Let&node) override { + void visit(const Let &node) override { node.value().traverse(*this); program.emplace_back(op_store_let); node.expr().traverse(*this); program.emplace_back(op_evict_let); } - void visit(const Error &) override { - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<ErrorValue>())); + void visit(const Error &node) override { + make_const_op(node, ErrorValue::instance); + } + void visit(const TensorMap &node) override { + const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::SEPARATE)); + make_map_op(node, token.get()->get().get_function<1>()); } - void visit(const TensorSum&node) override { - if (is_typed(node) && is_typed_tensor_product_of_params(node.get_child(0))) { + void visit(const TensorJoin &node) override { + const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::SEPARATE)); + make_join_op(node, token.get()->get().get_function<2>()); + } + void visit(const TensorReduce &node) override { + if ((node.aggr() == Aggr::SUM) && is_typed(node) && is_typed_tensor_product_of_params(node.get_child(0))) { assert(program.size() >= 3); // load,load,mul program.pop_back(); // mul program.pop_back(); // load program.pop_back(); // load - std::vector<vespalib::string> dim_list; - if (!node.dimension().empty()) { - dim_list.push_back(node.dimension()); - } auto a = as<Symbol>(node.get_child(0).get_child(0)); auto b = as<Symbol>(node.get_child(0).get_child(1)); auto ir = 
tensor_function::reduce(tensor_function::join( tensor_function::inject(types.get_type(*a), 0), tensor_function::inject(types.get_type(*b), 1), - operation::Mul::f), Aggr::SUM, dim_list); + operation::Mul::f), node.aggr(), node.dimensions()); auto fun = tensor_engine.compile(std::move(ir)); const auto &meta = stash.create<TensorFunctionArgArgMeta>(std::move(fun), a->id(), b->id()); program.emplace_back(op_tensor_function_arg_arg, wrap_param<TensorFunctionArgArgMeta>(meta)); - } else if (node.dimension().empty()) { - program.emplace_back(op_tensor_sum); } else { - program.emplace_back(op_tensor_sum_dimension, - wrap_param<vespalib::string>(stash.create<vespalib::string>(node.dimension()))); + ReduceParams ¶ms = stash.create<ReduceParams>(node.aggr(), node.dimensions()); + program.emplace_back(op_tensor_reduce, wrap_param<ReduceParams>(params)); } } - void visit(const TensorMap&node) override { - const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::SEPARATE)); - program.emplace_back(op_tensor_map, wrap_param<CompiledFunction>(token.get()->get())); - } - void visit(const TensorJoin&node) override { - const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::SEPARATE)); - program.emplace_back(op_tensor_join, wrap_param<CompiledFunction>(token.get()->get())); - } - void visit(const TensorReduce&node) override { - ReduceParams ¶ms = stash.create<ReduceParams>(node.aggr(), node.dimensions()); - program.emplace_back(op_tensor_reduce, wrap_param<ReduceParams>(params)); - } - void visit(const TensorRename&node) override { + void visit(const TensorRename &node) override { RenameParams ¶ms = stash.create<RenameParams>(node.from(), node.to()); program.emplace_back(op_tensor_rename, wrap_param<RenameParams>(params)); } - void visit(const TensorLambda&node) override { + void visit(const TensorLambda &node) override { const auto &type = node.type(); TensorSpec 
spec(type.to_spec()); const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::ARRAY)); @@ -327,52 +318,52 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { spec.add(addr, fun(¶ms[0])); } while (step_labels(params, type)); auto tensor = tensor_engine.create(spec); - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<TensorValue>(std::move(tensor)))); + make_const_op(node, stash.create<TensorValue>(std::move(tensor))); } - void visit(const TensorConcat&node) override { + void visit(const TensorConcat &node) override { vespalib::string &dimension = stash.create<vespalib::string>(node.dimension()); program.emplace_back(op_tensor_concat, wrap_param<vespalib::string>(dimension)); } - void visit(const Add &) override { - program.emplace_back(op_binary<operation::Add>); + void visit(const Add &node) override { + make_join_op(node, operation::Add::f); } - void visit(const Sub &) override { - program.emplace_back(op_binary<operation::Sub>); + void visit(const Sub &node) override { + make_join_op(node, operation::Sub::f); } - void visit(const Mul &) override { - program.emplace_back(op_binary<operation::Mul>); + void visit(const Mul &node) override { + make_join_op(node, operation::Mul::f); } - void visit(const Div &) override { - program.emplace_back(op_binary<operation::Div>); + void visit(const Div &node) override { + make_join_op(node, operation::Div::f); } - void visit(const Mod &) override { - program.emplace_back(op_binary<operation::Mod>); + void visit(const Mod &node) override { + make_join_op(node, operation::Mod::f); } - void visit(const Pow &) override { - program.emplace_back(op_binary<operation::Pow>); + void visit(const Pow &node) override { + make_join_op(node, operation::Pow::f); } - void visit(const Equal &) override { - program.emplace_back(op_binary<operation::Equal>); + void visit(const Equal &node) override { + make_join_op(node, operation::Equal::f); } - void 
visit(const NotEqual &) override { - program.emplace_back(op_binary<operation::NotEqual>); + void visit(const NotEqual &node) override { + make_join_op(node, operation::NotEqual::f); } - void visit(const Approx &) override { - program.emplace_back(op_binary<operation::Approx>); + void visit(const Approx &node) override { + make_join_op(node, operation::Approx::f); } - void visit(const Less &) override { - program.emplace_back(op_binary<operation::Less>); + void visit(const Less &node) override { + make_join_op(node, operation::Less::f); } - void visit(const LessEqual &) override { - program.emplace_back(op_binary<operation::LessEqual>); + void visit(const LessEqual &node) override { + make_join_op(node, operation::LessEqual::f); } - void visit(const Greater &) override { - program.emplace_back(op_binary<operation::Greater>); + void visit(const Greater &node) override { + make_join_op(node, operation::Greater::f); } - void visit(const GreaterEqual &) override { - program.emplace_back(op_binary<operation::GreaterEqual>); + void visit(const GreaterEqual &node) override { + make_join_op(node, operation::GreaterEqual::f); } - void visit(const In&node) override { + void visit(const In &node) override { std::vector<size_t> checks; node.lhs().traverse(*this); auto array = as<Array>(node.rhs()); @@ -392,91 +383,91 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { } program.emplace_back(op_not_member); } - void visit(const And &) override { - program.emplace_back(op_binary<operation::And>); + void visit(const And &node) override { + make_join_op(node, operation::And::f); } - void visit(const Or &) override { - program.emplace_back(op_binary<operation::Or>); + void visit(const Or &node) override { + make_join_op(node, operation::Or::f); } - void visit(const Cos &) override { - program.emplace_back(op_unary<operation::Cos>); + void visit(const Cos &node) override { + make_map_op(node, operation::Cos::f); } - void visit(const Sin &) override { - 
program.emplace_back(op_unary<operation::Sin>); + void visit(const Sin &node) override { + make_map_op(node, operation::Sin::f); } - void visit(const Tan &) override { - program.emplace_back(op_unary<operation::Tan>); + void visit(const Tan &node) override { + make_map_op(node, operation::Tan::f); } - void visit(const Cosh &) override { - program.emplace_back(op_unary<operation::Cosh>); + void visit(const Cosh &node) override { + make_map_op(node, operation::Cosh::f); } - void visit(const Sinh &) override { - program.emplace_back(op_unary<operation::Sinh>); + void visit(const Sinh &node) override { + make_map_op(node, operation::Sinh::f); } - void visit(const Tanh &) override { - program.emplace_back(op_unary<operation::Tanh>); + void visit(const Tanh &node) override { + make_map_op(node, operation::Tanh::f); } - void visit(const Acos &) override { - program.emplace_back(op_unary<operation::Acos>); + void visit(const Acos &node) override { + make_map_op(node, operation::Acos::f); } - void visit(const Asin &) override { - program.emplace_back(op_unary<operation::Asin>); + void visit(const Asin &node) override { + make_map_op(node, operation::Asin::f); } - void visit(const Atan &) override { - program.emplace_back(op_unary<operation::Atan>); + void visit(const Atan &node) override { + make_map_op(node, operation::Atan::f); } - void visit(const Exp &) override { - program.emplace_back(op_unary<operation::Exp>); + void visit(const Exp &node) override { + make_map_op(node, operation::Exp::f); } - void visit(const Log10 &) override { - program.emplace_back(op_unary<operation::Log10>); + void visit(const Log10 &node) override { + make_map_op(node, operation::Log10::f); } - void visit(const Log &) override { - program.emplace_back(op_unary<operation::Log>); + void visit(const Log &node) override { + make_map_op(node, operation::Log::f); } - void visit(const Sqrt &) override { - program.emplace_back(op_unary<operation::Sqrt>); + void visit(const Sqrt &node) override { + 
make_map_op(node, operation::Sqrt::f); } - void visit(const Ceil &) override { - program.emplace_back(op_unary<operation::Ceil>); + void visit(const Ceil &node) override { + make_map_op(node, operation::Ceil::f); } - void visit(const Fabs &) override { - program.emplace_back(op_unary<operation::Fabs>); + void visit(const Fabs &node) override { + make_map_op(node, operation::Fabs::f); } - void visit(const Floor &) override { - program.emplace_back(op_unary<operation::Floor>); + void visit(const Floor &node) override { + make_map_op(node, operation::Floor::f); } - void visit(const Atan2 &) override { - program.emplace_back(op_binary<operation::Atan2>); + void visit(const Atan2 &node) override { + make_join_op(node, operation::Atan2::f); } - void visit(const Ldexp &) override { - program.emplace_back(op_binary<operation::Ldexp>); + void visit(const Ldexp &node) override { + make_join_op(node, operation::Ldexp::f); } - void visit(const Pow2 &) override { - program.emplace_back(op_binary<operation::Pow>); + void visit(const Pow2 &node) override { + make_join_op(node, operation::Pow::f); } - void visit(const Fmod &) override { - program.emplace_back(op_binary<operation::Mod>); + void visit(const Fmod &node) override { + make_join_op(node, operation::Mod::f); } - void visit(const Min &) override { - program.emplace_back(op_binary<operation::Min>); + void visit(const Min &node) override { + make_join_op(node, operation::Min::f); } - void visit(const Max &) override { - program.emplace_back(op_binary<operation::Max>); + void visit(const Max &node) override { + make_join_op(node, operation::Max::f); } - void visit(const IsNan &) override { - program.emplace_back(op_unary<operation::IsNan>); + void visit(const IsNan &node) override { + make_map_op(node, operation::IsNan::f); } - void visit(const Relu &) override { - program.emplace_back(op_unary<operation::Relu>); + void visit(const Relu &node) override { + make_map_op(node, operation::Relu::f); } - void visit(const Sigmoid 
&) override { - program.emplace_back(op_unary<operation::Sigmoid>); + void visit(const Sigmoid &node) override { + make_map_op(node, operation::Sigmoid::f); } //------------------------------------------------------------------------- - bool open(const Node&node) override { + bool open(const Node &node) override { if (check_type<Array, If, Let, In>(node)) { node.accept(*this); return false; @@ -484,7 +475,7 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { return true; } - void close(const Node&node) override { + void close(const Node &node) override { node.accept(*this); } }; diff --git a/eval/src/vespa/eval/eval/key_gen.cpp b/eval/src/vespa/eval/eval/key_gen.cpp index 65be8d172fd..490f1c044ac 100644 --- a/eval/src/vespa/eval/eval/key_gen.cpp +++ b/eval/src/vespa/eval/eval/key_gen.cpp @@ -31,7 +31,6 @@ struct KeyGen : public NodeVisitor, public NodeTraverser { void visit(const If &node) override { add_byte( 7); add_double(node.p_true()); } void visit(const Let &) override { add_byte( 8); } void visit(const Error &) override { add_byte( 9); } - void visit(const TensorSum &) override { add_byte(10); } // dimensions should be part of key void visit(const TensorMap &) override { add_byte(11); } // lambda should be part of key void visit(const TensorJoin &) override { add_byte(12); } // lambda should be part of key void visit(const TensorReduce &) override { add_byte(13); } // aggr/dimensions should be part of key diff --git a/eval/src/vespa/eval/eval/llvm/compiled_function.cpp b/eval/src/vespa/eval/eval/llvm/compiled_function.cpp index fc016f6a1fd..a696f16f849 100644 --- a/eval/src/vespa/eval/eval/llvm/compiled_function.cpp +++ b/eval/src/vespa/eval/eval/llvm/compiled_function.cpp @@ -123,8 +123,7 @@ CompiledFunction::detect_issues(const Function &function) std::vector<vespalib::string> issues; bool open(const nodes::Node &) override { return true; } void close(const nodes::Node &node) override { - if (nodes::check_type<nodes::TensorSum, - 
nodes::TensorMap, + if (nodes::check_type<nodes::TensorMap, nodes::TensorJoin, nodes::TensorReduce, nodes::TensorRename, diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp index 30c10464a35..77e23b44799 100644 --- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp +++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp @@ -415,9 +415,6 @@ struct FunctionBuilder : public NodeVisitor, public NodeTraverser { // tensor nodes (not supported in compiled expressions) - void visit(const TensorSum &node) override { - make_error(node.num_children()); - } void visit(const TensorMap &node) override { make_error(node.num_children()); } diff --git a/eval/src/vespa/eval/eval/node_types.cpp b/eval/src/vespa/eval/eval/node_types.cpp index 24dd6196d64..3e942c7284e 100644 --- a/eval/src/vespa/eval/eval/node_types.cpp +++ b/eval/src/vespa/eval/eval/node_types.cpp @@ -152,14 +152,6 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser { void visit(const Error &node) override { bind_type(ValueType::error_type(), node); } - void visit(const TensorSum &node) override { - const ValueType &child = state.peek(0); - if (node.dimension().empty()) { - bind_type(child.reduce({}), node); - } else { - bind_type(child.reduce({node.dimension()}), node); - } - } void visit(const TensorMap &node) override { resolve_op1(node); } void visit(const TensorJoin &node) override { resolve_op2(node); } void visit(const TensorReduce &node) override { diff --git a/eval/src/vespa/eval/eval/node_visitor.h b/eval/src/vespa/eval/eval/node_visitor.h index 1bbcef24107..b12689e9603 100644 --- a/eval/src/vespa/eval/eval/node_visitor.h +++ b/eval/src/vespa/eval/eval/node_visitor.h @@ -30,7 +30,6 @@ struct NodeVisitor { virtual void visit(const nodes::Error &) = 0; // tensor nodes - virtual void visit(const nodes::TensorSum &) = 0; virtual void visit(const nodes::TensorMap &) = 0; virtual void visit(const nodes::TensorJoin &) = 0; virtual void visit(const 
nodes::TensorReduce &) = 0; @@ -91,16 +90,15 @@ struct NodeVisitor { * of all types not specifically handled. **/ struct EmptyNodeVisitor : NodeVisitor { - virtual void visit(const nodes::Number &) override {} - virtual void visit(const nodes::Symbol &) override {} - virtual void visit(const nodes::String &) override {} - virtual void visit(const nodes::Array &) override {} - virtual void visit(const nodes::Neg &) override {} - virtual void visit(const nodes::Not &) override {} - virtual void visit(const nodes::If &) override {} - virtual void visit(const nodes::Let &) override {} + void visit(const nodes::Number &) override {} + void visit(const nodes::Symbol &) override {} + void visit(const nodes::String &) override {} + void visit(const nodes::Array &) override {} + void visit(const nodes::Neg &) override {} + void visit(const nodes::Not &) override {} + void visit(const nodes::If &) override {} + void visit(const nodes::Let &) override {} void visit(const nodes::Error &) override {} - void visit(const nodes::TensorSum &) override {} void visit(const nodes::TensorMap &) override {} void visit(const nodes::TensorJoin &) override {} void visit(const nodes::TensorReduce &) override {} diff --git a/eval/src/vespa/eval/eval/tensor_nodes.cpp b/eval/src/vespa/eval/eval/tensor_nodes.cpp index ea468b463ff..6abcd8fb9bc 100644 --- a/eval/src/vespa/eval/eval/tensor_nodes.cpp +++ b/eval/src/vespa/eval/eval/tensor_nodes.cpp @@ -7,7 +7,6 @@ namespace vespalib { namespace eval { namespace nodes { -void TensorSum ::accept(NodeVisitor &visitor) const { visitor.visit(*this); } void TensorMap ::accept(NodeVisitor &visitor) const { visitor.visit(*this); } void TensorJoin ::accept(NodeVisitor &visitor) const { visitor.visit(*this); } void TensorReduce::accept(NodeVisitor &visitor) const { visitor.visit(*this); } diff --git a/eval/src/vespa/eval/eval/tensor_nodes.h b/eval/src/vespa/eval/eval/tensor_nodes.h index df686510c1a..bfa572f1d94 100644 --- 
a/eval/src/vespa/eval/eval/tensor_nodes.h +++ b/eval/src/vespa/eval/eval/tensor_nodes.h @@ -12,38 +12,6 @@ namespace vespalib { namespace eval { namespace nodes { -class TensorSum : public Node { -private: - Node_UP _child; - vespalib::string _dimension; -public: - TensorSum(Node_UP child) : _child(std::move(child)), _dimension() {} - TensorSum(Node_UP child, const vespalib::string &dimension_in) - : _child(std::move(child)), _dimension(dimension_in) {} - const vespalib::string &dimension() const { return _dimension; } - vespalib::string dump(DumpContext &ctx) const override { - vespalib::string str; - str += "sum("; - str += _child->dump(ctx); - if (!_dimension.empty()) { - str += ","; - str += _dimension; - } - str += ")"; - return str; - } - void accept(NodeVisitor &visitor) const override; - size_t num_children() const override { return 1; } - const Node &get_child(size_t idx) const override { - (void) idx; - assert(idx == 0); - return *_child; - } - void detach_children(NodeHandler &handler) override { - handler.handle(std::move(_child)); - } -}; - class TensorMap : public Node { private: Node_UP _child; diff --git a/eval/src/vespa/eval/eval/test/eval_spec.cpp b/eval/src/vespa/eval/eval/test/eval_spec.cpp index f9c36c9eb93..086e6570e0a 100644 --- a/eval/src/vespa/eval/eval/test/eval_spec.cpp +++ b/eval/src/vespa/eval/eval/test/eval_spec.cpp @@ -166,7 +166,6 @@ EvalSpec::add_function_call_cases() { void EvalSpec::add_tensor_operation_cases() { - add_rule({"a", -1.0, 1.0}, "sum(a)", [](double a){ return a; }); add_rule({"a", -1.0, 1.0}, "map(a,f(x)(sin(x)))", [](double x){ return std::sin(x); }); add_rule({"a", -1.0, 1.0}, "map(a,f(x)(x+x*3))", [](double x){ return (x + (x * 3)); }); add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "join(a,b,f(x,y)(x+y))", [](double x, double y){ return (x + y); }); diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp index fc6a5006e82..3c8cc505bd6 100644 --- 
a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp +++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp @@ -833,7 +833,7 @@ struct TestContext { const TensorSpec &lhs, const TensorSpec &rhs) { - Expr_TT eval("sum(a*b)"); + Expr_TT eval("reduce(a*b,sum)"); TEST_DO(verify_result(safe(eval).eval(engine, lhs, rhs), spec(expect))); } diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp index 6a536497bdd..b004ce5bfde 100644 --- a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp +++ b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp @@ -288,9 +288,13 @@ const Value & DefaultTensorEngine::reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const { if (a.is_double()) { - Aggregator &aggregator = Aggregator::create(aggr, stash); - aggregator.first(a.as_double()); - return stash.create<DoubleValue>(aggregator.result()); + if (dimensions.empty()) { + Aggregator &aggregator = Aggregator::create(aggr, stash); + aggregator.first(a.as_double()); + return stash.create<DoubleValue>(aggregator.result()); + } else { + return ErrorValue::instance; + } } else if (auto tensor = a.as_tensor()) { assert(&tensor->engine() == this); const tensor::Tensor &my_a = static_cast<const tensor::Tensor &>(*tensor); |