diff options
author | Haavard <havardpe@yahoo-inc.com> | 2017-02-09 14:44:10 +0000 |
---|---|---|
committer | Haavard <havardpe@yahoo-inc.com> | 2017-02-10 16:07:47 +0000 |
commit | a9e64013b4db64cb6ec86be6dcc0076282ab8858 (patch) | |
tree | ca16fa7cb4d1efe5a4ccbf8380e235bf15b8baa6 /eval | |
parent | 461694eed494a7dc0f365725439beee2089eaec5 (diff) |
wire in immediate evaluation of new syntax
Diffstat (limited to 'eval')
9 files changed, 109 insertions, 73 deletions
diff --git a/eval/src/apps/eval_expr/eval_expr.cpp b/eval/src/apps/eval_expr/eval_expr.cpp index 06566f9ed80..61d1be0c054 100644 --- a/eval/src/apps/eval_expr/eval_expr.cpp +++ b/eval/src/apps/eval_expr/eval_expr.cpp @@ -19,8 +19,8 @@ int main(int argc, char **argv) { fprintf(stderr, "expression error: %s\n", function.get_error().c_str()); return 1; } - InterpretedFunction::Context ctx; InterpretedFunction interpreted(SimpleTensorEngine::ref(), function, NodeTypes()); + InterpretedFunction::Context ctx(interpreted); double result = interpreted.eval(ctx).as_double(); fprintf(stdout, "%.32g\n", result); return 0; diff --git a/eval/src/tests/eval/function_speed/function_speed_test.cpp b/eval/src/tests/eval/function_speed/function_speed_test.cpp index 41463f0ef5b..bdb93daec19 100644 --- a/eval/src/tests/eval/function_speed/function_speed_test.cpp +++ b/eval/src/tests/eval/function_speed/function_speed_test.cpp @@ -21,7 +21,7 @@ double gcc_function(double p, double o, double q, double f, double w) { return (0.35*p + 0.15*o + 0.30*q + 0.20*f) * w; } -InterpretedFunction::Context icontext; +InterpretedFunction::Context icontext(interpreted_function); double interpret_function(double p, double o, double q, double f, double w) { icontext.clear_params(); @@ -52,7 +52,7 @@ double big_gcc_function(double p, double o, double q, double f, double w) { (0.35*p + 0.15*o + 0.30*q + 0.20*f) * w; } -InterpretedFunction::Context big_icontext; +InterpretedFunction::Context big_icontext(big_interpreted_function); double big_interpret_function(double p, double o, double q, double f, double w) { big_icontext.clear_params(); diff --git a/eval/src/tests/eval/gbdt/gbdt_test.cpp b/eval/src/tests/eval/gbdt/gbdt_test.cpp index 58e4fca2d12..12e79941b44 100644 --- a/eval/src/tests/eval/gbdt/gbdt_test.cpp +++ b/eval/src/tests/eval/gbdt/gbdt_test.cpp @@ -17,7 +17,7 @@ using namespace vespalib::eval::gbdt; double eval_double(const Function &function, const std::vector<double> &params) { 
InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(ifun); for (double param: params) { ctx.add_param(param); } diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp index d39427ac232..4a0051303bb 100644 --- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp +++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp @@ -15,29 +15,6 @@ using vespalib::Stash; //----------------------------------------------------------------------------- -std::vector<vespalib::string> unsupported = { - "map(", - "join(", - "reduce(", - "rename(", - "tensor(", - "concat(" -}; - -bool is_unsupported(const vespalib::string &expression) { - if (expression == "reduce(a,sum)") { - return false; - } - for (const auto &prefix: unsupported) { - if (starts_with(expression, prefix)) { - return true; - } - } - return false; -} - -//----------------------------------------------------------------------------- - struct MyEvalTest : test::EvalSpec::EvalTest { size_t pass_cnt = 0; size_t fail_cnt = 0; @@ -48,7 +25,7 @@ struct MyEvalTest : test::EvalSpec::EvalTest { { Function function = Function::parse(param_names, expression); ASSERT_TRUE(!function.has_error()); - bool is_supported = !is_unsupported(expression); + bool is_supported = true; bool has_issues = InterpretedFunction::detect_issues(function); if (is_supported == has_issues) { const char *supported_str = is_supported ? 
"supported" : "not supported"; @@ -65,12 +42,12 @@ struct MyEvalTest : test::EvalSpec::EvalTest { { Function function = Function::parse(param_names, expression); ASSERT_TRUE(!function.has_error()); - bool is_supported = !is_unsupported(expression); + bool is_supported = true; bool has_issues = InterpretedFunction::detect_issues(function); if (is_supported && !has_issues) { InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); ASSERT_EQUAL(ifun.num_params(), param_values.size()); - InterpretedFunction::Context ictx; + InterpretedFunction::Context ictx(ifun); for (double param: param_values) { ictx.add_param(param); } @@ -106,7 +83,7 @@ TEST("require that invalid function evaluates to a error") { Function function = Function::parse(params, "x & y"); EXPECT_TRUE(function.has_error()); InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(ifun); ctx.add_param(1); ctx.add_param(2); ctx.add_param(3); @@ -121,7 +98,7 @@ TEST("require that invalid function evaluates to a error") { size_t count_ifs(const vespalib::string &expr, std::initializer_list<double> params_in) { Function fun = Function::parse(expr); InterpretedFunction ifun(SimpleTensorEngine::ref(), fun, NodeTypes()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(ifun); for (double param: params_in) { ctx.add_param(param); } @@ -147,7 +124,7 @@ TEST("require that interpreted function instructions have expected size") { TEST("require that basic addition works") { Function function = Function::parse("a+10"); InterpretedFunction interpreted(SimpleTensorEngine::ref(), function, NodeTypes()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(interpreted); ctx.add_param(20); EXPECT_EQUAL(interpreted.eval(ctx).as_double(), 30.0); ctx.clear_params(); @@ -165,7 +142,7 @@ TEST("require that dot product like expression is not optimized for unknown type double expect = 
(2.0 * 3.0); InterpretedFunction interpreted(engine, function, NodeTypes()); EXPECT_EQUAL(4u, interpreted.program_size()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(interpreted); ctx.add_param(a); ctx.add_param(b); const Value &result = interpreted.eval(ctx); @@ -188,7 +165,7 @@ TEST("require that dot product works with tensor function") { NodeTypes types(function, {ValueType::from_spec(a.type()), ValueType::from_spec(a.type())}); InterpretedFunction interpreted(engine, function, types); EXPECT_EQUAL(1u, interpreted.program_size()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(interpreted); TensorValue va(engine.create(a)); TensorValue vb(engine.create(b)); ctx.add_param(va); @@ -219,7 +196,7 @@ TEST("require that matrix multiplication works with tensor function") { NodeTypes types(function, {ValueType::from_spec(a.type()), ValueType::from_spec(a.type())}); InterpretedFunction interpreted(engine, function, types); EXPECT_EQUAL(1u, interpreted.program_size()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(interpreted); TensorValue va(engine.create(a)); TensorValue vb(engine.create(b)); ctx.add_param(va); @@ -231,15 +208,27 @@ TEST("require that matrix multiplication works with tensor function") { //----------------------------------------------------------------------------- -TEST("require function issues can be detected") { - auto simple = Function::parse("a+b"); - auto complex = Function::parse("join(a,b,f(a,b)(a+b))"); - EXPECT_FALSE(simple.has_error()); - EXPECT_FALSE(complex.has_error()); - EXPECT_FALSE(InterpretedFunction::detect_issues(simple)); - EXPECT_TRUE(InterpretedFunction::detect_issues(complex)); +TEST("require that functions with non-compilable lambdas cannot be interpreted") { + auto good_map = Function::parse("map(a,f(x)(x+1))"); + auto good_join = Function::parse("join(a,b,f(x,y)(x+y))"); + auto good_tensor = Function::parse("tensor(a[10],b[10])(a+b)"); + auto bad_map 
= Function::parse("map(a,f(x)(map(x,f(i)(i+1))))"); + auto bad_join = Function::parse("join(a,b,f(x,y)(join(x,y,f(i,j)(i+j))))"); + auto bad_tensor = Function::parse("tensor(a[10],b[10])(join(a,b,f(i,j)(i+j)))"); + for (const Function *good: {&good_map, &good_join, &good_tensor}) { + if (!EXPECT_TRUE(!good->has_error())) { + fprintf(stderr, "parse error: %s\n", good->get_error().c_str()); + } + EXPECT_TRUE(!InterpretedFunction::detect_issues(*good)); + } + for (const Function *bad: {&bad_map, &bad_join, &bad_tensor}) { + if (!EXPECT_TRUE(!bad->has_error())) { + fprintf(stderr, "parse error: %s\n", bad->get_error().c_str()); + } + EXPECT_TRUE(InterpretedFunction::detect_issues(*bad)); + } std::cerr << "Example function issues:" << std::endl - << InterpretedFunction::detect_issues(complex).list + << InterpretedFunction::detect_issues(bad_tensor).list << std::endl; } diff --git a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp index 105eb955413..64bec6d1186 100644 --- a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp +++ b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp @@ -69,7 +69,7 @@ double calculate_expression(const vespalib::string &expression, const Params &pa const Function function = Function::parse(expression); const NodeTypes types(function, extract_param_types(function, params)); const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types); - InterpretedFunction::Context context; + InterpretedFunction::Context context(interpreted); inject_params(function, params, context); const Value &result = interpreted.eval(context); EXPECT_TRUE(result.is_double()); @@ -83,7 +83,7 @@ double benchmark_expression_us(const vespalib::string &expression, const Params const Function function = Function::parse(expression); const NodeTypes types(function, extract_param_types(function, params)); const 
InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types); - InterpretedFunction::Context context; + InterpretedFunction::Context context(interpreted); inject_params(function, params, context); auto ranking = [&](){ interpreted.eval(context); }; auto baseline = [&](){ dummy_ranking(context); }; diff --git a/eval/src/vespa/eval/eval/basic_nodes.cpp b/eval/src/vespa/eval/eval/basic_nodes.cpp index 6d1a18dff03..c26f1a87217 100644 --- a/eval/src/vespa/eval/eval/basic_nodes.cpp +++ b/eval/src/vespa/eval/eval/basic_nodes.cpp @@ -27,7 +27,7 @@ double Node::get_const_value() const { assert(is_const()); InterpretedFunction function(SimpleTensorEngine::ref(), *this, 0, NodeTypes()); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(function); return function.eval(ctx).as_double(); } diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp index 9ad98ce6579..2f1cd0b0075 100644 --- a/eval/src/vespa/eval/eval/interpreted_function.cpp +++ b/eval/src/vespa/eval/eval/interpreted_function.cpp @@ -124,6 +124,35 @@ void op_tensor_sum_dimension(State &state, uint64_t param) { //----------------------------------------------------------------------------- +void op_tensor_map(State &state, uint64_t param) { + const CompiledFunction &cfun = unwrap_param<CompiledFunction>(param); + state.replace(1, state.engine.map(state.peek(0), cfun.get_function<1>(), state.stash)); +} + +void op_tensor_join(State &state, uint64_t param) { + const CompiledFunction &cfun = unwrap_param<CompiledFunction>(param); + state.replace(2, state.engine.join(state.peek(1), state.peek(0), cfun.get_function<2>(), state.stash)); +} + +using ReduceParams = std::pair<Aggr,std::vector<vespalib::string>>; +void op_tensor_reduce(State &state, uint64_t param) { + const ReduceParams &params = unwrap_param<ReduceParams>(param); + state.replace(1, state.engine.reduce(state.peek(0), params.first, params.second, state.stash)); 
+} + +using RenameParams = std::pair<std::vector<vespalib::string>,std::vector<vespalib::string>>; +void op_tensor_rename(State &state, uint64_t param) { + const RenameParams &params = unwrap_param<RenameParams>(param); + state.replace(1, state.engine.rename(state.peek(0), params.first, params.second, state.stash)); +} + +void op_tensor_concat(State &state, uint64_t param) { + const vespalib::string &dimension = unwrap_param<vespalib::string>(param); + state.replace(2, state.engine.concat(state.peek(1), state.peek(0), dimension, state.stash)); +} + +//----------------------------------------------------------------------------- + template <typename T> const T &undef_cref() { const T *undef = nullptr; @@ -281,21 +310,21 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { wrap_param<vespalib::string>(stash.create<vespalib::string>(node.dimension()))); } } - virtual void visit(const TensorMap &) { - // TODO(havardpe): add actual evaluation - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<ErrorValue>())); + virtual void visit(const TensorMap &node) { + const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::SEPARATE)); + program.emplace_back(op_tensor_map, wrap_param<CompiledFunction>(token.get()->get())); } - virtual void visit(const TensorJoin &) { - // TODO(havardpe): add actual evaluation - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<ErrorValue>())); + virtual void visit(const TensorJoin &node) { + const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::SEPARATE)); + program.emplace_back(op_tensor_join, wrap_param<CompiledFunction>(token.get()->get())); } - virtual void visit(const TensorReduce &) { - // TODO(havardpe): add actual evaluation - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<ErrorValue>())); + virtual void visit(const TensorReduce &node) { + ReduceParams &params = 
stash.create<ReduceParams>(node.aggr(), node.dimensions()); + program.emplace_back(op_tensor_reduce, wrap_param<ReduceParams>(params)); } - virtual void visit(const TensorRename &) { - // TODO(havardpe): add actual evaluation - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<ErrorValue>())); + virtual void visit(const TensorRename &node) { + RenameParams &params = stash.create<RenameParams>(node.from(), node.to()); + program.emplace_back(op_tensor_rename, wrap_param<RenameParams>(params)); } virtual void visit(const TensorLambda &node) { const auto &type = node.type(); @@ -314,9 +343,9 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { auto tensor = tensor_engine.create(spec); program.emplace_back(op_load_const, wrap_param<Value>(stash.create<TensorValue>(std::move(tensor)))); } - virtual void visit(const TensorConcat &) { - // TODO(havardpe): add actual evaluation - program.emplace_back(op_load_const, wrap_param<Value>(stash.create<ErrorValue>())); + virtual void visit(const TensorConcat &node) { + vespalib::string &dimension = stash.create<vespalib::string>(node.dimension()); + program.emplace_back(op_tensor_concat, wrap_param<vespalib::string>(dimension)); } virtual void visit(const Add &) { program.emplace_back(op_binary<operation::Add>); @@ -471,8 +500,27 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { } }; +const Function *get_lambda(const nodes::Node &node) { + if (auto ptr = as<nodes::TensorMap>(node)) { + return &ptr->lambda(); + } + if (auto ptr = as<nodes::TensorJoin>(node)) { + return &ptr->lambda(); + } + if (auto ptr = as<nodes::TensorLambda>(node)) { + return &ptr->lambda(); + } + return nullptr; +} + } // namespace vespalib::<unnamed> +InterpretedFunction::Context::Context(const InterpretedFunction &ifun) + : _state(ifun._tensor_engine), + _param_stash() +{ +} + +InterpretedFunction::InterpretedFunction(const TensorEngine &engine, const nodes::Node &root, size_t num_params_in, const NodeTypes 
&types) : _program(), _stash(), @@ -505,13 +553,9 @@ InterpretedFunction::detect_issues(const Function &function) std::vector<vespalib::string> issues; bool open(const nodes::Node &) override { return true; } void close(const nodes::Node &node) override { - if (nodes::check_type<nodes::TensorMap, - nodes::TensorJoin, - nodes::TensorReduce, - nodes::TensorRename, - nodes::TensorLambda, - nodes::TensorConcat>(node)) { - issues.push_back(make_string("unsupported node type: %s", + auto lambda = get_lambda(node); + if (lambda && CompiledFunction::detect_issues(*lambda)) { + issues.push_back(make_string("lambda function that cannot be compiled within %s", getClassName(node).c_str())); } } diff --git a/eval/src/vespa/eval/eval/interpreted_function.h b/eval/src/vespa/eval/eval/interpreted_function.h index fa1ea6580dd..e95224c12fc 100644 --- a/eval/src/vespa/eval/eval/interpreted_function.h +++ b/eval/src/vespa/eval/eval/interpreted_function.h @@ -27,13 +27,15 @@ class InterpretedFunction { public: struct State { + const TensorEngine &engine; std::vector<Value::CREF> params; Stash stash; std::vector<Value::CREF> stack; std::vector<Value::CREF> let_values; uint32_t program_offset; uint32_t if_cnt; - State() : params(), stash(), stack(), let_values(), program_offset(0) {} + State(const TensorEngine &engine_in) + : engine(engine_in), params(), stash(), stack(), let_values(), program_offset(0) {} void clear() { stash.clear(); stack.clear(); @@ -57,6 +59,7 @@ public: State _state; Stash _param_stash; public: + explicit Context(const InterpretedFunction &ifun); void clear_params() { _state.params.clear(); _param_stash.clear(); diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp index 0fcdf326cf8..b6c44223edb 100644 --- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp +++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp @@ -341,7 +341,7 @@ struct Expr_V : Eval { Function fun = Function::parse(expr); 
NodeTypes types(fun, {}); InterpretedFunction ifun(engine, fun, types); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(ifun); return Result(check_type(ifun.eval(ctx), types.get_type(fun.root()))); } }; @@ -355,7 +355,7 @@ struct Expr_T : Eval { auto a_type = ValueType::from_spec(a.type()); NodeTypes types(fun, {a_type}); InterpretedFunction ifun(engine, fun, types); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(ifun); TensorValue va(engine.create(a)); ctx.add_param(va); return Result(check_type(ifun.eval(ctx), types.get_type(fun.root()))); @@ -372,7 +372,7 @@ struct Expr_TT : Eval { auto b_type = ValueType::from_spec(b.type()); NodeTypes types(fun, {a_type, b_type}); InterpretedFunction ifun(engine, fun, types); - InterpretedFunction::Context ctx; + InterpretedFunction::Context ctx(ifun); TensorValue va(engine.create(a)); TensorValue vb(engine.create(b)); ctx.add_param(va); |