From 6753465db4726d1b6a126f40217632fd61d77b21 Mon Sep 17 00:00:00 2001
From: Haavard
Date: Mon, 27 Feb 2017 12:05:40 +0000
Subject: interpreted functions now always take lazy parameters

---
 eval/src/apps/eval_expr/eval_expr.cpp               |  3 +-
 eval/src/tests/eval/gbdt/gbdt_test.cpp              |  6 +--
 .../interpreted_function_test.cpp                   | 43 ++++++++--------------
 .../tensor_performance/tensor_performance_test.cpp  | 20 +++++-----
 eval/src/vespa/eval/eval/basic_nodes.cpp            |  9 ++++-
 eval/src/vespa/eval/eval/interpreted_function.cpp   | 42 ++++++++++++++-------
 eval/src/vespa/eval/eval/interpreted_function.h     | 41 +++++++++++++++------
 .../vespa/eval/eval/test/tensor_conformance.cpp     | 12 +++---
 8 files changed, 102 insertions(+), 74 deletions(-)

diff --git a/eval/src/apps/eval_expr/eval_expr.cpp b/eval/src/apps/eval_expr/eval_expr.cpp
index 61d1be0c054..d02ef0ad444 100644
--- a/eval/src/apps/eval_expr/eval_expr.cpp
+++ b/eval/src/apps/eval_expr/eval_expr.cpp
@@ -21,7 +21,8 @@ int main(int argc, char **argv) {
     }
     InterpretedFunction interpreted(SimpleTensorEngine::ref(), function, NodeTypes());
     InterpretedFunction::Context ctx(interpreted);
-    double result = interpreted.eval(ctx).as_double();
+    InterpretedFunction::SimpleParams params({});
+    double result = interpreted.eval(ctx, params).as_double();
     fprintf(stdout, "%.32g\n", result);
     return 0;
 }
diff --git a/eval/src/tests/eval/gbdt/gbdt_test.cpp b/eval/src/tests/eval/gbdt/gbdt_test.cpp
index b5ffb046b22..c96779edeb0 100644
--- a/eval/src/tests/eval/gbdt/gbdt_test.cpp
+++ b/eval/src/tests/eval/gbdt/gbdt_test.cpp
@@ -18,10 +18,8 @@ using namespace vespalib::eval::gbdt;
 double eval_double(const Function &function, const std::vector<double> &params) {
     InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes());
     InterpretedFunction::Context ctx(ifun);
-    for (double param: params) {
-        ctx.add_param(param);
-    }
-    return ifun.eval(ctx).as_double();
+    InterpretedFunction::SimpleParams fun_params(params);
+    return ifun.eval(ctx, fun_params).as_double();
 }
 
 double my_resolve(void *ctx, size_t idx) { return ((double*)ctx)[idx]; }
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index 4a0051303bb..1d4aa677e25 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -48,10 +48,8 @@ struct MyEvalTest : test::EvalSpec::EvalTest {
         InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes());
         ASSERT_EQUAL(ifun.num_params(), param_values.size());
         InterpretedFunction::Context ictx(ifun);
-        for (double param: param_values) {
-            ictx.add_param(param);
-        }
-        const Value &result_value = ifun.eval(ictx);
+        InterpretedFunction::SimpleParams params(param_values);
+        const Value &result_value = ifun.eval(ictx, params);
         double result = result_value.as_double();
         if (result_value.is_double() && is_same(expected_result, result)) {
             print_pass && fprintf(stderr, "verifying: %s -> %g ... PASS\n",
PASS\n", @@ -84,11 +82,8 @@ TEST("require that invalid function evaluates to a error") { EXPECT_TRUE(function.has_error()); InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); InterpretedFunction::Context ctx(ifun); - ctx.add_param(1); - ctx.add_param(2); - ctx.add_param(3); - ctx.add_param(4); - const Value &result = ifun.eval(ctx); + InterpretedFunction::SimpleParams my_params({1,2,3,4}); + const Value &result = ifun.eval(ctx, my_params); EXPECT_TRUE(result.is_error()); EXPECT_EQUAL(error_value, result.as_double()); } @@ -99,10 +94,8 @@ size_t count_ifs(const vespalib::string &expr, std::initializer_list par Function fun = Function::parse(expr); InterpretedFunction ifun(SimpleTensorEngine::ref(), fun, NodeTypes()); InterpretedFunction::Context ctx(ifun); - for (double param: params_in) { - ctx.add_param(param); - } - ifun.eval(ctx); + InterpretedFunction::SimpleParams params(params_in); + ifun.eval(ctx, params); return ctx.if_cnt(); } @@ -125,11 +118,10 @@ TEST("require that basic addition works") { Function function = Function::parse("a+10"); InterpretedFunction interpreted(SimpleTensorEngine::ref(), function, NodeTypes()); InterpretedFunction::Context ctx(interpreted); - ctx.add_param(20); - EXPECT_EQUAL(interpreted.eval(ctx).as_double(), 30.0); - ctx.clear_params(); - ctx.add_param(40); - EXPECT_EQUAL(interpreted.eval(ctx).as_double(), 50.0); + InterpretedFunction::SimpleParams params_20({20}); + InterpretedFunction::SimpleParams params_40({40}); + EXPECT_EQUAL(interpreted.eval(ctx, params_20).as_double(), 30.0); + EXPECT_EQUAL(interpreted.eval(ctx, params_40).as_double(), 50.0); } //----------------------------------------------------------------------------- @@ -143,9 +135,8 @@ TEST("require that dot product like expression is not optimized for unknown type InterpretedFunction interpreted(engine, function, NodeTypes()); EXPECT_EQUAL(4u, interpreted.program_size()); InterpretedFunction::Context ctx(interpreted); - ctx.add_param(a); - ctx.add_param(b); - const Value &result = interpreted.eval(ctx); + InterpretedFunction::SimpleObjectParams params({a,b}); + const Value &result = interpreted.eval(ctx, params); EXPECT_TRUE(result.is_double()); EXPECT_EQUAL(expect, result.as_double()); } @@ -168,9 +159,8 @@ TEST("require that dot product works with tensor function") { InterpretedFunction::Context ctx(interpreted); TensorValue va(engine.create(a)); TensorValue vb(engine.create(b)); - ctx.add_param(va); - ctx.add_param(vb); - const Value &result = interpreted.eval(ctx); + InterpretedFunction::SimpleObjectParams params({va,vb}); + const Value &result = interpreted.eval(ctx, params); EXPECT_TRUE(result.is_double()); EXPECT_EQUAL(expect, result.as_double()); } @@ -199,9 +189,8 @@ TEST("require that matrix multiplication works with tensor function") { InterpretedFunction::Context ctx(interpreted); TensorValue va(engine.create(a)); TensorValue vb(engine.create(b)); - ctx.add_param(va); - ctx.add_param(vb); - const Value &result = interpreted.eval(ctx); + InterpretedFunction::SimpleObjectParams params({va,vb}); + const Value &result = interpreted.eval(ctx, params); ASSERT_TRUE(result.is_tensor()); EXPECT_EQUAL(expect, engine.to_spec(*result.as_tensor())); } diff --git a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp index 64bec6d1186..c7d4006b749 100644 --- a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp +++ 
@@ -42,16 +42,16 @@ struct Params {
     }
 };
 
-void inject_params(const Function &function, const Params &params,
-                   InterpretedFunction::Context &ctx)
+InterpretedFunction::SimpleObjectParams make_params(const Function &function, const Params &params)
 {
-    ctx.clear_params();
+    InterpretedFunction::SimpleObjectParams fun_params({});
     EXPECT_EQUAL(params.map.size(), function.num_params());
     for (size_t i = 0; i < function.num_params(); ++i) {
         auto param = params.map.find(function.param_name(i));
         ASSERT_TRUE(param != params.map.end());
-        ctx.add_param(*(param->second));
+        fun_params.params.push_back(*(param->second));
     }
+    return fun_params;
 }
 
 std::vector<ValueType> extract_param_types(const Function &function, const Params &params) {
@@ -70,23 +70,23 @@ double calculate_expression(const vespalib::string &expression, const Params &pa
     const NodeTypes types(function, extract_param_types(function, params));
     const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types);
     InterpretedFunction::Context context(interpreted);
-    inject_params(function, params, context);
-    const Value &result = interpreted.eval(context);
+    auto fun_params = make_params(function, params);
+    const Value &result = interpreted.eval(context, fun_params);
     EXPECT_TRUE(result.is_double());
     return result.as_double();
 }
 
 DoubleValue dummy_result(0.0);
-const Value &dummy_ranking(InterpretedFunction::Context &) { return dummy_result; }
+const Value &dummy_ranking(InterpretedFunction::Context &, InterpretedFunction::LazyParams &) { return dummy_result; }
 
 double benchmark_expression_us(const vespalib::string &expression, const Params &params) {
     const Function function = Function::parse(expression);
     const NodeTypes types(function, extract_param_types(function, params));
     const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types);
     InterpretedFunction::Context context(interpreted);
-    inject_params(function, params, context);
-    auto ranking = [&](){ interpreted.eval(context); };
-    auto baseline = [&](){ dummy_ranking(context); };
+    auto fun_params = make_params(function, params);
+    auto ranking = [&](){ interpreted.eval(context, fun_params); };
+    auto baseline = [&](){ dummy_ranking(context, fun_params); };
     return BenchmarkTimer::benchmark(ranking, baseline, 5.0) * 1000.0 * 1000.0;
 }
 
diff --git a/eval/src/vespa/eval/eval/basic_nodes.cpp b/eval/src/vespa/eval/eval/basic_nodes.cpp
index c26f1a87217..41ecab014d0 100644
--- a/eval/src/vespa/eval/eval/basic_nodes.cpp
+++ b/eval/src/vespa/eval/eval/basic_nodes.cpp
@@ -21,14 +21,21 @@ struct Frame {
     const Node &next_child() { return node.get_child(child_idx++); }
 };
 
+struct NoParams : InterpretedFunction::LazyParams {
+    const Value &resolve(size_t, Stash &stash) const override {
+        return stash.create<ErrorValue>();
+    }
+};
+
 } // namespace vespalib::eval::nodes::
 
 double
 Node::get_const_value() const {
     assert(is_const());
     InterpretedFunction function(SimpleTensorEngine::ref(), *this, 0, NodeTypes());
+    NoParams no_params;
     InterpretedFunction::Context ctx(function);
-    return function.eval(ctx).as_double();
+    return function.eval(ctx, no_params).as_double();
 }
 
 void
diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp
index 459c87586e3..13622e9999f 100644
--- a/eval/src/vespa/eval/eval/interpreted_function.cpp
+++ b/eval/src/vespa/eval/eval/interpreted_function.cpp
@@ -42,7 +42,7 @@ void op_load_const(State &state, uint64_t param) {
 }
 
 void op_load_param(State &state, uint64_t param) {
-    state.stack.push_back(state.params[param]);
+    state.stack.push_back(state.params->resolve(param, state.stash));
 }
 
 void op_load_let(State &state, uint64_t param) {
@@ -171,14 +171,14 @@ struct TensorFunctionArgArgMeta {
 
 struct ArgArgInput : TensorFunction::Input {
     const TensorFunctionArgArgMeta &meta;
-    const State &state;
-    ArgArgInput(const TensorFunctionArgArgMeta &meta_in, const State &state_in)
+    State &state;
+    ArgArgInput(const TensorFunctionArgArgMeta &meta_in, State &state_in)
         : meta(meta_in), state(state_in) {}
     const Value &get_tensor(size_t id) const override {
         if (id == 0) {
-            return state.params[meta.param1];
+            return state.params->resolve(meta.param1, state.stash);
        } else if (id == 1) {
-            return state.params[meta.param2];
+            return state.params->resolve(meta.param2, state.stash);
        }
        return undef_cref();
    }
@@ -516,9 +516,26 @@ const Function *get_lambda(const nodes::Node &node) {
 
 } // namespace vespalib::
 
+InterpretedFunction::LazyParams::~LazyParams()
+{
+}
+
+const Value &
+InterpretedFunction::SimpleParams::resolve(size_t idx, Stash &stash) const
+{
+    assert(idx < params.size());
+    return stash.create<DoubleValue>(params[idx]);
+}
+
+const Value &
+InterpretedFunction::SimpleObjectParams::resolve(size_t idx, Stash &) const
+{
+    assert(idx < params.size());
+    return params[idx];
+}
+
 InterpretedFunction::Context::Context(const InterpretedFunction &ifun)
-    : _state(ifun._tensor_engine),
-      _param_stash()
+    : _state(ifun._tensor_engine)
 {
 }
 
@@ -533,11 +550,10 @@ InterpretedFunction::InterpretedFunction(const TensorEngine &engine, const nodes
 }
 
 const Value &
-InterpretedFunction::eval(Context &ctx) const
+InterpretedFunction::eval(Context &ctx, const LazyParams &params) const
 {
     State &state = ctx._state;
-    state.clear();
-    assert(state.params.size() == _num_params);
+    state.init(params);
     while (state.program_offset < _program.size()) {
         _program[state.program_offset++].perform(state);
     }
@@ -551,10 +567,8 @@ double
 InterpretedFunction::estimate_cost_us(const std::vector<double> &params, double budget) const
 {
     Context ctx(*this);
-    for (double param: params) {
-        ctx.add_param(param);
-    }
-    auto actual = [&](){eval(ctx);};
+    SimpleParams lazy_params(params);
+    auto actual = [&](){eval(ctx, lazy_params);};
     return BenchmarkTimer::benchmark(actual, budget) * 1000.0 * 1000.0;
 }
 
diff --git a/eval/src/vespa/eval/eval/interpreted_function.h b/eval/src/vespa/eval/eval/interpreted_function.h
index a3a0426c77c..3a3e6173fd5 100644
--- a/eval/src/vespa/eval/eval/interpreted_function.h
+++ b/eval/src/vespa/eval/eval/interpreted_function.h
@@ -26,17 +26,43 @@ class TensorEngine;
 class InterpretedFunction
 {
 public:
+    /**
+     * Interface used to lazy-resolve parameters when needed.
+     **/
+    struct LazyParams {
+        virtual const Value &resolve(size_t idx, Stash &stash) const = 0;
+        virtual ~LazyParams();
+    };
+    /**
+     * Simple wrapper for number-only parameters that are known up
+     * front. Intended for convenience (testing), not performance.
+     **/
+    struct SimpleParams : LazyParams {
+        std::vector<double> params;
+        explicit SimpleParams(const std::vector<double> &params_in) : params(params_in) {}
+        const Value &resolve(size_t idx, Stash &stash) const override;
+    };
+    /**
+     * Simple wrapper for object parameters that are known up
+     * front. Intended for convenience (testing), not performance.
+     **/
+    struct SimpleObjectParams : LazyParams {
+        std::vector<Value::CREF> params;
+        explicit SimpleObjectParams(const std::vector<Value::CREF> &params_in) : params(params_in) {}
+        const Value &resolve(size_t idx, Stash &stash) const override;
+    };
     struct State {
         const TensorEngine &engine;
-        std::vector<Value::CREF> params;
+        const LazyParams *params;
         Stash stash;
         std::vector<Value::CREF> stack;
         std::vector<Value::CREF> let_values;
         uint32_t program_offset;
         uint32_t if_cnt;
         State(const TensorEngine &engine_in)
-            : engine(engine_in), params(), stash(), stack(), let_values(), program_offset(0) {}
-        void clear() {
+            : engine(engine_in), params(nullptr), stash(), stack(), let_values(), program_offset(0) {}
+        void init(const LazyParams &params_in) {
+            params = &params_in;
             stash.clear();
             stack.clear();
             let_values.clear();
@@ -57,15 +83,8 @@ public:
         friend class InterpretedFunction;
     private:
         State _state;
-        Stash _param_stash;
     public:
         explicit Context(const InterpretedFunction &ifun);
-        void clear_params() {
-            _state.params.clear();
-            _param_stash.clear();
-        }
-        void add_param(const Value &param) { _state.params.push_back(param); }
-        void add_param(double param) { add_param(_param_stash.create<DoubleValue>(param)); }
         uint32_t if_cnt() const { return _state.if_cnt; }
     };
     using op_function = void (*)(State &, uint64_t);
@@ -96,7 +115,7 @@ public:
     InterpretedFunction(InterpretedFunction &&rhs) = default;
     size_t program_size() const { return _program.size(); }
     size_t num_params() const { return _num_params; }
-    const Value &eval(Context &ctx) const;
+    const Value &eval(Context &ctx, const LazyParams &params) const;
     double estimate_cost_us(const std::vector<double> &params, double budget = 5.0) const;
     static Function::Issues detect_issues(const Function &function);
 };
diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
index 4b3939151c2..2445160bcd9 100644
--- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
+++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
@@ -342,7 +342,8 @@ struct Expr_V : Eval {
         NodeTypes types(fun, {});
         InterpretedFunction ifun(engine, fun, types);
         InterpretedFunction::Context ctx(ifun);
-        return Result(check_type(ifun.eval(ctx), types.get_type(fun.root())));
+        InterpretedFunction::SimpleObjectParams params({});
+        return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
     }
 };
 
@@ -357,8 +358,8 @@ struct Expr_T : Eval {
         InterpretedFunction ifun(engine, fun, types);
         InterpretedFunction::Context ctx(ifun);
         TensorValue va(engine.create(a));
-        ctx.add_param(va);
-        return Result(check_type(ifun.eval(ctx), types.get_type(fun.root())));
+        InterpretedFunction::SimpleObjectParams params({va});
+        return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
     }
 };
 
@@ -375,9 +376,8 @@ struct Expr_TT : Eval {
         InterpretedFunction::Context ctx(ifun);
         TensorValue va(engine.create(a));
         TensorValue vb(engine.create(b));
-        ctx.add_param(va);
-        ctx.add_param(vb);
-        return Result(check_type(ifun.eval(ctx), types.get_type(fun.root())));
+        InterpretedFunction::SimpleObjectParams params({va,vb});
+        return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
     }
 };
-- 
cgit v1.2.3
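
Usage sketch (not part of the commit): the snippet below illustrates the calling
convention this patch introduces, assuming the API exactly as declared in
interpreted_function.h above. The include paths are inferred from the file layout
shown in the diff, and OnDemandParams / lookup_param are hypothetical names used
only to show how a caller-defined LazyParams can defer parameter resolution until
the interpreted program actually asks for a value; SimpleParams, Context, and
eval(ctx, params) are taken directly from the patch.

    #include <vespa/eval/eval/function.h>
    #include <vespa/eval/eval/interpreted_function.h>
    #include <vespa/eval/eval/node_types.h>
    #include <vespa/eval/eval/simple_tensor_engine.h>
    #include <cstdio>

    using namespace vespalib::eval;

    // Hypothetical lazy parameter source: values are produced only when the
    // interpreted program calls resolve() for a given parameter index.
    struct OnDemandParams : InterpretedFunction::LazyParams {
        static double lookup_param(size_t idx) { return double(idx) + 1.0; }
        const Value &resolve(size_t idx, vespalib::Stash &stash) const override {
            // the stash owns the temporary Value created for this lookup
            return stash.create<DoubleValue>(lookup_param(idx));
        }
    };

    int main() {
        Function fun = Function::parse("a+b");
        InterpretedFunction ifun(SimpleTensorEngine::ref(), fun, NodeTypes());
        InterpretedFunction::Context ctx(ifun);

        // eager convenience wrapper from the patch: all values known up front
        InterpretedFunction::SimpleParams simple({1.0, 2.0});
        fprintf(stdout, "%g\n", ifun.eval(ctx, simple).as_double()); // prints 3

        // lazy variant: parameters are resolved per use, per evaluation
        OnDemandParams lazy;
        fprintf(stdout, "%g\n", ifun.eval(ctx, lazy).as_double());   // prints 3
        return 0;
    }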