diff options
author | Arne H Juul <arnej27959@users.noreply.github.com> | 2017-10-27 13:46:33 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2017-10-27 13:46:33 +0200 |
commit | 5fcbb66f52d44b286f0898ab318f7e6269330f4e (patch) | |
tree | 959be27611d30ff8094b5d6581e9ea67e9d9c8b0 /eval | |
parent | d4b17766643ca102c72442866ea38c5174c469c4 (diff) | |
parent | b6bbf2cdcaf9b5346a7dc072220f89fdf6325111 (diff) |
Merge pull request #3923 from vespa-engine/havardpe/more-diverse-type-knowledge-testing
Havardpe/more diverse type knowledge testing
Diffstat (limited to 'eval')
3 files changed, 37 insertions, 23 deletions
diff --git a/eval/src/apps/tensor_conformance/tensor_conformance.cpp b/eval/src/apps/tensor_conformance/tensor_conformance.cpp index f594bdc207f..367dca33515 100644 --- a/eval/src/apps/tensor_conformance/tensor_conformance.cpp +++ b/eval/src/apps/tensor_conformance/tensor_conformance.cpp @@ -155,18 +155,22 @@ TensorSpec extract_value(const Inspector &inspector) { //----------------------------------------------------------------------------- -TensorSpec eval_expr(const Inspector &test, const TensorEngine &engine) { +std::vector<ValueType> get_types(const std::vector<Value::CREF> &param_values) { + std::vector<ValueType> param_types; + for (size_t i = 0; i < param_values.size(); ++i) { + param_types.emplace_back(param_values[i].get().type()); + } + return param_types; +} + +TensorSpec eval_expr(const Inspector &test, const TensorEngine &engine, bool typed) { Stash stash; Function fun = Function::parse(test["expression"].asString().make_string()); std::vector<Value::CREF> param_values; - std::vector<ValueType> param_types; for (size_t i = 0; i < fun.num_params(); ++i) { param_values.emplace_back(to_value(extract_value(test["inputs"][fun.param_name(i)]), engine, stash)); } - for (size_t i = 0; i < fun.num_params(); ++i) { - param_types.emplace_back(param_values[i].get().type()); - } - NodeTypes types(fun, param_types); + NodeTypes types = typed ? 
NodeTypes(fun, get_types(param_values)) : NodeTypes(); InterpretedFunction ifun(engine, fun, types); InterpretedFunction::Context ctx(ifun); InterpretedFunction::SimpleObjectParams params(param_values); @@ -207,7 +211,7 @@ private: insert_value(test.setObject("result"), "expect", *expect); } else { insert_value(test.setObject("result"), "expect", - eval_expr(slime.get(), SimpleTensorEngine::ref())); + eval_expr(slime.get(), SimpleTensorEngine::ref(), false)); } write_compact(slime, _out); ++_num_tests; @@ -274,8 +278,12 @@ void for_each_test(Input &in, void evaluate(Input &in, Output &out) { auto handle_test = [&out](Slime &slime) { - insert_value(slime["result"], "prod_cpp", - eval_expr(slime.get(), DefaultTensorEngine::ref())); + insert_value(slime["result"], "cpp_prod", + eval_expr(slime.get(), DefaultTensorEngine::ref(), true)); + insert_value(slime["result"], "cpp_prod_untyped", + eval_expr(slime.get(), DefaultTensorEngine::ref(), false)); + insert_value(slime["result"], "cpp_ref_typed", + eval_expr(slime.get(), SimpleTensorEngine::ref(), true)); write_compact(slime, out); }; auto handle_summary = [&out](Slime &slime) @@ -299,7 +307,7 @@ void verify(Input &in, Output &out) { std::map<vespalib::string,size_t> result_map; auto handle_test = [&out,&result_map](Slime &slime) { - TensorSpec reference_result = eval_expr(slime.get(), SimpleTensorEngine::ref()); + TensorSpec reference_result = eval_expr(slime.get(), SimpleTensorEngine::ref(), false); for (const auto &result: extract_fields(slime["result"])) { ++result_map[result]; TEST_STATE(make_string("verifying result: '%s'", result.c_str()).c_str()); diff --git a/eval/src/tests/eval/function_speed/function_speed_test.cpp b/eval/src/tests/eval/function_speed/function_speed_test.cpp index ac1471544ed..65866de7ddd 100644 --- a/eval/src/tests/eval/function_speed/function_speed_test.cpp +++ b/eval/src/tests/eval/function_speed/function_speed_test.cpp @@ -39,15 +39,16 @@ double big_gcc_function(double p, double o, 
double q, double f, double w) { struct Fixture { Function function; - InterpretedFunction interpreted; InterpretedFunction interpreted_simple; + InterpretedFunction interpreted; CompiledFunction separate; CompiledFunction array; CompiledFunction lazy; Fixture(const vespalib::string &expr) : function(Function::parse(expr)), - interpreted(DefaultTensorEngine::ref(), function, NodeTypes()), interpreted_simple(SimpleTensorEngine::ref(), function, NodeTypes()), + interpreted(DefaultTensorEngine::ref(), function, + NodeTypes(function, std::vector<ValueType>(function.num_params(), ValueType::double_type()))), separate(function, PassParams::SEPARATE), array(function, PassParams::ARRAY), lazy(function, PassParams::LAZY) {} @@ -76,18 +77,18 @@ TEST("measure small function eval/jit/gcc speed") { EXPECT_EQUAL(fixture.separate.get_function<5>()(1,2,3,4,5), fun(1,2,3,4,5)); EXPECT_EQUAL(fixture.separate.get_function<5>()(5,4,3,2,1), fun(5,4,3,2,1)); - double interpret_time = fixture.interpreted.estimate_cost_us(test_params, budget); - fprintf(stderr, "interpret: %g us\n", interpret_time); double interpret_simple_time = fixture.interpreted_simple.estimate_cost_us(test_params, budget); fprintf(stderr, "interpret (simple): %g us\n", interpret_simple_time); + double interpret_time = fixture.interpreted.estimate_cost_us(test_params, budget); + fprintf(stderr, "interpret: %g us\n", interpret_time); double jit_time = estimate_cost_us(test_params, fixture.separate.get_function<5>()); fprintf(stderr, "jit compiled: %g us\n", jit_time); double gcc_time = estimate_cost_us(test_params, fun); fprintf(stderr, "gcc compiled: %g us\n", gcc_time); - double simple_vs_default_speed = (1.0/interpret_simple_time)/(1.0/interpret_time); + double default_vs_simple_speed = (1.0/interpret_time)/(1.0/interpret_simple_time); double jit_vs_interpret_speed = (1.0/jit_time)/(1.0/interpret_time); double gcc_vs_jit_speed = (1.0/gcc_time)/(1.0/jit_time); - fprintf(stderr, "simple vs default interpret speed: 
%g\n", simple_vs_default_speed); + fprintf(stderr, "default typed vs simple untyped interpret speed: %g\n", default_vs_simple_speed); fprintf(stderr, "jit speed compared to interpret: %g\n", jit_vs_interpret_speed); fprintf(stderr, "gcc speed compared to jit: %g\n", gcc_vs_jit_speed); @@ -110,18 +111,18 @@ TEST("measure big function eval/jit/gcc speed") { EXPECT_EQUAL(fixture.separate.get_function<5>()(1,2,3,4,5), fun(1,2,3,4,5)); EXPECT_EQUAL(fixture.separate.get_function<5>()(5,4,3,2,1), fun(5,4,3,2,1)); + double interpret_simple_time = fixture.interpreted_simple.estimate_cost_us(test_params, budget); + fprintf(stderr, "interpret (simple): %g us\n", interpret_simple_time); double interpret_time = fixture.interpreted.estimate_cost_us(test_params, budget); fprintf(stderr, "interpret: %g us\n", interpret_time); - double interpret_simple_time = fixture.interpreted_simple.estimate_cost_us(test_params, budget); - fprintf(stderr, "interpret (simple): %g us\n", interpret_time); double jit_time = estimate_cost_us(test_params, fixture.separate.get_function<5>()); fprintf(stderr, "jit compiled: %g us\n", jit_time); double gcc_time = estimate_cost_us(test_params, fun); fprintf(stderr, "gcc compiled: %g us\n", gcc_time); - double simple_vs_default_speed = (1.0/interpret_simple_time)/(1.0/interpret_time); + double default_vs_simple_speed = (1.0/interpret_time)/(1.0/interpret_simple_time); double jit_vs_interpret_speed = (1.0/jit_time)/(1.0/interpret_time); double gcc_vs_jit_speed = (1.0/gcc_time)/(1.0/jit_time); - fprintf(stderr, "simple vs default interpret speed: %g\n", simple_vs_default_speed); + fprintf(stderr, "default typed vs simple untyped interpret speed: %g\n", default_vs_simple_speed); fprintf(stderr, "jit speed compared to interpret: %g\n", jit_vs_interpret_speed); fprintf(stderr, "gcc speed compared to jit: %g\n", gcc_vs_jit_speed); diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp 
b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp index 0e548a3b82e..a443ccb3d01 100644 --- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp +++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp @@ -51,18 +51,23 @@ struct MyEvalTest : test::EvalSpec::EvalTest { if (is_supported && !has_issues) { vespalib::string desc = as_string(param_names, param_values, expression); InterpretedFunction::SimpleParams params(param_values); - verify_result(SimpleTensorEngine::ref(), function, "[simple] "+desc, params, expected_result); - verify_result(DefaultTensorEngine::ref(), function, " [prod] "+desc, params, expected_result); + verify_result(SimpleTensorEngine::ref(), function, false, "[untyped simple] "+desc, params, expected_result); + verify_result(DefaultTensorEngine::ref(), function, false, "[untyped prod] "+desc, params, expected_result); + verify_result(DefaultTensorEngine::ref(), function, true, "[typed prod] "+desc, params, expected_result); } } void verify_result(const TensorEngine& engine, const Function &function, + bool typed, const vespalib::string &description, const InterpretedFunction::SimpleParams &params, double expected_result) { - InterpretedFunction ifun(engine, function, NodeTypes()); + NodeTypes node_types = typed + ? NodeTypes(function, std::vector<ValueType>(params.params.size(), ValueType::double_type())) + : NodeTypes(); + InterpretedFunction ifun(engine, function, node_types); ASSERT_EQUAL(ifun.num_params(), params.params.size()); InterpretedFunction::Context ictx(ifun); const Value &result_value = ifun.eval(ictx, params); |