From 5f51e1c8028483a0d200e30e42a5f7f55cec38bf Mon Sep 17 00:00:00 2001 From: Håvard Pettersen Date: Fri, 9 Feb 2018 11:58:40 +0000 Subject: gc deprecated tests --- eval/CMakeLists.txt | 1 - .../interpreted_function_test.cpp | 150 ------------------- .../dense_tensor_function_optimizer/CMakeLists.txt | 8 -- .../tensor/dense_tensor_function_optimizer/FILES | 1 - .../dense_tensor_function_optimizer_test.cpp | 158 --------------------- 5 files changed, 318 deletions(-) delete mode 100644 eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt delete mode 100644 eval/src/tests/tensor/dense_tensor_function_optimizer/FILES delete mode 100644 eval/src/tests/tensor/dense_tensor_function_optimizer/dense_tensor_function_optimizer_test.cpp (limited to 'eval') diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt index 8378af53098..3e0c0a12a4f 100644 --- a/eval/CMakeLists.txt +++ b/eval/CMakeLists.txt @@ -27,7 +27,6 @@ vespa_define_module( src/tests/tensor/dense_dot_product_function src/tests/tensor/dense_tensor_address_combiner src/tests/tensor/dense_tensor_builder - src/tests/tensor/dense_tensor_function_optimizer src/tests/tensor/dense_xw_product_function src/tests/tensor/vector_from_doubles_function src/tests/tensor/sparse_tensor_builder diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp index 6b11e2e7034..714eb870b3e 100644 --- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp +++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp @@ -154,156 +154,6 @@ TEST("require that basic addition works") { //----------------------------------------------------------------------------- -struct InnerProduct { - const TensorEngine &engine; - Function function; - TensorSpec a; - TensorSpec b; - TensorSpec expect; - NodeTypes types; - InterpretedFunction interpreted; - ~InnerProduct() {} - InnerProduct(const vespalib::string 
&expr) - : engine(DefaultTensorEngine::ref()), - function(Function::parse({"a", "b"}, expr)), - a("null"), b("null"), expect("null"), - types(), - interpreted(engine, function, types) {} - InnerProduct(const vespalib::string &expr, - TensorSpec a_in, - TensorSpec b_in, - TensorSpec expect_in) - : engine(DefaultTensorEngine::ref()), - function(Function::parse(expr)), - a(a_in), b(b_in), expect(expect_in), - types(function, {ValueType::from_spec(a.type()), ValueType::from_spec(b.type())}), - interpreted(engine, function, types) {} - void verify_optimized() const { - EXPECT_LESS(interpreted.program_size(), 4u); - InterpretedFunction::Context ctx(interpreted); - Value::UP va = engine.from_spec(a); - Value::UP vb = engine.from_spec(b); - SimpleObjectParams params({*va,*vb}); - const Value &result = interpreted.eval(ctx, params); - EXPECT_EQUAL(engine.to_spec(result), expect); - } - void verify_not_optimized() const { - EXPECT_EQUAL(4u, interpreted.program_size()); - } -}; - -struct UntypedIP : InnerProduct { - UntypedIP(const vespalib::string &expr) : InnerProduct(expr) { - a = TensorSpec("double").add({}, 2.0); - b = TensorSpec("double").add({}, 3.0); - expect = TensorSpec("double").add({}, 6.0); - } -}; - -struct DotProduct : InnerProduct { - DotProduct(const vespalib::string &expr) - : InnerProduct(expr, - TensorSpec("tensor(x[3])") - .add({{"x", 0}}, 5.0) - .add({{"x", 1}}, 3.0) - .add({{"x", 2}}, 2.0), - TensorSpec("tensor(x[3])") - .add({{"x", 0}}, 7.0) - .add({{"x", 1}}, 11.0) - .add({{"x", 2}}, 13.0), - TensorSpec("double") - .add({}, (5.0 * 7.0) + (3.0 * 11.0) + (2.0 * 13.0))) {} -}; - -struct XW : InnerProduct { - XW(const vespalib::string &expr) - : InnerProduct(expr, - TensorSpec("tensor(x[2])") - .add({{"x", 0}}, 1.0) - .add({{"x", 1}}, 2.0), - TensorSpec("tensor(x[2],y[3])") - .add({{"y", 0},{"x", 0}}, 3.0) - .add({{"y", 0},{"x", 1}}, 5.0) - .add({{"y", 1},{"x", 0}}, 7.0) - .add({{"y", 1},{"x", 1}}, 11.0) - .add({{"y", 2},{"x", 0}}, 13.0) - .add({{"y", 
2},{"x", 1}}, 17.0), - TensorSpec("tensor(y[3])") - .add({{"y", 0}}, (1.0 * 3.0) + (2.0 * 5.0)) - .add({{"y", 1}}, (1.0 * 7.0) + (2.0 * 11.0)) - .add({{"y", 2}}, (1.0 * 13.0) + (2.0 * 17.0))) {} -}; - -struct MatMul : InnerProduct { - MatMul(const vespalib::string &expr) - : InnerProduct(expr, - TensorSpec("tensor(x[2],y[2])") - .add({{"x", 0},{"y", 0}}, 1.0) - .add({{"x", 0},{"y", 1}}, 2.0) - .add({{"x", 1},{"y", 0}}, 3.0) - .add({{"x", 1},{"y", 1}}, 5.0), - TensorSpec("tensor(y[2],z[2])") - .add({{"y", 0},{"z", 0}}, 7.0) - .add({{"y", 0},{"z", 1}}, 11.0) - .add({{"y", 1},{"z", 0}}, 13.0) - .add({{"y", 1},{"z", 1}}, 17.0), - TensorSpec("tensor(x[2],z[2])") - .add({{"x", 0},{"z", 0}}, (1.0 * 7.0) + (2.0 * 13.0)) - .add({{"x", 0},{"z", 1}}, (1.0 * 11.0) + (2.0 * 17.0)) - .add({{"x", 1},{"z", 0}}, (3.0 * 7.0) + (5.0 * 13.0)) - .add({{"x", 1},{"z", 1}}, (3.0 * 11.0) + (5.0 * 17.0))) {} -}; - -TEST("require that inner product is not optimized for unknown types") { - TEST_DO(UntypedIP("reduce(a*b,sum)").verify_not_optimized()); - TEST_DO(UntypedIP("reduce(join(a,b,f(x,y)(x*y)),sum)").verify_not_optimized()); -} - -TEST("require that dot product works with tensor function") { - TEST_DO(DotProduct("reduce(a*b,sum)").verify_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*y)),sum)").verify_optimized()); - TEST_DO(DotProduct("reduce(b*a,sum)").verify_optimized()); - TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(x*y)),sum)").verify_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(y*x)),sum)").verify_optimized()); - TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(y*x)),sum)").verify_optimized()); - TEST_DO(DotProduct("reduce(a*b,sum,x)").verify_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*y)),sum,x)").verify_optimized()); - TEST_DO(DotProduct("reduce(b*a,sum,x)").verify_optimized()); - TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(x*y)),sum,x)").verify_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(y*x)),sum,x)").verify_optimized()); - 
TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(y*x)),sum,x)").verify_optimized()); -} - -TEST("require that vector matrix multiplication works with tensor function") { - TEST_DO(XW("reduce(a*b,sum,x)").verify_optimized()); - TEST_DO(XW("reduce(join(a,b,f(x,y)(x*y)),sum,x)").verify_optimized()); - TEST_DO(XW("reduce(b*a,sum,x)").verify_optimized()); - TEST_DO(XW("reduce(join(b,a,f(x,y)(x*y)),sum,x)").verify_optimized()); - TEST_DO(XW("reduce(join(a,b,f(x,y)(y*x)),sum,x)").verify_optimized()); - TEST_DO(XW("reduce(join(b,a,f(x,y)(y*x)),sum,x)").verify_optimized()); -} - -TEST("require that matrix multiplication is not optimized (yet)") { - TEST_DO(MatMul("reduce(a*b,sum,y)").verify_not_optimized()); - TEST_DO(MatMul("reduce(join(a,b,f(x,y)(x*y)),sum,y)").verify_not_optimized()); - TEST_DO(MatMul("reduce(b*a,sum,y)").verify_not_optimized()); - TEST_DO(MatMul("reduce(join(b,a,f(x,y)(x*y)),sum,y)").verify_not_optimized()); - TEST_DO(MatMul("reduce(join(a,b,f(x,y)(y*x)),sum,y)").verify_not_optimized()); - TEST_DO(MatMul("reduce(join(b,a,f(x,y)(y*x)),sum,y)").verify_not_optimized()); -} - -TEST("require that expressions similar to inner product are not optimized") { - TEST_DO(DotProduct("reduce(a*b,prod)").verify_not_optimized()); - TEST_DO(DotProduct("reduce(a*b,max)").verify_not_optimized()); - TEST_DO(DotProduct("reduce(a+b,sum)").verify_not_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x+y)),sum)").verify_not_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*x)),sum)").verify_not_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(y*y)),sum)").verify_not_optimized()); - TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*y*1)),sum)").verify_not_optimized()); -} - -//----------------------------------------------------------------------------- - TEST("require that functions with non-compilable lambdas cannot be interpreted") { auto good_map = Function::parse("map(a,f(x)(x+1))"); auto good_join = Function::parse("join(a,b,f(x,y)(x+y))"); diff --git 
a/eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt b/eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt deleted file mode 100644 index 3a95ef776d7..00000000000 --- a/eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(eval_dense_tensor_function_optimizer_test_app TEST - SOURCES - dense_tensor_function_optimizer_test.cpp - DEPENDS - vespaeval -) -vespa_add_test(NAME eval_dense_tensor_function_optimizer_test_app COMMAND eval_dense_tensor_function_optimizer_test_app) diff --git a/eval/src/tests/tensor/dense_tensor_function_optimizer/FILES b/eval/src/tests/tensor/dense_tensor_function_optimizer/FILES deleted file mode 100644 index 3c4ec2f1753..00000000000 --- a/eval/src/tests/tensor/dense_tensor_function_optimizer/FILES +++ /dev/null @@ -1 +0,0 @@ -dense_tensor_function_compiler_test.cpp diff --git a/eval/src/tests/tensor/dense_tensor_function_optimizer/dense_tensor_function_optimizer_test.cpp b/eval/src/tests/tensor/dense_tensor_function_optimizer/dense_tensor_function_optimizer_test.cpp deleted file mode 100644 index 269a1e265c5..00000000000 --- a/eval/src/tests/tensor/dense_tensor_function_optimizer/dense_tensor_function_optimizer_test.cpp +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include -#include -#include -#include - -using namespace vespalib::eval; -using namespace vespalib::eval::operation; -using namespace vespalib::eval::tensor_function; -using namespace vespalib::tensor; -using vespalib::Stash; - -//----------------------------------------------------------------------------- - -const TensorFunction & -optimizeDotProduct(const vespalib::string &lhsType, - const vespalib::string &rhsType, - Stash &stash) -{ - const Node &reduceNode = reduce(join(inject(ValueType::from_spec(lhsType), 1, stash), - inject(ValueType::from_spec(rhsType), 3, stash), - Mul::f, stash), - Aggr::SUM, {}, stash); - return DenseDotProductFunction::optimize(reduceNode, stash); -} - -void assertParam(const TensorFunction &node, size_t expect_idx) { - auto inject = as(node); - ASSERT_TRUE(inject); - EXPECT_EQUAL(inject->param_idx(), expect_idx); -} - -void -assertOptimizedDotProduct(const vespalib::string &lhsType, - const vespalib::string &rhsType) -{ - Stash stash; - const TensorFunction &func = optimizeDotProduct(lhsType, rhsType, stash); - const DenseDotProductFunction *dotProduct = as(func); - ASSERT_TRUE(dotProduct); - TEST_DO(assertParam(dotProduct->lhs(), 1)); - TEST_DO(assertParam(dotProduct->rhs(), 3)); -} - -void -assertNotOptimizedDotProduct(const vespalib::string &lhsType, - const vespalib::string &rhsType) -{ - Stash stash; - const TensorFunction &func = optimizeDotProduct(lhsType, rhsType, stash); - const Reduce *reduce = as(func); - EXPECT_TRUE(reduce); -} - -//----------------------------------------------------------------------------- - -const TensorFunction & -optimizeXWProduct(const vespalib::string &lhsType, - const vespalib::string &rhsType, - const vespalib::string &dim, - Stash &stash) -{ - const Node &reduceNode = reduce(join(inject(ValueType::from_spec(lhsType), 1, stash), - inject(ValueType::from_spec(rhsType), 3, stash), - Mul::f, stash), - Aggr::SUM, {dim}, stash); - return DenseXWProductFunction::optimize(reduceNode, stash); -} - 
-void -assertOptimizedXWProduct(const vespalib::string &vecTypeStr, - const vespalib::string &matTypeStr, - const vespalib::string &dim) -{ - Stash stash; - const TensorFunction &func = optimizeXWProduct(vecTypeStr, matTypeStr, dim, stash); - const TensorFunction &inv_func = optimizeXWProduct(matTypeStr, vecTypeStr, dim, stash); - const DenseXWProductFunction *xwProduct = as(func); - const DenseXWProductFunction *inv_xwProduct = as(inv_func); - ValueType vecType = ValueType::from_spec(vecTypeStr); - ValueType matType = ValueType::from_spec(matTypeStr); - size_t common_idx = matType.dimension_index(vecType.dimensions()[0].name); - ASSERT_TRUE(xwProduct); - ASSERT_TRUE(inv_xwProduct); - ASSERT_TRUE(common_idx != ValueType::Dimension::npos); - TEST_DO(assertParam(xwProduct->lhs(), 1)); - TEST_DO(assertParam(inv_xwProduct->lhs(), 3)); - TEST_DO(assertParam(xwProduct->rhs(), 3)); - TEST_DO(assertParam(inv_xwProduct->rhs(), 1)); - EXPECT_EQUAL(xwProduct->vectorSize(), vecType.dimensions()[0].size); - EXPECT_EQUAL(inv_xwProduct->vectorSize(), vecType.dimensions()[0].size); - EXPECT_EQUAL(xwProduct->resultSize(), matType.dimensions()[1 - common_idx].size); - EXPECT_EQUAL(inv_xwProduct->resultSize(), matType.dimensions()[1 - common_idx].size); - EXPECT_EQUAL(xwProduct->matrixHasCommonDimensionInnermost(), (common_idx == 1)); - EXPECT_EQUAL(inv_xwProduct->matrixHasCommonDimensionInnermost(), (common_idx == 1)); -} - -void -assertNotOptimizedXWProduct(const vespalib::string &vecType, - const vespalib::string &matType, - const vespalib::string &dim) -{ - Stash stash; - const TensorFunction &func = optimizeXWProduct(vecType, matType, dim, stash); - const TensorFunction &inv_func = optimizeXWProduct(matType, vecType, dim, stash); - const Reduce *reduce = as(func); - const Reduce *inv_reduce = as(inv_func); - EXPECT_TRUE(reduce); - EXPECT_TRUE(inv_reduce); -} - -//----------------------------------------------------------------------------- - -TEST("require that dot product with 
compatible dimensions is optimized") -{ - TEST_DO(assertOptimizedDotProduct("tensor(x[5])", "tensor(x[5])")); - TEST_DO(assertOptimizedDotProduct("tensor(x[3])", "tensor(x[5])")); - TEST_DO(assertOptimizedDotProduct("tensor(x[5])", "tensor(x[3])")); - TEST_DO(assertOptimizedDotProduct("tensor(x[])", "tensor(x[5])")); - TEST_DO(assertOptimizedDotProduct("tensor(x[5])", "tensor(x[])")); - TEST_DO(assertOptimizedDotProduct("tensor(x[])", "tensor(x[])")); -} - -TEST("require that dot product with incompatible dimensions is NOT optimized") -{ - TEST_DO(assertNotOptimizedDotProduct("tensor(x[5])", "tensor(y[5])")); - TEST_DO(assertNotOptimizedDotProduct("tensor(y[5])", "tensor(x[5])")); - TEST_DO(assertNotOptimizedDotProduct("tensor(y[])", "tensor(x[])")); - TEST_DO(assertNotOptimizedDotProduct("tensor(x[5])", "tensor(x[5],y[7])")); - TEST_DO(assertNotOptimizedDotProduct("tensor(x[5],y[7])", "tensor(x[5],y[7])")); -} - -//----------------------------------------------------------------------------- - -TEST("require that xw products with compatible dimensions are optimized") { - TEST_DO(assertOptimizedXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "x")); - TEST_DO(assertOptimizedXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "y")); -} - -TEST("require that xw products with incompatible dimensions are not optimized") { - TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "y")); - TEST_DO(assertNotOptimizedXWProduct("tensor(x[])", "tensor(x[3],y[4])", "x")); - TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(x[],y[4])", "x")); - TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(x[3],y[])", "x")); - TEST_DO(assertNotOptimizedXWProduct("tensor(x[2])", "tensor(x[3],y[4])", "x")); - TEST_DO(assertNotOptimizedXWProduct("tensor(x[4])", "tensor(x[3],y[4])", "x")); - TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "x")); - TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "y")); - 
TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "z")); - TEST_DO(assertNotOptimizedXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "x")); - TEST_DO(assertNotOptimizedXWProduct("tensor(y[3])", "tensor(x[3],y[4])", "y")); - TEST_DO(assertNotOptimizedXWProduct("tensor(y[5])", "tensor(x[3],y[4])", "y")); -} - -//----------------------------------------------------------------------------- - -TEST_MAIN() { TEST_RUN_ALL(); } -- cgit v1.2.3