-rw-r--r--  eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp  201
-rw-r--r--  eval/src/vespa/eval/eval/interpreted_function.cpp                         29
2 files changed, 160 insertions, 70 deletions
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index 76f776df552..29bff7fbd69 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -151,73 +151,152 @@ TEST("require that basic addition works") {
//-----------------------------------------------------------------------------
-TEST("require that dot product like expression is not optimized for unknown types") {
- const TensorEngine &engine = SimpleTensorEngine::ref();
- Function function = Function::parse("reduce(a*b,sum)");
- DoubleValue a(2.0);
- DoubleValue b(3.0);
- double expect = (2.0 * 3.0);
- InterpretedFunction interpreted(engine, function, NodeTypes());
- EXPECT_EQUAL(4u, interpreted.program_size());
- InterpretedFunction::Context ctx(interpreted);
- InterpretedFunction::SimpleObjectParams params({a,b});
- const Value &result = interpreted.eval(ctx, params);
- EXPECT_TRUE(result.is_double());
- EXPECT_EQUAL(expect, result.as_double());
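+// Fixture for inner-product style expressions: an expression is compiled into
+// an InterpretedFunction together with input and expected output TensorSpecs.
+// The typed constructor supplies concrete parameter types; the untyped one
+// leaves them unknown, which should block the tensor function optimization.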
+struct InnerProduct {
+ const TensorEngine &engine;
+ Function function;
+ TensorSpec a;
+ TensorSpec b;
+ TensorSpec expect;
+ NodeTypes types;
+ InterpretedFunction interpreted;
+ ~InnerProduct() {}
+ InnerProduct(const vespalib::string &expr)
+ : engine(SimpleTensorEngine::ref()),
+ function(Function::parse({"a", "b"}, expr)),
+ a("null"), b("null"), expect("null"),
+ types(),
+ interpreted(engine, function, types) {}
+ InnerProduct(const vespalib::string &expr,
+ TensorSpec a_in,
+ TensorSpec b_in,
+ TensorSpec expect_in)
+ : engine(SimpleTensorEngine::ref()),
+ function(Function::parse(expr)),
+ a(a_in), b(b_in), expect(expect_in),
+ types(function, {ValueType::from_spec(a.type()), ValueType::from_spec(b.type())}),
+ interpreted(engine, function, types) {}
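+ // an optimized expression compiles to a single tensor function instruction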
+ void verify_optimized() const {
+ EXPECT_EQUAL(1u, interpreted.program_size());
+ InterpretedFunction::Context ctx(interpreted);
+ Value::UP va = engine.from_spec(a);
+ Value::UP vb = engine.from_spec(b);
+ InterpretedFunction::SimpleObjectParams params({*va,*vb});
+ const Value &result = interpreted.eval(ctx, params);
+ EXPECT_EQUAL(engine.to_spec(result), expect);
+ }
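+ // without the optimization: load a, load b, mul/join, reduce (4 instructions)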
+ void verify_not_optimized() const {
+ EXPECT_EQUAL(4u, interpreted.program_size());
+ }
+};
+
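+// scalar inputs where the compiler is given no parameter type information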
+struct UntypedIP : InnerProduct {
+ UntypedIP(const vespalib::string &expr) : InnerProduct(expr) {
+ a = TensorSpec("double").add({}, 2.0);
+ b = TensorSpec("double").add({}, 3.0);
+ expect = TensorSpec("double").add({}, 6.0);
+ }
+};
+
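+// vector * vector -> double (dot product)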
+struct DotProduct : InnerProduct {
+ DotProduct(const vespalib::string &expr)
+ : InnerProduct(expr,
+ TensorSpec("tensor(x[3])")
+ .add({{"x", 0}}, 5.0)
+ .add({{"x", 1}}, 3.0)
+ .add({{"x", 2}}, 2.0),
+ TensorSpec("tensor(x[3])")
+ .add({{"x", 0}}, 7.0)
+ .add({{"x", 1}}, 11.0)
+ .add({{"x", 2}}, 13.0),
+ TensorSpec("double")
+ .add({}, (5.0 * 7.0) + (3.0 * 11.0) + (2.0 * 13.0))) {}
+};
+
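+// vector * matrix -> vector (xw product)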
+struct XW : InnerProduct {
+ XW(const vespalib::string &expr)
+ : InnerProduct(expr,
+ TensorSpec("tensor(x[2])")
+ .add({{"x", 0}}, 1.0)
+ .add({{"x", 1}}, 2.0),
+ TensorSpec("tensor(x[2],y[3])")
+ .add({{"y", 0},{"x", 0}}, 3.0)
+ .add({{"y", 0},{"x", 1}}, 5.0)
+ .add({{"y", 1},{"x", 0}}, 7.0)
+ .add({{"y", 1},{"x", 1}}, 11.0)
+ .add({{"y", 2},{"x", 0}}, 13.0)
+ .add({{"y", 2},{"x", 1}}, 17.0),
+ TensorSpec("tensor(y[3])")
+ .add({{"y", 0}}, (1.0 * 3.0) + (2.0 * 5.0))
+ .add({{"y", 1}}, (1.0 * 7.0) + (2.0 * 11.0))
+ .add({{"y", 2}}, (1.0 * 13.0) + (2.0 * 17.0))) {}
+};
+
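+// matrix * matrix -> matrix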
+struct MatMul : InnerProduct {
+ MatMul(const vespalib::string &expr)
+ : InnerProduct(expr,
+ TensorSpec("tensor(x[2],y[2])")
+ .add({{"x", 0},{"y", 0}}, 1.0)
+ .add({{"x", 0},{"y", 1}}, 2.0)
+ .add({{"x", 1},{"y", 0}}, 3.0)
+ .add({{"x", 1},{"y", 1}}, 5.0),
+ TensorSpec("tensor(y[2],z[2])")
+ .add({{"y", 0},{"z", 0}}, 7.0)
+ .add({{"y", 0},{"z", 1}}, 11.0)
+ .add({{"y", 1},{"z", 0}}, 13.0)
+ .add({{"y", 1},{"z", 1}}, 17.0),
+ TensorSpec("tensor(x[2],z[2])")
+ .add({{"x", 0},{"z", 0}}, (1.0 * 7.0) + (2.0 * 13.0))
+ .add({{"x", 0},{"z", 1}}, (1.0 * 11.0) + (2.0 * 17.0))
+ .add({{"x", 1},{"z", 0}}, (3.0 * 7.0) + (5.0 * 13.0))
+ .add({{"x", 1},{"z", 1}}, (3.0 * 11.0) + (5.0 * 17.0))) {}
+};
+
+TEST("require that inner product is not optimized for unknown types") {
+ TEST_DO(UntypedIP("reduce(a*b,sum)").verify_not_optimized());
+ TEST_DO(UntypedIP("reduce(join(a,b,f(x,y)(x*y)),sum)").verify_not_optimized());
}
TEST("require that dot product works with tensor function") {
- const TensorEngine &engine = SimpleTensorEngine::ref();
- Function function = Function::parse("reduce(a*b,sum)");
- auto a = TensorSpec("tensor(x[3])")
- .add({{"x", 0}}, 5.0)
- .add({{"x", 1}}, 3.0)
- .add({{"x", 2}}, 2.0);
- auto b = TensorSpec("tensor(x[3])")
- .add({{"x", 0}}, 7.0)
- .add({{"x", 1}}, 11.0)
- .add({{"x", 2}}, 13.0);
- double expect = ((5.0 * 7.0) + (3.0 * 11.0) + (2.0 * 13.0));
- NodeTypes types(function, {ValueType::from_spec(a.type()), ValueType::from_spec(a.type())});
- InterpretedFunction interpreted(engine, function, types);
- EXPECT_EQUAL(1u, interpreted.program_size());
- InterpretedFunction::Context ctx(interpreted);
- Value::UP va = engine.from_spec(a);
- Value::UP vb = engine.from_spec(b);
- InterpretedFunction::SimpleObjectParams params({*va,*vb});
- const Value &result = interpreted.eval(ctx, params);
- EXPECT_TRUE(result.is_double());
- EXPECT_EQUAL(expect, result.as_double());
+ TEST_DO(DotProduct("reduce(a*b,sum)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*y)),sum)").verify_optimized());
+ TEST_DO(DotProduct("reduce(b*a,sum)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(x*y)),sum)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(y*x)),sum)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(y*x)),sum)").verify_optimized());
+ TEST_DO(DotProduct("reduce(a*b,sum,x)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*y)),sum,x)").verify_optimized());
+ TEST_DO(DotProduct("reduce(b*a,sum,x)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(x*y)),sum,x)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(y*x)),sum,x)").verify_optimized());
+ TEST_DO(DotProduct("reduce(join(b,a,f(x,y)(y*x)),sum,x)").verify_optimized());
+}
+
+TEST("require that vector matrix multiplication works with tensor function") {
+ TEST_DO(XW("reduce(a*b,sum,x)").verify_optimized());
+ TEST_DO(XW("reduce(join(a,b,f(x,y)(x*y)),sum,x)").verify_optimized());
+ TEST_DO(XW("reduce(b*a,sum,x)").verify_optimized());
+ TEST_DO(XW("reduce(join(b,a,f(x,y)(x*y)),sum,x)").verify_optimized());
+ TEST_DO(XW("reduce(join(a,b,f(x,y)(y*x)),sum,x)").verify_optimized());
+ TEST_DO(XW("reduce(join(b,a,f(x,y)(y*x)),sum,x)").verify_optimized());
}
TEST("require that matrix multiplication works with tensor function") {
- const TensorEngine &engine = SimpleTensorEngine::ref();
- Function function = Function::parse("reduce(a*b,sum,y)");
- auto a = TensorSpec("tensor(x[2],y[2])")
- .add({{"x", 0},{"y", 0}}, 1.0)
- .add({{"x", 0},{"y", 1}}, 2.0)
- .add({{"x", 1},{"y", 0}}, 3.0)
- .add({{"x", 1},{"y", 1}}, 5.0);
- auto b = TensorSpec("tensor(y[2],z[2])")
- .add({{"y", 0},{"z", 0}}, 7.0)
- .add({{"y", 0},{"z", 1}}, 11.0)
- .add({{"y", 1},{"z", 0}}, 13.0)
- .add({{"y", 1},{"z", 1}}, 17.0);
- auto expect = TensorSpec("tensor(x[2],z[2])")
- .add({{"x", 0},{"z", 0}}, (1.0 * 7.0) + (2.0 * 13.0))
- .add({{"x", 0},{"z", 1}}, (1.0 * 11.0) + (2.0 * 17.0))
- .add({{"x", 1},{"z", 0}}, (3.0 * 7.0) + (5.0 * 13.0))
- .add({{"x", 1},{"z", 1}}, (3.0 * 11.0) + (5.0 * 17.0));
- NodeTypes types(function, {ValueType::from_spec(a.type()), ValueType::from_spec(a.type())});
- InterpretedFunction interpreted(engine, function, types);
- EXPECT_EQUAL(1u, interpreted.program_size());
- InterpretedFunction::Context ctx(interpreted);
- Value::UP va = engine.from_spec(a);
- Value::UP vb = engine.from_spec(b);
- InterpretedFunction::SimpleObjectParams params({*va,*vb});
- const Value &result = interpreted.eval(ctx, params);
- ASSERT_TRUE(result.is_tensor());
- EXPECT_EQUAL(expect, engine.to_spec(result));
+ TEST_DO(MatMul("reduce(a*b,sum,y)").verify_optimized());
+ TEST_DO(MatMul("reduce(join(a,b,f(x,y)(x*y)),sum,y)").verify_optimized());
+ TEST_DO(MatMul("reduce(b*a,sum,y)").verify_optimized());
+ TEST_DO(MatMul("reduce(join(b,a,f(x,y)(x*y)),sum,y)").verify_optimized());
+ TEST_DO(MatMul("reduce(join(a,b,f(x,y)(y*x)),sum,y)").verify_optimized());
+ TEST_DO(MatMul("reduce(join(b,a,f(x,y)(y*x)),sum,y)").verify_optimized());
+}
+
+TEST("require that expressions similar to inner product are not optimized") {
+ TEST_DO(DotProduct("reduce(a*b,prod)").verify_not_optimized());
+ TEST_DO(DotProduct("reduce(a*b,max)").verify_not_optimized());
+ TEST_DO(DotProduct("reduce(a+b,sum)").verify_not_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x+y)),sum)").verify_not_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*x)),sum)").verify_not_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(y*y)),sum)").verify_not_optimized());
+ TEST_DO(DotProduct("reduce(join(a,b,f(x,y)(x*y*1)),sum)").verify_not_optimized());
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp
index cfe989e95f8..ada942767fa 100644
--- a/eval/src/vespa/eval/eval/interpreted_function.cpp
+++ b/eval/src/vespa/eval/eval/interpreted_function.cpp
@@ -161,25 +161,36 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser {
//-------------------------------------------------------------------------
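+ // true iff 'node' is a join whose lambda multiplies its two distinct
+ // parameters, i.e. join(a,b,f(x,y)(x*y)) or join(a,b,f(x,y)(y*x))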
+ bool is_mul_join(const Node &node) const {
+ if (auto join = as<TensorJoin>(node)) {
+ if (auto mul = as<Mul>(join->lambda().root())) {
+ auto sym1 = as<Symbol>(mul->lhs());
+ auto sym2 = as<Symbol>(mul->rhs());
+ return (sym1 && sym2 && (sym1->id() != sym2->id()));
+ }
+ }
+ return false;
+ }
+
+ bool is_mul(const Node &node) const {
+ auto mul = as<Mul>(node);
+ return (mul || is_mul_join(node));
+ }
+
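+ // true iff the resolved type of 'node' is a tensor with at least one dimension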
bool is_typed_tensor(const Node &node) const {
const ValueType &type = types.get_type(node);
return (type.is_tensor() && !type.dimensions().empty());
}
- bool is_typed(const Node &node) const {
- return (types.get_type(node).is_double() || is_typed_tensor(node));
- }
-
bool is_typed_tensor_param(const Node &node) const {
auto sym = as<Symbol>(node);
return (sym && is_typed_tensor(node));
}
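+ // matches 'param * param' (plain mul or an equivalent join) where the
+ // result and both parameters are typed tensors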
bool is_typed_tensor_product_of_params(const Node &node) const {
- auto mul = as<Mul>(node);
- return (mul && is_typed_tensor(*mul) &&
- is_typed_tensor_param(mul->lhs()) &&
- is_typed_tensor_param(mul->rhs()));
+ return (is_typed_tensor(node) && is_mul(node) &&
+ is_typed_tensor_param(node.get_child(0)) &&
+ is_typed_tensor_param(node.get_child(1)));
}
//-------------------------------------------------------------------------
@@ -260,7 +271,7 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser {
make_join_op(node, token.get()->get().get_function<2>());
}
void visit(const TensorReduce &node) override {
- if ((node.aggr() == Aggr::SUM) && is_typed(node) && is_typed_tensor_product_of_params(node.get_child(0))) {
+ if ((node.aggr() == Aggr::SUM) && is_typed_tensor_product_of_params(node.get_child(0))) {
assert(program.size() >= 3); // load,load,mul
program.pop_back(); // mul
program.pop_back(); // load