author     Arne H Juul <arnej27959@users.noreply.github.com>   2017-12-01 11:45:58 +0100
committer  GitHub <noreply@github.com>                         2017-12-01 11:45:58 +0100
commit     eb1ea3a78e8d9b9c4d0bcbebe9bba0481a045168 (patch)
tree       44f6eb09b4b8146ca56ee2f2005b239a3fe4cee1
parent     70b409e9ffdf81de731095465e9f68b8eb5a34fc (diff)
parent     133a4c7b51a33e329d8805adf3028afdc888eabe (diff)
Merge pull request #4327 from vespa-engine/havardpe/recognize-and-replace-xw-product
replace appropriate sub-expressions with dense xw product
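
The "xw product" recognized here is a dense vector-matrix product: reduce(v*M,sum,d), where v is a one-dimensional dense tensor over dimension d and M is a two-dimensional dense tensor that also contains d. When the pattern matches, the generic reduce/join evaluation is replaced by the specialized DenseXWProductFunction. A minimal reference sketch of the intended semantics, written with plain std::vector containers (illustrative only, not the optimized implementation; the function name is made up for this note):

#include <cassert>
#include <cstddef>
#include <vector>

// out[r] = sum over c of v[c] * M[c][r], i.e. reduce(v*M,sum,c)
// for v: tensor(c[N]) and M: tensor(c[N],r[K]) with c as the outer dimension.
std::vector<double> reference_xw_product(const std::vector<double> &v,
                                         const std::vector<std::vector<double>> &M)
{
    assert(!M.empty() && (M.size() == v.size()));
    std::vector<double> out(M[0].size(), 0.0);
    for (size_t c = 0; c < v.size(); ++c) {
        for (size_t r = 0; r < out.size(); ++r) {
            out[r] += v[c] * M[c][r];
        }
    }
    return out;
}

With v = {1, 2} and M = {{3, 5, 7}, {11, 13, 17}}, this yields {25, 31, 41}, which is what the conformance tests added below expect.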
-rw-r--r--  eval/src/apps/tensor_conformance/generate.cpp                                                 | 11
-rw-r--r--  eval/src/apps/tensor_conformance/test_spec.json                                               |  4
-rw-r--r--  eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp  | 87
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.h                                                    | 14
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp                           | 68
5 files changed, 152 insertions, 32 deletions
diff --git a/eval/src/apps/tensor_conformance/generate.cpp b/eval/src/apps/tensor_conformance/generate.cpp
index 0aba5276ace..f70c472cbcd 100644
--- a/eval/src/apps/tensor_conformance/generate.cpp
+++ b/eval/src/apps/tensor_conformance/generate.cpp
@@ -169,6 +169,16 @@ void generate_dot_product(TestBuilder &dst) {
//-----------------------------------------------------------------------------
+void generate_xw_product(TestBuilder &dst) {
+ auto matrix = spec({x(2),y(3)}, Seq({ 3, 5, 7, 11, 13, 17 }));
+ dst.add("reduce(a*b,sum,x)", {{"a", spec(x(2), Seq({ 1, 2 }))}, {"b", matrix}},
+ spec(y(3), Seq({(1*3+2*11),(1*5+2*13),(1*7+2*17)})));
+ dst.add("reduce(a*b,sum,y)", {{"a", spec(y(3), Seq({ 1, 2, 3 }))}, {"b", matrix}},
+ spec(x(2), Seq({(1*3+2*5+3*7),(1*11+2*13+3*17)})));
+}
+
+//-----------------------------------------------------------------------------
+
void generate_tensor_concat(TestBuilder &dst) {
dst.add("concat(a,b,x)", {{"a", spec(10.0)}, {"b", spec(20.0)}}, spec(x(2), Seq({10.0, 20.0})));
dst.add("concat(a,b,x)", {{"a", spec(x(1), Seq({10.0}))}, {"b", spec(20.0)}}, spec(x(2), Seq({10.0, 20.0})));
@@ -218,6 +228,7 @@ Generator::generate(TestBuilder &dst)
generate_tensor_map(dst);
generate_tensor_join(dst);
generate_dot_product(dst);
+ generate_xw_product(dst);
generate_tensor_concat(dst);
generate_tensor_rename(dst);
generate_tensor_lambda(dst);
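
Spelled out, the two generated cases expect y = (1*3+2*11, 1*5+2*13, 1*7+2*17) = (25, 31, 41) when reducing over x, and x = (1*3+2*5+3*7, 1*11+2*13+3*17) = (34, 88) when reducing over y; the Seq for the matrix lists its cells with x as the outer and y as the inner dimension, i.e. rows (3, 5, 7) and (11, 13, 17).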
diff --git a/eval/src/apps/tensor_conformance/test_spec.json b/eval/src/apps/tensor_conformance/test_spec.json
index 24edc9a7ac7..513d5e8e902 100644
--- a/eval/src/apps/tensor_conformance/test_spec.json
+++ b/eval/src/apps/tensor_conformance/test_spec.json
@@ -1225,6 +1225,8 @@
{"expression":"reduce(a*b,sum)","inputs":{"a":"0x0201017803400000000000000040080000000000004014000000000000","b":"0x0201017803401C0000000000004026000000000000402A000000000000"},"result":{"expect":"0x0200405C000000000000"}}
{"expression":"reduce(a*b,sum)","inputs":{"a":"0x020101780240000000000000004008000000000000","b":"0x0201017803401C0000000000004026000000000000402A000000000000"},"result":{"expect":"0x02004047800000000000"}}
{"expression":"reduce(a*b,sum)","inputs":{"a":"0x0201017803400000000000000040080000000000004014000000000000","b":"0x0201017802401C0000000000004026000000000000"},"result":{"expect":"0x02004047800000000000"}}
+{"expression":"reduce(a*b,sum,x)","inputs":{"a":"0x02010178023FF00000000000004000000000000000","b":"0x020201780201790340080000000000004014000000000000401C0000000000004026000000000000402A0000000000004031000000000000"},"result":{"expect":"0x02010179034039000000000000403F0000000000004044800000000000"}}
+{"expression":"reduce(a*b,sum,y)","inputs":{"a":"0x02010179033FF000000000000040000000000000004008000000000000","b":"0x020201780201790340080000000000004014000000000000401C0000000000004026000000000000402A0000000000004031000000000000"},"result":{"expect":"0x020101780240410000000000004056000000000000"}}
{"expression":"concat(a,b,x)","inputs":{"a":"0x02004024000000000000","b":"0x02004034000000000000"},"result":{"expect":"0x020101780240240000000000004034000000000000"}}
{"expression":"concat(a,b,x)","inputs":{"a":"0x02010178014024000000000000","b":"0x02004034000000000000"},"result":{"expect":"0x020101780240240000000000004034000000000000"}}
{"expression":"concat(a,b,x)","inputs":{"a":"0x02004024000000000000","b":"0x02010178014034000000000000"},"result":{"expect":"0x020101780240240000000000004034000000000000"}}
@@ -1242,4 +1244,4 @@
{"expression":"tensor(x[10])(x+1)","inputs":{},"result":{"expect":"0x020101780A3FF000000000000040000000000000004008000000000000401000000000000040140000000000004018000000000000401C000000000000402000000000000040220000000000004024000000000000"}}
{"expression":"tensor(x[5],y[4])(x*4+(y+1))","inputs":{},"result":{"expect":"0x02020178050179043FF000000000000040000000000000004008000000000000401000000000000040140000000000004018000000000000401C00000000000040200000000000004022000000000000402400000000000040260000000000004028000000000000402A000000000000402C000000000000402E00000000000040300000000000004031000000000000403200000000000040330000000000004034000000000000"}}
{"expression":"tensor(x[5],y[4])(x==y)","inputs":{},"result":{"expect":"0x02020178050179043FF000000000000000000000000000000000000000000000000000000000000000000000000000003FF000000000000000000000000000000000000000000000000000000000000000000000000000003FF000000000000000000000000000000000000000000000000000000000000000000000000000003FF00000000000000000000000000000000000000000000000000000000000000000000000000000"}}
-{"num_tests":1244}
+{"num_tests":1246}
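
Reading the trailing 8-byte groups of the expected payloads as IEEE-754 doubles, as in the surrounding entries (0x4039000000000000 = 25.0, and so on), the two new binary cases expect (25, 31, 41) and (34, 88) respectively, matching the arithmetic above; num_tests is bumped from 1244 to 1246 accordingly.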
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp b/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
index 63829650cc5..7df436d85a1 100644
--- a/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
+++ b/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
@@ -2,6 +2,7 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/eval/tensor/dense/dense_dot_product_function.h>
+#include <vespa/eval/tensor/dense/dense_xw_product_function.h>
#include <vespa/eval/tensor/dense/dense_tensor_function_compiler.h>
#include <vespa/eval/eval/operation.h>
@@ -11,8 +12,7 @@ using namespace vespalib::eval::tensor_function;
using namespace vespalib::tensor;
using vespalib::Stash;
-template <typename T>
-const T *as(const TensorFunction &function) { return dynamic_cast<const T *>(&function); }
+//-----------------------------------------------------------------------------
const TensorFunction &
compileDotProduct(const vespalib::string &lhsType,
@@ -48,6 +48,65 @@ assertNotCompiledDotProduct(const vespalib::string &lhsType,
EXPECT_TRUE(reduce);
}
+//-----------------------------------------------------------------------------
+
+const TensorFunction &
+compileXWProduct(const vespalib::string &lhsType,
+ const vespalib::string &rhsType,
+ const vespalib::string &dim,
+ Stash &stash)
+{
+ const Node &reduceNode = reduce(join(inject(ValueType::from_spec(lhsType), 1, stash),
+ inject(ValueType::from_spec(rhsType), 3, stash),
+ Mul::f, stash),
+ Aggr::SUM, {dim}, stash);
+ return DenseTensorFunctionCompiler::compile(reduceNode, stash);
+}
+
+void
+assertCompiledXWProduct(const vespalib::string &vecTypeStr,
+ const vespalib::string &matTypeStr,
+ const vespalib::string &dim)
+{
+ Stash stash;
+ const TensorFunction &func = compileXWProduct(vecTypeStr, matTypeStr, dim, stash);
+ const TensorFunction &inv_func = compileXWProduct(matTypeStr, vecTypeStr, dim, stash);
+ const DenseXWProductFunction *xwProduct = as<DenseXWProductFunction>(func);
+ const DenseXWProductFunction *inv_xwProduct = as<DenseXWProductFunction>(inv_func);
+ ValueType vecType = ValueType::from_spec(vecTypeStr);
+ ValueType matType = ValueType::from_spec(matTypeStr);
+ size_t common_idx = matType.dimension_index(vecType.dimensions()[0].name);
+ ASSERT_TRUE(xwProduct);
+ ASSERT_TRUE(inv_xwProduct);
+ ASSERT_TRUE(common_idx != ValueType::Dimension::npos);
+ EXPECT_EQUAL(xwProduct->vectorId(), 1u);
+ EXPECT_EQUAL(inv_xwProduct->vectorId(), 3u);
+ EXPECT_EQUAL(xwProduct->matrixId(), 3u);
+ EXPECT_EQUAL(inv_xwProduct->matrixId(), 1u);
+ EXPECT_EQUAL(xwProduct->vectorSize(), vecType.dimensions()[0].size);
+ EXPECT_EQUAL(inv_xwProduct->vectorSize(), vecType.dimensions()[0].size);
+ EXPECT_EQUAL(xwProduct->resultSize(), matType.dimensions()[1 - common_idx].size);
+ EXPECT_EQUAL(inv_xwProduct->resultSize(), matType.dimensions()[1 - common_idx].size);
+ EXPECT_EQUAL(xwProduct->matrixHasCommonDimensionInnermost(), (common_idx == 1));
+ EXPECT_EQUAL(inv_xwProduct->matrixHasCommonDimensionInnermost(), (common_idx == 1));
+}
+
+void
+assertNotCompiledXWProduct(const vespalib::string &vecType,
+ const vespalib::string &matType,
+ const vespalib::string &dim)
+{
+ Stash stash;
+ const TensorFunction &func = compileXWProduct(vecType, matType, dim, stash);
+ const TensorFunction &inv_func = compileXWProduct(matType, vecType, dim, stash);
+ const Reduce *reduce = as<Reduce>(func);
+ const Reduce *inv_reduce = as<Reduce>(inv_func);
+ EXPECT_TRUE(reduce);
+ EXPECT_TRUE(inv_reduce);
+}
+
+//-----------------------------------------------------------------------------
+
TEST("require that dot product with compatible dimensions is compiled")
{
TEST_DO(assertCompiledDotProduct("tensor(x[5])", "tensor(x[5])"));
@@ -67,4 +126,28 @@ TEST("require that dot product with incompatible dimensions is NOT compiled")
TEST_DO(assertNotCompiledDotProduct("tensor(x[5],y[7])", "tensor(x[5],y[7])"));
}
+//-----------------------------------------------------------------------------
+
+TEST("require that xw products with compatible dimensions are compiled") {
+ TEST_DO(assertCompiledXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertCompiledXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "y"));
+}
+
+TEST("require that xw products with incompatible dimensions are not compiled") {
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "y"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(x[],y[4])", "x"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(x[3],y[])", "x"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[2])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[4])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "x"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "y"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "z"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(y[3])", "tensor(x[3],y[4])", "y"));
+ TEST_DO(assertNotCompiledXWProduct("tensor(y[5])", "tensor(x[3],y[4])", "y"));
+}
+
+//-----------------------------------------------------------------------------
+
TEST_MAIN() { TEST_RUN_ALL(); }
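
Summarizing the negative cases: the pattern is left as a generic Reduce whenever a dimension is abstract (tensor(x[]), tensor(x[],y[4]), tensor(x[3],y[])), the vector size does not match the matrix's common dimension (tensor(x[2]) or tensor(x[4]) against tensor(x[3],y[4])), the reduced dimension is not the one the vector and matrix share, or the vector's dimension does not occur in the matrix at all (tensor(x[3]) against tensor(y[3],z[4])).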
diff --git a/eval/src/vespa/eval/eval/tensor_function.h b/eval/src/vespa/eval/eval/tensor_function.h
index 359cabc18a0..e23dc8c6fc0 100644
--- a/eval/src/vespa/eval/eval/tensor_function.h
+++ b/eval/src/vespa/eval/eval/tensor_function.h
@@ -45,9 +45,13 @@ struct TensorFunction
virtual ~TensorFunction() {}
};
-//-----------------------------------------------------------------------------
+/**
+ * Simple typecasting utility.
+ */
+template <typename T>
+const T *as(const TensorFunction &node) { return dynamic_cast<const T *>(&node); }
-struct TensorFunctionVisitor;
+//-----------------------------------------------------------------------------
namespace tensor_function {
@@ -77,12 +81,6 @@ struct Node : public TensorFunction
Node &operator=(Node &&) = delete;
};
-/**
- * Simple typecasting utility.
- */
-template <typename T>
-const T *as(const Node &node) { return dynamic_cast<const T *>(&node); }
-
struct Inject : Node {
const size_t tensor_id;
Inject(const ValueType &result_type_in,
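
Moving as<T> from tensor_function::Node up to TensorFunction makes the same casting helper usable on compiled functions as well as on expression nodes, which both the compiler and the new tests rely on. A minimal usage sketch, assuming the headers and using-directives match the project layout shown in this diff (illustrative only):

#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/tensor/dense/dense_xw_product_function.h>
#include <vespa/eval/tensor/dense/dense_tensor_function_compiler.h>

using namespace vespalib::eval;
using namespace vespalib::eval::tensor_function;
using namespace vespalib::tensor;
using vespalib::Stash;

// Compile an expression node and probe the concrete type of the result.
const TensorFunction &compile_and_probe(const Node &expr, Stash &stash) {
    const TensorFunction &fun = DenseTensorFunctionCompiler::compile(expr, stash);
    const DenseXWProductFunction *xw = as<DenseXWProductFunction>(fun);
    // xw is non-null iff the reduce(join(..., mul), sum, dim) sub-expression was replaced
    (void) xw;
    return fun;
}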
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp
index e9ee7d30692..1268a46b8e5 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "dense_dot_product_function.h"
+#include "dense_xw_product_function.h"
#include "dense_tensor_function_compiler.h"
#include <vespa/eval/eval/operation.h>
#include <vespa/vespalib/test/insertion_operators.h>
@@ -15,39 +16,64 @@ namespace tensor {
namespace {
-bool
-willReduceAllDimensions(const std::vector<vespalib::string> &dimensions)
-{
- return (dimensions.empty() || (dimensions.size() == 1));
+bool is1dDenseTensor(const ValueType &type) {
+ return (type.is_dense() && (type.dimensions().size() == 1));
}
-bool
-is1dDenseTensor(const ValueType &type)
-{
- return (type.is_dense() && (type.dimensions().size() == 1));
+bool isConcreteDenseTensor(const ValueType &type, size_t d) {
+ return (type.is_dense() && (type.dimensions().size() == d) && !type.is_abstract());
}
-bool
-isCompatibleTensorsForDotProduct(const ValueType &lhsType, const ValueType &rhsType)
-{
- return (is1dDenseTensor(lhsType) &&
+bool isDenseDotProduct(const ValueType &res, const ValueType &lhsType, const ValueType &rhsType) {
+ return (res.is_double() &&
+ is1dDenseTensor(lhsType) &&
is1dDenseTensor(rhsType) &&
(lhsType.dimensions()[0].name == rhsType.dimensions()[0].name));
}
-struct DotProductFunctionCompiler
+bool isDenseXWProduct(const ValueType &res, const ValueType &vec, const ValueType &mat) {
+ if (isConcreteDenseTensor(res, 1) &&
+ isConcreteDenseTensor(vec, 1) &&
+ isConcreteDenseTensor(mat, 2))
+ {
+ size_t res_idx = mat.dimension_index(res.dimensions()[0].name);
+ size_t vec_idx = mat.dimension_index(vec.dimensions()[0].name);
+ size_t npos = ValueType::Dimension::npos;
+ if ((res_idx != npos) && (vec_idx != npos) && (res_idx != vec_idx)) {
+ return ((mat.dimensions()[res_idx].size == res.dimensions()[0].size) &&
+ (mat.dimensions()[vec_idx].size == vec.dimensions()[0].size));
+ }
+ }
+ return false;
+}
+
+const TensorFunction &createDenseXWProduct(const ValueType &res, const Inject &vec, const Inject &mat, Stash &stash) {
+ bool common_is_inner = (mat.result_type.dimension_index(vec.result_type.dimensions()[0].name) == 1);
+ return stash.create<DenseXWProductFunction>(res, vec.tensor_id, mat.tensor_id,
+ vec.result_type.dimensions()[0].size,
+ res.dimensions()[0].size,
+ common_is_inner);
+}
+
+struct InnerProductFunctionCompiler
{
static const TensorFunction &compile(const Node &expr, Stash &stash) {
const Reduce *reduce = as<Reduce>(expr);
- if (reduce && (reduce->aggr == Aggr::SUM) && willReduceAllDimensions(reduce->dimensions)) {
+ if (reduce && (reduce->aggr == Aggr::SUM)) {
const Join *join = as<Join>(reduce->tensor);
if (join && (join->function == Mul::f)) {
- const Inject *lhsTensor = as<Inject>(join->lhs_tensor);
- const Inject *rhsTensor = as<Inject>(join->rhs_tensor);
- if (lhsTensor && rhsTensor &&
- isCompatibleTensorsForDotProduct(lhsTensor->result_type, rhsTensor->result_type))
- {
- return stash.create<DenseDotProductFunction>(lhsTensor->tensor_id, rhsTensor->tensor_id);
+ const Inject *lhs = as<Inject>(join->lhs_tensor);
+ const Inject *rhs = as<Inject>(join->rhs_tensor);
+ if (lhs && rhs) {
+ if (isDenseDotProduct(expr.result_type, lhs->result_type, rhs->result_type)) {
+ return stash.create<DenseDotProductFunction>(lhs->tensor_id, rhs->tensor_id);
+ }
+ if (isDenseXWProduct(expr.result_type, lhs->result_type, rhs->result_type)) {
+ return createDenseXWProduct(expr.result_type, *lhs, *rhs, stash);
+ }
+ if (isDenseXWProduct(expr.result_type, rhs->result_type, lhs->result_type)) {
+ return createDenseXWProduct(expr.result_type, *rhs, *lhs, stash);
+ }
}
}
}
@@ -60,7 +86,7 @@ struct DotProductFunctionCompiler
const TensorFunction &
DenseTensorFunctionCompiler::compile(const eval::tensor_function::Node &expr, Stash &stash)
{
- return DotProductFunctionCompiler::compile(expr, stash);
+ return InnerProductFunctionCompiler::compile(expr, stash);
}
} // namespace tensor
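
Taken together, InnerProductFunctionCompiler now recognizes two special cases of reduce(join(lhs,rhs,mul),sum,...): the full dot product (both operands 1d dense tensors over the same dimension, with a double result) and the xw product (one operand a concrete 1d dense vector, the other a concrete 2d dense matrix, the reduced dimension shared by both with matching sizes, and the remaining matrix dimension as the result). Since isDenseXWProduct is tried with the operands in both orders, reduce(a*b,sum,x) and reduce(b*a,sum,x) compile to the same DenseXWProductFunction. For example, with a: tensor(x[3]) and b: tensor(x[3],y[4]), the compiled function has vectorSize() == 3, resultSize() == 4, and matrixHasCommonDimensionInnermost() == false because x is the matrix's outer dimension; everything else falls through to the generic Reduce evaluation.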