author     Håvard Pettersen <havardpe@oath.com>    2020-06-26 10:44:46 +0000
committer  Håvard Pettersen <havardpe@oath.com>    2020-06-26 10:44:46 +0000
commit     4138c5f103acc24a82614e27fbf6a137b26bff00 (patch)
tree       39cc12f0ccd4a4647f5b337c8ed90e041b113b64 /eval
parent     3ec1f9416e3b4496d633a318acdc8aac6f21ab0d (diff)
ignore trivial dimensions
Diffstat (limited to 'eval')
-rw-r--r--  eval/src/tests/eval/value_type/value_type_test.cpp                                       14
-rw-r--r--  eval/src/tests/tensor/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp    6
-rw-r--r--  eval/src/vespa/eval/eval/value_type.cpp                                                   11
-rw-r--r--  eval/src/vespa/eval/eval/value_type.h                                                      2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_multi_matmul_function.cpp                          77
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_simple_expand_function.cpp                         11
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_simple_join_function.cpp                           11
7 files changed, 75 insertions, 57 deletions
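
For context, this change adds Dimension::is_trivial() and ValueType::nontrivial_dimensions() (see the value_type.h/.cpp hunks below) and switches the dense multi-matmul, simple-expand, and simple-join optimizers over to them, so indexed dimensions of size 1 no longer block optimization. Below is a minimal standalone sketch of that filtering; the Dimension struct and nontrivial_dimensions function here are simplified stand-ins for the real Vespa ValueType classes, not the actual implementation.

// Standalone sketch: a dimension of indexed size 1 is "trivial" and is
// ignored when matching tensor types, so tensor(a[1],b[1],x[10],y{},z[1])
// is treated like tensor(x[10],y{}) by the dense optimizers.
#include <cstddef>
#include <iostream>
#include <limits>
#include <string>
#include <vector>

struct Dimension {
    static constexpr size_t npos = std::numeric_limits<size_t>::max();
    std::string name;
    size_t size;                                     // npos means mapped (sparse)
    bool is_mapped()  const { return (size == npos); }
    bool is_trivial() const { return (size == 1); }  // indexed dimension of size 1
};

// counterpart of ValueType::nontrivial_dimensions(): keep everything that
// is not an indexed size-1 dimension
std::vector<Dimension> nontrivial_dimensions(const std::vector<Dimension> &dims) {
    std::vector<Dimension> result;
    for (const auto &dim : dims) {
        if (!dim.is_trivial()) {
            result.push_back(dim);
        }
    }
    return result;
}

int main() {
    // tensor(a[1],b[1],x[10],y{},z[1]) -> only x[10] and y{} remain
    std::vector<Dimension> dims = {{"a", 1}, {"b", 1}, {"x", 10},
                                   {"y", Dimension::npos}, {"z", 1}};
    for (const auto &dim : nontrivial_dimensions(dims)) {
        std::cout << dim.name << '\n';               // prints x, then y
    }
}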
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index 055c7e582bf..54bcd0694e2 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -266,6 +266,20 @@ TEST("require that dimension names can be obtained") {
EXPECT_EQUAL(type("tensor<float>(y[10],x[30],z{})").dimension_names(), str_list({"x", "y", "z"}));
}
+TEST("require that nontrivial dimensions can be obtained") {
+ auto my_check = [](const auto &list)
+ {
+ ASSERT_EQUAL(list.size(), 2u);
+ EXPECT_EQUAL(list[0].name, "x");
+ EXPECT_EQUAL(list[0].size, 10u);
+ EXPECT_EQUAL(list[1].name, "y");
+ EXPECT_TRUE(list[1].is_mapped());
+ };
+ EXPECT_TRUE(type("double").nontrivial_dimensions().empty());
+ TEST_DO(my_check(type("tensor(x[10],y{})").nontrivial_dimensions()));
+ TEST_DO(my_check(type("tensor(a[1],b[1],x[10],y{},z[1])").nontrivial_dimensions()));
+}
+
TEST("require that dimension index can be obtained") {
EXPECT_EQUAL(type("error").dimension_index("x"), ValueType::Dimension::npos);
EXPECT_EQUAL(type("double").dimension_index("x"), ValueType::Dimension::npos);
diff --git a/eval/src/tests/tensor/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp b/eval/src/tests/tensor/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
index f9c563c9bf8..31dae9e7cf8 100644
--- a/eval/src/tests/tensor/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
+++ b/eval/src/tests/tensor/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
@@ -26,6 +26,7 @@ const TensorEngine &prod_engine = DefaultTensorEngine::ref();
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
.add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"a", 2}, {"d", 3}}) // inner/inner
+ .add_dense({{"A", 2}, {"B", 1}, {"C", 3}, {"D", 1}, {"a", 2}, {"c", 1}, {"d", 3}, {"e", 1}}) // inner/inner, extra dims
.add_dense({{"B", 1}, {"C", 3}, {"a", 2}, {"d", 3}}) // inner/inner, missing A
.add_dense({{"A", 1}, {"a", 2}, {"d", 3}}) // inner/inner, single mat
.add_dense({{"A", 2}, {"D", 3}, {"a", 2}, {"b", 1}, {"c", 3}}) // inner/inner, inverted
@@ -109,6 +110,11 @@ TEST("require that multi matmul must have inner nesting of matmul dimensions") {
TEST_DO(verify_not_optimized("reduce(B5D3a2b1c3*A2D3a2b1c3,sum,D)"));
}
+TEST("require that multi matmul ignores trivial dimensions") {
+ TEST_DO(verify_optimized("reduce(A2B1C3D1a2c1d3e1*A2B1C3b5d3,sum,d)", 2, 3, 5, 6, true, true));
+ TEST_DO(verify_optimized("reduce(A2B1C3b5d3*A2B1C3D1a2c1d3e1,sum,d)", 2, 3, 5, 6, true, true));
+}
+
TEST("require that multi matmul function can be debug dumped") {
EvalFixture fixture(prod_engine, "reduce(A2B1C3a2d3*A2B1C3b5d3,sum,d)", param_repo, true);
auto info = fixture.find_all<DenseMultiMatMulFunction>();
diff --git a/eval/src/vespa/eval/eval/value_type.cpp b/eval/src/vespa/eval/eval/value_type.cpp
index 36c7c49c8b9..2287e8599eb 100644
--- a/eval/src/vespa/eval/eval/value_type.cpp
+++ b/eval/src/vespa/eval/eval/value_type.cpp
@@ -185,6 +185,17 @@ ValueType::dense_subspace_size() const
return size;
}
+std::vector<ValueType::Dimension>
+ValueType::nontrivial_dimensions() const {
+ std::vector<ValueType::Dimension> result;
+ for (const auto &dim: dimensions()) {
+ if (!dim.is_trivial()) {
+ result.push_back(dim);
+ }
+ }
+ return result;
+}
+
size_t
ValueType::dimension_index(const vespalib::string &name) const {
return my_dimension_index(_dimensions, name);
diff --git a/eval/src/vespa/eval/eval/value_type.h b/eval/src/vespa/eval/eval/value_type.h
index a8ae9c44bb0..a3dbb901eb0 100644
--- a/eval/src/vespa/eval/eval/value_type.h
+++ b/eval/src/vespa/eval/eval/value_type.h
@@ -33,6 +33,7 @@ public:
bool operator!=(const Dimension &rhs) const { return !(*this == rhs); }
bool is_mapped() const { return (size == npos); }
bool is_indexed() const { return (size != npos); }
+ bool is_trivial() const { return (size == 1); }
};
private:
@@ -61,6 +62,7 @@ public:
bool is_dense() const;
size_t dense_subspace_size() const;
const std::vector<Dimension> &dimensions() const { return _dimensions; }
+ std::vector<Dimension> nontrivial_dimensions() const;
size_t dimension_index(const vespalib::string &name) const;
std::vector<vespalib::string> dimension_names() const;
bool operator==(const ValueType &rhs) const {
diff --git a/eval/src/vespa/eval/tensor/dense/dense_multi_matmul_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_multi_matmul_function.cpp
index 73942f7f044..23e54e7fc02 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_multi_matmul_function.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_multi_matmul_function.cpp
@@ -20,6 +20,9 @@ using eval::Aggr;
using namespace eval::tensor_function;
using namespace eval::operation;
+using Dim = ValueType::Dimension;
+using DimList = std::vector<Dim>;
+
namespace {
void my_cblas_double_multi_matmul_op(InterpretedFunction::State &state, uint64_t param) {
@@ -77,45 +80,37 @@ InterpretedFunction::op_function my_select(CellType cell_type) {
struct CommonDim {
bool valid;
bool inner;
- CommonDim(const ValueType &type, const vespalib::string &dim)
+ CommonDim(const DimList &list, const vespalib::string &dim)
: valid(true), inner(false)
{
- size_t size = type.dimensions().size();
- if (type.dimensions()[size - 1].name == dim) {
+ if (list[list.size() - 1].name == dim) {
inner = true;
- } else if (type.dimensions()[size - 2].name != dim) {
+ } else if (list[list.size() - 2].name != dim) {
valid = false;
}
}
- const ValueType::Dimension &get(const ValueType &type) const {
- size_t size = type.dimensions().size();
- return type.dimensions()[size - (inner ? 1 : 2)];
- }
- const ValueType::Dimension &get(const TensorFunction &expr) const {
- return get(expr.result_type());
- }
- const ValueType::Dimension &inv(const ValueType &type) const {
- size_t size = type.dimensions().size();
- return type.dimensions()[size - (inner ? 2 : 1)];
+ const Dim &get(const DimList &dims) const {
+ return dims[dims.size() - (inner ? 1 : 2)];
}
- const ValueType::Dimension &inv(const TensorFunction &expr) const {
- return inv(expr.result_type());
+ const Dim &inv(const DimList &dims) const {
+ return dims[dims.size() - (inner ? 2 : 1)];
}
};
-// Currently, non-matmul dimensions are required to be identical. This
-// restriction is added to reduce complexity and might be removed in
-// the future if/when a relevant use-case arises.
+// Currently, non-matmul dimensions are required to be identical
+// (after trivial dimensions are ignored). This restriction is added
+// to reduce complexity and might be removed in the future if/when a
+// relevant use-case arises.
struct DimPrefix {
bool valid;
size_t size;
- DimPrefix(const ValueType &a, const ValueType &b)
+ DimPrefix(const DimList &a, const DimList &b)
: valid(true), size(1)
{
- if (a.dimensions().size() == b.dimensions().size()) {
- for (size_t i = 0; i < (a.dimensions().size() - 2); ++i) {
- if (a.dimensions()[i] == b.dimensions()[i]) {
- size *= a.dimensions()[i].size;
+ if (a.size() == b.size()) {
+ for (size_t i = 0; i < (a.size() - 2); ++i) {
+ if (a[i] == b[i]) {
+ size *= a[i].size;
} else {
valid = false;
}
@@ -126,20 +121,22 @@ struct DimPrefix {
}
};
-bool check_input_type(const ValueType &type) {
+bool check_input_type(const ValueType &type, const DimList &relevant) {
return (type.is_dense() &&
- (type.dimensions().size() >= 2) &&
+ (relevant.size() >= 2) &&
((type.cell_type() == CellType::FLOAT) || (type.cell_type() == CellType::DOUBLE)));
}
bool is_multi_matmul(const ValueType &a, const ValueType &b, const vespalib::string &reduce_dim) {
- if (check_input_type(a) && check_input_type(b) && (a.cell_type() == b.cell_type())) {
- CommonDim cd_a(a, reduce_dim);
- CommonDim cd_b(b, reduce_dim);
- DimPrefix prefix(a, b);
+ auto dims_a = a.nontrivial_dimensions();
+ auto dims_b = b.nontrivial_dimensions();
+ if (check_input_type(a, dims_a) && check_input_type(b, dims_b) && (a.cell_type() == b.cell_type())) {
+ CommonDim cd_a(dims_a, reduce_dim);
+ CommonDim cd_b(dims_b, reduce_dim);
+ DimPrefix prefix(dims_a, dims_b);
return (cd_a.valid && cd_b.valid && prefix.valid &&
- (b.dimension_index(cd_a.inv(a).name) == ValueType::Dimension::npos) &&
- (a.dimension_index(cd_b.inv(b).name) == ValueType::Dimension::npos));
+ (b.dimension_index(cd_a.inv(dims_a).name) == Dim::npos) &&
+ (a.dimension_index(cd_b.inv(dims_b).name) == Dim::npos));
}
return false;
}
@@ -147,13 +144,15 @@ bool is_multi_matmul(const ValueType &a, const ValueType &b, const vespalib::str
const TensorFunction &create_multi_matmul(const TensorFunction &a, const TensorFunction &b,
const vespalib::string &reduce_dim, const ValueType &result_type, Stash &stash)
{
- CommonDim cd_a(a.result_type(), reduce_dim);
- CommonDim cd_b(b.result_type(), reduce_dim);
- DimPrefix prefix(a.result_type(), b.result_type());
- size_t a_size = cd_a.inv(a).size;
- size_t b_size = cd_b.inv(b).size;
- size_t common_size = cd_a.get(a).size;
- bool a_is_lhs = (cd_a.inv(a).name < cd_b.inv(b).name);
+ auto dims_a = a.result_type().nontrivial_dimensions();
+ auto dims_b = b.result_type().nontrivial_dimensions();
+ CommonDim cd_a(dims_a, reduce_dim);
+ CommonDim cd_b(dims_b, reduce_dim);
+ DimPrefix prefix(dims_a, dims_b);
+ size_t a_size = cd_a.inv(dims_a).size;
+ size_t b_size = cd_b.inv(dims_b).size;
+ size_t common_size = cd_a.get(dims_a).size;
+ bool a_is_lhs = (cd_a.inv(dims_a).name < cd_b.inv(dims_b).name);
if (a_is_lhs) {
return stash.create<DenseMultiMatMulFunction>(result_type, a, b, a_size, common_size, b_size, prefix.size, cd_a.inner, cd_b.inner);
} else {
diff --git a/eval/src/vespa/eval/tensor/dense/dense_simple_expand_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_simple_expand_function.cpp
index d45e0d936a9..e4e3ffc27d6 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_simple_expand_function.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_simple_expand_function.cpp
@@ -71,16 +71,9 @@ using MyTypify = TypifyValue<TypifyCellType,TypifyOp2,TypifyBool>;
//-----------------------------------------------------------------------------
-std::vector<ValueType::Dimension> strip_trivial(const std::vector<ValueType::Dimension> &dim_list) {
- std::vector<ValueType::Dimension> result;
- std::copy_if(dim_list.begin(), dim_list.end(), std::back_inserter(result),
- [](const auto &dim){ return (dim.size != 1); });
- return result;
-}
-
std::optional<Inner> detect_simple_expand(const TensorFunction &lhs, const TensorFunction &rhs) {
- std::vector<ValueType::Dimension> a = strip_trivial(lhs.result_type().dimensions());
- std::vector<ValueType::Dimension> b = strip_trivial(rhs.result_type().dimensions());
+ std::vector<ValueType::Dimension> a = lhs.result_type().nontrivial_dimensions();
+ std::vector<ValueType::Dimension> b = rhs.result_type().nontrivial_dimensions();
if (a.empty() || b.empty()) {
return std::nullopt;
} else if (a.back().name < b.front().name) {
diff --git a/eval/src/vespa/eval/tensor/dense/dense_simple_join_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_simple_join_function.cpp
index 5f8fbcac9bb..c407ef6cdff 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_simple_join_function.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_simple_join_function.cpp
@@ -129,16 +129,9 @@ Primary select_primary(const TensorFunction &lhs, const TensorFunction &rhs, Val
}
}
-std::vector<ValueType::Dimension> strip_trivial(const std::vector<ValueType::Dimension> &dim_list) {
- std::vector<ValueType::Dimension> result;
- std::copy_if(dim_list.begin(), dim_list.end(), std::back_inserter(result),
- [](const auto &dim){ return (dim.size != 1); });
- return result;
-}
-
std::optional<Overlap> detect_overlap(const TensorFunction &primary, const TensorFunction &secondary) {
- std::vector<ValueType::Dimension> a = strip_trivial(primary.result_type().dimensions());
- std::vector<ValueType::Dimension> b = strip_trivial(secondary.result_type().dimensions());
+ std::vector<ValueType::Dimension> a = primary.result_type().nontrivial_dimensions();
+ std::vector<ValueType::Dimension> b = secondary.result_type().nontrivial_dimensions();
if (b.size() > a.size()) {
return std::nullopt;
} else if (b == a) {