summaryrefslogtreecommitdiffstats
path: root/vespalib/src
diff options
context:
space:
mode:
Diffstat (limited to 'vespalib/src')
-rw-r--r--vespalib/src/testlist.txt2
-rw-r--r--vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp153
-rw-r--r--vespalib/src/tests/eval/tensor/eval_tensor_test.cpp16
-rw-r--r--vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp78
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/.gitignore1
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt9
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/FILES1
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp317
-rw-r--r--vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp25
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore1
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt9
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/FILES1
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp343
-rw-r--r--vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp4
-rw-r--r--vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp307
-rw-r--r--vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp12
-rw-r--r--vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp24
-rw-r--r--vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h1
-rw-r--r--vespalib/src/vespa/vespalib/eval/tensor_engine.h1
-rw-r--r--vespalib/src/vespa/vespalib/eval/tensor_spec.cpp39
-rw-r--r--vespalib/src/vespa/vespalib/eval/tensor_spec.h17
-rw-r--r--vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp680
-rw-r--r--vespalib/src/vespa/vespalib/eval/value.cpp8
-rw-r--r--vespalib/src/vespa/vespalib/eval/value.h8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h57
-rw-r--r--vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp27
-rw-r--r--vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h1
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp71
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h7
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp124
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h46
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h25
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp32
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp106
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h36
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp204
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h32
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h21
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp133
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp59
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h31
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h46
-rw-r--r--vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h128
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp132
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h76
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp38
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h36
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h19
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h62
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp118
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h10
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp69
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h39
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h2
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp51
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h58
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h (renamed from vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_ref.h)12
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h23
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp35
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp89
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h26
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp148
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h47
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp29
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp (renamed from vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.cpp)21
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h (renamed from vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.h)6
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor.h8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h174
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp16
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor_operation.h1
-rw-r--r--vespalib/src/vespa/vespalib/test/insertion_operators.h18
78 files changed, 1961 insertions, 2681 deletions
diff --git a/vespalib/src/testlist.txt b/vespalib/src/testlist.txt
index e3dd8414e2e..67982805df7 100644
--- a/vespalib/src/testlist.txt
+++ b/vespalib/src/testlist.txt
@@ -65,11 +65,9 @@ tests/sync
tests/tensor/sparse_tensor_builder
tests/tensor/dense_tensor_builder
tests/tensor/dense_tensor_operations
-tests/tensor/join_tensor_addresses
tests/tensor/simple_tensor_builder
tests/tensor/tensor
tests/tensor/tensor_address
-tests/tensor/tensor_address_element_iterator
tests/tensor/tensor_function
tests/tensor/tensor_mapper
tests/tensor/tensor_operations
diff --git a/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp b/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp
index 775c2b72e0a..33812779a30 100644
--- a/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp
+++ b/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp
@@ -16,101 +16,68 @@ using Stash = vespalib::Stash;
// need to specify numbers explicitly as size_t to avoid ambiguous behavior for 0
constexpr size_t operator "" _z (unsigned long long int n) { return n; }
-void dump(const Cells &cells, std::ostream &out) {
- out << std::endl;
- for (const auto &cell: cells) {
- size_t n = 0;
- out << " [";
- for (const auto &label: cell.address) {
- if (n++) {
- out << ",";
- }
- if (label.is_mapped()) {
- out << label.name;
- } else {
- out << label.index;
- }
- }
- out << "]: " << cell.value << std::endl;
- }
+const Tensor &unwrap(const Value &value) {
+ ASSERT_TRUE(value.is_tensor());
+ return *value.as_tensor();
}
-struct Check {
+struct CellBuilder {
Cells cells;
- Check() : cells() {}
- explicit Check(const SimpleTensor &tensor) : cells() {
- for (const auto &cell: tensor.cells()) {
- add(cell.address, cell.value);
- }
- }
- explicit Check(const TensorSpec &spec)
- : Check(*SimpleTensor::create(spec)) {}
- Check &add(const Address &address, double value) {
- cells.emplace_back(address, value);
- std::sort(cells.begin(), cells.end(),
- [](const auto &a, const auto &b){ return (a.address < b.address); });
+ CellBuilder &add(const Address &addr, double value) {
+ cells.emplace_back(addr, value);
return *this;
}
- bool operator==(const Check &rhs) const {
- if (cells.size() != rhs.cells.size()) {
- return false;
- }
- for (size_t i = 0; i < cells.size(); ++i) {
- if ((cells[i].address != rhs.cells[i].address) ||
- (cells[i].value != rhs.cells[i].value))
- {
- return false;
- }
- }
- return true;
- }
+ Cells build() { return cells; }
};
-std::ostream &operator<<(std::ostream &out, const Check &value) {
- dump(value.cells, out);
- return out;
-}
-
-const SimpleTensor &unwrap(const Tensor &tensor) {
- ASSERT_EQUAL(&tensor.engine(), &SimpleTensorEngine::ref());
- return static_cast<const SimpleTensor &>(tensor);
-}
-
-const SimpleTensor &unwrap(const Value &value) {
- ASSERT_TRUE(value.is_tensor());
- return unwrap(*value.as_tensor());
-}
-
TEST("require that simple tensors can be built using tensor spec") {
TensorSpec spec("tensor(w{},x[2],y{},z[2])");
spec.add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
.add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
- std::unique_ptr<SimpleTensor> tensor = SimpleTensor::create(spec);
- Check expect = Check()
- .add({{"xxx"}, {0_z}, {"xxx"}, {0_z}}, 1.0)
- .add({{"xxx"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
- .add({{"xxx"}, {1_z}, {"xxx"}, {0_z}}, 0.0)
- .add({{"xxx"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
- //-----------------------------------------
- .add({{"xxx"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"xxx"}, {0_z}, {"yyy"}, {1_z}}, 2.0)
- .add({{"xxx"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"xxx"}, {1_z}, {"yyy"}, {1_z}}, 0.0)
- //-----------------------------------------
- .add({{"yyy"}, {0_z}, {"xxx"}, {0_z}}, 0.0)
- .add({{"yyy"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
- .add({{"yyy"}, {1_z}, {"xxx"}, {0_z}}, 3.0)
- .add({{"yyy"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
- //-----------------------------------------
- .add({{"yyy"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"yyy"}, {0_z}, {"yyy"}, {1_z}}, 0.0)
- .add({{"yyy"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"yyy"}, {1_z}, {"yyy"}, {1_z}}, 4.0);
- EXPECT_EQUAL(expect, Check(*tensor));
- std::unique_ptr<Tensor> tensor2 = SimpleTensorEngine::ref().create(spec);
- EXPECT_EQUAL(expect, Check(unwrap(*tensor2)));
+ auto tensor = SimpleTensorEngine::ref().create(spec);
+ TensorSpec full_spec("tensor(w{},x[2],y{},z[2])");
+ full_spec
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
+ auto full_tensor = SimpleTensorEngine::ref().create(full_spec);
+ SimpleTensor expect_tensor(ValueType::from_spec("tensor(w{},x[2],y{},z[2])"),
+ CellBuilder()
+ .add({{"xxx"}, {0_z}, {"xxx"}, {0_z}}, 1.0)
+ .add({{"xxx"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"xxx"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"xxx"}, {0_z}, {"yyy"}, {1_z}}, 2.0)
+ .add({{"xxx"}, {1_z}, {"xxx"}, {0_z}}, 0.0)
+ .add({{"xxx"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"xxx"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"xxx"}, {1_z}, {"yyy"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"xxx"}, {0_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"yyy"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {1_z}, {"xxx"}, {0_z}}, 3.0)
+ .add({{"yyy"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"yyy"}, {1_z}, {"yyy"}, {1_z}}, 4.0)
+ .build());
+ EXPECT_EQUAL(expect_tensor, *tensor);
+ EXPECT_EQUAL(expect_tensor, *full_tensor);
+ EXPECT_EQUAL(full_spec, tensor->engine().to_spec(*tensor));
};
TEST("require that simple tensors can have their values negated") {
@@ -125,10 +92,10 @@ TEST("require that simple tensors can have their values negated") {
.add({{"x","2"},{"y","1"}}, 3)
.add({{"x","1"},{"y","2"}}, -5));
auto result = SimpleTensor::perform(operation::Neg(), *tensor);
- EXPECT_EQUAL(Check(*expect), Check(*result));
+ EXPECT_EQUAL(*expect, *result);
Stash stash;
const Value &result2 = SimpleTensorEngine::ref().map(operation::Neg(), *tensor, stash);
- EXPECT_EQUAL(Check(*expect), Check(unwrap(result2)));
+ EXPECT_EQUAL(*expect, unwrap(result2));
}
TEST("require that simple tensors can be multiplied with each other") {
@@ -150,10 +117,10 @@ TEST("require that simple tensors can be multiplied with each other") {
.add({{"x","2"},{"y","1"},{"z","2"}}, 39)
.add({{"x","1"},{"y","2"},{"z","1"}}, 55));
auto result = SimpleTensor::perform(operation::Mul(), *lhs, *rhs);
- EXPECT_EQUAL(Check(*expect), Check(*result));
+ EXPECT_EQUAL(*expect, *result);
Stash stash;
const Value &result2 = SimpleTensorEngine::ref().apply(operation::Mul(), *lhs, *rhs, stash);
- EXPECT_EQUAL(Check(*expect), Check(unwrap(result2)));
+ EXPECT_EQUAL(*expect, unwrap(result2));
}
TEST("require that simple tensors support dimension reduction") {
@@ -178,22 +145,22 @@ TEST("require that simple tensors support dimension reduction") {
auto result_sum_y = tensor->reduce(operation::Add(), {"y"});
auto result_sum_x = tensor->reduce(operation::Add(), {"x"});
auto result_sum_all = tensor->reduce(operation::Add(), {"x", "y"});
- EXPECT_EQUAL(Check(*expect_sum_y), Check(*result_sum_y));
- EXPECT_EQUAL(Check(*expect_sum_x), Check(*result_sum_x));
- EXPECT_EQUAL(Check(*expect_sum_all), Check(*result_sum_all));
+ EXPECT_EQUAL(*expect_sum_y, *result_sum_y);
+ EXPECT_EQUAL(*expect_sum_x, *result_sum_x);
+ EXPECT_EQUAL(*expect_sum_all, *result_sum_all);
Stash stash;
const Value &result_sum_y_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"y"}, stash);
const Value &result_sum_x_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"x"}, stash);
const Value &result_sum_all_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"x", "y"}, stash);
const Value &result_sum_all_3 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {}, stash);
- EXPECT_EQUAL(Check(*expect_sum_y), Check(unwrap(result_sum_y_2)));
- EXPECT_EQUAL(Check(*expect_sum_x), Check(unwrap(result_sum_x_2)));
+ EXPECT_EQUAL(*expect_sum_y, unwrap(result_sum_y_2));
+ EXPECT_EQUAL(*expect_sum_x, unwrap(result_sum_x_2));
EXPECT_TRUE(result_sum_all_2.is_double());
EXPECT_TRUE(result_sum_all_3.is_double());
EXPECT_EQUAL(21, result_sum_all_2.as_double());
EXPECT_EQUAL(21, result_sum_all_3.as_double());
- EXPECT_TRUE(SimpleTensorEngine::ref().equal(*result_sum_y, *result_sum_y));
- EXPECT_TRUE(!SimpleTensorEngine::ref().equal(*result_sum_y, *result_sum_x));
+ EXPECT_EQUAL(*result_sum_y, *result_sum_y);
+ EXPECT_NOT_EQUAL(*result_sum_y, *result_sum_x);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp b/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp
index e381ae88cbe..9a656ad2697 100644
--- a/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp
+++ b/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp
@@ -64,13 +64,13 @@ TEST("require that tensor sum over dimension works") {
}
TEST("require that tensor add works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:5,{x:3}:3}"), Eval("{{x:1}:1,{x:2}:2} + {{x:2}:3,{x:3}:3}"));
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:5,{x:3}:3}"), Eval("{{x:2}:3,{x:3}:3} + {{x:1}:1,{x:2}:2}"));
+ EXPECT_EQUAL(Eval("{{x:2}:5}"), Eval("{{x:1}:1,{x:2}:2} + {{x:2}:3,{x:3}:3}"));
+ EXPECT_EQUAL(Eval("{{x:2}:5}"), Eval("{{x:2}:3,{x:3}:3} + {{x:1}:1,{x:2}:2}"));
}
TEST("require that tensor sub works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:-1,{x:3}:-3}"), Eval("{{x:1}:1,{x:2}:2} - {{x:2}:3,{x:3}:3}"));
- EXPECT_EQUAL(Eval("{{x:1}:-1,{x:2}:1,{x:3}:3}"), Eval("{{x:2}:3,{x:3}:3} - {{x:1}:1,{x:2}:2}"));
+ EXPECT_EQUAL(Eval("{{x:2}:-1}"), Eval("{{x:1}:1,{x:2}:2} - {{x:2}:3,{x:3}:3}"));
+ EXPECT_EQUAL(Eval("{{x:2}:1}"), Eval("{{x:2}:3,{x:3}:3} - {{x:1}:1,{x:2}:2}"));
}
TEST("require that tensor multiply works") {
@@ -78,13 +78,13 @@ TEST("require that tensor multiply works") {
}
TEST("require that tensor min works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:2,{x:3}:3}"), Eval("min({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:2,{x:3}:3}"), Eval("min({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
+ EXPECT_EQUAL(Eval("{{x:2}:2}"), Eval("min({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
+ EXPECT_EQUAL(Eval("{{x:2}:2}"), Eval("min({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
}
TEST("require that tensor max works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:3,{x:3}:3}"), Eval("max({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:3,{x:3}:3}"), Eval("max({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
+ EXPECT_EQUAL(Eval("{{x:2}:3}"), Eval("max({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
+ EXPECT_EQUAL(Eval("{{x:2}:3}"), Eval("max({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
}
TEST("require that tensor match works") {
diff --git a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
index 8478d46e1f4..595b3743625 100644
--- a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
+++ b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
@@ -4,11 +4,11 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
#include <vespa/vespalib/util/exceptions.h>
-#include <algorithm>
using namespace vespalib::tensor;
using vespalib::IllegalArgumentException;
using Builder = DenseTensorBuilder;
+using vespalib::eval::TensorSpec;
void
assertTensor(const DenseTensor::DimensionsMeta &expDims,
@@ -20,33 +20,71 @@ assertTensor(const DenseTensor::DimensionsMeta &expDims,
EXPECT_EQUAL(expCells, realTensor.cells());
}
+void
+assertTensorSpec(const TensorSpec &expSpec, const Tensor &tensor)
+{
+ TensorSpec actSpec = tensor.toSpec();
+ EXPECT_EQUAL(expSpec, actSpec);
+}
+
struct Fixture
{
Builder builder;
};
+Tensor::UP
+build1DTensor(Builder &builder)
+{
+ Builder::Dimension dimX = builder.defineDimension("x", 3);
+ builder.addLabel(dimX, 0).addCell(10).
+ addLabel(dimX, 1).addCell(11).
+ addLabel(dimX, 2).addCell(12);
+ return builder.build();
+}
+
TEST_F("require that 1d tensor can be constructed", Fixture)
{
- Builder::Dimension dimX = f.builder.defineDimension("x", 3);
- f.builder.addLabel(dimX, 0).addCell(10).
- addLabel(dimX, 1).addCell(11).
- addLabel(dimX, 2).addCell(12);
- assertTensor({{"x",3}}, {10,11,12},
- *f.builder.build());
+ assertTensor({{"x",3}}, {10,11,12}, *build1DTensor(f.builder));
+}
+
+TEST_F("require that 1d tensor can be converted to tensor spec", Fixture)
+{
+ assertTensorSpec(TensorSpec("tensor(x[3])").
+ add({{"x", 0}}, 10).
+ add({{"x", 1}}, 11).
+ add({{"x", 2}}, 12),
+ *build1DTensor(f.builder));
+}
+
+Tensor::UP
+build2DTensor(Builder &builder)
+{
+ Builder::Dimension dimX = builder.defineDimension("x", 3);
+ Builder::Dimension dimY = builder.defineDimension("y", 2);
+ builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10).
+ addLabel(dimX, 0).addLabel(dimY, 1).addCell(11).
+ addLabel(dimX, 1).addLabel(dimY, 0).addCell(12).
+ addLabel(dimX, 1).addLabel(dimY, 1).addCell(13).
+ addLabel(dimX, 2).addLabel(dimY, 0).addCell(14).
+ addLabel(dimX, 2).addLabel(dimY, 1).addCell(15);
+ return builder.build();
}
TEST_F("require that 2d tensor can be constructed", Fixture)
{
- Builder::Dimension dimX = f.builder.defineDimension("x", 3);
- Builder::Dimension dimY = f.builder.defineDimension("y", 2);
- f.builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10).
- addLabel(dimX, 0).addLabel(dimY, 1).addCell(11).
- addLabel(dimX, 1).addLabel(dimY, 0).addCell(12).
- addLabel(dimX, 1).addLabel(dimY, 1).addCell(13).
- addLabel(dimX, 2).addLabel(dimY, 0).addCell(14).
- addLabel(dimX, 2).addLabel(dimY, 1).addCell(15);
- assertTensor({{"x",3},{"y",2}}, {10,11,12,13,14,15},
- *f.builder.build());
+ assertTensor({{"x",3},{"y",2}}, {10,11,12,13,14,15}, *build2DTensor(f.builder));
+}
+
+TEST_F("require that 2d tensor can be converted to tensor spec", Fixture)
+{
+ assertTensorSpec(TensorSpec("tensor(x[3],y[2])").
+ add({{"x", 0},{"y", 0}}, 10).
+ add({{"x", 0},{"y", 1}}, 11).
+ add({{"x", 1},{"y", 0}}, 12).
+ add({{"x", 1},{"y", 1}}, 13).
+ add({{"x", 2},{"y", 0}}, 14).
+ add({{"x", 2},{"y", 1}}, 15),
+ *build2DTensor(f.builder));
}
TEST_F("require that 3d tensor can be constructed", Fixture)
@@ -189,7 +227,6 @@ TEST_F("require that already specified label throws exception", Fixture)
"Label for dimension 'x' is already specified with value '0'");
}
-
TEST_F("require that dimensions are sorted", Fixture)
{
Builder::Dimension dimY = f.builder.defineDimension("y", 3);
@@ -205,4 +242,9 @@ TEST_F("require that dimensions are sorted", Fixture)
EXPECT_EQUAL("tensor(x[5],y[3])", denseTensor.getType().to_spec());
}
+
+
+
+
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore b/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore
deleted file mode 100644
index bcf856a9f59..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_join_tensor_addresses_test_app
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt b/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt
deleted file mode 100644
index 6923cbc1133..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_join_tensor_addresses_test_app TEST
- SOURCES
- join_tensor_addresses_test.cpp
- DEPENDS
- vespalib
- vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_join_tensor_addresses_test_app COMMAND vespalib_join_tensor_addresses_test_app)
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/FILES b/vespalib/src/tests/tensor/join_tensor_addresses/FILES
deleted file mode 100644
index ad4ab2f6d87..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/FILES
+++ /dev/null
@@ -1 +0,0 @@
-join_tensor_addresses_test.cpp
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp b/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp
deleted file mode 100644
index db1e04d792a..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/stllike/hash_set.h>
-#include <vespa/vespalib/tensor/tensor_address.h>
-#include <vespa/vespalib/tensor/tensor_address_builder.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h>
-#include <vespa/vespalib/tensor/tensor_address_element_iterator.h>
-#include <vespa/vespalib/tensor/dimensions_vector_iterator.h>
-#include <vespa/vespalib/tensor/join_tensor_addresses.h>
-
-using namespace vespalib::tensor;
-
-using TensorAddressMap = std::map<std::string, std::string>;
-using TensorAddressElementVec =
- std::vector<std::pair<std::string, std::string>>;
-
-namespace vespalib
-{
-
-std::ostream &
-operator<<(std::ostream &out, const TensorAddressElementVec &vec)
-{
- out << "{";
- bool first = true;
- for (const auto &elem : vec) {
- if (!first) {
- out << ",";
- }
- out << "{\"" << elem.first << "\",\"" << elem.second << "\"}";
- first = false;
- }
- out << "}";
- return out;
-};
-
-}
-
-
-class DummyAddressBuilder
-{
- TensorAddressElementVec _elements;
-public:
- void add(vespalib::stringref dimension, vespalib::stringref label)
- {
- _elements.emplace_back(dimension, label);
- }
-
- const TensorAddressElementVec &elements() const { return _elements; }
- void clear() { }
-};
-
-
-template <class TensorAddressT> struct FixtureBase;
-
-template <> struct FixtureBase<TensorAddress>
-{
- using AddressType = TensorAddress;
- using AddressBuilderType = TensorAddressBuilder;
-
- static TensorAddress create(TensorAddressBuilder &builder) {
- return builder.build();
- }
-};
-
-
-template <> struct FixtureBase<CompactTensorAddress>
-{
- using AddressType = CompactTensorAddress;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddress
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- CompactTensorAddress ret;
- ret.deserializeFromSparseAddressRef(newRef);
- return ret;
- }
-};
-
-template <> struct FixtureBase<CompactTensorAddressRef>
-{
- using AddressType = CompactTensorAddressRef;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddressRef
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- return newRef;
- }
-};
-
-template <class TensorAddressT> struct Fixture
- : public FixtureBase<TensorAddressT>
-{
- using Parent = FixtureBase<TensorAddressT>;
- using AddressType = typename Parent::AddressType;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using Parent::create;
-
- AddressType
- create(const TensorAddressMap &address_in) {
- AddressBuilderType builder;
- for (auto &element : address_in) {
- builder.add(element.first, element.second);
- }
- return create(builder);
- }
-
- void
- verifyJoin3Way(bool exp,
- const TensorAddressElementVec &expVec,
- const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in)
- {
- AddressType expAddress = create(lhsAddress_in);
- AddressType lhsAddress = create(lhsAddress_in);
- AddressType rhsAddress = create(rhsAddress_in);
- DummyAddressBuilder builder;
- bool act = joinTensorAddresses<DummyAddressBuilder,
- AddressType, AddressType>
- (builder, commonDimensions, lhsAddress, rhsAddress);
- EXPECT_EQUAL(exp, act);
- if (exp) {
- EXPECT_EQUAL(expVec, builder.elements());
- }
- }
-
- void
- verifyJoin2Way(bool exp,
- const TensorAddressElementVec &expVec,
- const DimensionsSet &commonDimensions,
- const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in)
- {
- AddressType expAddress = create(lhsAddress_in);
- AddressType lhsAddress = create(lhsAddress_in);
- AddressType rhsAddress = create(rhsAddress_in);
- DummyAddressBuilder builder;
- bool act = joinTensorAddresses<DummyAddressBuilder,
- AddressType, AddressType>
- (builder, commonDimensions, lhsAddress, rhsAddress);
- EXPECT_EQUAL(exp, act);
- if (exp) {
- EXPECT_EQUAL(expVec, builder.elements());
- }
- }
-
- void
- verifyJoin(bool exp,
- const TensorAddressElementVec &expVec,
- const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress,
- const TensorAddressMap &rhsAddress)
- {
- TEST_DO(verifyJoin3Way(exp, expVec, commonDimensions,
- lhsAddress, rhsAddress));
- DimensionsSet commonDimensionsSet(commonDimensions.begin(),
- commonDimensions.end());
- TEST_DO(verifyJoin2Way(exp, expVec, commonDimensionsSet,
- lhsAddress, rhsAddress));
- }
-
- void
- verifyJoin(const TensorAddressElementVec &expVec,
- const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress,
- const TensorAddressMap &rhsAddress)
- {
- verifyJoin(true, expVec, commonDimensions, lhsAddress, rhsAddress);
- }
-
- void
- verifyJoinFailure(const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress,
- const TensorAddressMap &rhsAddress)
- {
- verifyJoin(false, {}, commonDimensions, lhsAddress, rhsAddress);
- }
-
- void
- verifyJoinFailureOnLabelMisMatch()
- {
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "1"}, {"y", "3"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "2"}, {"y", "2"}}));
- TEST_DO(verifyJoinFailure({"y"},
- {{"x", "1"}, {"y", "2"}},
- {{"y", "1"}, {"z", "3"}}));
- TEST_DO(verifyJoinFailure({"y"},
- {{"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"y", "1"}}));
- }
-
- void
- verityJoinFailureOnMissingDimension()
- {
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"y", "2"}},
- {{"x", "2"}, {"y", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"y", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}},
- {{"x", "2"}, {"y", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y", "z"},
- {{"x", "1"}, {"z", "3"}},
- {{"x", "2"}, {"y", "2"}, {"z", "3"}}));
- TEST_DO(verifyJoinFailure({"x", "y", "z"},
- {{"x", "2"}, {"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"z", "3"}}));
- }
-
- void
- verifyJoinSuccessOnDisjunctDimensions()
- {
- TEST_DO(verifyJoin({}, {}, {}, {}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}},
- {},
- {{"x", "1"}, {"y", "2"}},
- {{"z", "3"}, {"zz", "4"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}},
- {},
- {{"z", "3"}, {"zz", "4"}},
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}},
- {},
- {{"x", "1"}, {"z", "3"}},
- {{"y", "2"}, {"zz", "4"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}},
- {},
- {{"x", "1"}, {"y", "2"}},
- {}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}},
- {},
- {},
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"z", "3"}}, {"y"},
- {{"x", "1"}},
- {{"z", "3"}}));
- TEST_DO(verifyJoin( {{"x", "1"}, {"z", "3"}}, {"y"},
- {{"z", "3"}},
- {{"x", "1"}}));
- }
-
- void
- verifyJoinSuccessOnOverlappingDimensions()
- {
- TEST_DO(verifyJoin({{"x", "1"}}, {"x"},
- {{"x", "1"}}, {{"x", "1"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {"x", "z"},
- {{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"z", "3"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {"x", "z"},
- {{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"z", "3"}}));
- TEST_DO(verifyJoin( {{"x", "1"}, {"y", "2"}}, {"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, {"y"},
- {{"x", "1"}, {"y", "2"}},
- {{"y", "2"}, {"z", "3"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, {"y"},
- {{"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"y", "2"}}));
- }
-
- void
- verifyJoin()
- {
- verifyJoinSuccessOnDisjunctDimensions();
- verifyJoinSuccessOnOverlappingDimensions();
- verifyJoinFailureOnLabelMisMatch();
- verityJoinFailureOnMissingDimension();
- }
-
-};
-
-
-TEST_F("Test that Tensor address can be joined", Fixture<TensorAddress>)
-{
- f.verifyJoin();
-}
-
-TEST_F("Test that compact Tensor address can be joined",
- Fixture<CompactTensorAddress>)
-{
- f.verifyJoin();
-}
-
-
-TEST_F("Test that compact Tensor address ref can be joined",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyJoin();
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
index 69d8a7c3062..39e82abec7d 100644
--- a/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
+++ b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
@@ -2,9 +2,10 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
+#include <vespa/vespalib/test/insertion_operators.h>
using namespace vespalib::tensor;
-
+using vespalib::eval::TensorSpec;
void
assertCellValue(double expValue, const TensorAddress &address,
@@ -27,13 +28,14 @@ assertCellValue(double expValue, const TensorAddress &address,
addressBuilder.add("");
++dimsItr;
}
- CompactTensorAddressRef addressRef(addressBuilder.getAddressRef());
+ SparseTensorAddressRef addressRef(addressBuilder.getAddressRef());
auto itr = cells.find(addressRef);
EXPECT_FALSE(itr == cells.end());
EXPECT_EQUAL(expValue, itr->second);
}
-TEST("require that tensor can be constructed")
+Tensor::UP
+buildTensor()
{
SparseTensorBuilder builder;
builder.define_dimension("c");
@@ -44,7 +46,12 @@ TEST("require that tensor can be constructed")
add_label(builder.define_dimension("b"), "2").add_cell(10).
add_label(builder.define_dimension("c"), "3").
add_label(builder.define_dimension("d"), "4").add_cell(20);
- Tensor::UP tensor = builder.build();
+ return builder.build();
+}
+
+TEST("require that tensor can be constructed")
+{
+ Tensor::UP tensor = buildTensor();
const SparseTensor &sparseTensor = dynamic_cast<const SparseTensor &>(*tensor);
const TensorDimensions &dimensions = sparseTensor.dimensions();
const SparseTensor::Cells &cells = sparseTensor.cells();
@@ -55,6 +62,16 @@ TEST("require that tensor can be constructed")
dimensions, cells);
}
+TEST("require that tensor can be converted to tensor spec")
+{
+ Tensor::UP tensor = buildTensor();
+ TensorSpec expSpec("tensor(a{},b{},c{},d{})");
+ expSpec.add({{"a", "1"}, {"b", "2"}}, 10).
+ add({{"c", "3"}, {"d", "4"}}, 20);
+ TensorSpec actSpec = tensor->toSpec();
+ EXPECT_EQUAL(expSpec, actSpec);
+}
+
TEST("require that dimensions are extracted")
{
SparseTensorBuilder builder;
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore b/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore
deleted file mode 100644
index c28cf0c86f2..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_tensor_address_element_iterator_test_app
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt b/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt
deleted file mode 100644
index dad69af7ba3..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_tensor_address_element_iterator_test_app TEST
- SOURCES
- tensor_address_element_iterator_test.cpp
- DEPENDS
- vespalib
- vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_tensor_address_element_iterator_test_app COMMAND vespalib_tensor_address_element_iterator_test_app)
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES b/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES
deleted file mode 100644
index b185a25973e..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES
+++ /dev/null
@@ -1 +0,0 @@
-tensor_address_element_iterator_test.cpp
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp b/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp
deleted file mode 100644
index 4e953048f67..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/stllike/hash_set.h>
-#include <vespa/vespalib/tensor/tensor_address.h>
-#include <vespa/vespalib/tensor/tensor_address_builder.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h>
-#include <vespa/vespalib/tensor/tensor_address_element_iterator.h>
-
-using namespace vespalib::tensor;
-
-using TensorAddressMap = std::map<std::string, std::string>;
-using TensorAddressElementVec =
- std::vector<std::pair<std::string, std::string>>;
-
-namespace vespalib
-{
-
-std::ostream &
-operator<<(std::ostream &out, const TensorAddressElementVec &vec)
-{
- out << "{";
- bool first = true;
- for (const auto &elem : vec) {
- if (!first) {
- out << ",";
- }
- out << "{\"" << elem.first << "\",\"" << elem.second << "\"}";
- first = false;
- }
- out << "}";
- return out;
-};
-
-}
-
-
-class DummyAddressBuilder
-{
- TensorAddressElementVec _elements;
-public:
- void add(vespalib::stringref dimension, vespalib::stringref label)
- {
- _elements.emplace_back(dimension, label);
- }
-
- const TensorAddressElementVec &elements() const { return _elements; }
-};
-
-
-template <class TensorAddressT> struct FixtureBase;
-
-template <> struct FixtureBase<TensorAddress>
-{
- using AddressType = TensorAddress;
- using AddressBuilderType = TensorAddressBuilder;
-
- static TensorAddress create(TensorAddressBuilder &builder) {
- return builder.build();
- }
-};
-
-
-template <> struct FixtureBase<CompactTensorAddress>
-{
- using AddressType = CompactTensorAddress;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddress
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- CompactTensorAddress ret;
- ret.deserializeFromSparseAddressRef(newRef);
- return ret;
- }
-};
-
-template <> struct FixtureBase<CompactTensorAddressRef>
-{
- using AddressType = CompactTensorAddressRef;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddressRef
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- return newRef;
- }
-};
-
-template <class TensorAddressT> struct Fixture
- : public FixtureBase<TensorAddressT>
-{
- using Parent = FixtureBase<TensorAddressT>;
- using AddressType = typename Parent::AddressType;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using Parent::create;
-
- AddressType
- create(const TensorAddressMap &address_in) {
- AddressBuilderType builder;
- for (auto &element : address_in) {
- builder.add(element.first, element.second);
- }
- return create(builder);
- }
-
- void
- verifyPlainIterate(const TensorAddressMap &address_in)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- for (auto &element : address_in) {
- EXPECT_TRUE(itr.valid());
- EXPECT_EQUAL(element.first, itr.dimension());
- EXPECT_EQUAL(element.second, itr.label());
- itr.next();
- }
- EXPECT_FALSE(itr.valid());
- }
-
-
- void
- verifyPlainIterate()
- {
- TEST_DO(verifyPlainIterate({}));
- TEST_DO(verifyPlainIterate({{"a", "1"}}));
- TEST_DO(verifyPlainIterate({{"a", "1"}, {"b", "2"}}));
- }
-
- void
- verifyBeforeDimension(const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in,
- bool exp)
- {
- AddressType lhsAddress = create(lhsAddress_in);
- TensorAddressElementIterator<AddressType> lhsItr(lhsAddress);
- AddressType rhsAddress = create(rhsAddress_in);
- TensorAddressElementIterator<AddressType> rhsItr(rhsAddress);
- EXPECT_EQUAL(exp, lhsItr.beforeDimension(rhsItr));
- }
-
- void
- verifyBeforeDimension() {
- TEST_DO(verifyBeforeDimension({}, {}, false));
- TEST_DO(verifyBeforeDimension({}, {{"x", "1"}}, false));
- TEST_DO(verifyBeforeDimension({{"x", "1"}}, {}, true));
- TEST_DO(verifyBeforeDimension({{"x", "1"}}, {{"x", "2"}}, false));
- TEST_DO(verifyBeforeDimension({{"x", "1"}}, {{"y", "2"}}, true));
- TEST_DO(verifyBeforeDimension({{"y", "1"}}, {{"x", "2"}}, false));
- }
-
- void
- verifyAtDimension(const TensorAddressMap &address_in,
- vespalib::stringref dimension,
- bool exp)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- EXPECT_EQUAL(exp, itr.atDimension(dimension));
- }
-
- void
- verifyAtDimension()
- {
- TEST_DO(verifyAtDimension({}, "x", false));
- TEST_DO(verifyAtDimension({{"x", "1"}}, "x", true));
- TEST_DO(verifyAtDimension({{"x", "1"}}, "y", false));
- TEST_DO(verifyAtDimension({{"y", "1"}}, "x", false));
- TEST_DO(verifyAtDimension({{"y", "1"}}, "y", true));
- }
-
- void
- verifyAddElements(const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in,
- const TensorAddressElementVec &exp)
- {
- AddressType lhsAddress = create(lhsAddress_in);
- TensorAddressElementIterator<AddressType> lhsItr(lhsAddress);
- AddressType rhsAddress = create(rhsAddress_in);
- TensorAddressElementIterator<AddressType> rhsItr(rhsAddress);
- DummyAddressBuilder builder;
- lhsItr.addElements(builder, rhsItr);
- EXPECT_EQUAL(exp, builder.elements());
- }
-
- void verifyAddElements(const TensorAddressMap &address_in,
- const TensorAddressElementVec &exp)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- DummyAddressBuilder builder;
- itr.addElements(builder);
- EXPECT_EQUAL(exp, builder.elements());
- }
-
- void verifyAddElements(const TensorAddressMap &address_in,
- const DimensionsSet &dimensions,
- bool exp,
- const TensorAddressElementVec &expVec)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- DummyAddressBuilder builder;
- EXPECT_EQUAL(exp, itr.addElements(builder, dimensions));
- EXPECT_EQUAL(expVec, builder.elements());
- }
-
- void verifyAddElements(const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in,
- const DimensionsSet &dimensions,
- bool exp,
- const TensorAddressElementVec &expVec)
- {
- AddressType lhsAddress = create(lhsAddress_in);
- TensorAddressElementIterator<AddressType> lhsItr(lhsAddress);
- AddressType rhsAddress = create(rhsAddress_in);
- TensorAddressElementIterator<AddressType> rhsItr(rhsAddress);
- DummyAddressBuilder builder;
- ASSERT_TRUE(lhsItr.beforeDimension(rhsItr));
- EXPECT_EQUAL(exp, lhsItr.addElements(builder, dimensions, rhsItr));
- EXPECT_EQUAL(expVec, builder.elements());
- }
-
- void
- verifyAddElements()
- {
- // Stop according to rhs iterator
- TEST_DO(verifyAddElements({}, {}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {}, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {{"x", "1"}}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {{"y", "1"}}, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"y", "1"}}, {{"x", "1"}}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"z", "1"}},
- {{"x", "1"}, {"y", "2"}}));
- // Pass through everything
- TEST_DO(verifyAddElements({}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}},
- {{"x", "1"}, {"y", "2"}}));
- // Filter on dimension set
- TEST_DO(verifyAddElements({}, {}, true, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {}, true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {}, true,
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {"y"}, false,
- {{"x", "1"}}));
- // Filter on dimension set and stop according to rhs iterator
- TEST_DO(verifyAddElements({{"x", "1"}}, {}, {}, true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {}, {}, true,
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {},
- true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {"y"},
- true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {"x"},
- false, {}));
- }
-};
-
-
-TEST_F("Test that Tensor address can be iterated", Fixture<TensorAddress>)
-{
- f.verifyPlainIterate();
-}
-
-TEST_F("Test that compact Tensor address can be iterated",
- Fixture<CompactTensorAddress>)
-{
- f.verifyPlainIterate();
-}
-
-
-TEST_F("Test that compact Tensor address ref can be iterated",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyPlainIterate();
-}
-
-TEST_F("Test that Tensor address works with beforeDimension",
- Fixture<TensorAddress>)
-{
- f.verifyBeforeDimension();
-}
-
-TEST_F("Test that compact Tensor address works with beforeDimension",
- Fixture<CompactTensorAddress>)
-{
- f.verifyBeforeDimension();
-}
-
-TEST_F("Test that compat Tensor address ref works with beforeDimension",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyBeforeDimension();
-}
-
-TEST_F("Test that Tensor address works with atDimension",
- Fixture<TensorAddress>)
-{
- f.verifyAtDimension();
-}
-
-TEST_F("Test that compact Tensor address works with atDimension",
- Fixture<CompactTensorAddress>)
-{
- f.verifyAtDimension();
-}
-
-TEST_F("Test that compat Tensor address ref works with atDimension",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyAtDimension();
-}
-
-TEST_F("Test that Tensor address works with addElements",
- Fixture<TensorAddress>)
-{
- f.verifyAddElements();
-}
-
-TEST_F("Test that compact Tensor address works with addElements",
- Fixture<CompactTensorAddress>)
-{
- f.verifyAddElements();
-}
-
-TEST_F("Test that compat Tensor address ref works with addElements",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyAddElements();
-}
-
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp b/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
index 1f96b64d170..238d0604ee7 100644
--- a/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
+++ b/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
@@ -16,8 +16,4 @@ IGNORE_TEST("require that production tensor implementation passes non-mixed conf
TEST_DO(TensorConformance::run_tests(DefaultTensorEngine::ref(), false));
}
-IGNORE_TEST("require that production tensor implementation passes all conformance tests") {
- TEST_DO(TensorConformance::run_tests(DefaultTensorEngine::ref(), true));
-}
-
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp b/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
index a87c6555e84..5ad26e979c5 100644
--- a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
+++ b/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
@@ -120,6 +120,10 @@ struct Fixture
void assertAdd(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertAddImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertAdd(const TensorCells &exp, const TensorDimensions &expDimensions,
+ const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertAddImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertSubtractImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
MyInput input;
function::Node_UP ir = function::subtract(function::input(lhs.getType(), input.add(lhs)),
@@ -129,6 +133,9 @@ struct Fixture
void assertSubtract(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertSubtractImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertSubtract(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertSubtractImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertMinImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
MyInput input;
function::Node_UP ir = function::min(function::input(lhs.getType(), input.add(lhs)),
@@ -138,6 +145,9 @@ struct Fixture
void assertMin(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertMinImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertMin(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertMinImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertMaxImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
MyInput input;
function::Node_UP ir = function::max(function::input(lhs.getType(), input.add(lhs)),
@@ -147,6 +157,9 @@ struct Fixture
void assertMax(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertMaxImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertMax(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertMaxImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertSumImpl(double exp, const Tensor &tensor) {
MyInput input;
function::Node_UP ir = function::sum(function::input(tensor.getType(), input.add(tensor)));
@@ -252,42 +265,42 @@ void
testTensorAdd(FixtureType &f)
{
f.assertAdd({},{},{}, false);
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, 8} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, -2} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertAdd({ {{{"x","1"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -3} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertAdd({ {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertAdd({ {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertAdd({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"}}, 8} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"}}, -2} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"}}, 0} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -3} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
@@ -295,42 +308,42 @@ void
testTensorSubtract(FixtureType &f)
{
f.assertSubtract({},{},{}, false);
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"x","2"}}, -5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, -2} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, 8} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertSubtract({ {{{"x","1"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} });
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"y","2"}},-2}, {{{"z","3"}},-11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertSubtract({ {{{"x","1"}},-3}, {{{"y","2"}}, 2}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertSubtract({ {{{"y","2"}},-2}, {{{"z","3"}},-11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertSubtract({ {{{"y","2"}}, 2}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"y","2"}},-2} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertSubtract({ {{{"x","1"}},-3}, {{{"y","2"}}, 2} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"z","3"}},-11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertSubtract({ {{{"x","1"}},-3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertSubtract({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"}}, -2} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"}}, 8} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"}}, 0} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 3} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8}, {{{"y","2"}},-2} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8}, {{{"y","2"}}, 2} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
@@ -338,42 +351,42 @@ void
testTensorMin(FixtureType &f)
{
f.assertMin({},{},{}, false);
- f.assertMin({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertMin({ {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertMin({ {{{"x","1"}}, -5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"x","2"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 0} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMin({ {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMin({ {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertMin({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"x","1"}}, -5} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertMin({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 0} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
@@ -381,45 +394,45 @@ void
testTensorMax(FixtureType &f)
{
f.assertMax({},{},{}, false);
- f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 0} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, -5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, -5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMax({ {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMax({ {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertMax({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"}}, 5} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertMax({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 0} }));
+ TEST_DO(f.assertMax({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, -5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
diff --git a/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
index ab8acb9e296..2cac4cfa18c 100644
--- a/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
+++ b/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
@@ -113,8 +113,8 @@ TEST("SMOKETEST - require that model match benchmark expression produces expecte
TEST("SMOKETEST - require that matrix product benchmark expression produces expected result") {
Params params;
- params.add("query", parse_tensor("{{x:0}:1.0}"));
- params.add("document", parse_tensor("{{x:1}:2.0}"));
+ params.add("query", parse_tensor("{{x:0}:1.0,{x:1}:0.0}"));
+ params.add("document", parse_tensor("{{x:0}:0.0,{x:1}:2.0}"));
params.add("model", parse_tensor("{{x:0,y:0}:1.0,{x:0,y:1}:2.0,"
" {x:1,y:0}:3.0,{x:1,y:1}:4.0}"));
EXPECT_EQUAL(calculate_expression(matrix_product_expr, params), 17.0);
@@ -339,12 +339,8 @@ TEST("benchmark matrix product") {
size_t matrix_size = vector_size * 2;
for (auto type: {SPARSE, DENSE}) {
Params params;
- size_t document_size = vector_size;
- if (type == DENSE) {
- document_size = matrix_size;
- }
- params.add("query", make_tensor(type, {DimensionSpec("x", vector_size, vector_size)}));
- params.add("document", make_tensor(type, {DimensionSpec("x", document_size)}));
+ params.add("query", make_tensor(type, {DimensionSpec("x", matrix_size)}));
+ params.add("document", make_tensor(type, {DimensionSpec("x", matrix_size)}));
params.add("model", make_tensor(type, {DimensionSpec("x", matrix_size), DimensionSpec("y", matrix_size)}));
double time_us = benchmark_expression_us(matrix_product_expr, params);
fprintf(stderr, "-- matrix product (%s) %zu + %zu vs %zux%zu: %g us\n", name(type), vector_size, vector_size, matrix_size, matrix_size, time_us);
diff --git a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp
index 6e2e7778bc7..06e514e51ba 100644
--- a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp
+++ b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp
@@ -54,6 +54,30 @@ SimpleTensorEngine::to_string(const Tensor &tensor) const
return out;
}
+TensorSpec
+SimpleTensorEngine::to_spec(const Tensor &tensor) const
+{
+ assert(&tensor.engine() == this);
+ const SimpleTensor &simple_tensor = static_cast<const SimpleTensor&>(tensor);
+ ValueType type = simple_tensor.type();
+ const auto &dimensions = type.dimensions();
+ TensorSpec spec(type.to_spec());
+ for (const auto &cell: simple_tensor.cells()) {
+ TensorSpec::Address addr;
+ assert(cell.address.size() == dimensions.size());
+ for (size_t i = 0; i < cell.address.size(); ++i) {
+ const auto &label = cell.address[i];
+ if (label.is_mapped()) {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.name));
+ } else {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.index));
+ }
+ }
+ spec.add(addr, cell.value);
+ }
+ return spec;
+}
+
std::unique_ptr<eval::Tensor>
SimpleTensorEngine::create(const TensorSpec &spec) const
{
diff --git a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h
index 4013aa9de5b..c3207c440fb 100644
--- a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h
+++ b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h
@@ -22,6 +22,7 @@ public:
ValueType type_of(const Tensor &tensor) const override;
bool equal(const Tensor &a, const Tensor &b) const override;
vespalib::string to_string(const Tensor &tensor) const override;
+ TensorSpec to_spec(const Tensor &tensor) const override;
std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
diff --git a/vespalib/src/vespa/vespalib/eval/tensor_engine.h b/vespalib/src/vespa/vespalib/eval/tensor_engine.h
index 637d549a55d..2458da7ff8b 100644
--- a/vespalib/src/vespa/vespalib/eval/tensor_engine.h
+++ b/vespalib/src/vespa/vespalib/eval/tensor_engine.h
@@ -41,6 +41,7 @@ struct TensorEngine
virtual ValueType type_of(const Tensor &tensor) const = 0;
virtual bool equal(const Tensor &a, const Tensor &b) const = 0;
virtual vespalib::string to_string(const Tensor &tensor) const = 0;
+ virtual TensorSpec to_spec(const Tensor &tensor) const = 0;
virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); }
diff --git a/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp b/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp
index 28cda1b2962..eec930b8da4 100644
--- a/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp
+++ b/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp
@@ -1,10 +1,49 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/util/stringfmt.h>
#include "tensor_spec.h"
+#include <iostream>
namespace vespalib {
namespace eval {
+vespalib::string
+TensorSpec::to_string() const
+{
+ vespalib::string out = vespalib::make_string("spec(%s) {\n", _type.c_str());
+ for (const auto &cell: _cells) {
+ size_t n = 0;
+ out.append(" [");
+ for (const auto &label: cell.first) {
+ if (n++) {
+ out.append(",");
+ }
+ if (label.second.is_mapped()) {
+ out.append(label.second.name);
+ } else {
+ out.append(vespalib::make_string("%zu", label.second.index));
+ }
+ }
+ out.append(vespalib::make_string("]: %g\n", cell.second.value));
+ }
+ out.append("}");
+ return out;
+}
+
+bool
+operator==(const TensorSpec &lhs, const TensorSpec &rhs)
+{
+ return ((lhs.type() == rhs.type()) &&
+ (lhs.cells() == rhs.cells()));
+}
+
+std::ostream &
+operator<<(std::ostream &out, const TensorSpec &spec)
+{
+ out << spec.to_string();
+ return out;
+}
+
} // namespace vespalib::eval
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/eval/tensor_spec.h b/vespalib/src/vespa/vespalib/eval/tensor_spec.h
index aff23a42832..41c1f8d4f3c 100644
--- a/vespalib/src/vespa/vespalib/eval/tensor_spec.h
+++ b/vespalib/src/vespa/vespalib/eval/tensor_spec.h
@@ -3,6 +3,7 @@
#pragma once
#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/util/approx.h>
#include <memory>
#include <map>
@@ -25,6 +26,10 @@ public:
Label(const char *name_in) : index(npos), name(name_in) {}
bool is_mapped() const { return (index == npos); }
bool is_indexed() const { return (index != npos); }
+ bool operator==(const Label &rhs) const {
+ return ((index == rhs.index) &&
+ (name == rhs.name));
+ }
bool operator<(const Label &rhs) const {
if (index != rhs.index) {
return (index < rhs.index);
@@ -32,8 +37,14 @@ public:
return (name < rhs.name);
}
};
+ struct Value {
+ double value;
+ Value(double value_in) : value(value_in) {}
+ operator double() const { return value; }
+ bool operator==(const Value &rhs) const { return approx_equal(value, rhs.value); }
+ };
using Address = std::map<vespalib::string,Label>;
- using Cells = std::map<Address,double>;
+ using Cells = std::map<Address,Value>;
private:
vespalib::string _type;
Cells _cells;
@@ -45,7 +56,11 @@ public:
}
const vespalib::string &type() const { return _type; }
const Cells &cells() const { return _cells; }
+ vespalib::string to_string() const;
};
+bool operator==(const TensorSpec &lhs, const TensorSpec &rhs);
+std::ostream &operator<<(std::ostream &out, const TensorSpec &tensor);
+
} // namespace vespalib::eval
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
index 4a4fc8dc555..362bb8c5561 100644
--- a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
+++ b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
@@ -3,6 +3,7 @@
#include <vespa/fastos/fastos.h>
#include <vespa/vespalib/testkit/test_kit.h>
#include "tensor_conformance.h"
+#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/eval/simple_tensor_engine.h>
#include <vespa/vespalib/eval/tensor_spec.h>
#include <vespa/vespalib/eval/function.h>
@@ -14,17 +15,6 @@ namespace eval {
namespace test {
namespace {
-// virtual ValueType type_of(const Tensor &tensor) const = 0;
-// virtual bool equal(const Tensor &a, const Tensor &b) const = 0;
-
-// virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); }
-
-// virtual std::unique_ptr<Tensor> create(const TensorSpec &spec) const = 0;
-
-// virtual const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const = 0;
-// virtual const Value &map(const UnaryOperation &op, const Tensor &a, Stash &stash) const = 0;
-// virtual const Value &apply(const BinaryOperation &op, const Tensor &a, const Tensor &b, Stash &stash) const = 0;
-
// Random access sequence of numbers
struct Sequence {
virtual double operator[](size_t i) const = 0;
@@ -43,6 +33,13 @@ struct Div10 : Sequence {
double operator[](size_t i) const override { return (seq[i] / 10.0); }
};
+// Sequence of another sequence minus 2
+struct Sub2 : Sequence {
+ const Sequence &seq;
+ Sub2(const Sequence &seq_in) : seq(seq_in) {}
+ double operator[](size_t i) const override { return (seq[i] - 2.0); }
+};
+
// Sequence of a unary operator applied to a sequence
struct OpSeq : Sequence {
const Sequence &seq;
@@ -51,6 +48,13 @@ struct OpSeq : Sequence {
double operator[](size_t i) const override { return op.eval(seq[i]); }
};
+// Sequence of applying sigmoid to another sequence
+struct Sigmoid : Sequence {
+ const Sequence &seq;
+ Sigmoid(const Sequence &seq_in) : seq(seq_in) {}
+ double operator[](size_t i) const override { return operation::Sigmoid().eval(seq[i]); }
+};
+
// pre-defined sequence of numbers
struct Seq : Sequence {
std::vector<double> seq;
@@ -78,6 +82,13 @@ struct None : Mask {
bool operator[](size_t) const override { return false; }
};
+// Mask with false for each Nth index
+struct SkipNth : Mask {
+ size_t n;
+ SkipNth(size_t n_in) : n(n_in) {}
+ bool operator[](size_t i) const override { return (i % n) != 0; }
+};
+
// pre-defined mask
struct Bits : Mask {
std::vector<bool> bits;
@@ -88,6 +99,16 @@ struct Bits : Mask {
}
};
+// A mask converted to a sequence of two unique values (mapped from true and false)
+struct Mask2Seq : Sequence {
+ const Mask &mask;
+ double true_value;
+ double false_value;
+ Mask2Seq(const Mask &mask_in, double true_value_in = 1.0, double false_value_in = 0.0)
+ : mask(mask_in), true_value(true_value_in), false_value(false_value_in) {}
+ double operator[](size_t i) const override { return mask[i] ? true_value : false_value; }
+};
+
// custom op1
struct MyOp : CustomUnaryOperation {
double eval(double a) const override { return ((a + 1) * 2); }
@@ -132,28 +153,37 @@ vespalib::string infer_type(const Layout &layout) {
return ValueType::tensor_type(dimensions).to_spec();
}
-// Mix spaces with a number sequence to make a tensor spec
+// Wrapper for the things needed to generate a tensor
+struct Source {
+ using Address = TensorSpec::Address;
+
+ const Layout &layout;
+ const Sequence &seq;
+ const Mask &mask;
+ Source(const Layout &layout_in, const Sequence &seq_in, const Mask &mask_in)
+ : layout(layout_in), seq(seq_in), mask(mask_in) {}
+};
+
+// Mix layout with a number sequence to make a tensor spec
class TensorSpecBuilder
{
private:
using Label = TensorSpec::Label;
using Address = TensorSpec::Address;
- const Layout &_layout;
- const Sequence &_seq;
- const Mask &_mask;
- TensorSpec _spec;
- Address _addr;
- size_t _idx;
+ Source _source;
+ TensorSpec _spec;
+ Address _addr;
+ size_t _idx;
void generate(size_t layout_idx) {
- if (layout_idx == _layout.size()) {
- if (_mask[_idx]) {
- _spec.add(_addr, _seq[_idx]);
+ if (layout_idx == _source.layout.size()) {
+ if (_source.mask[_idx]) {
+ _spec.add(_addr, _source.seq[_idx]);
}
++_idx;
} else {
- const Domain &domain = _layout[layout_idx];
+ const Domain &domain = _source.layout[layout_idx];
if (domain.size > 0) { // indexed
for (size_t i = 0; i < domain.size; ++i) {
_addr.emplace(domain.dimension, Label(i)).first->second = Label(i);
@@ -170,67 +200,168 @@ private:
public:
TensorSpecBuilder(const Layout &layout, const Sequence &seq, const Mask &mask)
- : _layout(layout), _seq(seq), _mask(mask), _spec(infer_type(layout)), _addr(), _idx(0) {}
+ : _source(layout, seq, mask), _spec(infer_type(layout)), _addr(), _idx(0) {}
TensorSpec build() {
generate(0);
return _spec;
}
};
+TensorSpec spec(const Layout &layout, const Sequence &seq, const Mask &mask) {
+ return TensorSpecBuilder(layout, seq, mask).build();
+}
+TensorSpec spec(const Layout &layout, const Sequence &seq) {
+ return spec(layout, seq, All());
+}
+TensorSpec spec(const Layout &layout) {
+ return spec(layout, Seq(), None());
+}
+TensorSpec spec(const Domain &domain, const Sequence &seq, const Mask &mask) {
+ return spec(Layout({domain}), seq, mask);
+}
+TensorSpec spec(const Domain &domain, const Sequence &seq) {
+ return spec(Layout({domain}), seq);
+}
+TensorSpec spec(const Domain &domain) {
+ return spec(Layout({domain}));
+}
+TensorSpec spec(double value) {
+ return spec(Layout({}), Seq({value}));
+}
+TensorSpec spec() {
+ return spec(Layout({}));
+}
-using Tensor_UP = std::unique_ptr<Tensor>;
-
-// small utility used to capture passed tensor references for uniform handling
-struct TensorRef {
- const Tensor &ref;
- TensorRef(const Tensor &ref_in) : ref(ref_in) {}
- TensorRef(const Tensor_UP &up_ref) : ref(*(up_ref.get())) {}
-};
-
-// abstract evaluation verification wrapper
+// abstract evaluation wrapper
struct Eval {
- virtual void verify(const TensorEngine &engine, TensorRef expect) const {
- (void) engine;
- (void) expect;
+ // typed result wrapper
+ class Result {
+ private:
+ enum class Type { ERROR, NUMBER, TENSOR };
+ Type _type;
+ double _number;
+ TensorSpec _tensor;
+ public:
+ Result(const Value &value) : _type(Type::ERROR), _number(error_value), _tensor("error") {
+ if (value.is_double()) {
+ _type = Type::NUMBER;
+ _number = value.as_double();
+ _tensor = TensorSpec("double").add({}, _number);
+ } else if (value.is_tensor()) {
+ _type = Type::TENSOR;
+ _tensor = value.as_tensor()->engine().to_spec(*value.as_tensor());
+ if (_tensor.type() == "double") {
+ _number = _tensor.cells().empty() ? 0.0 : _tensor.cells().begin()->second.value;
+ }
+ }
+ }
+ bool is_error() const { return (_type == Type::ERROR); }
+ bool is_number() const { return (_type == Type::NUMBER); }
+ bool is_tensor() const { return (_type == Type::TENSOR); }
+ double number() const {
+ EXPECT_TRUE(is_number());
+ return _number;
+ }
+ const TensorSpec &tensor() const {
+ EXPECT_TRUE(is_tensor());
+ return _tensor;
+ }
+ };
+ virtual Result eval(const TensorEngine &) const {
+ TEST_ERROR("wrong signature");
+ return Result(ErrorValue());
+ }
+ virtual Result eval(const TensorEngine &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
+ return Result(ErrorValue());
}
- virtual void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const {
- (void) engine;
- (void) a;
- (void) expect;
+ virtual Result eval(const TensorEngine &, const TensorSpec &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
+ return Result(ErrorValue());
}
virtual ~Eval() {}
};
-// expression(void) -> tensor
-struct Expr_V_T : Eval {
+// catches exceptions trying to keep the test itself safe from eval side-effects
+struct SafeEval : Eval {
+ const Eval &unsafe;
+ SafeEval(const Eval &unsafe_in) : unsafe(unsafe_in) {}
+ Result eval(const TensorEngine &engine) const override {
+ try {
+ return unsafe.eval(engine);
+ } catch (std::exception &e) {
+ TEST_ERROR(e.what());
+ return Result(ErrorValue());
+ }
+ }
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ try {
+ return unsafe.eval(engine, a);
+ } catch (std::exception &e) {
+ TEST_ERROR(e.what());
+ return Result(ErrorValue());
+ }
+
+ }
+ Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ try {
+ return unsafe.eval(engine, a, b);
+ } catch (std::exception &e) {
+ TEST_ERROR(e.what());
+ return Result(ErrorValue());
+ }
+ }
+};
+SafeEval safe(const Eval &eval) { return SafeEval(eval); }
+
+// expression(void)
+struct Expr_V : Eval {
const vespalib::string &expr;
- Expr_V_T(const vespalib::string &expr_in) : expr(expr_in) {}
- void verify(const TensorEngine &engine, TensorRef expect) const override {
+ Expr_V(const vespalib::string &expr_in) : expr(expr_in) {}
+ Result eval(const TensorEngine &engine) const override {
InterpretedFunction::Context ctx;
InterpretedFunction ifun(engine, Function::parse(expr));
- const Value &result = ifun.eval(ctx);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(ifun.eval(ctx));
}
};
-// expression(tensor) -> tensor
-struct Expr_T_T : Eval {
+// expression(tensor)
+struct Expr_T : Eval {
const vespalib::string &expr;
- Expr_T_T(const vespalib::string &expr_in) : expr(expr_in) {}
- void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const override {
- TensorValue va(a.ref);
+ Expr_T(const vespalib::string &expr_in) : expr(expr_in) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ TensorValue va(engine.create(a));
InterpretedFunction::Context ctx;
InterpretedFunction ifun(engine, Function::parse(expr));
ctx.add_param(va);
- const Value &result = ifun.eval(ctx);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(ifun.eval(ctx));
+ }
+};
+
+// expression(tensor,tensor)
+struct Expr_TT : Eval {
+ const vespalib::string &expr;
+ Expr_TT(const vespalib::string &expr_in) : expr(expr_in) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ TensorValue va(engine.create(a));
+ TensorValue vb(engine.create(b));
+ InterpretedFunction::Context ctx;
+ InterpretedFunction ifun(engine, Function::parse(expr));
+ ctx.add_param(va);
+ ctx.add_param(vb);
+ return Result(ifun.eval(ctx));
+ }
+};
+
+// evaluate tensor reduce operation using tensor engine immediate api
+struct ImmediateReduce : Eval {
+ const BinaryOperation &op;
+ std::vector<vespalib::string> dimensions;
+ ImmediateReduce(const BinaryOperation &op_in) : op(op_in), dimensions() {}
+ ImmediateReduce(const BinaryOperation &op_in, const vespalib::string &dimension)
+ : op(op_in), dimensions({dimension}) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ Stash stash;
+ return Result(engine.reduce(*engine.create(a), op, dimensions, stash));
}
};
@@ -238,28 +369,66 @@ struct Expr_T_T : Eval {
struct ImmediateMap : Eval {
const UnaryOperation &op;
ImmediateMap(const UnaryOperation &op_in) : op(op_in) {}
- void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const override {
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
- const Value &result = engine.map(op, a.ref, stash);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(engine.map(op, *engine.create(a), stash));
+ }
+};
+
+// evaluate tensor apply operation using tensor engine immediate api
+struct ImmediateApply : Eval {
+ const BinaryOperation &op;
+ ImmediateApply(const BinaryOperation &op_in) : op(op_in) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ Stash stash;
+ return Result(engine.apply(op, *engine.create(a), *engine.create(b), stash));
}
};
-// input needed to evaluate a map operation in retained mode
-struct TensorMapInput : TensorFunction::Input {
- TensorValue tensor;
- const UnaryOperation &map_op;
- TensorMapInput(TensorRef in, const UnaryOperation &op) : tensor(in.ref), map_op(op) {}
+const size_t tensor_id_a = 11;
+const size_t tensor_id_b = 12;
+const size_t map_operation_id = 22;
+
+// input used when evaluating in retained mode
+struct Input : TensorFunction::Input {
+ std::vector<TensorValue> tensors;
+ const UnaryOperation *map_op;
+ Input(std::unique_ptr<Tensor> a) : tensors(), map_op(nullptr) {
+ tensors.emplace_back(std::move(a));
+ }
+ Input(std::unique_ptr<Tensor> a, const UnaryOperation &op) : tensors(), map_op(&op) {
+ tensors.emplace_back(std::move(a));
+ }
+ Input(std::unique_ptr<Tensor> a, std::unique_ptr<Tensor> b) : tensors(), map_op(nullptr) {
+ tensors.emplace_back(std::move(a));
+ tensors.emplace_back(std::move(b));
+ }
const Value &get_tensor(size_t id) const override {
- ASSERT_EQUAL(id, 11u);
- return tensor;
+ size_t offset = (id - tensor_id_a);
+ ASSERT_GREATER(tensors.size(), offset);
+ return tensors[offset];
}
const UnaryOperation &get_map_operation(size_t id) const {
- ASSERT_EQUAL(id, 22u);
- return map_op;
+ ASSERT_TRUE(map_op != nullptr);
+ ASSERT_EQUAL(id, map_operation_id);
+ return *map_op;
+ }
+};
+
+// evaluate tensor reduce operation using tensor engine retained api
+struct RetainedReduce : Eval {
+ const BinaryOperation &op;
+ std::vector<vespalib::string> dimensions;
+ RetainedReduce(const BinaryOperation &op_in) : op(op_in), dimensions() {}
+ RetainedReduce(const BinaryOperation &op_in, const vespalib::string &dimension)
+ : op(op_in), dimensions({dimension}) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ auto a_type = ValueType::from_spec(a.type());
+ auto ir = tensor_function::reduce(tensor_function::inject(a_type, tensor_id_a), op, dimensions);
+ auto fun = engine.compile(std::move(ir));
+ Input input(engine.create(a));
+ Stash stash;
+ return Result(fun->eval(input, stash));
}
};
@@ -267,65 +436,65 @@ struct TensorMapInput : TensorFunction::Input {
struct RetainedMap : Eval {
const UnaryOperation &op;
RetainedMap(const UnaryOperation &op_in) : op(op_in) {}
- void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const override {
- auto a_type = a.ref.engine().type_of(a.ref);
- auto ir = tensor_function::map(22, tensor_function::inject(a_type, 11));
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ auto a_type = ValueType::from_spec(a.type());
+ auto ir = tensor_function::map(map_operation_id, tensor_function::inject(a_type, tensor_id_a));
auto fun = engine.compile(std::move(ir));
- TensorMapInput input(a, op);
+ Input input(engine.create(a), op);
Stash stash;
- const Value &result = fun->eval(input, stash);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(fun->eval(input, stash));
+ }
+};
+
+// evaluate tensor apply operation using tensor engine retained api
+struct RetainedApply : Eval {
+    const BinaryOperation &op;
+    RetainedApply(const BinaryOperation &op_in) : op(op_in) {}
+    Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+        auto a_type = ValueType::from_spec(a.type());
+        auto b_type = ValueType::from_spec(b.type());
+        // inject each operand with its own value type (b must use b_type,
+        // otherwise the IR mis-declares b whenever the operand types differ)
+        auto ir = tensor_function::apply(op, tensor_function::inject(a_type, tensor_id_a),
+                                         tensor_function::inject(b_type, tensor_id_b));
+        auto fun = engine.compile(std::move(ir));
+        Input input(engine.create(a), engine.create(b));
+        Stash stash;
+        return Result(fun->eval(input, stash));
+    }
};
// placeholder used for unused values in a sequence
-const double X = 31212.0;
+const double X = error_value;
+
+// NaN value
+const double my_nan = std::numeric_limits<double>::quiet_NaN();
+
// Test wrapper to avoid passing global test parameters around
struct TestContext {
+ const TensorEngine &ref_engine;
const TensorEngine &engine;
bool test_mixed_cases;
+ size_t skip_count;
+
TestContext(const TensorEngine &engine_in, bool test_mixed_cases_in)
- : engine(engine_in), test_mixed_cases(test_mixed_cases_in) {}
+ : ref_engine(SimpleTensorEngine::ref()), engine(engine_in),
+ test_mixed_cases(test_mixed_cases_in), skip_count(0) {}
+
+ std::unique_ptr<Tensor> tensor(const TensorSpec &spec) {
+ auto result = engine.create(spec);
+ EXPECT_EQUAL(spec.type(), engine.type_of(*result).to_spec());
+ return result;
+ }
- bool mixed() {
+ bool mixed(size_t n) {
if (!test_mixed_cases) {
- fprintf(stderr, "skipping some tests since mixed testing is disabled\n");
+ skip_count += n;
}
return test_mixed_cases;
}
- Tensor_UP tensor(const Layout &layout, const Sequence &seq, const Mask &mask) {
- TensorSpec spec = TensorSpecBuilder(layout, seq, mask).build();
- Tensor_UP result = engine.create(spec);
- EXPECT_EQUAL(spec.type(), engine.type_of(*result).to_spec());
- return result;
- }
- Tensor_UP tensor(const Layout &layout, const Sequence &seq) {
- return tensor(layout, seq, All());
- }
- Tensor_UP tensor(const Layout &layout) {
- return tensor(layout, Seq(), None());
- }
- Tensor_UP tensor(const Domain &domain, const Sequence &seq, const Mask &mask) {
- return tensor(Layout({domain}), seq, mask);
- }
- Tensor_UP tensor(const Domain &domain, const Sequence &seq) {
- return tensor(Layout({domain}), seq);
- }
- Tensor_UP tensor(const Domain &domain) {
- return tensor(Layout({domain}));
- }
- Tensor_UP tensor(double value) {
- return tensor(Layout({}), Seq({value}));
- }
- Tensor_UP tensor() {
- return tensor(Layout({}));
- }
+ //-------------------------------------------------------------------------
void verify_create_type(const vespalib::string &type_spec) {
auto tensor = engine.create(TensorSpec(type_spec));
@@ -333,59 +502,154 @@ struct TestContext {
EXPECT_EQUAL(type_spec, engine.type_of(*tensor).to_spec());
}
- void verify_not_equal(TensorRef a, TensorRef b) {
- EXPECT_FALSE(a.ref == b.ref);
- EXPECT_FALSE(b.ref == a.ref);
- }
-
- void verify_verbatim_tensor(const vespalib::string &tensor_expr, TensorRef expect) {
- Expr_V_T(tensor_expr).verify(engine, expect);
- }
-
void test_tensor_create_type() {
TEST_DO(verify_create_type("double"));
TEST_DO(verify_create_type("tensor(x{})"));
TEST_DO(verify_create_type("tensor(x{},y{})"));
TEST_DO(verify_create_type("tensor(x[5])"));
TEST_DO(verify_create_type("tensor(x[5],y[10])"));
- if (mixed()) {
+ if (mixed(2)) {
TEST_DO(verify_create_type("tensor(x{},y[10])"));
TEST_DO(verify_create_type("tensor(x[5],y{})"));
}
}
+ //-------------------------------------------------------------------------
+
+ void verify_equal(const TensorSpec &a, const TensorSpec &b) {
+ auto ta = tensor(a);
+ auto tb = tensor(b);
+ EXPECT_EQUAL(a, b);
+ EXPECT_EQUAL(*ta, *tb);
+ TensorSpec spec = engine.to_spec(*ta);
+ TensorSpec ref_spec = ref_engine.to_spec(*ref_engine.create(a));
+ EXPECT_EQUAL(spec, ref_spec);
+ }
+
+ void test_tensor_equality() {
+ TEST_DO(verify_equal(spec(), spec()));
+ TEST_DO(verify_equal(spec(10.0), spec(10.0)));
+ TEST_DO(verify_equal(spec(x()), spec(x())));
+ TEST_DO(verify_equal(spec(x({"a"}), Seq({1})), spec(x({"a"}), Seq({1}))));
+ TEST_DO(verify_equal(spec({x({"a"}),y({"a"})}, Seq({1})), spec({y({"a"}),x({"a"})}, Seq({1}))));
+ TEST_DO(verify_equal(spec(x(3)), spec(x(3))));
+ TEST_DO(verify_equal(spec({x(1),y(1)}, Seq({1})), spec({y(1),x(1)}, Seq({1}))));
+ if (mixed(2)) {
+ TEST_DO(verify_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({y(1),x({"a"})}, Seq({1}))));
+ TEST_DO(verify_equal(spec({y({"a"}),x(1)}, Seq({1})), spec({x(1),y({"a"})}, Seq({1}))));
+ }
+ }
+
+ //-------------------------------------------------------------------------
+
+ void verify_not_equal(const TensorSpec &a, const TensorSpec &b) {
+ auto ta = tensor(a);
+ auto tb = tensor(b);
+ EXPECT_NOT_EQUAL(a, b);
+ EXPECT_NOT_EQUAL(b, a);
+ EXPECT_NOT_EQUAL(*ta, *tb);
+ EXPECT_NOT_EQUAL(*tb, *ta);
+ }
+
void test_tensor_inequality() {
- TEST_DO(verify_not_equal(tensor(1.0), tensor(2.0)));
- TEST_DO(verify_not_equal(tensor(), tensor(x())));
- TEST_DO(verify_not_equal(tensor(), tensor(x(1))));
- TEST_DO(verify_not_equal(tensor(x()), tensor(x(1))));
- TEST_DO(verify_not_equal(tensor(x()), tensor(y())));
- TEST_DO(verify_not_equal(tensor(x(1)), tensor(x(2))));
- TEST_DO(verify_not_equal(tensor(x(1)), tensor(y(1))));
- TEST_DO(verify_not_equal(tensor(x({"a"}), Seq({1})), tensor(x({"a"}), Seq({2}))));
- TEST_DO(verify_not_equal(tensor(x({"a"}), Seq({1})), tensor(x({"b"}), Seq({1}))));
- TEST_DO(verify_not_equal(tensor(x({"a"}), Seq({1})), tensor({x({"a"}),y({"a"})}, Seq({1}))));
- TEST_DO(verify_not_equal(tensor(x(1), Seq({1})), tensor(x(1), Seq({2}))));
- TEST_DO(verify_not_equal(tensor(x(1), Seq({1})), tensor(x(2), Seq({1}), Bits({1,0}))));
- TEST_DO(verify_not_equal(tensor(x(2), Seq({1,1}), Bits({1,0})),
- tensor(x(2), Seq({1,1}), Bits({0,1}))));
- TEST_DO(verify_not_equal(tensor(x(1), Seq({1})), tensor({x(1),y(1)}, Seq({1}))));
- if (mixed()) {
- TEST_DO(verify_not_equal(tensor({x({"a"}),y(1)}, Seq({1})), tensor({x({"a"}),y(1)}, Seq({2}))));
- TEST_DO(verify_not_equal(tensor({x({"a"}),y(1)}, Seq({1})), tensor({x({"b"}),y(1)}, Seq({1}))));
- TEST_DO(verify_not_equal(tensor({x(2),y({"a"})}, Seq({1}), Bits({1,0})),
- tensor({x(2),y({"a"})}, Seq({X,1}), Bits({0,1}))));
+ TEST_DO(verify_not_equal(spec(1.0), spec(2.0)));
+ TEST_DO(verify_not_equal(spec(), spec(x())));
+ TEST_DO(verify_not_equal(spec(), spec(x(1))));
+ TEST_DO(verify_not_equal(spec(x()), spec(x(1))));
+ TEST_DO(verify_not_equal(spec(x()), spec(y())));
+ TEST_DO(verify_not_equal(spec(x(1)), spec(x(2))));
+ TEST_DO(verify_not_equal(spec(x(1)), spec(y(1))));
+ TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec(x({"a"}), Seq({2}))));
+ TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec(x({"b"}), Seq({1}))));
+ TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec({x({"a"}),y({"a"})}, Seq({1}))));
+ TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec(x(1), Seq({2}))));
+ TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec(x(2), Seq({1}), Bits({1,0}))));
+ TEST_DO(verify_not_equal(spec(x(2), Seq({1,1}), Bits({1,0})),
+ spec(x(2), Seq({1,1}), Bits({0,1}))));
+ TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec({x(1),y(1)}, Seq({1}))));
+ if (mixed(3)) {
+ TEST_DO(verify_not_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({x({"a"}),y(1)}, Seq({2}))));
+ TEST_DO(verify_not_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({x({"b"}),y(1)}, Seq({1}))));
+ TEST_DO(verify_not_equal(spec({x(2),y({"a"})}, Seq({1}), Bits({1,0})),
+ spec({x(2),y({"a"})}, Seq({X,1}), Bits({0,1}))));
}
}
+ //-------------------------------------------------------------------------
+
+ void verify_verbatim_tensor(const vespalib::string &tensor_expr, const TensorSpec &expect) {
+ EXPECT_EQUAL(Expr_V(tensor_expr).eval(engine).tensor(), expect);
+ }
+
void test_verbatim_tensors() {
- TEST_DO(verify_verbatim_tensor("{}", tensor()));
- TEST_DO(verify_verbatim_tensor("{{}:5}", tensor(5.0)));
- TEST_DO(verify_verbatim_tensor("{{x:foo}:1,{x:bar}:2,{x:baz}:3}", tensor(x({"foo","bar","baz"}), Seq({1,2,3}))));
+ TEST_DO(verify_verbatim_tensor("{}", spec(0.0)));
+ TEST_DO(verify_verbatim_tensor("{{}:5}", spec(5.0)));
+ TEST_DO(verify_verbatim_tensor("{{x:foo}:1,{x:bar}:2,{x:baz}:3}", spec(x({"foo","bar","baz"}), Seq({1,2,3}))));
TEST_DO(verify_verbatim_tensor("{{x:foo,y:a}:1,{y:b,x:bar}:2}",
- tensor({x({"foo","bar"}),y({"a","b"})}, Seq({1,X,X,2}), Bits({1,0,0,1}))));
+ spec({x({"foo","bar"}),y({"a","b"})}, Seq({1,X,X,2}), Bits({1,0,0,1}))));
+ }
+
+ //-------------------------------------------------------------------------
+
+ void verify_reduce_result(const Eval &eval, const TensorSpec &a, const Eval::Result &expect) {
+ if (expect.is_tensor()) {
+ EXPECT_EQUAL(eval.eval(engine, a).tensor(), expect.tensor());
+ } else if (expect.is_number()) {
+ EXPECT_EQUAL(eval.eval(engine, a).number(), expect.number());
+ } else {
+ TEST_FATAL("expected result should be valid");
+ }
+ }
+
+ void test_reduce_op(const vespalib::string &name, const BinaryOperation &op, const Sequence &seq) {
+ std::vector<Layout> layouts = {
+ {x(3)},
+ {x(3),y(5)},
+ {x(3),y(5),z(7)},
+ {x({"a","b","c"})},
+ {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}
+ };
+ if (mixed(2 * 4)) {
+ layouts.push_back({x(3),y({"foo", "bar"}),z(7)});
+ layouts.push_back({x({"a","b","c"}),y(5),z({"i","j","k","l"})});
+ }
+ for (const Layout &layout: layouts) {
+ TensorSpec input = spec(layout, seq);
+ for (const Domain &domain: layout) {
+ Eval::Result expect = ImmediateReduce(op, domain.dimension).eval(ref_engine, input);
+ TEST_STATE(make_string("shape: %s, reduce dimension: %s",
+ infer_type(layout).c_str(), domain.dimension.c_str()).c_str());
+ if (!name.empty()) {
+ vespalib::string expr = make_string("%s(a,%s)", name.c_str(), domain.dimension.c_str());
+ TEST_DO(verify_reduce_result(Expr_T(expr), input, expect));
+ }
+ TEST_DO(verify_reduce_result(ImmediateReduce(op, domain.dimension), input, expect));
+ TEST_DO(verify_reduce_result(RetainedReduce(op, domain.dimension), input, expect));
+ }
+ {
+ Eval::Result expect = ImmediateReduce(op).eval(ref_engine, input);
+ TEST_STATE(make_string("shape: %s, reduce all dimensions",
+ infer_type(layout).c_str()).c_str());
+ if (!name.empty()) {
+ vespalib::string expr = make_string("%s(a)", name.c_str());
+ TEST_DO(verify_reduce_result(Expr_T(expr), input, expect));
+ }
+ TEST_DO(verify_reduce_result(ImmediateReduce(op), input, expect));
+ TEST_DO(verify_reduce_result(RetainedReduce(op), input, expect));
+ }
+ }
+ }
+
+ void test_tensor_reduce() {
+ TEST_DO(test_reduce_op("sum", operation::Add(), N()));
+ TEST_DO(test_reduce_op("", operation::Mul(), Sigmoid(N())));
+ TEST_DO(test_reduce_op("", operation::Min(), N()));
+ TEST_DO(test_reduce_op("", operation::Max(), N()));
}
+ //-------------------------------------------------------------------------
+
void test_map_op(const Eval &eval, const UnaryOperation &ref_op, const Sequence &seq) {
std::vector<Layout> layouts = {
{},
@@ -396,38 +660,121 @@ struct TestContext {
{x({"a","b","c"}),y({"foo","bar"})},
{x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}
};
- if (mixed()) {
+ if (mixed(2)) {
layouts.push_back({x(3),y({"foo", "bar"}),z(7)});
layouts.push_back({x({"a","b","c"}),y(5),z({"i","j","k","l"})});
}
for (const Layout &layout: layouts) {
- TEST_DO(eval.verify(engine, tensor(layout, seq), tensor(layout, OpSeq(seq, ref_op))));
+ EXPECT_EQUAL(eval.eval(engine, spec(layout, seq)).tensor(), spec(layout, OpSeq(seq, ref_op)));
}
}
+ void test_map_op(const vespalib::string &expr, const UnaryOperation &op, const Sequence &seq) {
+ TEST_DO(test_map_op(ImmediateMap(op), op, seq));
+ TEST_DO(test_map_op(RetainedMap(op), op, seq));
+ TEST_DO(test_map_op(Expr_T(expr), op, seq));
+ }
+
void test_tensor_map() {
- TEST_DO(test_map_op(ImmediateMap(operation::Floor()), operation::Floor(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(operation::Floor()), operation::Floor(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("floor(a)"), operation::Floor(), Div10(N())));
- //---------------------------------------------------------------------
- TEST_DO(test_map_op(ImmediateMap(operation::Ceil()), operation::Ceil(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(operation::Ceil()), operation::Ceil(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("ceil(a)"), operation::Ceil(), Div10(N())));
- //---------------------------------------------------------------------
- TEST_DO(test_map_op(ImmediateMap(operation::Sqrt()), operation::Sqrt(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(operation::Sqrt()), operation::Sqrt(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("sqrt(a)"), operation::Sqrt(), Div10(N())));
- //---------------------------------------------------------------------
- TEST_DO(test_map_op(ImmediateMap(MyOp()), MyOp(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(MyOp()), MyOp(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("(a+1)*2"), MyOp(), Div10(N())));
+ TEST_DO(test_map_op("-a", operation::Neg(), Sub2(Div10(N()))));
+ TEST_DO(test_map_op("!a", operation::Not(), Mask2Seq(SkipNth(3))));
+ TEST_DO(test_map_op("cos(a)", operation::Cos(), Div10(N())));
+ TEST_DO(test_map_op("sin(a)", operation::Sin(), Div10(N())));
+ TEST_DO(test_map_op("tan(a)", operation::Tan(), Div10(N())));
+ TEST_DO(test_map_op("cosh(a)", operation::Cosh(), Div10(N())));
+ TEST_DO(test_map_op("sinh(a)", operation::Sinh(), Div10(N())));
+ TEST_DO(test_map_op("tanh(a)", operation::Tanh(), Div10(N())));
+ TEST_DO(test_map_op("acos(a)", operation::Acos(), Sigmoid(Div10(N()))));
+ TEST_DO(test_map_op("asin(a)", operation::Asin(), Sigmoid(Div10(N()))));
+ TEST_DO(test_map_op("atan(a)", operation::Atan(), Div10(N())));
+ TEST_DO(test_map_op("exp(a)", operation::Exp(), Div10(N())));
+ TEST_DO(test_map_op("log10(a)", operation::Log10(), Div10(N())));
+ TEST_DO(test_map_op("log(a)", operation::Log(), Div10(N())));
+ TEST_DO(test_map_op("sqrt(a)", operation::Sqrt(), Div10(N())));
+ TEST_DO(test_map_op("ceil(a)", operation::Ceil(), Div10(N())));
+ TEST_DO(test_map_op("fabs(a)", operation::Fabs(), Div10(N())));
+ TEST_DO(test_map_op("floor(a)", operation::Floor(), Div10(N())));
+ TEST_DO(test_map_op("isNan(a)", operation::IsNan(), Mask2Seq(SkipNth(3), 1.0, my_nan)));
+ TEST_DO(test_map_op("relu(a)", operation::Relu(), Sub2(Div10(N()))));
+ TEST_DO(test_map_op("sigmoid(a)", operation::Sigmoid(), Sub2(Div10(N()))));
+ TEST_DO(test_map_op("(a+1)*2", MyOp(), Div10(N())));
+ }
+
+ //-------------------------------------------------------------------------
+
+ void test_apply_op(const Eval &eval, const BinaryOperation &op, const Sequence &seq) {
+ std::vector<Layout> layouts = {
+ {}, {},
+ {x(5)}, {x(5)},
+ {x(5)}, {x(3)},
+ {x(5)}, {y(5)},
+ {x(5)}, {x(5),y(5)},
+ {x(3),y(5)}, {x(4),y(4)},
+ {x(3),y(5)}, {y(5),z(7)},
+ {x({"a","b","c"})}, {x({"a","b","c"})},
+ {x({"a","b","c"})}, {x({"a","b"})},
+ {x({"a","b","c"})}, {y({"foo","bar","baz"})},
+ {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
+ {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})}
+ };
+ if (mixed(2)) {
+ layouts.push_back({x(3),y({"foo", "bar"})});
+ layouts.push_back({y({"foo", "bar"}),z(7)});
+ layouts.push_back({x({"a","b","c"}),y(5)});
+ layouts.push_back({y(5),z({"i","j","k","l"})});
+ }
+ ASSERT_TRUE((layouts.size() % 2) == 0);
+ for (size_t i = 0; i < layouts.size(); i += 2) {
+ TensorSpec lhs_input = spec(layouts[i], seq);
+ TensorSpec rhs_input = spec(layouts[i + 1], seq);
+ TEST_STATE(make_string("lhs shape: %s, rhs shape: %s",
+ lhs_input.type().c_str(),
+ rhs_input.type().c_str()).c_str());
+ TensorSpec expect = ImmediateApply(op).eval(ref_engine, lhs_input, rhs_input).tensor();
+ EXPECT_EQUAL(safe(eval).eval(engine, lhs_input, rhs_input).tensor(), expect);
+ }
}
+ void test_apply_op(const vespalib::string &expr, const BinaryOperation &op, const Sequence &seq) {
+ TEST_DO(test_apply_op(ImmediateApply(op), op, seq));
+ TEST_DO(test_apply_op(RetainedApply(op), op, seq));
+ TEST_DO(test_apply_op(Expr_TT(expr), op, seq));
+ }
+
+ void test_tensor_apply() {
+ TEST_DO(test_apply_op("a+b", operation::Add(), Div10(N())));
+ TEST_DO(test_apply_op("a-b", operation::Sub(), Div10(N())));
+ TEST_DO(test_apply_op("a*b", operation::Mul(), Div10(N())));
+ TEST_DO(test_apply_op("a/b", operation::Div(), Div10(N())));
+ TEST_DO(test_apply_op("a^b", operation::Pow(), Div10(N())));
+ TEST_DO(test_apply_op("pow(a,b)", operation::Pow(), Div10(N())));
+ TEST_DO(test_apply_op("a==b", operation::Equal(), Div10(N())));
+ TEST_DO(test_apply_op("a!=b", operation::NotEqual(), Div10(N())));
+ TEST_DO(test_apply_op("a~=b", operation::Approx(), Div10(N())));
+ TEST_DO(test_apply_op("a<b", operation::Less(), Div10(N())));
+ TEST_DO(test_apply_op("a<=b", operation::LessEqual(), Div10(N())));
+ TEST_DO(test_apply_op("a>b", operation::Greater(), Div10(N())));
+ TEST_DO(test_apply_op("a>=b", operation::GreaterEqual(), Div10(N())));
+ TEST_DO(test_apply_op("a&&b", operation::And(), Mask2Seq(SkipNth(3))));
+ TEST_DO(test_apply_op("a||b", operation::Or(), Mask2Seq(SkipNth(3))));
+ TEST_DO(test_apply_op("atan2(a,b)", operation::Atan2(), Div10(N())));
+ TEST_DO(test_apply_op("ldexp(a,b)", operation::Ldexp(), Div10(N())));
+ TEST_DO(test_apply_op("fmod(a,b)", operation::Fmod(), Div10(N())));
+ TEST_DO(test_apply_op("min(a,b)", operation::Min(), Div10(N())));
+ TEST_DO(test_apply_op("max(a,b)", operation::Max(), Div10(N())));
+ }
+
+ //-------------------------------------------------------------------------
+
void run_tests() {
TEST_DO(test_tensor_create_type());
+ TEST_DO(test_tensor_equality());
TEST_DO(test_tensor_inequality());
TEST_DO(test_verbatim_tensors());
+ TEST_DO(test_tensor_reduce());
TEST_DO(test_tensor_map());
+ TEST_DO(test_tensor_apply());
}
};
@@ -438,6 +785,9 @@ TensorConformance::run_tests(const TensorEngine &engine, bool test_mixed_cases)
{
TestContext ctx(engine, test_mixed_cases);
ctx.run_tests();
+ if (ctx.skip_count > 0) {
+ fprintf(stderr, "WARNING: skipped %zu mixed test cases\n", ctx.skip_count);
+ }
}
} // namespace vespalib::eval::test
diff --git a/vespalib/src/vespa/vespalib/eval/value.cpp b/vespalib/src/vespa/vespalib/eval/value.cpp
index 859c91a59f5..ff72ac4c85c 100644
--- a/vespalib/src/vespa/vespalib/eval/value.cpp
+++ b/vespalib/src/vespa/vespalib/eval/value.cpp
@@ -23,23 +23,23 @@ Value::apply(const BinaryOperation &, const Value &, Stash &stash) const
bool
TensorValue::equal(const Value &rhs) const
{
- return (rhs.is_tensor() && _tensor.engine().equal(_tensor, *rhs.as_tensor()));
+ return (rhs.is_tensor() && _value->engine().equal(*_value, *rhs.as_tensor()));
}
const Value &
TensorValue::apply(const UnaryOperation &op, Stash &stash) const
{
- return _tensor.engine().map(op, _tensor, stash);
+ return _value->engine().map(op, *_value, stash);
}
const Value &
TensorValue::apply(const BinaryOperation &op, const Value &rhs, Stash &stash) const
{
const Tensor *other = rhs.as_tensor();
- if ((other == nullptr) || (&other->engine() != &_tensor.engine())) {
+ if ((other == nullptr) || (&other->engine() != &_value->engine())) {
return stash.create<ErrorValue>();
}
- return _tensor.engine().apply(op, _tensor, *other, stash);
+ return _value->engine().apply(op, *_value, *other, stash);
}
} // namespace vespalib::eval
diff --git a/vespalib/src/vespa/vespalib/eval/value.h b/vespalib/src/vespa/vespalib/eval/value.h
index 659e9ac6ec2..22e90b9327f 100644
--- a/vespalib/src/vespa/vespalib/eval/value.h
+++ b/vespalib/src/vespa/vespalib/eval/value.h
@@ -59,13 +59,11 @@ public:
class TensorValue : public Value
{
private:
- const Tensor &_tensor;
- std::unique_ptr<Tensor> _stored;
+ std::unique_ptr<Tensor> _value;
public:
- TensorValue(const Tensor &value) : _tensor(value), _stored() {}
- TensorValue(std::unique_ptr<Tensor> value) : _tensor(*value), _stored(std::move(value)) {}
+ TensorValue(std::unique_ptr<Tensor> value) : _value(std::move(value)) {}
bool is_tensor() const override { return true; }
- const Tensor *as_tensor() const override { return &_tensor; }
+ const Tensor *as_tensor() const override { return _value.get(); }
bool equal(const Value &rhs) const override;
const Value &apply(const UnaryOperation &op, Stash &stash) const override;
const Value &apply(const BinaryOperation &op, const Value &rhs, Stash &stash) const override;
diff --git a/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h b/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h
deleted file mode 100644
index b2d8d1b07ce..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * A utility class to store decoded tensor address based on data stored
- * in tensors.
- */
-template <class AddressT> class DecodedTensorAddressStore;
-
-/**
- * A utility class to store decoded tensor address. TensorAddress
- * doesn't need any decoding, just pass through the argument
- * (e.g. tensor address in tensor hash table).
- */
-template <> class DecodedTensorAddressStore<TensorAddress>
-{
-public:
- void set(const TensorAddress &) { }
- static const TensorAddress &get(const TensorAddress &rhs) { return rhs; }
-};
-
-/**
- * A utility class to store decoded tensor address.
- * CompactTensorAddress needs decoding.
- */
-template <> class DecodedTensorAddressStore<CompactTensorAddress>
-{
-private:
- CompactTensorAddress _address;
-public:
- void set(const CompactTensorAddressRef rhs)
- { _address.deserializeFromSparseAddressRef(rhs); }
- const CompactTensorAddress &get(const CompactTensorAddressRef &)
- { return _address; }
-};
-
-/**
- * A utility class to store decoded tensor address. Just pass through
- * the argument (e.g. tensor address ref in tensor hash table).
- * CompactTensorAddressRef is encoded, decoding is performed on the
- * fly while iterating.
- */
-template <> class DecodedTensorAddressStore<CompactTensorAddressRef>
-{
-public:
- void set(const CompactTensorAddressRef &) { }
- static CompactTensorAddressRef get(const CompactTensorAddressRef rhs)
- { return rhs; }
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp
index c34cfb78bbb..a2bc118c00b 100644
--- a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp
@@ -47,6 +47,14 @@ DefaultTensorEngine::to_string(const Tensor &tensor) const
return my_tensor.toString();
}
+eval::TensorSpec
+DefaultTensorEngine::to_spec(const Tensor &tensor) const
+{
+ assert(&tensor.engine() == this);
+ const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
+ return my_tensor.toSpec();
+}
+
struct IsAddOperation : public eval::DefaultOperationVisitor {
bool result = false;
void visitDefault(const eval::Operation &) override {}
@@ -107,11 +115,11 @@ DefaultTensorEngine::reduce(const Tensor &tensor, const BinaryOperation &op, con
const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
IsAddOperation check;
op.accept(check);
+ tensor::Tensor::UP result;
if (check.result) {
if (dimensions.empty()) { // sum
return stash.create<eval::DoubleValue>(my_tensor.sum());
} else { // dimension sum
- tensor::Tensor::UP result;
for (const auto &dimension: dimensions) {
if (result) {
result = result->sum(dimension);
@@ -119,8 +127,18 @@ DefaultTensorEngine::reduce(const Tensor &tensor, const BinaryOperation &op, con
result = my_tensor.sum(dimension);
}
}
+ }
+ } else {
+ result = my_tensor.reduce(op, dimensions);
+ }
+ if (result) {
+ eval::ValueType result_type(result->getType());
+ if (result_type.is_tensor()) {
return stash.create<TensorValue>(std::move(result));
}
+ if (result_type.is_double()) {
+ return stash.create<eval::DoubleValue>(result->sum());
+ }
}
return stash.create<ErrorValue>();
}
@@ -147,8 +165,13 @@ struct TensorOperationOverride : eval::DefaultOperationVisitor {
TensorOperationOverride(const tensor::Tensor &lhs_in,
const tensor::Tensor &rhs_in)
: lhs(lhs_in), rhs(rhs_in), result() {}
- virtual void visitDefault(const eval::Operation &) override {
+ virtual void visitDefault(const eval::Operation &op) override {
// empty result indicates error
+ const eval::BinaryOperation *binaryOp =
+ dynamic_cast<const eval::BinaryOperation *>(&op);
+ if (binaryOp) {
+ result = lhs.apply(*binaryOp, rhs);
+ }
}
virtual void visit(const eval::operation::Add &) override {
result = lhs.add(rhs);
diff --git a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h
index aba3665d98a..7e1bd903626 100644
--- a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h
+++ b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h
@@ -22,6 +22,7 @@ public:
ValueType type_of(const Tensor &tensor) const override;
bool equal(const Tensor &a, const Tensor &b) const override;
vespalib::string to_string(const Tensor &tensor) const override;
+ TensorSpec to_spec(const Tensor &tensor) const override;
std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt
index e80083056ca..c965eb6609c 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt
@@ -1,9 +1,9 @@
# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(vespalib_vespalib_tensor_dense OBJECT
SOURCES
+ direct_dense_tensor_builder.cpp
dense_tensor.cpp
+ dense_tensor_address_combiner.cpp
dense_tensor_builder.cpp
- dense_tensor_dimension_sum.cpp
- dense_tensor_product.cpp
DEPENDS
)
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
index 5a160329e79..18506870354 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
@@ -2,15 +2,17 @@
#include <vespa/fastos/fastos.h>
#include "dense_tensor.h"
-#include "dense_tensor_dimension_sum.h"
-#include "dense_tensor_product.h"
+#include "dense_tensor_apply.hpp"
+#include "dense_tensor_reduce.hpp"
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/tensor/tensor_address_builder.h>
#include <vespa/vespalib/tensor/tensor_visitor.h>
+#include <vespa/vespalib/eval/operation.h>
#include <sstream>
+using vespalib::eval::TensorSpec;
namespace vespalib {
namespace tensor {
@@ -121,6 +123,15 @@ joinDenseTensorsNegated(const DenseTensor &lhs,
std::move(cells));
}
+std::vector<vespalib::string>
+getDimensions(const DenseTensor &tensor)
+{
+ std::vector<vespalib::string> dimensions;
+ for (const auto &dimMeta : tensor.dimensionsMeta()) {
+ dimensions.emplace_back(dimMeta.dimension());
+ }
+ return dimensions;
+}
}
@@ -237,7 +248,8 @@ DenseTensor::multiply(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return DenseTensorProduct(*this, *rhs).result();
+ return dense::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue * rhsValue; });
}
Tensor::UP
@@ -296,7 +308,9 @@ DenseTensor::apply(const CellFunction &func) const
Tensor::UP
DenseTensor::sum(const vespalib::string &dimension) const
{
- return DenseTensorDimensionSum(*this, dimension).result();
+ return dense::reduce(*this, { dimension },
+ [](double lhsValue, double rhsValue)
+ { return lhsValue + rhsValue; });
}
bool
@@ -323,6 +337,33 @@ DenseTensor::clone() const
return std::make_unique<DenseTensor>(_dimensionsMeta, _cells);
}
+namespace {
+
+void
+buildAddress(const DenseTensor::CellsIterator &itr, TensorSpec::Address &address)
+{
+ auto addressItr = itr.address().begin();
+ for (const auto &dim : itr.dimensions()) {
+ address.emplace(std::make_pair(dim.dimension(), TensorSpec::Label(*addressItr++)));
+ }
+ assert(addressItr == itr.address().end());
+}
+
+}
+
+TensorSpec
+DenseTensor::toSpec() const
+{
+ TensorSpec result(getType().to_spec());
+ TensorSpec::Address address;
+ for (CellsIterator itr(_dimensionsMeta, _cells); itr.valid(); itr.next()) {
+ buildAddress(itr, address);
+ result.add(address, itr.cell());
+ address.clear();
+ }
+ return result;
+}
+
void
DenseTensor::print(std::ostream &out) const
{
@@ -376,5 +417,27 @@ operator<<(std::ostream &out, const DenseTensor::DimensionMeta &value)
return out;
}
+Tensor::UP
+DenseTensor::apply(const eval::BinaryOperation &op, const Tensor &arg) const
+{
+ const DenseTensor *rhs = dynamic_cast<const DenseTensor *>(&arg);
+ if (!rhs) {
+ return Tensor::UP();
+ }
+ return dense::apply(*this, *rhs,
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
+Tensor::UP
+DenseTensor::reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions) const
+{
+ return dense::reduce(*this,
+ (dimensions.empty() ? getDimensions(*this) : dimensions),
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
} // namespace vespalib::tensor
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
index 73d9c26c408..b7d911363ba 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
@@ -69,6 +69,7 @@ public:
void next();
double cell() const { return _cells[_cellIdx]; }
const std::vector<size_t> &address() const { return _address; }
+ const DimensionsMeta &dimensions() const { return _dimensionsMeta; }
};
@@ -99,10 +100,16 @@ public:
virtual Tensor::UP match(const Tensor &arg) const override;
virtual Tensor::UP apply(const CellFunction &func) const override;
virtual Tensor::UP sum(const vespalib::string &dimension) const override;
+ virtual Tensor::UP apply(const eval::BinaryOperation &op,
+ const Tensor &arg) const override;
+ virtual Tensor::UP reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions)
+ const override;
virtual bool equals(const Tensor &arg) const override;
virtual void print(std::ostream &out) const override;
virtual vespalib::string toString() const override;
virtual Tensor::UP clone() const override;
+ virtual eval::TensorSpec toSpec() const override;
virtual void accept(TensorVisitor &visitor) const override;
};
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp
new file mode 100644
index 00000000000..2ad4228e0ec
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp
@@ -0,0 +1,124 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "dense_tensor_address_combiner.h"
+#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+namespace vespalib {
+namespace tensor {
+
+using Address = DenseTensorAddressCombiner::Address;
+using DimensionsMeta = DenseTensorAddressCombiner::DimensionsMeta;
+
+namespace {
+
+class AddressReader
+{
+private:
+ const Address &_address;
+ size_t _idx;
+
+public:
+ AddressReader(const Address &address)
+ : _address(address),
+ _idx(0)
+ {}
+ size_t nextLabel() {
+ return _address[_idx++];
+ }
+ bool valid() {
+ return _idx < _address.size();
+ }
+};
+
+}
+
+DenseTensorAddressCombiner::DenseTensorAddressCombiner(const DimensionsMeta &lhs,
+ const DimensionsMeta &rhs)
+ : _ops(),
+ _combinedAddress()
+{
+ auto rhsItr = rhs.cbegin();
+ auto rhsItrEnd = rhs.cend();
+ for (const auto &lhsDim : lhs) {
+ while ((rhsItr != rhsItrEnd) && (rhsItr->dimension() < lhsDim.dimension())) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+ if ((rhsItr != rhsItrEnd) && (rhsItr->dimension() == lhsDim.dimension())) {
+ _ops.push_back(AddressOp::BOTH);
+ ++rhsItr;
+ } else {
+ _ops.push_back(AddressOp::LHS);
+ }
+ }
+ while (rhsItr != rhsItrEnd) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+}
+
+bool
+DenseTensorAddressCombiner::combine(const CellsIterator &lhsItr,
+ const CellsIterator &rhsItr)
+{
+ _combinedAddress.clear();
+ AddressReader lhsReader(lhsItr.address());
+ AddressReader rhsReader(rhsItr.address());
+ for (const auto &op : _ops) {
+ switch (op) {
+ case AddressOp::LHS:
+ _combinedAddress.emplace_back(lhsReader.nextLabel());
+ break;
+ case AddressOp::RHS:
+ _combinedAddress.emplace_back(rhsReader.nextLabel());
+ break;
+ case AddressOp::BOTH:
+ size_t lhsLabel = lhsReader.nextLabel();
+ size_t rhsLabel = rhsReader.nextLabel();
+ if (lhsLabel != rhsLabel) {
+ return false;
+ }
+ _combinedAddress.emplace_back(lhsLabel);
+ }
+ }
+ assert(!lhsReader.valid());
+ assert(!rhsReader.valid());
+ return true;
+}
+
+namespace {
+
+void
+validateDimensionsMeta(const DimensionsMeta &dimensionsMeta)
+{
+ for (size_t i = 1; i < dimensionsMeta.size(); ++i) {
+ const auto &prevDimMeta = dimensionsMeta[i-1];
+ const auto &currDimMeta = dimensionsMeta[i];
+ if ((prevDimMeta.dimension() == currDimMeta.dimension()) &&
+ (prevDimMeta.size() != currDimMeta.size()))
+ {
+ throw IllegalArgumentException(make_string(
+ "Shared dimension '%s' has mis-matching label ranges: "
+ "[0, %zu> vs [0, %zu>. This is not supported.",
+ prevDimMeta.dimension().c_str(), prevDimMeta.size(), currDimMeta.size()));
+ }
+ }
+}
+
+}
+
+DimensionsMeta
+DenseTensorAddressCombiner::combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs)
+{
+ DimensionsMeta result;
+ std::set_union(lhs.cbegin(), lhs.cend(),
+ rhs.cbegin(), rhs.cend(),
+ std::back_inserter(result));
+ validateDimensionsMeta(result);
+ return result;
+}
+
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h
new file mode 100644
index 00000000000..2c7f9e61223
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h
@@ -0,0 +1,46 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/tensor/dense/dense_tensor.h>
+
+namespace vespalib {
+namespace tensor {
+
+/**
+ * Combines two dense tensor addresses to a new tensor address.
+ * The resulting dimensions is the union of the input dimensions and
+ * common dimensions must have matching labels.
+ */
+class DenseTensorAddressCombiner
+{
+public:
+ using Address = std::vector<size_t>;
+ using DimensionsMeta = DenseTensor::DimensionsMeta;
+
+private:
+ enum class AddressOp {
+ LHS,
+ RHS,
+ BOTH
+ };
+
+ using CellsIterator = DenseTensor::CellsIterator;
+
+ std::vector<AddressOp> _ops;
+ Address _combinedAddress;
+
+public:
+ DenseTensorAddressCombiner(const DimensionsMeta &lhs,
+ const DimensionsMeta &rhs);
+
+ bool combine(const CellsIterator &lhsItr,
+ const CellsIterator &rhsItr);
+ const Address &address() const { return _combinedAddress; }
+
+ static DimensionsMeta combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs);
+
+};
+
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h
new file mode 100644
index 00000000000..307e1db43d3
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h
@@ -0,0 +1,25 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace vespalib {
+namespace tensor {
+
+class Tensor;
+class DenseTensor;
+
+namespace dense {
+
+/**
+ * Creates a new tensor using all combinations of input tensor cells with matching
+ * labels for common dimensions, using func to calculate new cell value
+ * based on the cell values in the input tensors.
+ */
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const DenseTensor &lhs, const DenseTensor &rhs, Function &&func);
+
+} // namespace vespalib::tensor::dense
+} // namespace vespalib::tensor
+} // namespace vespalib
+
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp
new file mode 100644
index 00000000000..3168089b941
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp
@@ -0,0 +1,32 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "dense_tensor_apply.h"
+#include "dense_tensor_address_combiner.h"
+#include "direct_dense_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace dense {
+
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const DenseTensor &lhs, const DenseTensor &rhs, Function &&func)
+{
+ DenseTensorAddressCombiner combiner(lhs.dimensionsMeta(), rhs.dimensionsMeta());
+ DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.dimensionsMeta(), rhs.dimensionsMeta()));
+ for (DenseTensor::CellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
+ for (DenseTensor::CellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) {
+ bool combineSuccess = combiner.combine(lhsItr, rhsItr);
+ if (combineSuccess) {
+ builder.insertCell(combiner.address(), func(lhsItr.cell(), rhsItr.cell()));
+ }
+ }
+ }
+ return builder.build();
+}
+
+} // namespace vespalib::tensor::dense
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp
deleted file mode 100644
index f94c9137798..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "dense_tensor_dimension_sum.h"
-
-namespace vespalib {
-namespace tensor {
-
-using DimensionsMeta = DenseTensor::DimensionsMeta;
-
-namespace {
-
-DimensionsMeta
-removeDimension(const DimensionsMeta &dimensionsMeta,
- const string &dimension)
-{
- DimensionsMeta result = dimensionsMeta;
- auto itr = std::lower_bound(result.begin(), result.end(), dimension,
- [](const auto &dimMeta, const auto &dimension_in)
- { return dimMeta.dimension() < dimension_in; });
- if ((itr != result.end()) && (itr->dimension() == dimension)) {
- result.erase(itr);
- }
- return result;
-}
-
-size_t
-calcCellsSize(const DimensionsMeta &dimensionsMeta)
-{
- size_t cellsSize = 1;
- for (const auto &dimMeta : dimensionsMeta) {
- cellsSize *= dimMeta.size();
- }
- return cellsSize;
-}
-
-struct DimensionSummer
-{
- size_t _innerDimSize;
- size_t _sumDimSize;
- size_t _outerDimSize;
- using Cells = DenseTensor::Cells;
-
- DimensionSummer(const DimensionsMeta &dimensionsMeta,
- const string &dimension)
- : _innerDimSize(1),
- _sumDimSize(1),
- _outerDimSize(1)
- {
- auto itr = std::lower_bound(dimensionsMeta.cbegin(), dimensionsMeta.cend(), dimension,
- [](const auto &dimMeta, const auto &dimension_in)
- { return dimMeta.dimension() < dimension_in; });
- if ((itr != dimensionsMeta.end()) && (itr->dimension() == dimension)) {
- for (auto outerItr = dimensionsMeta.cbegin(); outerItr != itr; ++outerItr) {
- _outerDimSize *= outerItr->size();
- }
- _sumDimSize = itr->size();
- for (++itr; itr != dimensionsMeta.cend(); ++itr) {
- _innerDimSize *= itr->size();
- }
- } else {
- _outerDimSize = calcCellsSize(dimensionsMeta);
- }
- }
-
- void
- sumCells(Cells &cells, const Cells &cells_in) const
- {
- auto itr_in = cells_in.cbegin();
- auto itr = cells.begin();
- for (size_t outerDim = 0; outerDim < _outerDimSize;
- ++outerDim) {
- auto saved_itr = itr;
- for (size_t sumDim = 0; sumDim < _sumDimSize; ++sumDim) {
- itr = saved_itr;
- for (size_t innerDim = 0; innerDim < _innerDimSize;
- ++innerDim) {
- *itr += *itr_in;
- ++itr;
- ++itr_in;
- }
- }
- }
- assert(itr == cells.end());
- assert(itr_in == cells_in.cend());
- }
-};
-
-
-}
-
-
-DenseTensorDimensionSum::DenseTensorDimensionSum(const TensorImplType &tensor,
- const string &dimension)
- : _dimensionsMeta(removeDimension(tensor.dimensionsMeta(),
- dimension)),
- _cells(calcCellsSize(_dimensionsMeta))
-{
- DimensionSummer dimensionSummer(tensor.dimensionsMeta(),
- dimension);
- dimensionSummer.sumCells(_cells, tensor.cells());
-}
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h
deleted file mode 100644
index c61e07d5c3a..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "dense_tensor.h"
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns a tensor with the given dimension removed and the cell values in that dimension summed.
- */
-class DenseTensorDimensionSum
-{
-public:
- using TensorImplType = DenseTensor;
-private:
- using DimensionMeta = DenseTensor::DimensionMeta;
- using DimensionsMeta = DenseTensor::DimensionsMeta;
- using Cells = DenseTensor::Cells;
-
- DimensionsMeta _dimensionsMeta;
- Cells _cells;
-
-public:
- DenseTensorDimensionSum(const TensorImplType &tensor,
- const vespalib::string &dimension);
-
- Tensor::UP result() {
- return std::make_unique<DenseTensor>(std::move(_dimensionsMeta),
- std::move(_cells));
- }
-};
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp
deleted file mode 100644
index fff5f21d3d1..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "dense_tensor_product.h"
-#include <vespa/vespalib/util/exceptions.h>
-
-namespace vespalib {
-namespace tensor {
-
-using DimensionsMeta = DenseTensor::DimensionsMeta;
-using CellsIterator = DenseTensor::CellsIterator;
-using Address = std::vector<size_t>;
-
-using vespalib::IllegalArgumentException;
-using vespalib::make_string;
-
-namespace {
-
-enum class AddressCombineOp
-{
- LHS,
- RHS,
- BOTH
-};
-
-using AddressCombineOps = std::vector<AddressCombineOp>;
-
-class AddressReader
-{
-private:
- const Address &_address;
- size_t _idx;
-
-public:
- AddressReader(const Address &address)
- : _address(address),
- _idx(0)
- {}
- size_t nextLabel() {
- return _address[_idx++];
- }
- bool valid() {
- return _idx < _address.size();
- }
-};
-
-class CellsInserter
-{
-private:
- const DimensionsMeta &_dimensionsMeta;
- DenseTensor::Cells &_cells;
-
- size_t calculateCellAddress(const Address &address) {
- assert(address.size() == _dimensionsMeta.size());
- size_t result = 0;
- for (size_t i = 0; i < address.size(); ++i) {
- result *= _dimensionsMeta[i].size();
- result += address[i];
- }
- return result;
- }
-
-public:
- CellsInserter(const DimensionsMeta &dimensionsMeta,
- DenseTensor::Cells &cells)
- : _dimensionsMeta(dimensionsMeta),
- _cells(cells)
- {}
- void insertCell(const Address &address, double cellValue) {
- size_t cellAddress = calculateCellAddress(address);
- assert(cellAddress < _cells.size());
- _cells[cellAddress] = cellValue;
- }
-};
-
-void
-validateDimensionsMeta(const DimensionsMeta &dimensionsMeta)
-{
- for (size_t i = 1; i < dimensionsMeta.size(); ++i) {
- const auto &prevDimMeta = dimensionsMeta[i-1];
- const auto &currDimMeta = dimensionsMeta[i];
- if ((prevDimMeta.dimension() == currDimMeta.dimension()) &&
- (prevDimMeta.size() != currDimMeta.size())) {
- throw IllegalArgumentException(make_string(
- "Shared dimension '%s' in dense tensor product has mis-matching label ranges: "
- "[0, %zu> vs [0, %zu>. This is not supported.",
- prevDimMeta.dimension().c_str(), prevDimMeta.size(), currDimMeta.size()));
- }
- }
-}
-
-DimensionsMeta
-combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs)
-{
- DimensionsMeta result;
- std::set_union(lhs.cbegin(), lhs.cend(),
- rhs.cbegin(), rhs.cend(),
- std::back_inserter(result));
- validateDimensionsMeta(result);
- return result;
-}
-
-size_t
-calculateCellsSize(const DimensionsMeta &dimensionsMeta)
-{
- size_t cellsSize = 1;
- for (const auto &dimMeta : dimensionsMeta) {
- cellsSize *= dimMeta.size();
- }
- return cellsSize;
-}
-
-AddressCombineOps
-buildCombineOps(const DimensionsMeta &lhs,
- const DimensionsMeta &rhs)
-{
- AddressCombineOps ops;
- auto rhsItr = rhs.cbegin();
- auto rhsItrEnd = rhs.cend();
- for (const auto &lhsDim : lhs) {
- while ((rhsItr != rhsItrEnd) && (rhsItr->dimension() < lhsDim.dimension())) {
- ops.push_back(AddressCombineOp::RHS);
- ++rhsItr;
- }
- if ((rhsItr != rhsItrEnd) && (rhsItr->dimension() == lhsDim.dimension())) {
- ops.push_back(AddressCombineOp::BOTH);
- ++rhsItr;
- } else {
- ops.push_back(AddressCombineOp::LHS);
- }
- }
- while (rhsItr != rhsItrEnd) {
- ops.push_back(AddressCombineOp::RHS);
- ++rhsItr;
- }
- return ops;
-}
-
-bool
-combineAddress(Address &combinedAddress,
- const CellsIterator &lhsItr,
- const CellsIterator &rhsItr,
- const AddressCombineOps &ops)
-{
- combinedAddress.clear();
- AddressReader lhsReader(lhsItr.address());
- AddressReader rhsReader(rhsItr.address());
- for (const auto &op : ops) {
- switch (op) {
- case AddressCombineOp::LHS:
- combinedAddress.emplace_back(lhsReader.nextLabel());
- break;
- case AddressCombineOp::RHS:
- combinedAddress.emplace_back(rhsReader.nextLabel());
- break;
- case AddressCombineOp::BOTH:
- size_t lhsLabel = lhsReader.nextLabel();
- size_t rhsLabel = rhsReader.nextLabel();
- if (lhsLabel != rhsLabel) {
- return false;
- }
- combinedAddress.emplace_back(lhsLabel);
- }
- }
- assert(!lhsReader.valid());
- assert(!rhsReader.valid());
- return true;
-}
-
-}
-
-void
-DenseTensorProduct::bruteForceProduct(const DenseTensor &lhs,
- const DenseTensor &rhs)
-{
- AddressCombineOps ops = buildCombineOps(lhs.dimensionsMeta(), rhs.dimensionsMeta());
- Address combinedAddress;
- CellsInserter cellsInserter(_dimensionsMeta, _cells);
- for (CellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
- for (CellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) {
- bool combineSuccess = combineAddress(combinedAddress, lhsItr, rhsItr, ops);
- if (combineSuccess) {
- cellsInserter.insertCell(combinedAddress, lhsItr.cell() * rhsItr.cell());
- }
- }
- }
-}
-
-DenseTensorProduct::DenseTensorProduct(const DenseTensor &lhs,
- const DenseTensor &rhs)
- : _dimensionsMeta(combineDimensions(lhs.dimensionsMeta(), rhs.dimensionsMeta())),
- _cells(calculateCellsSize(_dimensionsMeta))
-{
- bruteForceProduct(lhs, rhs);
-}
-
-Tensor::UP
-DenseTensorProduct::result()
-{
- return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), std::move(_cells));
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h
deleted file mode 100644
index 5615067119b..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "dense_tensor.h"
-#include <vespa/vespalib/tensor/tensor_operation.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns the tensor product of the two given dense tensors.
- * This is all combinations of all cells in the first tensor with all cells of
- * the second tensor.
- *
- * Shared dimensions must have the same label range from [0, dimSize>.
- */
-class DenseTensorProduct
-{
-private:
- DenseTensor::DimensionsMeta _dimensionsMeta;
- DenseTensor::Cells _cells;
-
- void bruteForceProduct(const DenseTensor &lhs, const DenseTensor &rhs);
-
-public:
- DenseTensorProduct(const DenseTensor &lhs, const DenseTensor &rhs);
- Tensor::UP result();
-};
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h
new file mode 100644
index 00000000000..ce3bf308fd3
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h
@@ -0,0 +1,21 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "dense_tensor.h"
+
+namespace vespalib {
+namespace tensor {
+namespace dense {
+
+/**
+ * Returns a tensor with the given dimension(s) removed and the cell values in that dimension(s)
+ * combined using the given func.
+ */
+template<typename Function>
+std::unique_ptr<Tensor>
+reduce(const DenseTensor &tensor, const std::vector<vespalib::string> &dimensions, Function &&func);
+
+} // namespace dense
+} // namespace tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp
new file mode 100644
index 00000000000..0e890fa9bc4
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp
@@ -0,0 +1,133 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "dense_tensor_reduce.h"
+
+namespace vespalib {
+namespace tensor {
+namespace dense {
+
+using Cells = DenseTensor::Cells;
+using DimensionsMeta = DenseTensor::DimensionsMeta;
+
+namespace {
+
+DimensionsMeta
+removeDimension(const DimensionsMeta &dimensionsMeta,
+ const string &dimensionToRemove)
+{
+ DimensionsMeta result = dimensionsMeta;
+ auto itr = std::lower_bound(result.begin(), result.end(), dimensionToRemove,
+ [](const auto &dimMeta, const auto &dimension_in) {
+ return dimMeta.dimension() < dimension_in;
+ });
+ if ((itr != result.end()) && (itr->dimension() == dimensionToRemove)) {
+ result.erase(itr);
+ }
+ return result;
+}
+
+size_t
+calcCellsSize(const DimensionsMeta &dimensionsMeta)
+{
+ size_t cellsSize = 1;
+ for (const auto &dimMeta : dimensionsMeta) {
+ cellsSize *= dimMeta.size();
+ }
+ return cellsSize;
+}
+
+
+class DimensionReducer
+{
+private:
+ DimensionsMeta _dimensionsResult;
+ Cells _cellsResult;
+ size_t _innerDimSize;
+ size_t _sumDimSize;
+ size_t _outerDimSize;
+
+ void setup(const DimensionsMeta &dimensions,
+ const vespalib::string &dimensionToRemove) {
+ auto itr = std::lower_bound(dimensions.cbegin(), dimensions.cend(), dimensionToRemove,
+ [](const auto &dimMeta, const auto &dimension) {
+ return dimMeta.dimension() < dimension;
+ });
+ if ((itr != dimensions.end()) && (itr->dimension() == dimensionToRemove)) {
+ for (auto outerItr = dimensions.cbegin(); outerItr != itr; ++outerItr) {
+ _outerDimSize *= outerItr->size();
+ }
+ _sumDimSize = itr->size();
+ for (++itr; itr != dimensions.cend(); ++itr) {
+ _innerDimSize *= itr->size();
+ }
+ } else {
+ _outerDimSize = calcCellsSize(dimensions);
+ }
+ }
+
+public:
+ DimensionReducer(const DimensionsMeta &dimensions,
+ const string &dimensionToRemove)
+ : _dimensionsResult(removeDimension(dimensions, dimensionToRemove)),
+ _cellsResult(calcCellsSize(_dimensionsResult)),
+ _innerDimSize(1),
+ _sumDimSize(1),
+ _outerDimSize(1)
+ {
+ setup(dimensions, dimensionToRemove);
+ }
+
+ template <typename Function>
+ DenseTensor::UP
+ reduceCells(const Cells &cellsIn, Function &&func) {
+ auto itr_in = cellsIn.cbegin();
+ auto itr_out = _cellsResult.begin();
+ for (size_t outerDim = 0; outerDim < _outerDimSize; ++outerDim) {
+ auto saved_itr = itr_out;
+ for (size_t sumDim = 0; sumDim < _sumDimSize; ++sumDim) {
+ itr_out = saved_itr;
+ for (size_t innerDim = 0; innerDim < _innerDimSize; ++innerDim) {
+ *itr_out = func(*itr_out, *itr_in);
+ ++itr_out;
+ ++itr_in;
+ }
+ }
+ }
+ assert(itr_out == _cellsResult.end());
+ assert(itr_in == cellsIn.cend());
+ return std::make_unique<DenseTensor>(std::move(_dimensionsResult), std::move(_cellsResult));
+ }
+};
+
+template <typename Function>
+DenseTensor::UP
+reduce(const DenseTensor &tensor, const vespalib::string &dimensionToRemove, Function &&func)
+{
+ DimensionReducer reducer(tensor.dimensionsMeta(), dimensionToRemove);
+ return reducer.reduceCells(tensor.cells(), func);
+}
+
+}
+
+template <typename Function>
+std::unique_ptr<Tensor>
+reduce(const DenseTensor &tensor, const std::vector<vespalib::string> &dimensions, Function &&func)
+{
+ if (dimensions.size() == 1) {
+ return reduce(tensor, dimensions[0], func);
+ } else if (dimensions.size() > 0) {
+ DenseTensor::UP result = reduce(tensor, dimensions[0], func);
+ for (size_t i = 1; i < dimensions.size(); ++i) {
+ DenseTensor::UP tmpResult = reduce(*result, dimensions[i], func);
+ result = std::move(tmpResult);
+ }
+ return result;
+ } else {
+ return std::unique_ptr<Tensor>();
+ }
+}
+
+} // namespace dense
+} // namespace tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp
new file mode 100644
index 00000000000..dd1682fb451
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp
@@ -0,0 +1,59 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "direct_dense_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+
+using Address = DirectDenseTensorBuilder::Address;
+using DimensionsMeta = DirectDenseTensorBuilder::DimensionsMeta;
+
+namespace {
+
+size_t
+calculateCellsSize(const DimensionsMeta &dimensionsMeta)
+{
+ size_t cellsSize = 1;
+ for (const auto &dimMeta : dimensionsMeta) {
+ cellsSize *= dimMeta.size();
+ }
+ return cellsSize;
+}
+
+size_t
+calculateCellAddress(const Address &address, const DimensionsMeta &dimensionsMeta)
+{
+ assert(address.size() == dimensionsMeta.size());
+ size_t result = 0;
+ for (size_t i = 0; i < address.size(); ++i) {
+ result *= dimensionsMeta[i].size();
+ result += address[i];
+ }
+ return result;
+}
+
+}
+
+DirectDenseTensorBuilder::DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta)
+ : _dimensionsMeta(dimensionsMeta),
+ _cells(calculateCellsSize(_dimensionsMeta))
+{
+}
+
+void
+DirectDenseTensorBuilder::insertCell(const Address &address, double cellValue)
+{
+ size_t cellAddress = calculateCellAddress(address, _dimensionsMeta);
+ assert(cellAddress < _cells.size());
+ _cells[cellAddress] = cellValue;
+}
+
+Tensor::UP
+DirectDenseTensorBuilder::build()
+{
+ return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), std::move(_cells));
+}
+
+} // namespace tensor
+} // namesapce vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h
new file mode 100644
index 00000000000..74234f1cabe
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h
@@ -0,0 +1,31 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "dense_tensor.h"
+
+namespace vespalib {
+namespace tensor {
+
+/**
+ * Class for building a dense tensor by inserting cell values directly into underlying array of cells.
+ */
+class DirectDenseTensorBuilder
+{
+public:
+ using DimensionsMeta = DenseTensor::DimensionsMeta;
+ using Cells = DenseTensor::Cells;
+ using Address = std::vector<size_t>;
+
+private:
+ DimensionsMeta _dimensionsMeta;
+ Cells _cells;
+
+public:
+ DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta);
+ void insertCell(const Address &address, double cellValue);
+ Tensor::UP build();
+};
+
+} // namespace tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h b/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h
deleted file mode 100644
index f23c4b6e20f..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-using DimensionsVector = std::vector<vespalib::stringref>;
-
-/**
- * An iterator for a dimensions vector used to simplify 3-way merge
- * between two tensor addresses and a dimension vector.
- */
-class DimensionsVectorIterator
-{
- using InnerIterator = DimensionsVector::const_iterator;
- InnerIterator _itr;
- InnerIterator _itrEnd;
-public:
- DimensionsVectorIterator(const DimensionsVector &dimensions)
- : _itr(dimensions.cbegin()),
- _itrEnd(dimensions.cend())
- {
- }
- bool valid() const { return (_itr != _itrEnd); }
- vespalib::stringref dimension() const { return *_itr; }
- template <typename Iterator>
- bool beforeDimension(const Iterator &rhs) const {
- if (!valid()) {
- return false;
- }
- if (!rhs.valid()) {
- return true;
- }
- return (*_itr < rhs.dimension());
- }
- bool atDimension(vespalib::stringref rhsDimension) const
- {
- return (valid() && (*_itr == rhsDimension));
- }
- void next() { ++_itr; }
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h b/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h
deleted file mode 100644
index d691732b800..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-/*
- * Combine two tensor addresses, but fail if dimension label doesn't match
- * for common dimensions. Use 3-way merge between two tensors and a vector
- * of dimensions. To be used when we have few common dimensions.
- * The commonDimensions parameter is the intersection of the
- * dimensions in the two input tensors.
- */
-template <class AddressBuilder, class LhsAddress, class RhsAddress>
-bool
-joinTensorAddresses(AddressBuilder &combined,
- const DimensionsVector &commonDimensions,
- const LhsAddress &lhs,
- const RhsAddress &rhs)
-{
- TensorAddressElementIterator<LhsAddress> lhsItr(lhs);
- TensorAddressElementIterator<RhsAddress> rhsItr(rhs);
- DimensionsVectorIterator dimsItr(commonDimensions);
- combined.clear();
- while (lhsItr.valid()) {
- while (dimsItr.beforeDimension(lhsItr)) {
- rhsItr.addElements(combined, dimsItr);
- if (rhsItr.atDimension(dimsItr.dimension())) {
- // needed dimension missing from lhs
- return false;
- }
- dimsItr.next();
- }
- if (dimsItr.atDimension(lhsItr.dimension())) {
- rhsItr.addElements(combined, dimsItr);
- if (!rhsItr.atDimension(dimsItr.dimension())) {
- // needed dimension missing from rhs
- return false;
- }
- if (lhsItr.label() != rhsItr.label()) {
- // dimension exists in both rhs and lhs, but labels don't match
- return false;
- }
- // common dimension, labels match
- lhsItr.addElement(combined);
- lhsItr.next();
- rhsItr.next();
- dimsItr.next();
- continue;
- }
- rhsItr.addElements(combined, lhsItr);
- assert(lhsItr.beforeDimension(rhsItr));
- lhsItr.addElement(combined);
- lhsItr.next();
- }
- while (dimsItr.valid()) {
- rhsItr.addElements(combined, dimsItr);
- if (rhsItr.atDimension(dimsItr.dimension())) {
- // needed dimension missing from lhs
- return false;
- }
- dimsItr.next();
- }
- rhsItr.addElements(combined);
- // All matching
- return true;
-}
-
-/*
- * Combine two tensor addresses, but fail if dimension label doesn't match
- * for common dimensions. Use 3-way merge between two tensors and a vector
- * of dimensions. To be used when we have many common dimensions.
- * The commonDimensions parameter is the intersection of the
- * dimensions in the two input tensors.
- */
-template <class AddressBuilder, class LhsAddress, class RhsAddress>
-bool
-joinTensorAddresses(AddressBuilder &combined,
- const DimensionsSet &commonDimensions,
- const LhsAddress &lhs,
- const RhsAddress &rhs)
-{
- TensorAddressElementIterator<LhsAddress> lhsItr(lhs);
- TensorAddressElementIterator<RhsAddress> rhsItr(rhs);
- combined.clear();
- if (lhsItr.valid() && rhsItr.valid()) {
- for (;;) {
- if (lhsItr.beforeDimension(rhsItr)) {
- if (!lhsItr.addElements(combined, commonDimensions, rhsItr)) {
- return false;
- }
- if (!lhsItr.valid()) {
- break;
- }
- }
- if (lhsItr.dimension() == rhsItr.dimension()) {
- if (lhsItr.label() != rhsItr.label()) {
- return false;
- }
- lhsItr.addElement(combined);
- lhsItr.next();
- rhsItr.next();
- if (!lhsItr.valid() || !rhsItr.valid()) {
- break;
- }
- continue;
- }
- if (!rhsItr.addElements(combined, commonDimensions, lhsItr)) {
- return false;
- }
- if (!rhsItr.valid()) {
- break;
- }
- }
- }
- if (!lhsItr.addElements(combined, commonDimensions)) {
- return false;
- }
- if (!rhsItr.addElements(combined, commonDimensions)) {
- return false;
- }
- // All matching
- return true;
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt
index aa2cc7869e5..7d8725ad610 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt
@@ -2,12 +2,10 @@
vespa_add_library(vespalib_vespalib_tensor_sparse OBJECT
SOURCES
sparse_tensor.cpp
- sparse_tensor_dimension_sum.cpp
+ sparse_tensor_address_combiner.cpp
+ sparse_tensor_address_reducer.cpp
sparse_tensor_match.cpp
- sparse_tensor_product.cpp
- compact_tensor_address.cpp
- compact_tensor_address_builder.cpp
sparse_tensor_builder.cpp
- compact_tensor_unsorted_address_builder.cpp
+ sparse_tensor_unsorted_address_builder.cpp
DEPENDS
)
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp
deleted file mode 100644
index 97415e81e29..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "compact_tensor_address.h"
-#include "sparse_tensor_address_decoder.h"
-#include <algorithm>
-
-namespace vespalib {
-namespace tensor {
-
-namespace
-{
-
-void
-setupElements(CompactTensorAddress::Elements &elements,
- CompactTensorAddressRef ref)
-{
- const char *cur = static_cast<const char *>(ref.start());
- const char *end = cur + ref.size();
- while (cur != end) {
- const char *dim = cur;
- while (*cur) {
- ++cur;
- }
- ++cur;
- const char *label = cur;
- while (*cur) {
- ++cur;
- }
- ++cur;
- elements.emplace_back(vespalib::stringref(dim, label - 1 - dim),
- vespalib::stringref(label, cur - 1 - label));
- }
-}
-
-
-}
-
-
-
-CompactTensorAddress::CompactTensorAddress()
- : _elements()
-{
-}
-
-CompactTensorAddress::CompactTensorAddress(const Elements &elements_in)
- : _elements(elements_in)
-{
-}
-
-bool
-CompactTensorAddress::hasDimension(const vespalib::string &dimension) const
-{
- for (const auto &elem : _elements) {
- if (elem.dimension() == dimension) {
- return true;
- }
- }
- return false;
-}
-
-bool
-CompactTensorAddress::operator<(const CompactTensorAddress &rhs) const
-{
- size_t minSize = std::min(_elements.size(), rhs._elements.size());
- for (size_t i = 0; i < minSize; ++i) {
- if (_elements[i] != rhs._elements[i]) {
- return _elements[i] < rhs._elements[i];
- }
- }
- return _elements.size() < rhs._elements.size();
-}
-
-bool
-CompactTensorAddress::operator==(const CompactTensorAddress &rhs) const
-{
- return _elements == rhs._elements;
-}
-
-
-void
-CompactTensorAddress::deserializeFromSparseAddressRef(CompactTensorAddressRef
- ref)
-{
- _elements.clear();
- setupElements(_elements, ref);
-}
-
-
-void
-CompactTensorAddress::deserializeFromAddressRefV2(CompactTensorAddressRef ref,
- const TensorDimensions &
- dimensions)
-{
- _elements.clear();
- SparseTensorAddressDecoder addr(ref);
- for (auto &dim : dimensions) {
- auto label = addr.decodeLabel();
- if (label.size() != 0u) {
- _elements.emplace_back(dim, label);
- }
- }
- assert(!addr.valid());
-}
-
-
-
-std::ostream &
-operator<<(std::ostream &out, const CompactTensorAddress::Elements &elements)
-{
- out << "{";
- bool first = true;
- for (const auto &elem : elements) {
- if (!first) {
- out << ",";
- }
- out << elem.dimension() << ":" << elem.label();
- first = false;
- }
- out << "}";
- return out;
-}
-
-std::ostream &
-operator<<(std::ostream &out, const CompactTensorAddress &value)
-{
- out << value.elements();
- return out;
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h
deleted file mode 100644
index 509c267323c..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/stllike/string.h>
-#include <iostream>
-#include <vector>
-#include "compact_tensor_address_ref.h"
-#include <vespa/vespalib/tensor/types.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * A compact sparse immutable address to a tensor cell.
- *
- * Only dimensions which have a different label than "undefined" are
- * explicitly included.
- *
- * Tensor addresses are ordered by the natural order of the elements
- * in sorted order.
- */
-class CompactTensorAddress
-{
-public:
- class Element
- {
- private:
- vespalib::stringref _dimension;
- vespalib::stringref _label;
-
- public:
- Element(vespalib::stringref dimension_in,
- vespalib::stringref label_in)
- : _dimension(dimension_in), _label(label_in)
- {}
- vespalib::stringref dimension() const { return _dimension; }
- vespalib::stringref label() const { return _label; }
- bool operator<(const Element &rhs) const {
- if (_dimension == rhs._dimension) {
- // Define sort order when dimension is the same to be able
- // to do set operations over element vectors.
- return _label < rhs._label;
- }
- return _dimension < rhs._dimension;
- }
- bool operator==(const Element &rhs) const {
- return (_dimension == rhs._dimension) && (_label == rhs._label);
- }
- bool operator!=(const Element &rhs) const {
- return !(*this == rhs);
- }
- };
-
- typedef std::vector<Element> Elements;
-
-private:
- Elements _elements;
-
-public:
- CompactTensorAddress();
- explicit CompactTensorAddress(const Elements &elements_in);
- const Elements &elements() const { return _elements; }
- bool hasDimension(const vespalib::string &dimension) const;
- bool operator<(const CompactTensorAddress &rhs) const;
- bool operator==(const CompactTensorAddress &rhs) const;
- void deserializeFromSparseAddressRef(CompactTensorAddressRef ref);
- void deserializeFromAddressRefV2(CompactTensorAddressRef ref,
- const TensorDimensions &dimensions);
-};
-
-std::ostream &operator<<(std::ostream &out, const CompactTensorAddress::Elements &elements);
-std::ostream &operator<<(std::ostream &out, const CompactTensorAddress &value);
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp
deleted file mode 100644
index 03f2ec0fd15..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "compact_tensor_address_builder.h"
-#include <algorithm>
-
-namespace vespalib {
-namespace tensor {
-
-namespace
-{
-
-void
-append(std::vector<char> &address, vespalib::stringref str)
-{
- const char *cstr = str.c_str();
- address.insert(address.end(), cstr, cstr + str.size() + 1);
-}
-
-}
-
-CompactTensorAddressBuilder::CompactTensorAddressBuilder()
- : _address()
-{
-}
-
-
-void
-CompactTensorAddressBuilder::add(vespalib::stringref dimension,
- vespalib::stringref label)
-{
- append(_address, dimension);
- append(_address, label);
-}
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h
deleted file mode 100644
index 2981352eef5..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/stllike/string.h>
-#include <vector>
-#include "compact_tensor_address_ref.h"
-
-namespace vespalib {
-namespace tensor {
-
-
-class CompactTensorAddress;
-
-/**
- * A writer to serialize tensor addresses into a compact representation.
- *
- * Format: (dimStr NUL labelStr NUL)*
- */
-class CompactTensorAddressBuilder
-{
-private:
- std::vector<char> _address;
-public:
- CompactTensorAddressBuilder();
- void add(vespalib::stringref dimension, vespalib::stringref label);
- void clear() { _address.clear(); }
- CompactTensorAddressRef getAddressRef() const {
- return CompactTensorAddressRef(&_address[0], _address.size());
- }
- bool empty() const { return _address.empty(); }
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h
index 5e51a750fc2..1d5b4b550a4 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h
@@ -21,8 +21,7 @@ public:
using Dimensions = typename TensorImplType::Dimensions;
using Cells = typename TensorImplType::Cells;
using AddressBuilderType = SparseTensorAddressBuilder;
- using AddressRefType = CompactTensorAddressRef;
- using AddressType = CompactTensorAddress;
+ using AddressRefType = SparseTensorAddressRef;
private:
Stash _stash;
@@ -34,8 +33,8 @@ public:
copyCells(const Cells &cells_in)
{
for (const auto &cell : cells_in) {
- CompactTensorAddressRef oldRef = cell.first;
- CompactTensorAddressRef newRef(oldRef, _stash);
+ SparseTensorAddressRef oldRef = cell.first;
+ SparseTensorAddressRef newRef(oldRef, _stash);
_cells[newRef] = cell.second;
}
}
@@ -47,8 +46,8 @@ public:
cells_in_dimensions);
for (const auto &cell : cells_in) {
addressPadder.padAddress(cell.first);
- CompactTensorAddressRef oldRef = addressPadder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
+ SparseTensorAddressRef oldRef = addressPadder.getAddressRef();
+ SparseTensorAddressRef newRef(oldRef, _stash);
_cells[newRef] = cell.second;
}
}
@@ -97,20 +96,20 @@ public:
}
template <class Function>
- void insertCell(CompactTensorAddressRef address, double value,
+ void insertCell(SparseTensorAddressRef address, double value,
Function &&func)
{
- CompactTensorAddressRef oldRef(address);
+ SparseTensorAddressRef oldRef(address);
auto res = _cells.insert(std::make_pair(oldRef, value));
if (res.second) {
// Replace key with own copy
- res.first->first = CompactTensorAddressRef(oldRef, _stash);
+ res.first->first = SparseTensorAddressRef(oldRef, _stash);
} else {
res.first->second = func(res.first->second, value);
}
}
- void insertCell(CompactTensorAddressRef address, double value) {
+ void insertCell(SparseTensorAddressRef address, double value) {
// This address should not already exist and a new cell should be inserted.
insertCell(address, value, [](double, double) -> double { abort(); });
}
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h b/vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h
deleted file mode 100644
index e17f1812533..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-/*
- * Join the cells of two tensors.
- * The given function is used to calculate the resulting cell value for overlapping cells.
- */
-template <typename Function>
-Tensor::UP
-joinSparseTensors(const SparseTensor &lhs, const SparseTensor &rhs,
- Function &&func)
-{
- DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs),
- lhs.cells(), lhs.dimensions());
- if (builder.dimensions().size() == rhs.dimensions().size()) {
- for (const auto &rhsCell : rhs.cells()) {
- builder.insertCell(rhsCell.first, rhsCell.second, func);
- }
- } else {
- SparseTensorAddressPadder addressPadder(builder.dimensions(),
- rhs.dimensions());
- for (const auto &rhsCell : rhs.cells()) {
- addressPadder.padAddress(rhsCell.first);
- builder.insertCell(addressPadder, rhsCell.second, func);
- }
- }
- return builder.build();
-}
-
-/*
- * Join the cells of two tensors, where the rhs values are treated as negated values.
- * The given function is used to calculate the resulting cell value for overlapping cells.
- */
-template <typename Function>
-Tensor::UP
-joinSparseTensorsNegated(const SparseTensor &lhs,
- const SparseTensor &rhs,
- Function &&func)
-{
- DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs),
- lhs.cells(), lhs.dimensions());
- if (builder.dimensions().size() == rhs.dimensions().size()) {
- for (const auto &rhsCell : rhs.cells()) {
- builder.insertCell(rhsCell.first, -rhsCell.second, func);
- }
- } else {
- SparseTensorAddressPadder addressPadder(builder.dimensions(),
- rhs.dimensions());
- for (const auto &rhsCell : rhs.cells()) {
- addressPadder.padAddress(rhsCell.first);
- builder.insertCell(addressPadder, -rhsCell.second, func);
- }
- }
- return builder.build();
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
index 7f833bf0bce..5e7ec5b1db3 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
@@ -3,15 +3,16 @@
#include <vespa/fastos/fastos.h>
#include "sparse_tensor.h"
#include "sparse_tensor_address_builder.h"
-#include "sparse_tensor_dimension_sum.h"
#include "sparse_tensor_match.h"
-#include "sparse_tensor_product.h"
-#include "join_sparse_tensors.h"
+#include "sparse_tensor_apply.hpp"
+#include "sparse_tensor_reduce.hpp"
#include <vespa/vespalib/tensor/tensor_address_builder.h>
#include <vespa/vespalib/tensor/tensor_apply.h>
#include <vespa/vespalib/tensor/tensor_visitor.h>
+#include <vespa/vespalib/eval/operation.h>
#include <sstream>
+using vespalib::eval::TensorSpec;
namespace vespalib {
namespace tensor {
@@ -24,12 +25,33 @@ void
copyCells(Cells &cells, const Cells &cells_in, Stash &stash)
{
for (const auto &cell : cells_in) {
- CompactTensorAddressRef oldRef = cell.first;
- CompactTensorAddressRef newRef(oldRef, stash);
+ SparseTensorAddressRef oldRef = cell.first;
+ SparseTensorAddressRef newRef(oldRef, stash);
cells[newRef] = cell.second;
}
}
+void
+printAddress(std::ostream &out, const SparseTensorAddressRef &ref,
+ const TensorDimensions &dimensions)
+{
+ out << "{";
+ bool first = true;
+ SparseTensorAddressDecoder addr(ref);
+ for (auto &dim : dimensions) {
+ auto label = addr.decodeLabel();
+ if (label.size() != 0u) {
+ if (!first) {
+ out << ",";
+ }
+ out << dim << ":" << label;
+ first = false;
+ }
+ }
+ assert(!addr.valid());
+ out << "}";
+}
+
}
SparseTensor::SparseTensor(const Dimensions &dimensions_in,
@@ -96,8 +118,8 @@ SparseTensor::add(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return joinSparseTensors(*this, *rhs,
- [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue + rhsValue; });
}
Tensor::UP
@@ -107,9 +129,8 @@ SparseTensor::subtract(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- // Note that -rhsCell.second is passed to the lambda function, that is why we do addition.
- return joinSparseTensorsNegated(*this, *rhs,
- [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue - rhsValue; });
}
Tensor::UP
@@ -119,7 +140,8 @@ SparseTensor::multiply(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return SparseTensorProduct(*this, *rhs).result();
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue * rhsValue; });
}
Tensor::UP
@@ -129,8 +151,8 @@ SparseTensor::min(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return joinSparseTensors(*this, *rhs,
- [](double lhsValue, double rhsValue) { return std::min(lhsValue, rhsValue); });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return std::min(lhsValue, rhsValue); });
}
Tensor::UP
@@ -140,8 +162,8 @@ SparseTensor::max(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return joinSparseTensors(*this, *rhs,
- [](double lhsValue, double rhsValue) { return std::max(lhsValue, rhsValue); });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return std::max(lhsValue, rhsValue); });
}
Tensor::UP
@@ -163,7 +185,9 @@ SparseTensor::apply(const CellFunction &func) const
Tensor::UP
SparseTensor::sum(const vespalib::string &dimension) const
{
- return SparseTensorDimensionSum(*this, dimension).result();
+ return sparse::reduce(*this, { dimension },
+ [](double lhsValue, double rhsValue)
+ { return lhsValue + rhsValue; });
}
bool
@@ -190,18 +214,52 @@ SparseTensor::clone() const
return std::make_unique<SparseTensor>(_dimensions, _cells);
}
+namespace {
+
+void
+buildAddress(const SparseTensor::Dimensions &dimensions,
+ SparseTensorAddressDecoder &decoder,
+ TensorSpec::Address &address)
+{
+ for (const auto &dimension : dimensions) {
+ auto label = decoder.decodeLabel();
+ if (!label.empty()) {
+ address.emplace(std::make_pair(dimension, TensorSpec::Label(label)));
+ }
+ }
+ assert(!decoder.valid());
+}
+
+}
+
+TensorSpec
+SparseTensor::toSpec() const
+{
+ TensorSpec result(getType().to_spec());
+ TensorSpec::Address address;
+ for (const auto &cell : _cells) {
+ SparseTensorAddressDecoder decoder(cell.first);
+ buildAddress(_dimensions, decoder, address);
+ result.add(address, cell.second);
+ address.clear();
+ }
+ if (_dimensions.empty() && _cells.empty()) {
+ result.add(address, 0.0);
+ }
+ return result;
+}
+
void
SparseTensor::print(std::ostream &out) const
{
out << "{ ";
bool first = true;
- CompactTensorAddress addr;
for (const auto &cell : cells()) {
if (!first) {
out << ", ";
}
- addr.deserializeFromAddressRefV2(cell.first, _dimensions);
- out << addr << ":" << cell.second;
+ printAddress(out, cell.first, _dimensions);
+ out << ":" << cell.second;
first = false;
}
out << " }";
@@ -227,5 +285,27 @@ SparseTensor::accept(TensorVisitor &visitor) const
}
}
+Tensor::UP
+SparseTensor::apply(const eval::BinaryOperation &op, const Tensor &arg) const
+{
+ const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg);
+ if (!rhs) {
+ return Tensor::UP();
+ }
+ return sparse::apply(*this, *rhs,
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
+Tensor::UP
+SparseTensor::reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions) const
+{
+ return sparse::reduce(*this,
+ (dimensions.empty() ? _dimensions : dimensions),
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
} // namespace vespalib::tensor
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h
index 8427e51ffd1..d788a55885e 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h
@@ -5,7 +5,7 @@
#include <vespa/vespalib/tensor/cell_function.h>
#include <vespa/vespalib/tensor/tensor.h>
#include <vespa/vespalib/tensor/tensor_address.h>
-#include "compact_tensor_address.h"
+#include "sparse_tensor_address_ref.h"
#include <vespa/vespalib/tensor/types.h>
#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/stllike/string.h>
@@ -22,7 +22,7 @@ namespace tensor {
class SparseTensor : public Tensor
{
public:
- typedef vespalib::hash_map<CompactTensorAddressRef, double> Cells;
+ typedef vespalib::hash_map<SparseTensorAddressRef, double> Cells;
typedef TensorDimensions Dimensions;
static constexpr size_t STASH_CHUNK_SIZE = 16384u;
@@ -52,10 +52,16 @@ public:
virtual Tensor::UP match(const Tensor &arg) const override;
virtual Tensor::UP apply(const CellFunction &func) const override;
virtual Tensor::UP sum(const vespalib::string &dimension) const override;
+ virtual Tensor::UP apply(const eval::BinaryOperation &op,
+ const Tensor &arg) const override;
+ virtual Tensor::UP reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions)
+ const override;
virtual bool equals(const Tensor &arg) const override;
virtual void print(std::ostream &out) const override;
virtual vespalib::string toString() const override;
virtual Tensor::UP clone() const override;
+ virtual eval::TensorSpec toSpec() const override;
virtual void accept(TensorVisitor &visitor) const override;
};
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h
index 239b405fe4c..c1678d89018 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h
@@ -4,14 +4,12 @@
#include <vespa/vespalib/stllike/string.h>
#include <vector>
-#include "compact_tensor_address_ref.h"
+#include "sparse_tensor_address_ref.h"
namespace vespalib {
namespace tensor {
-class CompactTensorAddress;
-
/**
* A writer to serialize tensor addresses into a compact representation.
* All dimensions in the tensors are present, empty label is the "undefined"
@@ -38,8 +36,8 @@ public:
void add(vespalib::stringref label) { append(label); }
void addUndefined() { _address.emplace_back('\0'); }
void clear() { _address.clear(); }
- CompactTensorAddressRef getAddressRef() const {
- return CompactTensorAddressRef(&_address[0], _address.size());
+ SparseTensorAddressRef getAddressRef() const {
+ return SparseTensorAddressRef(&_address[0], _address.size());
}
bool empty() const { return _address.empty(); }
};
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp
new file mode 100644
index 00000000000..53cf90e2db0
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp
@@ -0,0 +1,69 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "sparse_tensor_address_combiner.h"
+#include "sparse_tensor_address_decoder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+TensorAddressCombiner::TensorAddressCombiner(const TensorDimensions &lhs,
+ const TensorDimensions &rhs)
+{
+ auto rhsItr = rhs.cbegin();
+ auto rhsItrEnd = rhs.cend();
+ for (auto &lhsDim : lhs) {
+ while (rhsItr != rhsItrEnd && *rhsItr < lhsDim) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+ if (rhsItr != rhsItrEnd && *rhsItr == lhsDim) {
+ _ops.push_back(AddressOp::BOTH);
+ ++rhsItr;
+ } else {
+ _ops.push_back(AddressOp::LHS);
+ }
+ }
+ while (rhsItr != rhsItrEnd) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+}
+
+TensorAddressCombiner::~TensorAddressCombiner()
+{
+}
+
+bool
+TensorAddressCombiner::combine(SparseTensorAddressRef lhsRef,
+ SparseTensorAddressRef rhsRef)
+{
+ clear();
+ SparseTensorAddressDecoder lhs(lhsRef);
+ SparseTensorAddressDecoder rhs(rhsRef);
+ for (auto op : _ops) {
+ switch (op) {
+ case AddressOp::LHS:
+ add(lhs.decodeLabel());
+ break;
+ case AddressOp::RHS:
+ add(rhs.decodeLabel());
+ break;
+ case AddressOp::BOTH:
+ auto lhsLabel(lhs.decodeLabel());
+ auto rhsLabel(rhs.decodeLabel());
+ if (lhsLabel != rhsLabel) {
+ return false;
+ }
+ add(lhsLabel);
+ }
+ }
+ assert(!lhs.valid());
+ assert(!rhs.valid());
+ return true;
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h
new file mode 100644
index 00000000000..72717396a02
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_address_builder.h"
+#include <vespa/vespalib/tensor/types.h>
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+/**
+ * Combine two tensor addresses to a new tensor address. Common dimensions
+ * must have matching labels.
+ */
+class TensorAddressCombiner : public SparseTensorAddressBuilder
+{
+ enum class AddressOp
+ {
+ LHS,
+ RHS,
+ BOTH
+ };
+
+ std::vector<AddressOp> _ops;
+
+public:
+ TensorAddressCombiner(const TensorDimensions &lhs,
+ const TensorDimensions &rhs);
+
+ ~TensorAddressCombiner();
+
+ bool combine(SparseTensorAddressRef lhsRef, SparseTensorAddressRef rhsRef);
+};
+
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h
index bac864b53f3..94cb9373bc2 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h
@@ -3,7 +3,7 @@
#pragma once
#include <vespa/vespalib/stllike/string.h>
-#include "compact_tensor_address_ref.h"
+#include "sparse_tensor_address_ref.h"
namespace vespalib {
@@ -18,7 +18,7 @@ class SparseTensorAddressDecoder
const char *_cur;
const char *_end;
public:
- SparseTensorAddressDecoder(CompactTensorAddressRef ref)
+ SparseTensorAddressDecoder(SparseTensorAddressRef ref)
: _cur(static_cast<const char *>(ref.start())),
_end(_cur + ref.size())
{
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h
index 5de4bd00404..5f0c95033b3 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h
@@ -47,7 +47,7 @@ public:
}
void
- padAddress(CompactTensorAddressRef ref)
+ padAddress(SparseTensorAddressRef ref)
{
clear();
SparseTensorAddressDecoder addr(ref);
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp
new file mode 100644
index 00000000000..2d3bbaef043
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp
@@ -0,0 +1,51 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "sparse_tensor_address_reducer.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+TensorAddressReducer::TensorAddressReducer(const TensorDimensions &dims,
+ const std::vector<vespalib::string> &
+ removeDimensions)
+ : SparseTensorAddressBuilder(),
+ _ops()
+{
+ TensorDimensionsSet removeSet(removeDimensions.cbegin(),
+ removeDimensions.cend());
+ _ops.reserve(dims.size());
+ for (auto &dim : dims) {
+ if (removeSet.find(dim) != removeSet.end()) {
+ _ops.push_back(AddressOp::REMOVE);
+ } else {
+ _ops.push_back(AddressOp::COPY);
+ }
+ }
+}
+
+TensorDimensions
+TensorAddressReducer::remainingDimensions(const TensorDimensions &dimensions,
+ const std::vector<vespalib::string> &
+ removeDimensions)
+{
+ TensorDimensionsSet removeSet(removeDimensions.cbegin(),
+ removeDimensions.cend());
+ TensorDimensions result;
+ result.reserve(dimensions.size());
+ for (auto &dim : dimensions) {
+ if (removeSet.find(dim) == removeSet.end()) {
+ result.push_back(dim);
+ }
+ }
+ return std::move(result);
+}
+
+TensorAddressReducer::~TensorAddressReducer()
+{
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h
new file mode 100644
index 00000000000..775607ca059
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h
@@ -0,0 +1,58 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_address_builder.h"
+#include <vespa/vespalib/tensor/types.h>
+#include "sparse_tensor_address_decoder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+/**
+ * Reduce sparse tensor address by removing one or more dimensions.
+ */
+class TensorAddressReducer : public SparseTensorAddressBuilder
+{
+ enum AddressOp
+ {
+ REMOVE,
+ COPY
+ };
+
+ using AddressOps = std::vector<AddressOp>;
+
+ AddressOps _ops;
+
+public:
+ TensorAddressReducer(const TensorDimensions &dims,
+ const std::vector<vespalib::string> &removeDimensions);
+
+ ~TensorAddressReducer();
+
+ static TensorDimensions
+ remainingDimensions(const TensorDimensions &dimensions,
+ const std::vector<vespalib::string> &removeDimensions);
+
+ void reduce(SparseTensorAddressRef ref)
+ {
+ clear();
+ SparseTensorAddressDecoder decoder(ref);
+ for (auto op : _ops) {
+ switch (op) {
+ case AddressOp::REMOVE:
+ decoder.skipLabel();
+ break;
+ case AddressOp::COPY:
+ add(decoder.decodeLabel());
+ }
+ }
+ assert(!decoder.valid());
+ }
+};
+
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_ref.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h
index fa49e2fd39c..4358ce501a2 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_ref.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h
@@ -16,24 +16,24 @@ namespace tensor {
/**
* A reference to a compact sparse immutable address to a tensor cell.
*/
-class CompactTensorAddressRef
+class SparseTensorAddressRef
{
const void *_start;
size_t _size;
size_t _hash;
public:
- CompactTensorAddressRef()
+ SparseTensorAddressRef()
: _start(nullptr), _size(0u), _hash(0u)
{
}
- CompactTensorAddressRef(const void *start_in, size_t size_in)
+ SparseTensorAddressRef(const void *start_in, size_t size_in)
: _start(start_in), _size(size_in),
_hash(calcHash())
{
}
- CompactTensorAddressRef(const CompactTensorAddressRef rhs, Stash &stash)
+ SparseTensorAddressRef(const SparseTensorAddressRef rhs, Stash &stash)
: _start(nullptr),
_size(rhs._size),
_hash(rhs._hash)
@@ -47,7 +47,7 @@ public:
size_t calcHash() const { return hashValue(_start, _size); }
- bool operator<(const CompactTensorAddressRef &rhs) const {
+ bool operator<(const SparseTensorAddressRef &rhs) const {
size_t minSize = std::min(_size, rhs._size);
int res = memcmp(_start, rhs._start, minSize);
if (res != 0) {
@@ -56,7 +56,7 @@ public:
return _size < rhs._size;
}
- bool operator==(const CompactTensorAddressRef &rhs) const
+ bool operator==(const SparseTensorAddressRef &rhs) const
{
if (_size != rhs._size || _hash != rhs._hash) {
return false;
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h
new file mode 100644
index 00000000000..e0a8b2cee5b
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h
@@ -0,0 +1,23 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace vespalib {
+namespace tensor {
+class Tensor;
+class SparseTensor;
+namespace sparse {
+
+/**
+ * Create new tensor using all combinations of input tensor cells with matching
+ * labels for common dimensions, using func to calculate new cell value
+ * based on the cell values in the input tensors.
+ */
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func);
+
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp
new file mode 100644
index 00000000000..6c055d8547b
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp
@@ -0,0 +1,35 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_apply.h"
+#include "sparse_tensor_address_combiner.h"
+#include <vespa/vespalib/tensor/direct_tensor_builder.h>
+#include "direct_sparse_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func)
+{
+ DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs));
+ TensorAddressCombiner addressCombiner(lhs.dimensions(), rhs.dimensions());
+ for (const auto &lhsCell : lhs.cells()) {
+ for (const auto &rhsCell : rhs.cells()) {
+ bool combineSuccess = addressCombiner.combine(lhsCell.first,
+ rhsCell.first);
+ if (combineSuccess) {
+ builder.insertCell(addressCombiner.getAddressRef(),
+ func(lhsCell.second, rhsCell.second));
+ }
+ }
+ }
+ return builder.build();
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp
index a16774707b4..bb00d9b2e19 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp
@@ -65,9 +65,9 @@ SparseTensorBuilder::add_cell(double value)
makeSortedDimensions();
}
_addressBuilder.buildTo(_normalizedAddressBuilder, _sortedDimensions);
- CompactTensorAddressRef taddress(_normalizedAddressBuilder.getAddressRef());
+ SparseTensorAddressRef taddress(_normalizedAddressBuilder.getAddressRef());
// Make a persistent copy of sparse tensor address owned by _stash
- CompactTensorAddressRef address(taddress, _stash);
+ SparseTensorAddressRef address(taddress, _stash);
_cells[address] = value;
_addressBuilder.clear();
_normalizedAddressBuilder.clear();
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h
index c7a7e8a5a9e..be0791a59c1 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h
@@ -4,7 +4,7 @@
#include "sparse_tensor.h"
#include "sparse_tensor_address_builder.h"
-#include "compact_tensor_unsorted_address_builder.h"
+#include "sparse_tensor_unsorted_address_builder.h"
#include <vespa/vespalib/tensor/tensor_builder.h>
#include <vespa/vespalib/tensor/tensor_address.h>
#include <vespa/vespalib/stllike/hash_map.h>
@@ -18,7 +18,7 @@ namespace tensor {
*/
class SparseTensorBuilder : public TensorBuilder
{
- CompactTensorUnsortedAddressBuilder _addressBuilder; // unsorted dimensions
+ SparseTensorUnsortedAddressBuilder _addressBuilder; // unsorted dimensions
SparseTensorAddressBuilder _normalizedAddressBuilder; // sorted dimensions
SparseTensor::Cells _cells;
Stash _stash;
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp
deleted file mode 100644
index 54c8d9b175a..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "sparse_tensor_dimension_sum.h"
-#include "sparse_tensor_address_decoder.h"
-
-namespace vespalib {
-namespace tensor {
-
-namespace {
-
-enum class AddressOp
-{
- REMOVE,
- COPY
-};
-
-using ReduceOps = std::vector<AddressOp>;
-
-
-ReduceOps
-buildReduceOps(const TensorDimensions &dims,
- const vespalib::stringref &dimension)
-{
- ReduceOps ops;
- for (auto &dim : dims) {
- if (dim == dimension) {
- ops.push_back(AddressOp::REMOVE);
- } else {
- ops.push_back(AddressOp::COPY);
- }
- }
- return ops;
-}
-
-
-void
-reduceAddress(SparseTensorAddressBuilder &builder,
- CompactTensorAddressRef ref,
- const ReduceOps &ops)
-{
- builder.clear();
- SparseTensorAddressDecoder addr(ref);
- for (auto op : ops) {
- switch (op) {
- case AddressOp::REMOVE:
- addr.skipLabel();
- break;
- case AddressOp::COPY:
- builder.add(addr.decodeLabel());
- break;
- }
- }
- assert(!addr.valid());
-}
-
-TensorDimensions
-removeDimension(const TensorDimensions &dimensions,
- const vespalib::string &dimension)
-{
- TensorDimensions result = dimensions;
- auto itr = std::lower_bound(result.begin(), result.end(), dimension);
- if (itr != result.end() && *itr == dimension) {
- result.erase(itr);
- }
- return result;
-}
-
-}
-
-SparseTensorDimensionSum::SparseTensorDimensionSum(const TensorImplType &
- tensor,
- const
- vespalib::string &
- dimension)
- : Parent(removeDimension(tensor.dimensions(), dimension))
-{
- ReduceOps ops(buildReduceOps(tensor.dimensions(), dimension));
- AddressBuilderType reducedAddress;
- for (const auto &cell : tensor.cells()) {
- reduceAddress(reducedAddress, cell.first, ops);
- _builder.insertCell(reducedAddress, cell.second,
- [](double cellValue, double rhsValue) { return cellValue + rhsValue; });
- }
-}
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h
deleted file mode 100644
index f88239834e9..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/tensor/tensor_operation.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns a tensor with the given dimension removed and the cell values in that dimension summed.
- */
-class SparseTensorDimensionSum : public TensorOperation<SparseTensor>
-{
-public:
- using TensorImplType = SparseTensor;
- using Parent = TensorOperation<SparseTensor>;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using AddressType = typename Parent::AddressType;
- using Parent::_builder;
- SparseTensorDimensionSum(const TensorImplType &tensor,
- const vespalib::string &dimension);
-};
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp
index 27cede44ff4..35da291bbee 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp
@@ -45,7 +45,7 @@ buildTransformOps(std::vector<AddressOp> &ops,
bool
transformAddress(SparseTensorAddressBuilder &builder,
- CompactTensorAddressRef ref,
+ SparseTensorAddressRef ref,
const std::vector<AddressOp> &ops)
{
builder.clear();
@@ -99,7 +99,7 @@ SparseTensorMatch::slowMatch(const TensorImplType &lhs,
if (!transformAddress(addressBuilder, lhsCell.first, ops)) {
continue;
}
- CompactTensorAddressRef ref(addressBuilder.getAddressRef());
+ SparseTensorAddressRef ref(addressBuilder.getAddressRef());
auto rhsItr = rhs.cells().find(ref);
if (rhsItr != rhs.cells().end()) {
addressPadder.padAddress(lhsCell.first);
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp
deleted file mode 100644
index 1a276ad55dd..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "sparse_tensor_product.h"
-#include "sparse_tensor_address_decoder.h"
-#include <type_traits>
-
-namespace vespalib {
-namespace tensor {
-
-namespace {
-
-enum class AddressOp
-{
- LHS,
- RHS,
- BOTH
-};
-
-using CombineOps = std::vector<AddressOp>;
-
-CombineOps
-buildCombineOps(const TensorDimensions &lhs,
- const TensorDimensions &rhs)
-{
- CombineOps ops;
- auto rhsItr = rhs.cbegin();
- auto rhsItrEnd = rhs.cend();
- for (auto &lhsDim : lhs) {
- while (rhsItr != rhsItrEnd && *rhsItr < lhsDim) {
- ops.push_back(AddressOp::RHS);
- ++rhsItr;
- }
- if (rhsItr != rhsItrEnd && *rhsItr == lhsDim) {
- ops.push_back(AddressOp::BOTH);
- ++rhsItr;
- } else {
- ops.push_back(AddressOp::LHS);
- }
- }
- while (rhsItr != rhsItrEnd) {
- ops.push_back(AddressOp::RHS);
- ++rhsItr;
- }
- return ops;
-}
-
-
-bool
-combineAddresses(SparseTensorAddressBuilder &builder,
- CompactTensorAddressRef lhsRef,
- CompactTensorAddressRef rhsRef,
- const CombineOps &ops)
-{
- builder.clear();
- SparseTensorAddressDecoder lhs(lhsRef);
- SparseTensorAddressDecoder rhs(rhsRef);
- for (auto op : ops) {
- switch (op) {
- case AddressOp::LHS:
- builder.add(lhs.decodeLabel());
- break;
- case AddressOp::RHS:
- builder.add(rhs.decodeLabel());
- break;
- case AddressOp::BOTH:
- auto lhsLabel(lhs.decodeLabel());
- auto rhsLabel(rhs.decodeLabel());
- if (lhsLabel != rhsLabel) {
- return false;
- }
- builder.add(lhsLabel);
- }
- }
- assert(!lhs.valid());
- assert(!rhs.valid());
- return true;
-}
-
-}
-
-
-void
-SparseTensorProduct::bruteForceProduct(const TensorImplType &lhs,
- const TensorImplType &rhs)
-{
- CombineOps ops(buildCombineOps(lhs.dimensions(), rhs.dimensions()));
- SparseTensorAddressBuilder addressBuilder;
- for (const auto &lhsCell : lhs.cells()) {
- for (const auto &rhsCell : rhs.cells()) {
- bool combineSuccess = combineAddresses(addressBuilder,
- lhsCell.first, rhsCell.first,
- ops);
- if (combineSuccess) {
- _builder.insertCell(addressBuilder.getAddressRef(),
- lhsCell.second * rhsCell.second);
- }
- }
- }
-}
-
-
-void
-SparseTensorProduct::fastProduct(const TensorImplType &lhs,
- const TensorImplType &rhs)
-{
- const typename TensorImplType::Cells &rhsCells = rhs.cells();
- for (const auto &lhsCell : lhs.cells()) {
- auto itr = rhsCells.find(lhsCell.first);
- if (itr != rhsCells.end()) {
- _builder.insertCell(lhsCell.first,
- lhsCell.second * itr->second);
- }
- }
-}
-
-
-SparseTensorProduct::SparseTensorProduct(const TensorImplType &lhs,
- const TensorImplType &rhs)
- : Parent(lhs.combineDimensionsWith(rhs))
-{
-#if 0
- /* Commented ut for now since we want to see brute force performance. */
- // All dimensions are common
- if (lhs.dimensions().size() == rhs.dimensions().size() &&
- lhs.dimensions().size() == _builder.dimensions().size()) {
- fastProduct(lhs, rhs);
- return;
- }
- // TODO: Handle zero cells or zero dimensions cases
- // No dimensions are common
- if (lhs.dimensions().size() + rhs.dimensions().size() ==
- _builder.dimensions().size()) {
- bruteForceNoCommonDimensionProduct(lhs, rhs);
- return;
- }
- // lhs dimensions equals common dimensions
- if (rhs.dimensions().size() == _builder.dimensions().size()) {
- }
- // rhs dimensions equals common dimensions
- if (lhs.dimensions().size() == _builder.dimensions().size()) {
- }
-#endif
- bruteForceProduct(lhs, rhs);
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h
deleted file mode 100644
index 6aa84e83541..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/tensor/tensor_operation.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns the sparse tensor product of the two given tensors.
- * This is all combinations of all cells in the first tensor with all cells of
- * the second tensor, except the combinations which would have multiple labels
- * for the same dimension due to shared dimensions between the two tensors.
- *
- * If there are no overlapping dimensions this is the regular tensor product.
- * If the two tensors have exactly the same dimensions this is the Hadamard product.
- *
- * The sparse tensor is associative and commutative. Its dimensions are the
- * set of the dimensions of the two input tensors.
- */
-class SparseTensorProduct : public TensorOperation<SparseTensor>
-{
-public:
- using TensorImplType = SparseTensor;
- using Parent = TensorOperation<SparseTensor>;
- using Dimensions = typename Parent::Dimensions;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using AddressRefType = typename Parent::AddressRefType;
- using AddressType = typename Parent::AddressType;
- using Parent::_builder;
-
-private:
- void
- bruteForceProduct(const TensorImplType &lhs, const TensorImplType &rhs);
-
- void
- fastProduct(const TensorImplType &lhs, const TensorImplType &rhs);
-
-public:
- SparseTensorProduct(const TensorImplType &lhs,
- const TensorImplType &rhs);
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp
new file mode 100644
index 00000000000..06c5deade5e
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp
@@ -0,0 +1,29 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_address_reducer.h"
+#include <vespa/vespalib/tensor/direct_tensor_builder.h>
+#include "direct_sparse_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+template <typename Function>
+std::unique_ptr<Tensor>
+reduce(const SparseTensor &tensor,
+ const std::vector<vespalib::string> &dimensions, Function &&func)
+{
+ DirectTensorBuilder<SparseTensor> builder(TensorAddressReducer::remainingDimensions(tensor.dimensions(), dimensions));
+ TensorAddressReducer addressReducer(tensor.dimensions(), dimensions);
+ for (const auto &cell : tensor.cells()) {
+ addressReducer.reduce(cell.first);
+ builder.insertCell(addressReducer.getAddressRef(), cell.second, func);
+ }
+ return builder.build();
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp
index 1496ed0e5d5..57db0902396 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp
@@ -1,15 +1,14 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/fastos/fastos.h>
-#include "compact_tensor_unsorted_address_builder.h"
-#include "compact_tensor_address_builder.h"
+#include "sparse_tensor_unsorted_address_builder.h"
#include "sparse_tensor_address_builder.h"
#include <algorithm>
namespace vespalib {
namespace tensor {
-CompactTensorUnsortedAddressBuilder::CompactTensorUnsortedAddressBuilder()
+SparseTensorUnsortedAddressBuilder::SparseTensorUnsortedAddressBuilder()
: _elementStrings(),
_elements()
{
@@ -17,21 +16,7 @@ CompactTensorUnsortedAddressBuilder::CompactTensorUnsortedAddressBuilder()
void
-CompactTensorUnsortedAddressBuilder::buildTo(CompactTensorAddressBuilder &
- builder)
-{
- const char *base = &_elementStrings[0];
- std::sort(_elements.begin(), _elements.end(),
- [=](const ElementRef &lhs, const ElementRef &rhs)
- { return lhs.getDimension(base) < rhs.getDimension(base); });
- // build normalized address with sorted dimensions
- for (const auto &element : _elements) {
- builder.add(element.getDimension(base), element.getLabel(base));
- }
-}
-
-void
-CompactTensorUnsortedAddressBuilder::buildTo(SparseTensorAddressBuilder &
+SparseTensorUnsortedAddressBuilder::buildTo(SparseTensorAddressBuilder &
builder,
const TensorDimensions &
dimensions)
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h
index 1ee7ccf0b60..914f7d6ce2f 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h
@@ -9,14 +9,13 @@
namespace vespalib {
namespace tensor {
-class CompactTensorAddressBuilder;
class SparseTensorAddressBuilder;
/**
* A builder that buffers up a tensor address with unsorted
* dimensions.
*/
-class CompactTensorUnsortedAddressBuilder
+class SparseTensorUnsortedAddressBuilder
{
struct ElementStringRef
{
@@ -62,7 +61,7 @@ class CompactTensorUnsortedAddressBuilder
}
public:
- CompactTensorUnsortedAddressBuilder();
+ SparseTensorUnsortedAddressBuilder();
bool empty() const { return _elementStrings.empty(); }
void add(vespalib::stringref dimension, vespalib::stringref label)
{
@@ -72,7 +71,6 @@ public:
* Sort the stored tensor address and pass it over to a strict
* tensor address builder in sorted order.
*/
- void buildTo(CompactTensorAddressBuilder &builder);
void buildTo(SparseTensorAddressBuilder &builder,
const TensorDimensions &dimensions);
void clear() { _elementStrings.clear(); _elements.clear(); }
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor.h b/vespalib/src/vespa/vespalib/tensor/tensor.h
index 4128a27d9a7..9e4f4a9bff0 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor.h
+++ b/vespalib/src/vespa/vespalib/tensor/tensor.h
@@ -6,9 +6,11 @@
#include "tensor_address.h"
#include <vespa/vespalib/stllike/string.h>
#include <vespa/vespalib/eval/tensor.h>
+#include <vespa/vespalib/eval/tensor_spec.h>
#include <vespa/vespalib/eval/value_type.h>
namespace vespalib {
+namespace eval { class BinaryOperation; }
namespace tensor {
class TensorVisitor;
@@ -37,10 +39,16 @@ struct Tensor : public eval::Tensor
virtual Tensor::UP match(const Tensor &arg) const = 0;
virtual Tensor::UP apply(const CellFunction &func) const = 0;
virtual Tensor::UP sum(const vespalib::string &dimension) const = 0;
+ virtual Tensor::UP apply(const eval::BinaryOperation &op,
+ const Tensor &arg) const = 0;
+ virtual Tensor::UP reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions)
+ const = 0;
virtual bool equals(const Tensor &arg) const = 0;
virtual void print(std::ostream &out) const = 0;
virtual vespalib::string toString() const = 0;
virtual Tensor::UP clone() const = 0;
+ virtual eval::TensorSpec toSpec() const = 0;
virtual void accept(TensorVisitor &visitor) const = 0;
};
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h b/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h
index 3a260c7c693..a250331de5f 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h
+++ b/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h
@@ -25,68 +25,7 @@ public:
bool valid() const { return (_itr != _itrEnd); }
vespalib::stringref dimension() const { return _itr->dimension(); }
vespalib::stringref label() const { return _itr->label(); }
- template <class Iterator>
- bool beforeDimension(const Iterator &rhs) const {
- if (!valid()) {
- return false;
- }
- if (!rhs.valid()) {
- return true;
- }
- return (_itr->dimension() < rhs.dimension());
- }
- bool atDimension(vespalib::stringref rhsDimension) const
- {
- return (valid() && (_itr->dimension() == rhsDimension));
- }
void next() { ++_itr; }
- template <class AddressBuilder>
- void
- addElement(AddressBuilder &builder) {
- builder.add(_itr->dimension(), _itr->label());
- }
- template <class AddressBuilder, class Iterator>
- void addElements(AddressBuilder &builder, const Iterator &limit)
- {
- while (beforeDimension(limit)) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder, class Iterator>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims,
- const Iterator &limit)
- {
- do {
- if (dims.find(_itr->dimension()) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- } while (beforeDimension(limit));
- return true;
- }
- template <class AddressBuilder>
- void addElements(AddressBuilder &builder)
- {
- while (valid()) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims)
- {
- while (valid()) {
- if (dims.find(_itr->dimension()) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- }
- return true;
- }
-
bool skipToDimension(vespalib::stringref rhsDimension) {
for (;;) {
if (!valid()) {
@@ -101,118 +40,5 @@ public:
}
};
-
-/**
- * An iterator for tensor address elements used to simplify 3-way merge
- * between two tensor addresses and a dimension vector.
- * This is a specialization to perform decoding on the fly while iterating.
- */
-template <>
-class TensorAddressElementIterator<CompactTensorAddressRef> {
- const char *_itr;
- const char *_itrEnd;
- vespalib::stringref _dimension;
- vespalib::stringref _label;
-
- size_t
- simple_strlen(const char *str) {
- const char *strend = str;
- for (; *strend != '\0'; ++strend) {
- }
- return (strend - str);
- }
-
- void decodeElement()
- {
- _dimension = vespalib::stringref(_itr, simple_strlen(_itr));
- const char *labelp = _dimension.c_str() + _dimension.size() + 1;
- _label = vespalib::stringref(labelp, simple_strlen(labelp));
- _itr = _label.c_str() + _label.size() + 1;
- }
-public:
- TensorAddressElementIterator(CompactTensorAddressRef address)
- : _itr(static_cast<const char *>(address.start())),
- _itrEnd(_itr + address.size()),
- _dimension(),
- _label()
- {
- if (_itr != _itrEnd) {
- decodeElement();
- }
- }
- bool valid() const { return (_dimension.size() != 0u); }
- vespalib::stringref dimension() const { return _dimension; }
- vespalib::stringref label() const { return _label; }
- template <class Iterator>
- bool beforeDimension(const Iterator &rhs) const {
- if (!valid()) {
- return false;
- }
- if (!rhs.valid()) {
- return true;
- }
- return (_dimension < rhs.dimension());
- }
- bool atDimension(vespalib::stringref rhsDimension) const
- {
- return (_dimension == rhsDimension);
- }
- void next() {
- if (_itr != _itrEnd) {
- decodeElement();
- } else {
- _dimension = vespalib::stringref();
- _label = vespalib::stringref();
- }
- }
- template <class AddressBuilder>
- void
- addElement(AddressBuilder &builder) {
- builder.add(_dimension, _label);
- }
- template <class AddressBuilder, class Iterator>
- void addElements(AddressBuilder &builder, const Iterator &limit)
- {
- while (beforeDimension(limit)) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder, class Iterator>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims,
- const Iterator &limit)
- {
- do {
- if (dims.find(_dimension) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- } while (beforeDimension(limit));
- return true;
- }
- template <class AddressBuilder>
- void addElements(AddressBuilder &builder)
- {
- while (valid()) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims)
- {
- while (valid()) {
- if (dims.find(_dimension) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- }
- return true;
- }
-};
-
-
} // namespace vespalib::tensor
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
index 460cb8f8bb4..a527627d786 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
@@ -98,22 +98,6 @@ mapAddress(const TensorAddress &address)
template <class TensorT>
void
-SparseTensorMapper<TensorT>::mapAddress(const TensorAddress &address)
-{
- _addressBuilder.clear();
- TensorAddressElementIterator<TensorAddress> addressIterator(address);
- for (const auto &dimension : _builder.dimensions()) {
- if (addressIterator.skipToDimension(dimension)) {
- _addressBuilder.add(dimension, addressIterator.label());
- addressIterator.next();
- } else {
- // output dimension not in input
- }
- }
-}
-
-template <class TensorT>
-void
SparseTensorMapper<TensorT>::visit(const TensorAddress &address, double value)
{
mapAddress(address);
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_operation.h b/vespalib/src/vespa/vespalib/tensor/tensor_operation.h
index f74f7a7990d..350dfcc8abc 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor_operation.h
+++ b/vespalib/src/vespa/vespalib/tensor/tensor_operation.h
@@ -21,7 +21,6 @@ public:
using Cells = typename TensorImplType::Cells;
using AddressBuilderType = typename MyTensorBuilder::AddressBuilderType;
using AddressRefType = typename MyTensorBuilder::AddressRefType;
- using AddressType = typename MyTensorBuilder::AddressType;
protected:
MyTensorBuilder _builder;
Dimensions &_dimensions;
diff --git a/vespalib/src/vespa/vespalib/test/insertion_operators.h b/vespalib/src/vespa/vespalib/test/insertion_operators.h
index 8ed52062281..ac4fa3541e3 100644
--- a/vespalib/src/vespa/vespalib/test/insertion_operators.h
+++ b/vespalib/src/vespa/vespalib/test/insertion_operators.h
@@ -1,6 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include <map>
#include <ostream>
#include <set>
#include <vector>
@@ -41,5 +42,22 @@ operator<<(std::ostream &os, const std::vector<T> &set)
return os;
}
+template <typename K, typename V>
+std::ostream &
+operator<<(std::ostream &os, const std::map<K, V> &map)
+{
+ os << "{";
+ bool first = true;
+ for (const auto &entry : map) {
+ if (!first) {
+ os << ",";
+ }
+ os << "{" << entry.first << "," << entry.second << "}";
+ first = false;
+ }
+ os << "}";
+ return os;
+}
+
} // namespace std