diff options
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.cpp | 15 ++++++++++++++-
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp                | 11 +++++++++++
-rw-r--r--  vespalib/src/tests/stash/stash.cpp                                 |  6 +++---
-rw-r--r--  vespalib/src/vespa/vespalib/util/stash.cpp                         |  2 +-
4 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.cpp b/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.cpp
index c47521e702d..210d820191a 100644
--- a/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.cpp
+++ b/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.cpp
@@ -1,6 +1,7 @@
 // Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
 
 #include "direct_sparse_tensor_builder.h"
+#include <assert.h>
 
 namespace vespalib::tensor {
 
@@ -40,6 +41,18 @@ DirectSparseTensorBuilder::~DirectSparseTensorBuilder() = default;
 Tensor::UP
 DirectSparseTensorBuilder::build()
 {
+    size_t mem_use = _stash.get_memory_usage().usedBytes();
+    if (mem_use < (SparseTensor::STASH_CHUNK_SIZE / 4)) {
+        Stash smaller_stash(mem_use);
+        Cells copy = _cells;
+        for (auto &cell : copy) {
+            SparseTensorAddressRef oldRef = cell.first;
+            SparseTensorAddressRef newRef(oldRef, smaller_stash);
+            cell.first = newRef;
+        }
+        assert(smaller_stash.get_memory_usage().allocatedBytes() < mem_use + 128);
+        return std::make_unique<SparseTensor>(std::move(_type), std::move(copy), std::move(smaller_stash));
+    }
     return std::make_unique<SparseTensor>(std::move(_type), std::move(_cells), std::move(_stash));
 }
 
@@ -47,4 +60,4 @@ void DirectSparseTensorBuilder::reserve(uint32_t estimatedCells)
 {
     _cells.resize(estimatedCells*2);
 }
-}
\ No newline at end of file
+}
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp
index 87ab80c2a8e..81d327aaf97 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp
@@ -106,6 +106,17 @@ SparseTensor::equals(const Tensor &arg) const
 
 Tensor::UP
 SparseTensor::clone() const
 {
+    size_t mem_use = _stash.get_memory_usage().usedBytes();
+    if (mem_use < (SparseTensor::STASH_CHUNK_SIZE / 4)) {
+        Stash stash_copy(mem_use);
+        Cells cells_copy;
+        copyCells(cells_copy, _cells, stash_copy);
+        assert(stash_copy.get_memory_usage().allocatedBytes() < mem_use + 128);
+        eval::ValueType type_copy = _type;
+        return std::make_unique<SparseTensor>(std::move(type_copy),
+                                              std::move(cells_copy),
+                                              std::move(stash_copy));
+    }
     return std::make_unique<SparseTensor>(_type, _cells);
 }
diff --git a/vespalib/src/tests/stash/stash.cpp b/vespalib/src/tests/stash/stash.cpp
index ebf38a1343a..7ad62b32bda 100644
--- a/vespalib/src/tests/stash/stash.cpp
+++ b/vespalib/src/tests/stash/stash.cpp
@@ -253,9 +253,9 @@ TEST("require that the chunk size can be adjusted") {
     EXPECT_EQUAL(64000u, stash.get_chunk_size());
 }
 
-TEST("require that minimal chunk size is 4096") {
-    Stash stash(128);
-    EXPECT_EQUAL(4096u, stash.get_chunk_size());
+TEST("require that minimal chunk size is 96") {
+    Stash stash(50);
+    EXPECT_EQUAL(96u, stash.get_chunk_size());
 }
 
 TEST("require that a stash can be moved by construction") {
diff --git a/vespalib/src/vespa/vespalib/util/stash.cpp b/vespalib/src/vespa/vespalib/util/stash.cpp
index 31580e871db..ba5386ca74f 100644
--- a/vespalib/src/vespa/vespalib/util/stash.cpp
+++ b/vespalib/src/vespa/vespalib/util/stash.cpp
@@ -63,7 +63,7 @@ Stash::do_alloc(size_t size)
 Stash::Stash(size_t chunk_size) noexcept
   : _chunks(nullptr),
     _cleanup(nullptr),
-    _chunk_size(std::max(size_t(4096), chunk_size))
+    _chunk_size(std::max(size_t(96), chunk_size))
 {
 }