author     Arne Juul <arnej@verizonmedia.com>   2020-10-19 07:05:09 +0000
committer  Arne Juul <arnej@verizonmedia.com>   2020-10-19 11:47:46 +0000
commit     926970025dd7c5cd8c2a4cf7ae642653d7bb9440 (patch)
tree       282f8f9607a8a0637ea0f23a2f28f2898e007a4b /eval
parent     42696c88a000c3a36931c8ffce6441475b15850e (diff)
add WrappedSimpleValue
Diffstat (limited to 'eval')
-rw-r--r--  eval/src/vespa/eval/tensor/CMakeLists.txt           |   1
-rw-r--r--  eval/src/vespa/eval/tensor/wrapped_simple_value.cpp | 173
-rw-r--r--  eval/src/vespa/eval/tensor/wrapped_simple_value.h   |  53
3 files changed, 227 insertions, 0 deletions
diff --git a/eval/src/vespa/eval/tensor/CMakeLists.txt b/eval/src/vespa/eval/tensor/CMakeLists.txt
index b75b34098f5..77ae1daec88 100644
--- a/eval/src/vespa/eval/tensor/CMakeLists.txt
+++ b/eval/src/vespa/eval/tensor/CMakeLists.txt
@@ -7,4 +7,5 @@ vespa_add_library(eval_tensor OBJECT
tensor.cpp
tensor_address.cpp
wrapped_simple_tensor.cpp
+ wrapped_simple_value.cpp
)
diff --git a/eval/src/vespa/eval/tensor/wrapped_simple_value.cpp b/eval/src/vespa/eval/tensor/wrapped_simple_value.cpp
new file mode 100644
index 00000000000..ca386ee9611
--- /dev/null
+++ b/eval/src/vespa/eval/tensor/wrapped_simple_value.cpp
@@ -0,0 +1,173 @@
+
+#include "wrapped_simple_value.h"
+#include "cell_values.h"
+#include "tensor_address_builder.h"
+#include "tensor_visitor.h"
+#include <vespa/eval/eval/memory_usage_stuff.h>
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".eval.tensor.wrapped_simple_value");
+
+using vespalib::eval::TensorSpec;
+using vespalib::eval::SimpleValueBuilderFactory;
+
+namespace vespalib::tensor {
+
+namespace {
+
+TensorSpec::Address
+sparsify_address(const TensorSpec::Address &address)
+{
+ TensorSpec::Address result;
+ for (const auto &elem : address) {
+ if (elem.second.is_indexed()) {
+ auto val = vespalib::make_string("%zu", elem.second.index);
+ result.emplace(elem.first, TensorSpec::Label(val));
+ } else {
+ result.emplace(elem);
+ }
+ }
+ return result;
+}
+
+TensorSpec::Address
+extract_sparse_address(const TensorSpec::Address &address)
+{
+ TensorSpec::Address result;
+ for (const auto &elem : address) {
+ if (elem.second.is_mapped()) {
+ result.emplace(elem);
+ }
+ }
+ return result;
+}
+
+Tensor::UP wrap(eval::Value::UP value) {
+ return std::make_unique<WrappedSimpleValue>(std::move(value));
+}
+
+} // namespace <unnamed>
+
+
+eval::TensorSpec
+WrappedSimpleValue::toSpec() const
+{
+ return spec_from_value(_tensor);
+}
+
+void
+WrappedSimpleValue::accept(TensorVisitor &visitor) const
+{
+ TensorSpec myspec = toSpec();
+ TensorAddressBuilder addr;
+ for (const auto & cell : myspec.cells()) {
+ auto sparse_addr = sparsify_address(cell.first);
+ addr.clear();
+ for (const auto & dim_and_label : sparse_addr) {
+ addr.add(dim_and_label.first, dim_and_label.second.name);
+ }
+ visitor.visit(addr.build(), cell.second);
+ }
+}
+
+MemoryUsage
+WrappedSimpleValue::get_memory_usage() const
+{
+ MemoryUsage rv = eval::self_memory_usage<WrappedSimpleValue>();
+ if (_space) {
+ rv.merge(_space->get_memory_usage());
+ }
+ return rv;
+}
+
+//-----------------------------------------------------------------------------
+
+Tensor::UP
+WrappedSimpleValue::apply(const CellFunction &) const
+{
+ LOG_ABORT("should not be reached");
+}
+
+Tensor::UP
+WrappedSimpleValue::join(join_fun_t, const Tensor &) const
+{
+ LOG_ABORT("should not be reached");
+}
+
+Tensor::UP
+WrappedSimpleValue::merge(join_fun_t, const Tensor &) const
+{
+ LOG_ABORT("should not be reached");
+}
+
+Tensor::UP
+WrappedSimpleValue::reduce(join_fun_t, const std::vector<vespalib::string> &) const
+{
+ LOG_ABORT("should not be reached");
+}
+
+Tensor::UP
+WrappedSimpleValue::modify(join_fun_t fun, const CellValues &cellValues) const
+{
+ TensorSpec a = toSpec();
+ TensorSpec b = cellValues.toSpec();
+ TensorSpec result(a.type());
+ auto end_iter = b.cells().end();
+ for (const auto &cell: a.cells()) {
+ double v = cell.second;
+ auto sparse_addr = sparsify_address(cell.first);
+ auto iter = b.cells().find(sparse_addr);
+ if (iter == end_iter) {
+ result.add(cell.first, v);
+ } else {
+ result.add(cell.first, fun(v, iter->second));
+ }
+ }
+ return wrap(value_from_spec(result, SimpleValueBuilderFactory::get()));
+}
+
+Tensor::UP
+WrappedSimpleValue::add(const Tensor &rhs) const
+{
+ TensorSpec a = toSpec();
+ TensorSpec b = rhs.toSpec();
+ if (a.type() != b.type()) {
+ return {};
+ }
+ TensorSpec result(a.type());
+ for (const auto &cell: b.cells()) {
+ result.add(cell.first, cell.second);
+ }
+ auto end_iter = b.cells().end();
+ for (const auto &cell: a.cells()) {
+ auto iter = b.cells().find(cell.first);
+ if (iter == end_iter) {
+ result.add(cell.first, cell.second);
+ }
+ }
+ return wrap(value_from_spec(result, SimpleValueBuilderFactory::get()));
+}
+
+
+Tensor::UP
+WrappedSimpleValue::remove(const CellValues &rhs) const
+{
+ TensorSpec a = toSpec();
+ TensorSpec b = rhs.toSpec();
+ TensorSpec result(a.type());
+ auto end_iter = b.cells().end();
+ for (const auto &cell: a.cells()) {
+ TensorSpec::Address mappedAddress = extract_sparse_address(cell.first);
+ auto iter = b.cells().find(mappedAddress);
+ if (iter == end_iter) {
+ result.add(cell.first, cell.second);
+ }
+ }
+ return wrap(value_from_spec(result, SimpleValueBuilderFactory::get()));
+}
+
+}
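
Editor's note, not part of the commit: the helpers above hinge on the distinction between mapped (sparse) and indexed (dense) labels in a TensorSpec address. sparsify_address() rewrites every indexed label as its decimal string so that the whole address can be matched as a purely sparse address, which is what modify() relies on when looking up override cells. Below is a minimal standalone sketch of that idea; the Label and Address types are simplified stand-ins introduced here for illustration, not the real vespalib::eval::TensorSpec types.

// Simplified stand-ins for TensorSpec::Label / TensorSpec::Address,
// for illustration only -- the real types live in
// <vespa/eval/eval/tensor_spec.h>.
#include <cstddef>
#include <cstdio>
#include <map>
#include <string>

struct Label {
    std::string name;      // set when the dimension is mapped (sparse)
    size_t      index = 0; // set when the dimension is indexed (dense)
    bool        mapped = false;
    bool is_mapped() const  { return mapped; }
    bool is_indexed() const { return !mapped; }
};

using Address = std::map<std::string, Label>;

// Same idea as sparsify_address() in the patch: rewrite every indexed
// label as its decimal string so the address becomes purely "sparse"
// and can be compared against mapped-only addresses.
Address sparsify(const Address &addr) {
    Address result;
    for (const auto &[dim, label] : addr) {
        if (label.is_indexed()) {
            result.emplace(dim, Label{std::to_string(label.index), 0, true});
        } else {
            result.emplace(dim, label);
        }
    }
    return result;
}

int main() {
    Address addr{{"cat", Label{"foo", 0, true}},   // mapped dimension
                 {"x",   Label{"", 3, false}}};    // indexed dimension
    for (const auto &[dim, label] : sparsify(addr)) {
        std::printf("%s -> %s\n", dim.c_str(), label.name.c_str());
    }
    // prints: cat -> foo
    //         x -> 3
    return 0;
}
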
diff --git a/eval/src/vespa/eval/tensor/wrapped_simple_value.h b/eval/src/vespa/eval/tensor/wrapped_simple_value.h
new file mode 100644
index 00000000000..68339790436
--- /dev/null
+++ b/eval/src/vespa/eval/tensor/wrapped_simple_value.h
@@ -0,0 +1,53 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "tensor.h"
+#include <vespa/eval/eval/value.h>
+#include <vespa/eval/eval/aggr.h>
+
+namespace vespalib::tensor {
+
+/**
+ * A thin wrapper around a SimpleValue, used as a fallback for tensors
+ * with data layouts that are not supported by the default tensor
+ * implementation.
+ *
+ * The tensor implementation class is currently inferred from the value
+ * type. Consider adding explicit tagging to the tensor::Tensor default
+ * implementation top-level class in the future.
+ **/
+class WrappedSimpleValue : public Tensor
+{
+private:
+ std::unique_ptr<eval::Value> _space;
+ const eval::Value &_tensor;
+public:
+ explicit WrappedSimpleValue(const eval::Value &tensor)
+ : _space(), _tensor(tensor) {}
+ explicit WrappedSimpleValue(std::unique_ptr<eval::Value> tensor)
+ : _space(std::move(tensor)), _tensor(*_space) {}
+ ~WrappedSimpleValue() {}
+ const eval::Value &unwrap() const { return _tensor; }
+
+ // Value API
+ const eval::ValueType &type() const override { return _tensor.type(); }
+ eval::TypedCells cells() const override { return _tensor.cells(); }
+ const Index &index() const override { return _tensor.index(); }
+ double as_double() const override { return _tensor.as_double(); }
+
+ // tensor API
+ eval::TensorSpec toSpec() const override;
+ void accept(TensorVisitor &visitor) const override;
+ MemoryUsage get_memory_usage() const override;
+
+ Tensor::UP join(join_fun_t fun, const Tensor &rhs) const override;
+ Tensor::UP merge(join_fun_t fun, const Tensor &rhs) const override;
+ Tensor::UP reduce(join_fun_t fun, const std::vector<vespalib::string> &dims) const override;
+
+ Tensor::UP apply(const CellFunction & func) const override;
+ Tensor::UP modify(join_fun_t fun, const CellValues &cellValues) const override;
+ Tensor::UP add(const Tensor &rhs) const override;
+ Tensor::UP remove(const CellValues &rhs) const override;
+};
+
+} // namespace vespalib::tensor
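
Editor's note, not part of the commit: a hedged usage sketch of the new class through the Tensor API declared above. It assumes a Vespa source tree where these headers and the eval_tensor library are available; the make_wrapped() helper is introduced here purely for illustration and does not exist in the patch.

// Illustration only: build two sparse tensors as SimpleValues, wrap them,
// and combine them with the new WrappedSimpleValue::add().
#include <vespa/eval/tensor/wrapped_simple_value.h>
#include <vespa/eval/eval/simple_value.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <cstdio>
#include <memory>

using vespalib::eval::SimpleValueBuilderFactory;
using vespalib::eval::TensorSpec;
using vespalib::tensor::WrappedSimpleValue;

// Hypothetical helper: TensorSpec -> SimpleValue -> WrappedSimpleValue.
std::unique_ptr<WrappedSimpleValue> make_wrapped(const TensorSpec &spec) {
    return std::make_unique<WrappedSimpleValue>(
            vespalib::eval::value_from_spec(spec, SimpleValueBuilderFactory::get()));
}

int main() {
    auto a = make_wrapped(TensorSpec("tensor(cat{})").add({{"cat", "foo"}}, 1.0));
    auto b = make_wrapped(TensorSpec("tensor(cat{})").add({{"cat", "bar"}}, 3.0));
    // add() keeps every cell from b plus every cell from a whose address
    // is not present in b, so the result holds both "foo" and "bar".
    auto sum = a->add(*b);
    std::printf("cells in result: %zu\n", sum->toSpec().cells().size());
    return 0;
}
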