author    Tor Egge <Tor.Egge@yahoo-inc.com>    2016-10-11 14:26:30 +0000
committer Tor Egge <Tor.Egge@yahoo-inc.com>    2016-10-11 14:26:30 +0000
commit    f7d16e78882cc628e2c27e6f5f6def27bd4bebfa (patch)
tree      f44624a97246ac20dd889856991dae6394fa877d /vespalib
parent    ad5d41835e1120090e71229fcfe08413db2035cf (diff)
Remove vespalib::tensor::TensorFunction. A newer version exists as
vespalib::eval::TensorFunction.
Diffstat (limited to 'vespalib')
-rw-r--r--  vespalib/CMakeLists.txt                                             |   1
-rw-r--r--  vespalib/src/testlist.txt                                           |   1
-rw-r--r--  vespalib/src/tests/tensor/tensor_function/.gitignore                |   1
-rw-r--r--  vespalib/src/tests/tensor/tensor_function/CMakeLists.txt            |   9
-rw-r--r--  vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp  | 168
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/CMakeLists.txt                   |   1
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/tensor_function.cpp              | 360
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/tensor_function.h                | 110
8 files changed, 0 insertions, 651 deletions
diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt
index a310bcd32a9..1047f692aea 100644
--- a/vespalib/CMakeLists.txt
+++ b/vespalib/CMakeLists.txt
@@ -77,7 +77,6 @@ vespa_define_module(
     src/tests/tensor/dense_tensor_builder
     src/tests/tensor/tensor_address
     src/tests/tensor/tensor_conformance
-    src/tests/tensor/tensor_function
     src/tests/tensor/tensor_mapper
     src/tests/tensor/tensor_performance
     src/tests/tensor/tensor_serialization
diff --git a/vespalib/src/testlist.txt b/vespalib/src/testlist.txt
index 638a863e16e..02e66607eeb 100644
--- a/vespalib/src/testlist.txt
+++ b/vespalib/src/testlist.txt
@@ -67,7 +67,6 @@ tests/tensor/dense_tensor_builder
tests/tensor/simple_tensor_builder
tests/tensor/tensor
tests/tensor/tensor_address
-tests/tensor/tensor_function
tests/tensor/tensor_mapper
tests/tensor/tensor_performance
tests/tensor/tensor_serialization
diff --git a/vespalib/src/tests/tensor/tensor_function/.gitignore b/vespalib/src/tests/tensor/tensor_function/.gitignore
deleted file mode 100644
index 9dff11e518c..00000000000
--- a/vespalib/src/tests/tensor/tensor_function/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_tensor_function_test_app
diff --git a/vespalib/src/tests/tensor/tensor_function/CMakeLists.txt b/vespalib/src/tests/tensor/tensor_function/CMakeLists.txt
deleted file mode 100644
index cca15f932c9..00000000000
--- a/vespalib/src/tests/tensor/tensor_function/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_tensor_function_test_app TEST
-    SOURCES
-    tensor_function_test.cpp
-    DEPENDS
-    vespalib
-    vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_tensor_function_test_app COMMAND vespalib_tensor_function_test_app)
diff --git a/vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp b/vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp
deleted file mode 100644
index 7a005a6aec4..00000000000
--- a/vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/tensor/tensor_function.h>
-
-using namespace vespalib::tensor;
-using vespalib::eval::ValueType;
-
-// Evaluation of tensor functions is tested in the 'tensor operations'
-// test. This test checks type resolving and will be extended with
-// inspectability of tensor functions when the implementation is
-// extended to support it.
-
-// Note: The 'tensor type' test verifies how tensor type dimensions
-// may be combined. Specifically the fact that common dense dimensions
-// must have the same size.
-
-function::Node_UP invalid_value() {
-    return function::input(ValueType::error_type(), 0);
-}
-
-function::Node_UP number_value() {
-    return function::sum(function::input(ValueType::tensor_type({}), 0));
-}
-
-ValueType sparse_type(const std::vector<vespalib::string> &dimensions_in) {
-    std::vector<ValueType::Dimension> dimensions;
-    std::copy(dimensions_in.begin(), dimensions_in.end(), std::back_inserter(dimensions));
-    return ValueType::tensor_type(dimensions);
-}
-
-ValueType dense_type(const std::vector<ValueType::Dimension> &dimensions_in) {
-    return ValueType::tensor_type(dimensions_in);
-}
-
-function::Node_UP sparse_value(const std::vector<vespalib::string> &arg) {
-    return function::input(sparse_type(arg), 0);
-}
-
-function::Node_UP dense_value(std::vector<ValueType::Dimension> arg) {
-    return function::input(dense_type(arg), 0);
-}
-
-TensorAddress address(const TensorAddress::Elements &elems) {
-    return TensorAddress(elems);
-}
-
-
-TEST("require that helper functions produce appropriate types") {
-    EXPECT_TRUE(invalid_value()->type().is_error());
-    EXPECT_EQUAL(number_value()->type(), ValueType::double_type());
-    EXPECT_EQUAL(sparse_value({"x", "y"})->type(), sparse_type({"x", "y"}));
-    EXPECT_EQUAL(dense_value({{"x", 10}})->type(), dense_type({{"x", 10}}));
-}
-
-TEST("require that input tensors preserves type") {
-    EXPECT_EQUAL(sparse_type({"x", "y"}),
-                 function::input(sparse_type({"x", "y"}), 0)->type());
-    EXPECT_EQUAL(dense_type({{"x", 10}}),
-                 function::input(dense_type({{"x", 10}}), 0)->type());
-}
-
-TEST("require that input tensors with non-tensor types are invalid") {
-    EXPECT_TRUE(function::input(ValueType::error_type(), 0)->type().is_error());
-}
-
-TEST("require that sum of tensor gives number as result") {
-    EXPECT_EQUAL(ValueType::double_type(), function::sum(sparse_value({}))->type());
-    EXPECT_EQUAL(ValueType::double_type(), function::sum(dense_value({}))->type());
-}
-
-TEST("require that sum of number gives number as result") {
-    EXPECT_EQUAL(ValueType::double_type(), function::sum(number_value())->type());
-}
-
-TEST("require that dimension sum removes the summed dimension") {
-    EXPECT_EQUAL(sparse_type({"x", "y"}),
-                 function::dimension_sum(sparse_value({"x", "y", "z"}), "z")->type());
-    EXPECT_EQUAL(dense_type({{"y", 10}}),
-                 function::dimension_sum(dense_value({{"x", 10}, {"y", 10}}), "x")->type());
-}
-
-TEST("require that dimension sum over non-existing dimension is invalid") {
-    EXPECT_TRUE(function::dimension_sum(sparse_value({"x", "y", "z"}), "w")->type().is_error());
-    EXPECT_TRUE(function::dimension_sum(dense_value({{"x", 10}, {"y", 10}}), "z")->type().is_error());
-}
-
-TEST("require that apply preserves tensor type") {
-    EXPECT_EQUAL(sparse_type({"x", "y"}),
-                 function::apply(sparse_value({"x", "y"}), 0)->type());
-    EXPECT_EQUAL(dense_type({{"x", 10}}),
-                 function::apply(dense_value({{"x", 10}}), 0)->type());
-}
-
-TEST("require that tensor add result has union of input dimensions") {
-    EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
-                 function::add(sparse_value({"x", "y"}),
-                               sparse_value({"y", "z"}))->type());
-    EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
-                 function::add(dense_value({{"x", 10}, {"y", 10}}),
-                               dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor subtract result has union of input dimensions") {
-    EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
-                 function::subtract(sparse_value({"x", "y"}),
-                                    sparse_value({"y", "z"}))->type());
-    EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
-                 function::subtract(dense_value({{"x", 10}, {"y", 10}}),
-                                    dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor multiply result has union of input dimensions") {
-    EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
-                 function::multiply(sparse_value({"x", "y"}),
-                                    sparse_value({"y", "z"}))->type());
-    EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
-                 function::multiply(dense_value({{"x", 10}, {"y", 10}}),
-                                    dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor min result has union of input dimensions") {
-    EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
-                 function::min(sparse_value({"x", "y"}),
-                               sparse_value({"y", "z"}))->type());
-    EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
-                 function::min(dense_value({{"x", 10}, {"y", 10}}),
-                               dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor max result has union of input dimensions") {
-    EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
-                 function::max(sparse_value({"x", "y"}),
-                               sparse_value({"y", "z"}))->type());
-    EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
-                 function::max(dense_value({{"x", 10}, {"y", 10}}),
-                               dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor match result has intersection of input dimensions") {
-    EXPECT_EQUAL(sparse_type({"y"}),
-                 function::match(sparse_value({"x", "y"}),
-                                 sparse_value({"y", "z"}))->type());
-    EXPECT_EQUAL(dense_type({{"y", 10}}),
-                 function::match(dense_value({{"x", 10}, {"y", 10}}),
-                                 dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor operations on non-tensor types are invalid") {
-    EXPECT_TRUE(function::sum(invalid_value())->type().is_error());
-    EXPECT_TRUE(function::dimension_sum(invalid_value(), "x")->type().is_error());
-    EXPECT_TRUE(function::dimension_sum(number_value(), "x")->type().is_error());
-    EXPECT_TRUE(function::apply(invalid_value(), 0)->type().is_error());
-    EXPECT_TRUE(function::apply(number_value(), 0)->type().is_error());
-    EXPECT_TRUE(function::add(invalid_value(), invalid_value())->type().is_error());
-    EXPECT_TRUE(function::add(number_value(), number_value())->type().is_error());
-    EXPECT_TRUE(function::subtract(invalid_value(), invalid_value())->type().is_error());
-    EXPECT_TRUE(function::subtract(number_value(), number_value())->type().is_error());
-    EXPECT_TRUE(function::multiply(invalid_value(), invalid_value())->type().is_error());
-    EXPECT_TRUE(function::multiply(number_value(), number_value())->type().is_error());
-    EXPECT_TRUE(function::min(invalid_value(), invalid_value())->type().is_error());
-    EXPECT_TRUE(function::min(number_value(), number_value())->type().is_error());
-    EXPECT_TRUE(function::max(invalid_value(), invalid_value())->type().is_error());
-    EXPECT_TRUE(function::max(number_value(), number_value())->type().is_error());
-    EXPECT_TRUE(function::match(invalid_value(), invalid_value())->type().is_error());
-    EXPECT_TRUE(function::match(number_value(), number_value())->type().is_error());
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
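
Note: the deleted test above only exercises the type-resolution half of the removed API, so as context for this change, here is a minimal sketch of how that intermediate representation was built using the factory functions declared in the deleted tensor_function.h. The sample dimensions and the main() wrapper are illustrative only, and the code naturally no longer compiles after this commit:

    // Sketch only: depends on vespalib/tensor/tensor_function.h, which this commit deletes.
    #include <vespa/vespalib/tensor/tensor_function.h>
    #include <cassert>

    using namespace vespalib::tensor;
    using vespalib::eval::ValueType;

    int main() {
        // input 0: dense tensor with dimensions x and y; input 1: dimensions y and z
        auto lhs = function::input(ValueType::tensor_type({{"x", 10}, {"y", 10}}), 0);
        auto rhs = function::input(ValueType::tensor_type({{"y", 10}, {"z", 10}}), 1);

        // multiply() resolves to the union of the input dimensions: x, y, z
        auto product = function::multiply(std::move(lhs), std::move(rhs));

        // dimension_sum() removes the summed dimension; sum() collapses the rest to a double
        auto reduced = function::dimension_sum(std::move(product), "z");
        auto scalar = function::sum(std::move(reduced));
        assert(scalar->type() == ValueType::double_type());
        return 0;
    }

Type errors propagate the same way in this API: feeding an error-typed or double-typed node into any of the binary operations yields an error-typed node, which is exactly what the deleted test asserts.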
diff --git a/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt
index ec610f2527c..00a582d99a7 100644
--- a/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt
@@ -6,7 +6,6 @@ vespa_add_library(vespalib_vespalib_tensor
     tensor_address.cpp
     tensor_apply.cpp
     tensor_factory.cpp
-    tensor_function.cpp
     tensor_mapper.cpp
     $<TARGET_OBJECTS:vespalib_vespalib_tensor_sparse>
     $<TARGET_OBJECTS:vespalib_vespalib_tensor_dense>
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_function.cpp b/vespalib/src/vespa/vespalib/tensor/tensor_function.cpp
deleted file mode 100644
index 180f5f321cd..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/tensor_function.cpp
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "tensor_function.h"
-#include <vespa/vespalib/eval/value_type.h>
-
-namespace vespalib {
-namespace tensor {
-namespace function {
-namespace {
-
-//-----------------------------------------------------------------------------
-
-/**
- * Base function class keeping track of result type.
- **/
-class FunctionBase : public Node
-{
-private:
-    eval::ValueType _type;
-protected:
-    explicit FunctionBase(const eval::ValueType &type_in) : _type(type_in) {}
-    const eval::ValueType &type() const override { return _type; }
-
-    // helper function used to unwrap tensor value from eval result
-    static const Tensor &eval_tensor(Node &node, const Input &input) {
-        return node.eval(input).as_tensor;
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Function mixin class used to keep tensor results alive.
- **/
-class TensorCache : public FunctionBase
-{
-private:
-    Tensor::UP _my_result;
-protected:
-    explicit TensorCache(const eval::ValueType &type_in)
-        : FunctionBase(type_in), _my_result() {}
-    const Tensor &store_tensor(Tensor::UP result) {
-        _my_result = std::move(result);
-        return *_my_result;
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Resolve an input tensor value.
- **/
-class InputTensor : public FunctionBase
-{
-private:
-    size_t _tensor_id;
-
-    static eval::ValueType infer_type(const eval::ValueType &type_in) {
-        if (type_in.is_tensor() || type_in.is_double()) {
-            return type_in;
-        } else {
-            return eval::ValueType::error_type();
-        }
-    }
-
-public:
-    InputTensor(const eval::ValueType &type_in, size_t tensor_id)
-        : FunctionBase(infer_type(type_in)), _tensor_id(tensor_id) {}
-    Result eval(const Input &input) override {
-        return input.get_tensor(_tensor_id);
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Sum all the cells in a tensor.
- **/
-class Sum : public FunctionBase
-{
-private:
-    Node_UP _child;
-
-    static eval::ValueType infer_type(const eval::ValueType &child_type) {
-        if (child_type.is_tensor() || child_type.is_double()) {
-            return eval::ValueType::double_type();
-        } else {
-            return eval::ValueType::error_type();
-        }
-    }
-
-public:
-    explicit Sum(Node_UP child)
-        : FunctionBase(infer_type(child->type())),
-          _child(std::move(child)) {}
-
-    Result eval(const Input &input) override {
-        return eval_tensor(*_child, input).sum();
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Perform sum over a tensor dimension.
- **/
-class DimensionSum : public TensorCache
-{
-private:
-    Node_UP _child;
-    vespalib::string _dimension;
-
-    static eval::ValueType infer_type(const eval::ValueType &child_type, const vespalib::string &dimension) {
-        return child_type.remove_dimensions({dimension});
-    }
-
-public:
-    DimensionSum(Node_UP child, const vespalib::string &dimension)
-        : TensorCache(infer_type(child->type(), dimension)),
-          _child(std::move(child)), _dimension(dimension) {}
-
-    Result eval(const Input &input) override {
-        return store_tensor(eval_tensor(*_child, input).sum(_dimension));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Apply a cell function to all cells in a tensor.
- **/
-class Apply : public TensorCache
-{
-private:
-    Node_UP _child;
-    size_t _cell_function_id;
-
-    static eval::ValueType infer_type(const eval::ValueType &child_type) {
-        if (child_type.is_tensor()) {
-            return child_type;
-        } else {
-            return eval::ValueType::error_type();
-        }
-    }
-
-public:
-    Apply(Node_UP child, size_t cell_function_id)
-        : TensorCache(infer_type(child->type())),
-          _child(std::move(child)), _cell_function_id(cell_function_id) {}
-
-    Result eval(const Input &input) override {
-        const auto &cell_function = input.get_cell_function(_cell_function_id);
-        return store_tensor(eval_tensor(*_child, input).apply(cell_function));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Add two tensors.
- **/
-class Add : public TensorCache
-{
-private:
-    Node_UP _lhs;
-    Node_UP _rhs;
-
-    static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
-        return lhs_type.add_dimensions_from(rhs_type);
-    }
-
-public:
-    Add(Node_UP lhs, Node_UP rhs)
-        : TensorCache(infer_type(lhs->type(), rhs->type())),
-          _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
-    Result eval(const Input &input) override {
-        return store_tensor(eval_tensor(*_lhs, input)
-                            .add(eval_tensor(*_rhs, input)));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Subtract two tensors.
- **/
-class Subtract : public TensorCache
-{
-private:
-    Node_UP _lhs;
-    Node_UP _rhs;
-
-    static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
-        return lhs_type.add_dimensions_from(rhs_type);
-    }
-
-public:
-    Subtract(Node_UP lhs, Node_UP rhs)
-        : TensorCache(infer_type(lhs->type(), rhs->type())),
-          _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
-    Result eval(const Input &input) override {
-        return store_tensor(eval_tensor(*_lhs, input)
-                            .subtract(eval_tensor(*_rhs, input)));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Multiply two tensors.
- **/
-class Multiply : public TensorCache
-{
-private:
-    Node_UP _lhs;
-    Node_UP _rhs;
-
-    static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
-        return lhs_type.add_dimensions_from(rhs_type);
-    }
-
-public:
-    Multiply(Node_UP lhs, Node_UP rhs)
-        : TensorCache(infer_type(lhs->type(), rhs->type())),
-          _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
-    Result eval(const Input &input) override {
-        return store_tensor(eval_tensor(*_lhs, input)
-                            .multiply(eval_tensor(*_rhs, input)));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Cellwise min between two tensors.
- **/
-class Min : public TensorCache
-{
-private:
-    Node_UP _lhs;
-    Node_UP _rhs;
-
-    static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
-        return lhs_type.add_dimensions_from(rhs_type);
-    }
-
-public:
-    Min(Node_UP lhs, Node_UP rhs)
-        : TensorCache(infer_type(lhs->type(), rhs->type())),
-          _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
-    Result eval(const Input &input) override {
-        return store_tensor(eval_tensor(*_lhs, input)
-                            .min(eval_tensor(*_rhs, input)));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Cellwise max between two tensors.
- **/
-class Max : public TensorCache
-{
-private:
-    Node_UP _lhs;
-    Node_UP _rhs;
-
-    static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
-        return lhs_type.add_dimensions_from(rhs_type);
-    }
-
-public:
-    Max(Node_UP lhs, Node_UP rhs)
-        : TensorCache(infer_type(lhs->type(), rhs->type())),
-          _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
-    Result eval(const Input &input) override {
-        return store_tensor(eval_tensor(*_lhs, input)
-                            .max(eval_tensor(*_rhs, input)));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Match two tensors.
- **/
-class Match : public TensorCache
-{
-private:
-    Node_UP _lhs;
-    Node_UP _rhs;
-
-    static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
-        return lhs_type.keep_dimensions_in(rhs_type);
-    }
-
-public:
-    Match(Node_UP lhs, Node_UP rhs)
-        : TensorCache(infer_type(lhs->type(), rhs->type())),
-          _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
-    Result eval(const Input &input) override {
-        return store_tensor(eval_tensor(*_lhs, input)
-                            .match(eval_tensor(*_rhs, input)));
-    }
-};
-
-//-----------------------------------------------------------------------------
-
-} // namespace vespalib::tensor::function::<unnamed>
-
-Node_UP input(const eval::ValueType &type, size_t tensor_id) {
-    return std::make_unique<InputTensor>(type, tensor_id);
-}
-
-Node_UP sum(Node_UP child) {
-    return std::make_unique<Sum>(std::move(child));
-}
-
-Node_UP dimension_sum(Node_UP child, const vespalib::string &dimension) {
-    return std::make_unique<DimensionSum>(std::move(child), dimension);
-}
-
-Node_UP apply(Node_UP child, size_t cell_function_id) {
-    return std::make_unique<Apply>(std::move(child), cell_function_id);
-}
-
-Node_UP add(Node_UP lhs, Node_UP rhs) {
-    return std::make_unique<Add>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP subtract(Node_UP lhs, Node_UP rhs) {
-    return std::make_unique<Subtract>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP multiply(Node_UP lhs, Node_UP rhs) {
-    return std::make_unique<Multiply>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP min(Node_UP lhs, Node_UP rhs) {
-    return std::make_unique<Min>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP max(Node_UP lhs, Node_UP rhs) {
-    return std::make_unique<Max>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP match(Node_UP lhs, Node_UP rhs) {
-    return std::make_unique<Match>(std::move(lhs), std::move(rhs));
-}
-
-} // namespace vespalib::tensor::function
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_function.h b/vespalib/src/vespa/vespalib/tensor/tensor_function.h
deleted file mode 100644
index f47c33adcbe..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/tensor_function.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "tensor.h"
-#include <vespa/vespalib/eval/value_type.h>
-#include <memory>
-
-namespace vespalib {
-namespace tensor {
-
-//-----------------------------------------------------------------------------
-
-/**
- * A tensor function that can be evaluated. A TensorFunction will
- * typically be produced by an implementation-specific compile step
- * that takes an implementation-independent intermediate
- * representation of the tensor function as input (tree of
- * function::Node objects).
- **/
-struct TensorFunction
-{
-    typedef std::unique_ptr<TensorFunction> UP;
-
-    /**
-     * A tensor function will evaluate to either a tensor or a double
-     * value. The result type indicated by the tensor function
-     * intermediate representation will indicate which form is valid.
-     **/
-    union Result {
-        double as_double;
-        Tensor::CREF as_tensor;
-        Result(const Result &rhs) { memcpy(this, &rhs, sizeof(Result)); }
-        Result(double value) : as_double(value) {}
-        Result(const Tensor &value) : as_tensor(value) {}
-        ~Result() {}
-    };
-
-    /**
-     * Interface used to obtain input to a tensor function.
-     **/
-    struct Input {
-        virtual const Tensor &get_tensor(size_t id) const = 0;
-        virtual const CellFunction &get_cell_function(size_t id) const = 0;
-        virtual ~Input() {}
-    };
-
-    /**
-     * Evaluate this tensor function based on the given input. This
-     * function is defined as non-const because it will return tensors
-     * by reference. Intermediate results are typically kept alive
-     * until the next time eval is called. The return value must
-     * conform to the result type indicated by the intermediate
-     * representation describing this tensor function.
-     *
-     * @return result of evaluating this tensor function
-     * @param input external stuff needed to evaluate this function
-     **/
-    virtual Result eval(const Input &input) = 0;
-
-    virtual ~TensorFunction() {}
-};
-
-//-----------------------------------------------------------------------------
-
-namespace function {
-
-/**
- * Interface used to describe a tensor function as a tree of nodes
- * with information about operation sequencing and intermediate result
- * types. Each node in the tree will describe a single tensor
- * operation. This is the intermediate representation of a tensor
- * function.
- *
- * Since tensor operations currently are part of the tensor interface,
- * the intermediate representation of a tensor function can also be
- * used to evaluate the tensor function by performing the appropriate
- * operations directly on the input tensors. In other words, the
- * intermediate representation 'compiles to itself'.
- **/
-struct Node : public TensorFunction
-{
-    /**
-     * The result type of the tensor operation represented by this
-     * Node.
-     *
-     * @return tensor operation result type.
-     **/
-    virtual const eval::ValueType &type() const = 0;
-};
-
-using Node_UP = std::unique_ptr<Node>;
-
-Node_UP input(const eval::ValueType &type, size_t tensor_id);
-Node_UP sum(Node_UP child);
-Node_UP dimension_sum(Node_UP child, const vespalib::string &dimension);
-Node_UP apply(Node_UP child, size_t cell_function_id);
-Node_UP add(Node_UP lhs, Node_UP rhs);
-Node_UP subtract(Node_UP lhs, Node_UP rhs);
-Node_UP multiply(Node_UP lhs, Node_UP rhs);
-Node_UP min(Node_UP lhs, Node_UP rhs);
-Node_UP max(Node_UP lhs, Node_UP rhs);
-Node_UP match(Node_UP lhs, Node_UP rhs);
-
-} // namespace vespalib::tensor::function
-
-//-----------------------------------------------------------------------------
-
-} // namespace vespalib::tensor
-} // namespace vespalib
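
For completeness, the deleted header above also defines the evaluation side of the removed interface: a TensorFunction::Input supplies tensors and cell functions by id, and eval() returns a Result union holding either a double or a tensor reference. Below is a minimal sketch of that usage, again only a reconstruction against the deleted declarations; the MyInput helper and eval_sum_of_product() are hypothetical names, not part of the removed code:

    // Sketch only: depends on the TensorFunction / function::Node interfaces deleted above.
    #include <vespa/vespalib/tensor/tensor_function.h>
    #include <cstdlib>
    #include <vector>

    using namespace vespalib::tensor;
    using vespalib::eval::ValueType;

    // Hypothetical Input backed by a plain vector of tensors; cell functions are unused here.
    struct MyInput : TensorFunction::Input {
        std::vector<const Tensor *> tensors;
        const Tensor &get_tensor(size_t id) const override { return *tensors[id]; }
        const CellFunction &get_cell_function(size_t) const override { abort(); }
    };

    double eval_sum_of_product(const Tensor &a, const ValueType &a_type,
                               const Tensor &b, const ValueType &b_type) {
        // Build the intermediate representation for sum(a * b); the IR 'compiles to
        // itself', so evaluating the tree performs the tensor operations directly.
        auto fun = function::sum(function::multiply(function::input(a_type, 0),
                                                    function::input(b_type, 1)));
        MyInput input;
        input.tensors = {&a, &b};
        return fun->eval(input).as_double;
    }

The commit message points to vespalib::eval::TensorFunction as the newer interface filling this role going forward.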