author     Håvard Pettersen <havardpe@oath.com>    2017-11-06 15:27:58 +0000
committer  Håvard Pettersen <havardpe@oath.com>    2017-11-07 14:47:34 +0000
commit     0970129d98a386753e2fa24c559c77392691c633 (patch)
tree       aea271d8b97ff24fb3f4020b09d26901b978ddba /eval
parent     f5957dbf63a5fcd7df5df9062ef0324a52ed8605 (diff)
clean up tensor engine API
make Tensor a subclass of Value
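
The tensor_engine.h hunk further down carries the core of the cleanup: the old Tensor-based entry points (type_of, to_string, create) are replaced by Value-based spec conversion and serialization. A condensed sketch of the resulting interface, paraphrased from that hunk (not the verbatim header, and omitting compile, concat, and rename):

struct TensorEngine {
    using map_fun_t = double (*)(double);
    using join_fun_t = double (*)(double, double);

    // spec conversion replaces type_of/to_string/create
    virtual TensorSpec to_spec(const Value &value) const = 0;
    virtual Value::UP from_spec(const TensorSpec &spec) const = 0;

    // binary serialization no longer needs a Stash
    virtual void encode(const Value &value, nbostream &output) const = 0;
    virtual Value::UP decode(nbostream &input) const = 0;

    // stash-based operations keep their signatures
    virtual const Value &map(const Value &a, map_fun_t function, Stash &stash) const = 0;
    virtual const Value &join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const = 0;
    virtual const Value &reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const = 0;
};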
Diffstat (limited to 'eval')
-rw-r--r--  eval/src/apps/eval_expr/eval_expr.cpp | 2
-rw-r--r--  eval/src/apps/tensor_conformance/tensor_conformance.cpp | 51
-rw-r--r--  eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp | 14
-rw-r--r--  eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp | 18
-rw-r--r--  eval/src/tests/eval/tensor_function/tensor_function_test.cpp | 65
-rw-r--r--  eval/src/tests/eval/value_cache/tensor_loader_test.cpp | 61
-rw-r--r--  eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp | 12
-rw-r--r--  eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp | 5
-rw-r--r--  eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp | 16
-rw-r--r--  eval/src/vespa/eval/eval/interpreted_function.cpp | 3
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor.cpp | 6
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor.h | 2
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor_engine.cpp | 116
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor_engine.h | 10
-rw-r--r--  eval/src/vespa/eval/eval/tensor.cpp | 2
-rw-r--r--  eval/src/vespa/eval/eval/tensor.h | 6
-rw-r--r--  eval/src/vespa/eval/eval/tensor_engine.h | 20
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.cpp | 16
-rw-r--r--  eval/src/vespa/eval/eval/test/tensor_conformance.cpp | 111
-rw-r--r--  eval/src/vespa/eval/eval/value.cpp | 15
-rw-r--r--  eval/src/vespa/eval/eval/value.h | 33
-rw-r--r--  eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp | 9
-rw-r--r--  eval/src/vespa/eval/eval/value_cache/constant_value.h | 20
-rw-r--r--  eval/src/vespa/eval/tensor/default_tensor_engine.cpp | 90
-rw-r--r--  eval/src/vespa/eval/tensor/default_tensor_engine.h | 12
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor.cpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp | 4
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp | 14
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_view.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp | 4
-rw-r--r--  eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp | 4
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp | 10
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp | 6
-rw-r--r--  eval/src/vespa/eval/tensor/tensor.h | 1
-rw-r--r--  eval/src/vespa/eval/tensor/tensor_apply.cpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/tensor_mapper.cpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/tensor_operation.h | 6
-rw-r--r--  eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp | 2
44 files changed, 352 insertions, 434 deletions
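
The recurring caller-side change in the test and application diffs below: code that used to wrap engine.create(spec) in a TensorValue now takes ownership of a Value::UP returned by from_spec and passes plain Value references. An illustrative before/after sketch, composited from the interpreted_function_test.cpp hunks (not copied verbatim from any single file):

// before this commit: create() returns a Tensor that must be wrapped in a TensorValue
TensorValue va(engine.create(a));
TensorValue vb(engine.create(b));
InterpretedFunction::SimpleObjectParams params({va, vb});

// after this commit: from_spec() returns a Value directly; callers pass references
Value::UP va = engine.from_spec(a);
Value::UP vb = engine.from_spec(b);
InterpretedFunction::SimpleObjectParams params({*va, *vb});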
diff --git a/eval/src/apps/eval_expr/eval_expr.cpp b/eval/src/apps/eval_expr/eval_expr.cpp
index 2e1f7f7fdcb..71c808174b8 100644
--- a/eval/src/apps/eval_expr/eval_expr.cpp
+++ b/eval/src/apps/eval_expr/eval_expr.cpp
@@ -26,7 +26,7 @@ int main(int argc, char **argv) {
if (result.is_double()) {
fprintf(stdout, "%.32g\n", result.as_double());
} else if (result.is_tensor()) {
- vespalib::string str = SimpleTensorEngine::ref().to_spec(*result.as_tensor()).to_string();
+ vespalib::string str = SimpleTensorEngine::ref().to_spec(result).to_string();
fprintf(stdout, "%s\n", str.c_str());
} else {
fprintf(stdout, "error\n");
diff --git a/eval/src/apps/tensor_conformance/tensor_conformance.cpp b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
index d1163fb579d..616b98f0809 100644
--- a/eval/src/apps/tensor_conformance/tensor_conformance.cpp
+++ b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
@@ -60,69 +60,42 @@ nbostream extract_data(const Inspector &value) {
//-----------------------------------------------------------------------------
-TensorSpec to_spec(const Value &value) {
- if (value.is_error()) {
- return TensorSpec("error");
- } else if (value.is_double()) {
- return TensorSpec("double").add({}, value.as_double());
- } else {
- ASSERT_TRUE(value.is_tensor());
- auto tensor = value.as_tensor();
- return tensor->engine().to_spec(*tensor);
- }
-}
-
-const Value &to_value(const TensorSpec &spec, const TensorEngine &engine, Stash &stash) {
- if (spec.type() == "error") {
- return stash.create<ErrorValue>();
- } else if (spec.type() == "double") {
- double value = 0.0;
- for (const auto &cell: spec.cells()) {
- value += cell.second;
- }
- return stash.create<DoubleValue>(value);
- } else {
- ASSERT_TRUE(starts_with(spec.type(), "tensor("));
- return stash.create<TensorValue>(engine.create(spec));
- }
-}
-
void insert_value(Cursor &cursor, const vespalib::string &name, const TensorSpec &spec) {
- Stash stash;
nbostream data;
- const Value &value = to_value(spec, SimpleTensorEngine::ref(), stash);
- SimpleTensorEngine::ref().encode(value, data, stash);
+ Value::UP value = SimpleTensorEngine::ref().from_spec(spec);
+ SimpleTensorEngine::ref().encode(*value, data);
cursor.setData(name, Memory(data.peek(), data.size()));
}
TensorSpec extract_value(const Inspector &inspector) {
- Stash stash;
nbostream data = extract_data(inspector);
- return to_spec(SimpleTensorEngine::ref().decode(data, stash));
+ const auto &engine = SimpleTensorEngine::ref();
+ return engine.to_spec(*engine.decode(data));
}
//-----------------------------------------------------------------------------
-std::vector<ValueType> get_types(const std::vector<Value::CREF> &param_values) {
+std::vector<ValueType> get_types(const std::vector<Value::UP> &param_values) {
std::vector<ValueType> param_types;
for (size_t i = 0; i < param_values.size(); ++i) {
- param_types.emplace_back(param_values[i].get().type());
+ param_types.emplace_back(param_values[i]->type());
}
return param_types;
}
TensorSpec eval_expr(const Inspector &test, const TensorEngine &engine, bool typed) {
- Stash stash;
Function fun = Function::parse(test["expression"].asString().make_string());
- std::vector<Value::CREF> param_values;
+ std::vector<Value::UP> param_values;
+ std::vector<Value::CREF> param_refs;
for (size_t i = 0; i < fun.num_params(); ++i) {
- param_values.emplace_back(to_value(extract_value(test["inputs"][fun.param_name(i)]), engine, stash));
+ param_values.emplace_back(engine.from_spec(extract_value(test["inputs"][fun.param_name(i)])));
+ param_refs.emplace_back(*param_values.back());
}
NodeTypes types = typed ? NodeTypes(fun, get_types(param_values)) : NodeTypes();
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
- InterpretedFunction::SimpleObjectParams params(param_values);
- return to_spec(ifun.eval(ctx, params));
+ InterpretedFunction::SimpleObjectParams params(param_refs);
+ return engine.to_spec(ifun.eval(ctx, params));
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index a443ccb3d01..0f62e69b081 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -181,9 +181,9 @@ TEST("require that dot product works with tensor function") {
InterpretedFunction interpreted(engine, function, types);
EXPECT_EQUAL(1u, interpreted.program_size());
InterpretedFunction::Context ctx(interpreted);
- TensorValue va(engine.create(a));
- TensorValue vb(engine.create(b));
- InterpretedFunction::SimpleObjectParams params({va,vb});
+ Value::UP va = engine.from_spec(a);
+ Value::UP vb = engine.from_spec(b);
+ InterpretedFunction::SimpleObjectParams params({*va,*vb});
const Value &result = interpreted.eval(ctx, params);
EXPECT_TRUE(result.is_double());
EXPECT_EQUAL(expect, result.as_double());
@@ -211,12 +211,12 @@ TEST("require that matrix multiplication works with tensor function") {
InterpretedFunction interpreted(engine, function, types);
EXPECT_EQUAL(1u, interpreted.program_size());
InterpretedFunction::Context ctx(interpreted);
- TensorValue va(engine.create(a));
- TensorValue vb(engine.create(b));
- InterpretedFunction::SimpleObjectParams params({va,vb});
+ Value::UP va = engine.from_spec(a);
+ Value::UP vb = engine.from_spec(b);
+ InterpretedFunction::SimpleObjectParams params({*va,*vb});
const Value &result = interpreted.eval(ctx, params);
ASSERT_TRUE(result.is_tensor());
- EXPECT_EQUAL(expect, engine.to_spec(*result.as_tensor()));
+ EXPECT_EQUAL(expect, engine.to_spec(result));
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp b/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp
index 150b86f27ce..c3b42124155 100644
--- a/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp
+++ b/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp
@@ -13,7 +13,7 @@ using Cells = SimpleTensor::Cells;
using Address = SimpleTensor::Address;
using Stash = vespalib::Stash;
-TensorSpec to_spec(const Tensor &a) { return a.engine().to_spec(a); }
+TensorSpec to_spec(const Value &a) { return SimpleTensorEngine::ref().to_spec(a); }
const Tensor &unwrap(const Value &value) {
ASSERT_TRUE(value.is_tensor());
@@ -35,7 +35,7 @@ TEST("require that simple tensors can be built using tensor spec") {
.add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
- auto tensor = SimpleTensorEngine::ref().create(spec);
+ Value::UP tensor = SimpleTensorEngine::ref().from_spec(spec);
TensorSpec full_spec("tensor(w{},x[2],y{},z[2])");
full_spec
.add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
@@ -54,7 +54,7 @@ TEST("require that simple tensors can be built using tensor spec") {
.add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
- auto full_tensor = SimpleTensorEngine::ref().create(full_spec);
+ Value::UP full_tensor = SimpleTensorEngine::ref().from_spec(full_spec);
EXPECT_EQUAL(full_spec, to_spec(*tensor));
EXPECT_EQUAL(full_spec, to_spec(*full_tensor));
};
@@ -73,7 +73,7 @@ TEST("require that simple tensors can have their values negated") {
auto result = tensor->map([](double a){ return -a; });
EXPECT_EQUAL(to_spec(*expect), to_spec(*result));
Stash stash;
- const Value &result2 = SimpleTensorEngine::ref().map(TensorValue(*tensor), operation::Neg::f, stash);
+ const Value &result2 = SimpleTensorEngine::ref().map(*tensor, operation::Neg::f, stash);
EXPECT_EQUAL(to_spec(*expect), to_spec(unwrap(result2)));
}
@@ -98,7 +98,7 @@ TEST("require that simple tensors can be multiplied with each other") {
auto result = SimpleTensor::join(*lhs, *rhs, [](double a, double b){ return (a * b); });
EXPECT_EQUAL(to_spec(*expect), to_spec(*result));
Stash stash;
- const Value &result2 = SimpleTensorEngine::ref().join(TensorValue(*lhs), TensorValue(*rhs), operation::Mul::f, stash);
+ const Value &result2 = SimpleTensorEngine::ref().join(*lhs, *rhs, operation::Mul::f, stash);
EXPECT_EQUAL(to_spec(*expect), to_spec(unwrap(result2)));
}
@@ -129,10 +129,10 @@ TEST("require that simple tensors support dimension reduction") {
EXPECT_EQUAL(to_spec(*expect_sum_y), to_spec(*result_sum_y));
EXPECT_EQUAL(to_spec(*expect_sum_x), to_spec(*result_sum_x));
EXPECT_EQUAL(to_spec(*expect_sum_all), to_spec(*result_sum_all));
- const Value &result_sum_y_2 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {"y"}, stash);
- const Value &result_sum_x_2 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {"x"}, stash);
- const Value &result_sum_all_2 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {"x", "y"}, stash);
- const Value &result_sum_all_3 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {}, stash);
+ const Value &result_sum_y_2 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {"y"}, stash);
+ const Value &result_sum_x_2 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {"x"}, stash);
+ const Value &result_sum_all_2 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {"x", "y"}, stash);
+ const Value &result_sum_all_3 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {}, stash);
EXPECT_EQUAL(to_spec(*expect_sum_y), to_spec(unwrap(result_sum_y_2)));
EXPECT_EQUAL(to_spec(*expect_sum_x), to_spec(unwrap(result_sum_x_2)));
EXPECT_TRUE(result_sum_all_2.is_double());
diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
index 8bd86621bf6..150ec8a25db 100644
--- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
+++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
@@ -19,9 +19,9 @@ struct EvalCtx : TensorFunction::Input {
std::map<size_t, Value::UP> tensors;
EvalCtx(const TensorEngine &engine_in)
: engine(engine_in), stash(), error(), tensors() {}
- ~EvalCtx() { }
- void add_tensor(std::unique_ptr<Tensor> tensor, size_t id) {
- tensors.emplace(id, std::make_unique<TensorValue>(std::move(tensor)));
+ ~EvalCtx() {}
+ void add_tensor(Value::UP tensor, size_t id) {
+ tensors.emplace(id, std::move(tensor));
}
const Value &get_tensor(size_t id) const override {
if (tensors.count(id) == 0) {
@@ -30,20 +30,19 @@ struct EvalCtx : TensorFunction::Input {
return *tensors.find(id)->second;
}
const Value &eval(const TensorFunction &fun) { return fun.eval(*this, stash); }
- const ValueType type(const Tensor &tensor) const { return engine.type_of(tensor); }
TensorFunction::UP compile(tensor_function::Node_UP expr) const {
return engine.compile(std::move(expr));
}
- std::unique_ptr<Tensor> make_tensor_inject() {
- return engine.create(
+ Value::UP make_tensor_inject() {
+ return engine.from_spec(
TensorSpec("tensor(x[2],y[2])")
.add({{"x", 0}, {"y", 0}}, 1.0)
.add({{"x", 0}, {"y", 1}}, 2.0)
.add({{"x", 1}, {"y", 0}}, 3.0)
.add({{"x", 1}, {"y", 1}}, 4.0));
}
- std::unique_ptr<Tensor> make_tensor_reduce_input() {
- return engine.create(
+ Value::UP make_tensor_reduce_input() {
+ return engine.from_spec(
TensorSpec("tensor(x[3],y[2])")
.add({{"x",0},{"y",0}}, 1)
.add({{"x",1},{"y",0}}, 2)
@@ -52,43 +51,43 @@ struct EvalCtx : TensorFunction::Input {
.add({{"x",1},{"y",1}}, 5)
.add({{"x",2},{"y",1}}, 6));
}
- std::unique_ptr<Tensor> make_tensor_reduce_y_output() {
- return engine.create(
+ Value::UP make_tensor_reduce_y_output() {
+ return engine.from_spec(
TensorSpec("tensor(x[3])")
.add({{"x",0}}, 5)
.add({{"x",1}}, 7)
.add({{"x",2}}, 9));
}
- std::unique_ptr<Tensor> make_tensor_map_input() {
- return engine.create(
+ Value::UP make_tensor_map_input() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{})")
.add({{"x","1"},{"y","1"}}, 1)
.add({{"x","2"},{"y","1"}}, -3)
.add({{"x","1"},{"y","2"}}, 5));
}
- std::unique_ptr<Tensor> make_tensor_map_output() {
- return engine.create(
+ Value::UP make_tensor_map_output() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{})")
.add({{"x","1"},{"y","1"}}, -1)
.add({{"x","2"},{"y","1"}}, 3)
.add({{"x","1"},{"y","2"}}, -5));
}
- std::unique_ptr<Tensor> make_tensor_apply_lhs() {
- return engine.create(
+ Value::UP make_tensor_apply_lhs() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{})")
.add({{"x","1"},{"y","1"}}, 1)
.add({{"x","2"},{"y","1"}}, 3)
.add({{"x","1"},{"y","2"}}, 5));
}
- std::unique_ptr<Tensor> make_tensor_apply_rhs() {
- return engine.create(
+ Value::UP make_tensor_apply_rhs() {
+ return engine.from_spec(
TensorSpec("tensor(y{},z{})")
.add({{"y","1"},{"z","1"}}, 7)
.add({{"y","2"},{"z","1"}}, 11)
.add({{"y","1"},{"z","2"}}, 13));
}
- std::unique_ptr<Tensor> make_tensor_apply_output() {
- return engine.create(
+ Value::UP make_tensor_apply_output() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{},z{})")
.add({{"x","1"},{"y","1"},{"z","1"}}, 7)
.add({{"x","1"},{"y","1"},{"z","2"}}, 13)
@@ -98,21 +97,23 @@ struct EvalCtx : TensorFunction::Input {
}
};
-void verify_equal(const Tensor &expect, const Value &value) {
+void verify_equal(const Value &expect, const Value &value) {
const Tensor *tensor = value.as_tensor();
ASSERT_TRUE(tensor != nullptr);
- ASSERT_EQUAL(&expect.engine(), &tensor->engine());
- auto expect_spec = expect.engine().to_spec(expect);
- auto value_spec = tensor->engine().to_spec(*tensor);
+ const Tensor *expect_tensor = expect.as_tensor();
+ ASSERT_TRUE(expect_tensor != nullptr);
+ ASSERT_EQUAL(&expect_tensor->engine(), &tensor->engine());
+ auto expect_spec = expect_tensor->engine().to_spec(expect);
+ auto value_spec = tensor->engine().to_spec(value);
EXPECT_EQUAL(expect_spec, value_spec);
}
TEST("require that tensor injection works") {
EvalCtx ctx(SimpleTensorEngine::ref());
ctx.add_tensor(ctx.make_tensor_inject(), 1);
- auto expect = ctx.make_tensor_inject();
+ Value::UP expect = ctx.make_tensor_inject();
auto fun = inject(ValueType::from_spec("tensor(x[2],y[2])"), 1);
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
+ EXPECT_EQUAL(expect->type(), fun->result_type);
auto prog = ctx.compile(std::move(fun));
TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
}
@@ -120,9 +121,9 @@ TEST("require that tensor injection works") {
TEST("require that partial tensor reduction works") {
EvalCtx ctx(SimpleTensorEngine::ref());
ctx.add_tensor(ctx.make_tensor_reduce_input(), 1);
- auto expect = ctx.make_tensor_reduce_y_output();
+ Value::UP expect = ctx.make_tensor_reduce_y_output();
auto fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), 1), Aggr::SUM, {"y"});
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
+ EXPECT_EQUAL(expect->type(), fun->result_type);
auto prog = ctx.compile(std::move(fun));
TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
}
@@ -139,9 +140,9 @@ TEST("require that full tensor reduction works") {
TEST("require that tensor map works") {
EvalCtx ctx(SimpleTensorEngine::ref());
ctx.add_tensor(ctx.make_tensor_map_input(), 1);
- auto expect = ctx.make_tensor_map_output();
+ Value::UP expect = ctx.make_tensor_map_output();
auto fun = map(inject(ValueType::from_spec("tensor(x{},y{})"), 1), operation::Neg::f);
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
+ EXPECT_EQUAL(expect->type(), fun->result_type);
auto prog = ctx.compile(std::move(fun));
TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
}
@@ -150,11 +151,11 @@ TEST("require that tensor join works") {
EvalCtx ctx(SimpleTensorEngine::ref());
ctx.add_tensor(ctx.make_tensor_apply_lhs(), 1);
ctx.add_tensor(ctx.make_tensor_apply_rhs(), 2);
- auto expect = ctx.make_tensor_apply_output();
+ Value::UP expect = ctx.make_tensor_apply_output();
auto fun = join(inject(ValueType::from_spec("tensor(x{},y{})"), 1),
inject(ValueType::from_spec("tensor(y{},z{})"), 2),
operation::Mul::f);
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
+ EXPECT_EQUAL(expect->type(), fun->result_type);
auto prog = ctx.compile(std::move(fun));
TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
}
diff --git a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
index ee8e502815f..5dd8caa6e27 100644
--- a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
+++ b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
@@ -3,59 +3,56 @@
#include <vespa/eval/eval/value_cache/constant_tensor_loader.h>
#include <vespa/eval/eval/simple_tensor_engine.h>
#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/tensor.h>
using namespace vespalib::eval;
-std::unique_ptr<Tensor> dense_tensor_nocells() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x[2],y[2])"));
+TensorSpec sparse_tensor_nocells() {
+ return TensorSpec("tensor(x{},y{})");
}
-std::unique_ptr<Tensor> make_nodim_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("double"));
+TensorSpec make_dense_tensor() {
+ return TensorSpec("tensor(x[2],y[2])")
+ .add({{"x", 0}, {"y", 0}}, 1.0)
+ .add({{"x", 0}, {"y", 1}}, 2.0)
+ .add({{"x", 1}, {"y", 0}}, 3.0)
+ .add({{"x", 1}, {"y", 1}}, 4.0);
}
-std::unique_ptr<Tensor> make_dense_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x[2],y[2])")
- .add({{"x", 0}, {"y", 0}}, 1.0)
- .add({{"x", 0}, {"y", 1}}, 2.0)
- .add({{"x", 1}, {"y", 0}}, 3.0)
- .add({{"x", 1}, {"y", 1}}, 4.0));
+TensorSpec make_sparse_tensor() {
+ return TensorSpec("tensor(x{},y{})")
+ .add({{"x", "foo"}, {"y", "bar"}}, 1.0)
+ .add({{"x", "bar"}, {"y", "foo"}}, 2.0);
}
-std::unique_ptr<Tensor> make_sparse_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x{},y{})")
- .add({{"x", "foo"}, {"y", "bar"}}, 1.0)
- .add({{"x", "bar"}, {"y", "foo"}}, 2.0));
+TensorSpec make_mixed_tensor() {
+ return TensorSpec("tensor(x{},y[2])")
+ .add({{"x", "foo"}, {"y", 0}}, 1.0)
+ .add({{"x", "foo"}, {"y", 1}}, 2.0);
}
-std::unique_ptr<Tensor> make_mixed_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x{},y[2])")
- .add({{"x", "foo"}, {"y", 0}}, 1.0)
- .add({{"x", "foo"}, {"y", 1}}, 2.0));
+void verify_tensor(const TensorSpec &expect, ConstantValue::UP actual) {
+ const auto &engine = SimpleTensorEngine::ref();
+ ASSERT_EQUAL(expect.type(), actual->type().to_spec());
+ ASSERT_TRUE(&engine == &actual->value().as_tensor()->engine());
+ EXPECT_EQUAL(expect, engine.to_spec(actual->value()));
}
-void verify_tensor(std::unique_ptr<Tensor> expect, ConstantValue::UP actual) {
- const auto &engine = expect->engine();
- ASSERT_EQUAL(engine.type_of(*expect), actual->type());
- ASSERT_TRUE(&engine == &actual->value().as_tensor()->engine());
- EXPECT_EQUAL(engine.to_spec(*expect), engine.to_spec(*actual->value().as_tensor()));
+void verify_invalid(ConstantValue::UP actual) {
+ EXPECT_EQUAL(actual->type(), ValueType::double_type());
+ EXPECT_EQUAL(actual->value().as_double(), 0.0);
}
TEST_F("require that invalid types loads an empty double", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(make_nodim_tensor(), f1.create(TEST_PATH("dense.json"), "invalid type spec")));
+ TEST_DO(verify_invalid(f1.create(TEST_PATH("dense.json"), "invalid type spec")));
}
TEST_F("require that invalid file name loads an empty tensor", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(dense_tensor_nocells(), f1.create(TEST_PATH("missing_file.json"), "tensor(x[2],y[2])")));
+ TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("missing_file.json"), "tensor(x{},y{})")));
}
TEST_F("require that invalid json loads an empty tensor", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(dense_tensor_nocells(), f1.create(TEST_PATH("invalid.json"), "tensor(x[2],y[2])")));
+ TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("invalid.json"), "tensor(x{},y{})")));
}
TEST_F("require that dense tensors can be loaded", ConstantTensorLoader(SimpleTensorEngine::ref())) {
@@ -75,7 +72,7 @@ TEST_F("require that lz4 compressed sparse tensor can be loaded", ConstantTensor
}
TEST_F("require that bad lz4 file fails to load creating empty result", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(dense_tensor_nocells(), f1.create(TEST_PATH("bad_lz4.json.lz4"), "tensor(x[2],y[2])")));
+ TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("bad_lz4.json.lz4"), "tensor(x{},y{})")));
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
index 17df3d21d0c..f272166fede 100644
--- a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
+++ b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
@@ -57,25 +57,19 @@ private:
tensor::Tensor::UP _rhsTensor;
const DenseTensor &_lhsDenseTensor;
const DenseTensor &_rhsDenseTensor;
- TensorValue _lhsValue;
- TensorValue _rhsValue;
public:
FunctionInput(size_t lhsNumCells, size_t rhsNumCells)
: _lhsTensor(makeTensor(lhsNumCells, 3.0)),
_rhsTensor(makeTensor(rhsNumCells, 5.0)),
_lhsDenseTensor(asDenseTensor(*_lhsTensor)),
- _rhsDenseTensor(asDenseTensor(*_rhsTensor)),
- _lhsValue(std::make_unique<DenseTensor>(_lhsDenseTensor.type(),
- _lhsDenseTensor.cells())),
- _rhsValue(std::make_unique<DenseTensor>(_rhsDenseTensor.type(),
- _rhsDenseTensor.cells()))
+ _rhsDenseTensor(asDenseTensor(*_rhsTensor))
{}
virtual const Value &get_tensor(size_t id) const override {
if (id == 0) {
- return _lhsValue;
+ return *_lhsTensor;
} else {
- return _rhsValue;
+ return *_rhsTensor;
}
}
double expectedDotProduct() const {
diff --git a/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
index c26429f47e4..e369e09b99a 100644
--- a/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
+++ b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
@@ -9,6 +9,7 @@
#include <vespa/eval/eval/simple_tensor.h>
using vespalib::eval::ValueType;
+using vespalib::eval::Value;
using vespalib::eval::TensorSpec;
using vespalib::eval::SimpleTensor;
using namespace vespalib::tensor;
@@ -21,8 +22,8 @@ void verify_wrapped(const TensorSpec &source, const vespalib::string &type, cons
}
void verify(const TensorSpec &source, const vespalib::string &type, const TensorSpec &expect) {
- auto tensor = DefaultTensorEngine::ref().create(source);
- const Tensor *tensor_impl = dynamic_cast<const Tensor *>(tensor.get());
+ Value::UP value = DefaultTensorEngine::ref().from_spec(source);
+ const Tensor *tensor_impl = dynamic_cast<const Tensor *>(value->as_tensor());
ASSERT_TRUE(tensor_impl);
TensorMapper mapper(ValueType::from_spec(type));
auto mapped = mapper.map(*tensor_impl);
diff --git a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
index 3daaa3f79b3..2ed0021b5c7 100644
--- a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
+++ b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
@@ -25,21 +25,12 @@ const vespalib::string matrix_product_expr = "reduce(reduce((query+documen
//-----------------------------------------------------------------------------
-Value::UP wrap(std::unique_ptr<eval::Tensor> tensor) {
- return Value::UP(new TensorValue(std::move(tensor)));
-}
-
-//-----------------------------------------------------------------------------
-
struct Params {
std::map<vespalib::string, Value::UP> map;
Params &add(const vespalib::string &name, Value::UP value) {
map.emplace(name, std::move(value));
return *this;
}
- Params &add(const vespalib::string &name, std::unique_ptr<eval::Tensor> value) {
- return add(name, wrap(std::move(value)));
- }
};
InterpretedFunction::SimpleObjectParams make_params(const Function &function, const Params &params)
@@ -49,7 +40,7 @@ InterpretedFunction::SimpleObjectParams make_params(const Function &function, co
for (size_t i = 0; i < function.num_params(); ++i) {
auto param = params.map.find(function.param_name(i));
ASSERT_TRUE(param != params.map.end());
- fun_params.params.push_back(*(param->second));
+ fun_params.params.push_back(*param->second);
}
return fun_params;
}
@@ -92,9 +83,8 @@ double benchmark_expression_us(const vespalib::string &expression, const Params
//-----------------------------------------------------------------------------
-tensor::Tensor::UP make_tensor(const TensorSpec &spec) {
- auto tensor = DefaultTensorEngine::ref().create(spec);
- return tensor::Tensor::UP(dynamic_cast<tensor::Tensor*>(tensor.release()));
+Value::UP make_tensor(TensorSpec spec) {
+ return DefaultTensorEngine::ref().from_spec(spec);
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp
index f99c4ace2dd..7e80a699107 100644
--- a/eval/src/vespa/eval/eval/interpreted_function.cpp
+++ b/eval/src/vespa/eval/eval/interpreted_function.cpp
@@ -309,8 +309,7 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser {
}
spec.add(addr, fun(&params[0]));
} while (step_labels(params, type));
- auto tensor = tensor_engine.create(spec);
- make_const_op(node, stash.create<TensorValue>(std::move(tensor)));
+ make_const_op(node, *stash.create<Value::UP>(tensor_engine.from_spec(spec)));
}
void visit(const TensorConcat &node) override {
vespalib::string &dimension = stash.create<vespalib::string>(node.dimension());
diff --git a/eval/src/vespa/eval/eval/simple_tensor.cpp b/eval/src/vespa/eval/eval/simple_tensor.cpp
index e39e926708d..0e58d292334 100644
--- a/eval/src/vespa/eval/eval/simple_tensor.cpp
+++ b/eval/src/vespa/eval/eval/simple_tensor.cpp
@@ -604,7 +604,11 @@ SimpleTensor::rename(const std::vector<vespalib::string> &from, const std::vecto
std::unique_ptr<SimpleTensor>
SimpleTensor::create(const TensorSpec &spec)
{
- Builder builder(ValueType::from_spec(spec.type()));
+ ValueType my_type = ValueType::from_spec(spec.type());
+ if (my_type.is_error()) {
+ return std::make_unique<SimpleTensor>();
+ }
+ Builder builder(my_type);
for (const auto &cell: spec.cells()) {
builder.set(cell.first, cell.second);
}
diff --git a/eval/src/vespa/eval/eval/simple_tensor.h b/eval/src/vespa/eval/eval/simple_tensor.h
index 366796f00d8..45d1853824d 100644
--- a/eval/src/vespa/eval/eval/simple_tensor.h
+++ b/eval/src/vespa/eval/eval/simple_tensor.h
@@ -82,7 +82,7 @@ public:
explicit SimpleTensor(double value);
SimpleTensor(const ValueType &type_in, Cells cells_in);
double as_double() const final override;
- const ValueType &type() const { return _type; }
+ const ValueType &type() const override { return _type; }
const Cells &cells() const { return _cells; }
std::unique_ptr<SimpleTensor> map(map_fun_t function) const;
std::unique_ptr<SimpleTensor> reduce(Aggregator &aggr, const std::vector<vespalib::string> &dimensions) const;
diff --git a/eval/src/vespa/eval/eval/simple_tensor_engine.cpp b/eval/src/vespa/eval/eval/simple_tensor_engine.cpp
index 21498ca2ff1..2b3c5679488 100644
--- a/eval/src/vespa/eval/eval/simple_tensor_engine.cpp
+++ b/eval/src/vespa/eval/eval/simple_tensor_engine.cpp
@@ -10,7 +10,7 @@ namespace eval {
namespace {
-const SimpleTensor &to_simple(const Tensor &tensor) {
+const SimpleTensor &as_simple(const Tensor &tensor) {
assert(&tensor.engine() == &SimpleTensorEngine::ref());
return static_cast<const SimpleTensor&>(tensor);
}
@@ -20,98 +20,92 @@ const SimpleTensor &to_simple(const Value &value, Stash &stash) {
return stash.create<SimpleTensor>(value.as_double());
}
if (auto tensor = value.as_tensor()) {
- return to_simple(*tensor);
+ return as_simple(*tensor);
}
return stash.create<SimpleTensor>(); // error
}
+template <typename F>
+void with_simple(const Value &value, const F &f) {
+ if (value.is_double()) {
+ f(SimpleTensor(value.as_double()));
+ } else if (auto tensor = value.as_tensor()) {
+ f(as_simple(*tensor));
+ } else {
+ f(SimpleTensor());
+ }
+}
+
const Value &to_value(std::unique_ptr<SimpleTensor> tensor, Stash &stash) {
+ if (tensor->type().is_tensor()) {
+ return *stash.create<Value::UP>(std::move(tensor));
+ }
if (tensor->type().is_double()) {
- assert(tensor->cells().size() == 1u);
- return stash.create<DoubleValue>(tensor->cells()[0].value);
+ return stash.create<DoubleValue>(tensor->as_double());
}
+ assert(tensor->type().is_error());
+ return ErrorValue::instance;
+}
+
+Value::UP to_value(std::unique_ptr<SimpleTensor> tensor) {
if (tensor->type().is_tensor()) {
- return stash.create<TensorValue>(std::move(tensor));
+ return std::move(tensor);
+ }
+ if (tensor->type().is_double()) {
+ return std::make_unique<DoubleValue>(tensor->as_double());
}
assert(tensor->type().is_error());
- return stash.create<ErrorValue>();
+ return std::make_unique<ErrorValue>();
}
} // namespace vespalib::eval::<unnamed>
const SimpleTensorEngine SimpleTensorEngine::_engine;
-ValueType
-SimpleTensorEngine::type_of(const Tensor &tensor) const
-{
- return to_simple(tensor).type();
-}
-
-vespalib::string
-SimpleTensorEngine::to_string(const Tensor &tensor) const
-{
- const SimpleTensor &simple_tensor = to_simple(tensor);
- vespalib::string out = vespalib::make_string("simple(%s) {\n", simple_tensor.type().to_spec().c_str());
- for (const auto &cell: simple_tensor.cells()) {
- size_t n = 0;
- out.append(" [");
- for (const auto &label: cell.address) {
- if (n++) {
- out.append(",");
- }
- if (label.is_mapped()) {
- out.append(label.name);
- } else {
- out.append(vespalib::make_string("%zu", label.index));
- }
- }
- out.append(vespalib::make_string("]: %g\n", cell.value));
- }
- out.append("}");
- return out;
-}
+//-----------------------------------------------------------------------------
TensorSpec
-SimpleTensorEngine::to_spec(const Tensor &tensor) const
+SimpleTensorEngine::to_spec(const Value &value) const
{
- const SimpleTensor &simple_tensor = to_simple(tensor);
- ValueType type = simple_tensor.type();
- const auto &dimensions = type.dimensions();
- TensorSpec spec(type.to_spec());
- for (const auto &cell: simple_tensor.cells()) {
- TensorSpec::Address addr;
- assert(cell.address.size() == dimensions.size());
- for (size_t i = 0; i < cell.address.size(); ++i) {
- const auto &label = cell.address[i];
- if (label.is_mapped()) {
- addr.emplace(dimensions[i].name, TensorSpec::Label(label.name));
- } else {
- addr.emplace(dimensions[i].name, TensorSpec::Label(label.index));
- }
- }
- spec.add(addr, cell.value);
- }
+ TensorSpec spec(value.type().to_spec());
+ const auto &dimensions = value.type().dimensions();
+ with_simple(value, [&spec,&dimensions](const SimpleTensor &simple_tensor)
+ {
+ for (const auto &cell: simple_tensor.cells()) {
+ TensorSpec::Address addr;
+ assert(cell.address.size() == dimensions.size());
+ for (size_t i = 0; i < cell.address.size(); ++i) {
+ const auto &label = cell.address[i];
+ if (label.is_mapped()) {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.name));
+ } else {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.index));
+ }
+ }
+ spec.add(addr, cell.value);
+ }
+ });
return spec;
}
-std::unique_ptr<eval::Tensor>
-SimpleTensorEngine::create(const TensorSpec &spec) const
+Value::UP
+SimpleTensorEngine::from_spec(const TensorSpec &spec) const
{
- return SimpleTensor::create(spec);
+ return to_value(SimpleTensor::create(spec));
}
//-----------------------------------------------------------------------------
void
-SimpleTensorEngine::encode(const Value &value, nbostream &output, Stash &stash) const
+SimpleTensorEngine::encode(const Value &value, nbostream &output) const
{
- SimpleTensor::encode(to_simple(value, stash), output);
+ with_simple(value, [&output](const SimpleTensor &tensor) { SimpleTensor::encode(tensor, output); });
}
-const Value &
-SimpleTensorEngine::decode(nbostream &input, Stash &stash) const
+Value::UP
+SimpleTensorEngine::decode(nbostream &input) const
{
- return to_value(SimpleTensor::decode(input), stash);
+ return to_value(SimpleTensor::decode(input));
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/simple_tensor_engine.h b/eval/src/vespa/eval/eval/simple_tensor_engine.h
index c751f2f6b49..4cfd389dfa9 100644
--- a/eval/src/vespa/eval/eval/simple_tensor_engine.h
+++ b/eval/src/vespa/eval/eval/simple_tensor_engine.h
@@ -19,14 +19,12 @@ private:
public:
static const TensorEngine &ref() { return _engine; };
- ValueType type_of(const Tensor &tensor) const override;
- vespalib::string to_string(const Tensor &tensor) const override;
- TensorSpec to_spec(const Tensor &tensor) const override;
+ TensorSpec to_spec(const Value &value) const override;
+ Value::UP from_spec(const TensorSpec &spec) const override;
- std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
+ void encode(const Value &value, nbostream &output) const override;
+ Value::UP decode(nbostream &input) const override;
- void encode(const Value &value, nbostream &output, Stash &stash) const override;
- const Value &decode(nbostream &input, Stash &stash) const override;
const Value &map(const Value &a, map_fun_t function, Stash &stash) const override;
const Value &join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const override;
const Value &reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
diff --git a/eval/src/vespa/eval/eval/tensor.cpp b/eval/src/vespa/eval/eval/tensor.cpp
index 926606f8e26..645208ba8fb 100644
--- a/eval/src/vespa/eval/eval/tensor.cpp
+++ b/eval/src/vespa/eval/eval/tensor.cpp
@@ -18,7 +18,7 @@ operator==(const Tensor &lhs, const Tensor &rhs)
std::ostream &
operator<<(std::ostream &out, const Tensor &tensor)
{
- out << tensor.engine().to_string(tensor);
+ out << tensor.engine().to_spec(tensor).to_string();
return out;
}
diff --git a/eval/src/vespa/eval/eval/tensor.h b/eval/src/vespa/eval/eval/tensor.h
index 57cd9abe1f5..149e2774bfb 100644
--- a/eval/src/vespa/eval/eval/tensor.h
+++ b/eval/src/vespa/eval/eval/tensor.h
@@ -3,6 +3,7 @@
#pragma once
#include "value_type.h"
+#include "value.h"
namespace vespalib {
namespace eval {
@@ -18,7 +19,7 @@ class TensorEngine;
* engine. TensorEngines should only have a single static instance per
* implementation.
**/
-class Tensor
+class Tensor : public Value
{
private:
const TensorEngine &_engine;
@@ -30,7 +31,8 @@ public:
Tensor(Tensor &&) = delete;
Tensor &operator=(const Tensor &) = delete;
Tensor &operator=(Tensor &&) = delete;
- virtual double as_double() const = 0;
+ bool is_tensor() const override { return true; }
+ const Tensor *as_tensor() const override { return this; }
const TensorEngine &engine() const { return _engine; }
virtual ~Tensor() {}
};
diff --git a/eval/src/vespa/eval/eval/tensor_engine.h b/eval/src/vespa/eval/eval/tensor_engine.h
index 00927f0c1b1..357b9c8f82f 100644
--- a/eval/src/vespa/eval/eval/tensor_engine.h
+++ b/eval/src/vespa/eval/eval/tensor_engine.h
@@ -32,25 +32,23 @@ class TensorSpec;
**/
struct TensorEngine
{
- using ValueType = eval::ValueType;
+ using Aggr = eval::Aggr;
using Tensor = eval::Tensor;
using TensorSpec = eval::TensorSpec;
using Value = eval::Value;
- using map_fun_t = double (*)(double);
+ using ValueType = eval::ValueType;
using join_fun_t = double (*)(double, double);
- using Aggr = eval::Aggr;
-
- virtual ValueType type_of(const Tensor &tensor) const = 0;
- virtual vespalib::string to_string(const Tensor &tensor) const = 0;
- virtual TensorSpec to_spec(const Tensor &tensor) const = 0;
+ using map_fun_t = double (*)(double);
virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); }
- virtual std::unique_ptr<Tensor> create(const TensorSpec &spec) const = 0;
-
// havardpe: new API, WIP
- virtual void encode(const Value &value, nbostream &output, Stash &stash) const = 0;
- virtual const Value &decode(nbostream &input, Stash &stash) const = 0;
+ virtual TensorSpec to_spec(const Value &value) const = 0;
+ virtual Value::UP from_spec(const TensorSpec &spec) const = 0;
+
+ virtual void encode(const Value &value, nbostream &output) const = 0;
+ virtual Value::UP decode(nbostream &input) const = 0;
+
virtual const Value &map(const Value &a, map_fun_t function, Stash &stash) const = 0;
virtual const Value &join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const = 0;
virtual const Value &reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const = 0;
diff --git a/eval/src/vespa/eval/eval/tensor_function.cpp b/eval/src/vespa/eval/eval/tensor_function.cpp
index 0dcc930087f..ef91d9e10a9 100644
--- a/eval/src/vespa/eval/eval/tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/tensor_function.cpp
@@ -5,11 +5,21 @@
#include "operation.h"
#include "tensor.h"
#include "tensor_engine.h"
+#include "simple_tensor_engine.h"
namespace vespalib {
namespace eval {
namespace tensor_function {
+const TensorEngine &infer_engine(const std::initializer_list<Value::CREF> &values) {
+ for (const Value &value: values) {
+ if (auto tensor = value.as_tensor()) {
+ return tensor->engine();
+ }
+ }
+ return SimpleTensorEngine::ref();
+}
+
void Inject::accept(TensorFunctionVisitor &visitor) const { visitor.visit(*this); }
void Reduce::accept(TensorFunctionVisitor &visitor) const { visitor.visit(*this); }
void Map ::accept(TensorFunctionVisitor &visitor) const { visitor.visit(*this); }
@@ -27,7 +37,7 @@ const Value &
Reduce::eval(const Input &input, Stash &stash) const
{
const Value &a = tensor->eval(input, stash);
- const TensorEngine &engine = a.as_tensor()->engine();
+ const TensorEngine &engine = infer_engine({a});
return engine.reduce(a, aggr, dimensions, stash);
}
@@ -35,7 +45,7 @@ const Value &
Map::eval(const Input &input, Stash &stash) const
{
const Value &a = tensor->eval(input, stash);
- const TensorEngine &engine = a.as_tensor()->engine();
+ const TensorEngine &engine = infer_engine({a});
return engine.map(a, function, stash);
}
@@ -44,7 +54,7 @@ Join::eval(const Input &input, Stash &stash) const
{
const Value &a = lhs_tensor->eval(input, stash);
const Value &b = rhs_tensor->eval(input, stash);
- const TensorEngine &engine = a.as_tensor()->engine();
+ const TensorEngine &engine = infer_engine({a,b});
return engine.join(a, b, function, stash);
}
diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
index e0a9f731804..c45e2df3432 100644
--- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
+++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
@@ -38,18 +38,17 @@ struct Eval {
double _number;
TensorSpec _tensor;
public:
- Result(const Value &value) : _type(Type::ERROR), _number(error_value), _tensor("error") {
+ Result() : _type(Type::ERROR), _number(error_value), _tensor("error") {}
+ Result(const TensorEngine &engine, const Value &value) : _type(Type::ERROR), _number(error_value), _tensor("error") {
if (value.is_double()) {
_type = Type::NUMBER;
- _number = value.as_double();
- _tensor = TensorSpec("double").add({}, _number);
- } else if (value.is_tensor()) {
+ }
+ if (value.is_tensor()) {
+ EXPECT_TRUE(_type == Type::ERROR);
_type = Type::TENSOR;
- _tensor = value.as_tensor()->engine().to_spec(*value.as_tensor());
- if (_tensor.type() == "double") {
- _number = as_double(_tensor);
- }
}
+ _number = value.as_double();
+ _tensor = engine.to_spec(value);
}
bool is_error() const { return (_type == Type::ERROR); }
bool is_number() const { return (_type == Type::NUMBER); }
@@ -65,15 +64,15 @@ struct Eval {
};
virtual Result eval(const TensorEngine &) const {
TEST_ERROR("wrong signature");
- return Result(ErrorValue());
+ return Result();
}
virtual Result eval(const TensorEngine &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
- return Result(ErrorValue());
+ return Result();
}
virtual Result eval(const TensorEngine &, const TensorSpec &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
- return Result(ErrorValue());
+ return Result();
}
virtual ~Eval() {}
};
@@ -87,7 +86,7 @@ struct SafeEval : Eval {
return unsafe.eval(engine);
} catch (std::exception &e) {
TEST_ERROR(e.what());
- return Result(ErrorValue());
+ return Result();
}
}
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
@@ -95,7 +94,7 @@ struct SafeEval : Eval {
return unsafe.eval(engine, a);
} catch (std::exception &e) {
TEST_ERROR(e.what());
- return Result(ErrorValue());
+ return Result();
}
}
@@ -104,7 +103,7 @@ struct SafeEval : Eval {
return unsafe.eval(engine, a, b);
} catch (std::exception &e) {
TEST_ERROR(e.what());
- return Result(ErrorValue());
+ return Result();
}
}
};
@@ -125,7 +124,7 @@ struct Expr_V : Eval {
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
InterpretedFunction::SimpleObjectParams params({});
- return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
+ return Result(engine, check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
}
};
@@ -139,9 +138,9 @@ struct Expr_T : Eval {
NodeTypes types(fun, {a_type});
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
- TensorValue va(engine.create(a));
- InterpretedFunction::SimpleObjectParams params({va});
- return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
+ Value::UP va = engine.from_spec(a);
+ InterpretedFunction::SimpleObjectParams params({*va});
+ return Result(engine, check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
}
};
@@ -156,19 +155,15 @@ struct Expr_TT : Eval {
NodeTypes types(fun, {a_type, b_type});
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
- TensorValue va(engine.create(a));
- TensorValue vb(engine.create(b));
- InterpretedFunction::SimpleObjectParams params({va,vb});
- return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
+ Value::UP va = engine.from_spec(a);
+ Value::UP vb = engine.from_spec(b);
+ InterpretedFunction::SimpleObjectParams params({*va,*vb});
+ return Result(engine, check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
}
};
const Value &make_value(const TensorEngine &engine, const TensorSpec &spec, Stash &stash) {
- if (spec.type() == "double") {
- double number = as_double(spec);
- return stash.create<DoubleValue>(number);
- }
- return stash.create<TensorValue>(engine.create(spec));
+ return *stash.create<Value::UP>(engine.from_spec(spec));
}
//-----------------------------------------------------------------------------
@@ -183,7 +178,7 @@ struct ImmediateReduce : Eval {
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
- return Result(engine.reduce(lhs, aggr, dimensions, stash));
+ return Result(engine, engine.reduce(lhs, aggr, dimensions, stash));
}
};
@@ -195,7 +190,7 @@ struct ImmediateMap : Eval {
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
- return Result(engine.map(lhs, function, stash));
+ return Result(engine, engine.map(lhs, function, stash));
}
};
@@ -208,7 +203,7 @@ struct ImmediateJoin : Eval {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
const auto &rhs = make_value(engine, b, stash);
- return Result(engine.join(lhs, rhs, function, stash));
+ return Result(engine, engine.join(lhs, rhs, function, stash));
}
};
@@ -220,7 +215,7 @@ struct ImmediateConcat : Eval {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
const auto &rhs = make_value(engine, b, stash);
- return Result(engine.concat(lhs, rhs, dimension, stash));
+ return Result(engine, engine.concat(lhs, rhs, dimension, stash));
}
};
@@ -233,7 +228,7 @@ struct ImmediateRename : Eval {
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
- return Result(engine.rename(lhs, from, to, stash));
+ return Result(engine, engine.rename(lhs, from, to, stash));
}
};
@@ -244,18 +239,18 @@ const size_t tensor_id_b = 12;
// input used when evaluating in retained mode
struct Input : TensorFunction::Input {
- std::vector<TensorValue> tensors;
- Input(std::unique_ptr<Tensor> a) : tensors() {
- tensors.emplace_back(std::move(a));
+ std::vector<Value::UP> tensors;
+ Input(Value::UP a) : tensors() {
+ tensors.push_back(std::move(a));
}
- Input(std::unique_ptr<Tensor> a, std::unique_ptr<Tensor> b) : tensors() {
- tensors.emplace_back(std::move(a));
- tensors.emplace_back(std::move(b));
+ Input(Value::UP a, Value::UP b) : tensors() {
+ tensors.push_back(std::move(a));
+ tensors.push_back(std::move(b));
}
const Value &get_tensor(size_t id) const override {
size_t offset = (id - tensor_id_a);
ASSERT_GREATER(tensors.size(), offset);
- return tensors[offset];
+ return *tensors[offset];
}
};
@@ -267,13 +262,13 @@ struct RetainedReduce : Eval {
RetainedReduce(Aggr aggr_in, const vespalib::string &dimension)
: aggr(aggr_in), dimensions({dimension}) {}
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ Stash stash;
auto a_type = ValueType::from_spec(a.type());
auto ir = tensor_function::reduce(tensor_function::inject(a_type, tensor_id_a), aggr, dimensions);
ValueType expect_type = ir->result_type;
auto fun = engine.compile(std::move(ir));
- Input input(engine.create(a));
- Stash stash;
- return Result(check_type(fun->eval(input, stash), expect_type));
+ Input input(engine.from_spec(a));
+ return Result(engine, check_type(fun->eval(input, stash), expect_type));
}
};
@@ -282,13 +277,13 @@ struct RetainedMap : Eval {
map_fun_t function;
RetainedMap(map_fun_t function_in) : function(function_in) {}
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ Stash stash;
auto a_type = ValueType::from_spec(a.type());
auto ir = tensor_function::map(tensor_function::inject(a_type, tensor_id_a), function);
ValueType expect_type = ir->result_type;
auto fun = engine.compile(std::move(ir));
- Input input(engine.create(a));
- Stash stash;
- return Result(check_type(fun->eval(input, stash), expect_type));
+ Input input(engine.from_spec(a));
+ return Result(engine, check_type(fun->eval(input, stash), expect_type));
}
};
@@ -297,6 +292,7 @@ struct RetainedJoin : Eval {
join_fun_t function;
RetainedJoin(join_fun_t function_in) : function(function_in) {}
Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ Stash stash;
auto a_type = ValueType::from_spec(a.type());
auto b_type = ValueType::from_spec(b.type());
auto ir = tensor_function::join(tensor_function::inject(a_type, tensor_id_a),
@@ -304,9 +300,8 @@ struct RetainedJoin : Eval {
function);
ValueType expect_type = ir->result_type;
auto fun = engine.compile(std::move(ir));
- Input input(engine.create(a), engine.create(b));
- Stash stash;
- return Result(check_type(fun->eval(input, stash), expect_type));
+ Input input(engine.from_spec(a), engine.from_spec(b));
+ return Result(engine, check_type(fun->eval(input, stash), expect_type));
}
};
@@ -369,21 +364,15 @@ struct TestContext {
TestContext(const vespalib::string &module_path_in, const TensorEngine &engine_in)
: module_path(module_path_in), ref_engine(SimpleTensorEngine::ref()), engine(engine_in) {}
- std::unique_ptr<Tensor> tensor(const TensorSpec &spec) {
- auto result = engine.create(spec);
- EXPECT_EQUAL(spec.type(), engine.type_of(*result).to_spec());
- return result;
- }
-
//-------------------------------------------------------------------------
void verify_create_type(const vespalib::string &type_spec) {
- auto tensor = engine.create(TensorSpec(type_spec));
- EXPECT_TRUE(&engine == &tensor->engine());
- EXPECT_EQUAL(type_spec, engine.type_of(*tensor).to_spec());
+ Value::UP value = engine.from_spec(TensorSpec(type_spec));
+ EXPECT_EQUAL(type_spec, value->type().to_spec());
}
void test_tensor_create_type() {
+ TEST_DO(verify_create_type("error"));
TEST_DO(verify_create_type("double"));
TEST_DO(verify_create_type("tensor(x{})"));
TEST_DO(verify_create_type("tensor(x{},y{})"));
@@ -867,8 +856,8 @@ struct TestContext {
{
Stash stash;
nbostream data;
- encode_engine.encode(make_value(encode_engine, spec, stash), data, stash);
- TEST_DO(verify_result(Eval::Result(decode_engine.decode(data, stash)), spec));
+ encode_engine.encode(make_value(encode_engine, spec, stash), data);
+ TEST_DO(verify_result(Eval::Result(decode_engine, *decode_engine.decode(data)), spec));
}
void verify_encode_decode(const TensorSpec &spec) {
@@ -884,13 +873,13 @@ struct TestContext {
const Inspector &binary = test["binary"];
EXPECT_GREATER(binary.entries(), 0u);
nbostream encoded;
- engine.encode(make_value(engine, spec, stash), encoded, stash);
+ engine.encode(make_value(engine, spec, stash), encoded);
test.setData("encoded", Memory(encoded.peek(), encoded.size()));
bool matched_encode = false;
for (size_t i = 0; i < binary.entries(); ++i) {
nbostream data = extract_data(binary[i].asString());
matched_encode = (matched_encode || is_same(encoded, data));
- TEST_DO(verify_result(Eval::Result(engine.decode(data, stash)), spec));
+ TEST_DO(verify_result(Eval::Result(engine, *engine.decode(data)), spec));
EXPECT_EQUAL(data.size(), 0u);
}
EXPECT_TRUE(matched_encode);
diff --git a/eval/src/vespa/eval/eval/value.cpp b/eval/src/vespa/eval/eval/value.cpp
index 456d80c0ff0..4bfd758f9cd 100644
--- a/eval/src/vespa/eval/eval/value.cpp
+++ b/eval/src/vespa/eval/eval/value.cpp
@@ -6,19 +6,10 @@
namespace vespalib {
namespace eval {
-ErrorValue ErrorValue::instance;
+ValueType ErrorValue::_type = ValueType::error_type();
+const ErrorValue ErrorValue::instance;
-double
-TensorValue::as_double() const
-{
- return _tensor->as_double();
-}
-
-ValueType
-TensorValue::type() const
-{
- return _tensor->engine().type_of(*_tensor);
-}
+ValueType DoubleValue::_type = ValueType::double_type();
} // namespace vespalib::eval
} // namespace vespalib
diff --git a/eval/src/vespa/eval/eval/value.h b/eval/src/vespa/eval/eval/value.h
index 8826faed140..08ca9792739 100644
--- a/eval/src/vespa/eval/eval/value.h
+++ b/eval/src/vespa/eval/eval/value.h
@@ -5,7 +5,6 @@
#include <vespa/vespalib/stllike/string.h>
#include <memory>
#include <vespa/vespalib/util/stash.h>
-#include "tensor.h"
#include "value_type.h"
namespace vespalib {
@@ -25,43 +24,33 @@ struct Value {
virtual bool is_double() const { return false; }
virtual bool is_tensor() const { return false; }
virtual double as_double() const { return 0.0; }
- virtual bool as_bool() const { return false; }
+ bool as_bool() const { return (as_double() != 0.0); }
virtual const Tensor *as_tensor() const { return nullptr; }
- virtual ValueType type() const = 0;
+ virtual const ValueType &type() const = 0;
virtual ~Value() {}
};
-struct ErrorValue : public Value {
- static ErrorValue instance;
+class ErrorValue : public Value
+{
+private:
+ static ValueType _type;
+public:
+ static const ErrorValue instance;
bool is_error() const override { return true; }
double as_double() const override { return error_value; }
- ValueType type() const override { return ValueType::error_type(); }
+ const ValueType &type() const override { return _type; }
};
class DoubleValue : public Value
{
private:
double _value;
+ static ValueType _type;
public:
DoubleValue(double value) : _value(value) {}
bool is_double() const override { return true; }
double as_double() const override { return _value; }
- bool as_bool() const override { return (_value != 0.0); }
- ValueType type() const override { return ValueType::double_type(); }
-};
-
-class TensorValue : public Value
-{
-private:
- const Tensor *_tensor;
- std::unique_ptr<Tensor> _stored;
-public:
- TensorValue(const Tensor &value) : _tensor(&value), _stored() {}
- TensorValue(std::unique_ptr<Tensor> value) : _tensor(value.get()), _stored(std::move(value)) {}
- bool is_tensor() const override { return true; }
- double as_double() const override;
- const Tensor *as_tensor() const override { return _tensor; }
- ValueType type() const override;
+ const ValueType &type() const override { return _type; }
};
} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
index f026ca060c6..38d5bbc643b 100644
--- a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
+++ b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
@@ -68,17 +68,13 @@ void decode_json(const vespalib::string &path, Slime &slime) {
} // namespace vespalib::eval::<unnamed>
-using ErrorConstant = SimpleConstantValue<ErrorValue>;
-using TensorConstant = SimpleConstantValue<TensorValue>;
-
ConstantValue::UP
ConstantTensorLoader::create(const vespalib::string &path, const vespalib::string &type) const
{
ValueType value_type = ValueType::from_spec(type);
if (value_type.is_error()) {
LOG(warning, "invalid type specification: %s", type.c_str());
- auto tensor = _engine.create(TensorSpec("double"));
- return std::make_unique<TensorConstant>(_engine.type_of(*tensor), std::move(tensor));
+ return std::make_unique<SimpleConstantValue>(_engine.from_spec(TensorSpec("double")));
}
Slime slime;
decode_json(path, slime);
@@ -96,8 +92,7 @@ ConstantTensorLoader::create(const vespalib::string &path, const vespalib::strin
cells[i]["address"].traverse(extractor);
spec.add(address, cells[i]["value"].asDouble());
}
- auto tensor = _engine.create(spec);
- return std::make_unique<TensorConstant>(_engine.type_of(*tensor), std::move(tensor));
+ return std::make_unique<SimpleConstantValue>(_engine.from_spec(spec));
}
} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/value_cache/constant_value.h b/eval/src/vespa/eval/eval/value_cache/constant_value.h
index 462dc3ad9b4..ba7fe6fcf3d 100644
--- a/eval/src/vespa/eval/eval/value_cache/constant_value.h
+++ b/eval/src/vespa/eval/eval/value_cache/constant_value.h
@@ -21,19 +21,13 @@ struct ConstantValue {
virtual ~ConstantValue() {}
};
-/**
- * A simple implementation of a constant value that bundles together a
- * ValueType instance with a specific Value subclass instance.
- **/
-template <typename VALUE>
-struct SimpleConstantValue : ConstantValue {
- ValueType my_type;
- VALUE my_value;
- template <typename... Args>
- SimpleConstantValue(const ValueType &type_in, Args &&...args)
- : my_type(type_in), my_value(std::forward<Args>(args)...) {}
- const ValueType &type() const override { return my_type; }
- const Value &value() const override { return my_value; }
+class SimpleConstantValue : public ConstantValue {
+private:
+ const Value::UP _value;
+public:
+ SimpleConstantValue(Value::UP value) : _value(std::move(value)) {}
+ const ValueType &type() const override { return _value->type(); }
+ const Value &value() const override { return *_value; }
};
/**
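
A small sketch (assuming an engine reference is available at the call site; make_double_constant is a hypothetical helper) of how the non-templated SimpleConstantValue is now assembled directly from TensorEngine::from_spec():

#include <vespa/eval/eval/value_cache/constant_value.h>
#include <vespa/eval/eval/tensor_engine.h>
#include <memory>

using vespalib::eval::ConstantValue;
using vespalib::eval::SimpleConstantValue;
using vespalib::eval::TensorEngine;
using vespalib::eval::TensorSpec;

ConstantValue::UP make_double_constant(const TensorEngine &engine, double v) {
    // from_spec() returns Value::UP; SimpleConstantValue takes ownership and
    // reports the type of the wrapped value through ConstantValue::type()
    return std::make_unique<SimpleConstantValue>(engine.from_spec(TensorSpec("double").add({}, v)));
}
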
diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
index 7adb95f69ca..9fcdfe36eba 100644
--- a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
+++ b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
@@ -22,7 +22,6 @@ using eval::Aggregator;
using eval::DoubleValue;
using eval::ErrorValue;
using eval::TensorSpec;
-using eval::TensorValue;
using eval::Value;
using eval::ValueType;
@@ -36,18 +35,13 @@ const eval::TensorEngine &default_engine() { return DefaultTensorEngine::ref();
// map tensors to simple tensors before fall-back evaluation
-const eval::SimpleTensor &to_simple(const eval::Tensor &tensor, Stash &stash) {
- if (auto wrapped = dynamic_cast<const WrappedSimpleTensor *>(&tensor)) {
- return wrapped->get();
- }
- TensorSpec spec = tensor.engine().to_spec(tensor);
- using PTR = std::unique_ptr<eval::SimpleTensor>;
- return *stash.create<PTR>(eval::SimpleTensor::create(spec));
-}
-
const Value &to_simple(const Value &value, Stash &stash) {
if (auto tensor = value.as_tensor()) {
- return stash.create<TensorValue>(to_simple(*tensor, stash));
+ if (auto wrapped = dynamic_cast<const WrappedSimpleTensor *>(tensor)) {
+ return wrapped->get();
+ }
+ TensorSpec spec = tensor->engine().to_spec(*tensor);
+ return *stash.create<Value::UP>(eval::SimpleTensor::create(spec));
}
return value;
}
@@ -58,11 +52,11 @@ const Value &to_default(const Value &value, Stash &stash) {
if (auto tensor = value.as_tensor()) {
if (auto simple = dynamic_cast<const eval::SimpleTensor *>(tensor)) {
if (!Tensor::supported({simple->type()})) {
- return stash.create<TensorValue>(std::make_unique<WrappedSimpleTensor>(*simple));
+ return stash.create<WrappedSimpleTensor>(*simple);
}
}
TensorSpec spec = tensor->engine().to_spec(*tensor);
- return stash.create<TensorValue>(default_engine().create(spec));
+ return *stash.create<Value::UP>(default_engine().from_spec(spec));
}
return value;
}
@@ -72,9 +66,19 @@ const Value &to_value(std::unique_ptr<Tensor> tensor, Stash &stash) {
return ErrorValue::instance;
}
if (tensor->getType().is_tensor()) {
- return stash.create<TensorValue>(std::move(tensor));
+ return *stash.create<Value::UP>(std::move(tensor));
+ }
+ return stash.create<DoubleValue>(tensor->as_double());
+}
+
+Value::UP to_value(std::unique_ptr<Tensor> tensor) {
+ if (!tensor) {
+ return std::make_unique<ErrorValue>();
}
- return stash.create<DoubleValue>(tensor->sum());
+ if (tensor->type().is_tensor()) {
+ return std::move(tensor);
+ }
+ return std::make_unique<DoubleValue>(tensor->as_double());
}
const Value &fallback_join(const Value &a, const Value &b, join_fun_t function, Stash &stash) {
@@ -89,38 +93,28 @@ const Value &fallback_reduce(const Value &a, eval::Aggr aggr, const std::vector<
const DefaultTensorEngine DefaultTensorEngine::_engine;
-eval::ValueType
-DefaultTensorEngine::type_of(const Tensor &tensor) const
-{
- assert(&tensor.engine() == this);
- const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
- return my_tensor.getType();
-}
-
-vespalib::string
-DefaultTensorEngine::to_string(const Tensor &tensor) const
+eval::TensorFunction::UP
+DefaultTensorEngine::compile(eval::tensor_function::Node_UP expr) const
{
- assert(&tensor.engine() == this);
- const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
- return my_tensor.toString();
+ return DenseTensorFunctionCompiler::compile(std::move(expr));
}
TensorSpec
-DefaultTensorEngine::to_spec(const Tensor &tensor) const
-{
- assert(&tensor.engine() == this);
- const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
- return my_tensor.toSpec();
-}
-
-eval::TensorFunction::UP
-DefaultTensorEngine::compile(eval::tensor_function::Node_UP expr) const
+DefaultTensorEngine::to_spec(const Value &value) const
{
- return DenseTensorFunctionCompiler::compile(std::move(expr));
+ if (value.is_double()) {
+ return TensorSpec("double").add({}, value.as_double());
+ } else if (auto tensor = value.as_tensor()) {
+ assert(&tensor->engine() == this);
+ const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(*tensor);
+ return my_tensor.toSpec();
+ } else {
+ return TensorSpec("error");
+ }
}
-std::unique_ptr<eval::Tensor>
-DefaultTensorEngine::create(const TensorSpec &spec) const
+Value::UP
+DefaultTensorEngine::from_spec(const TensorSpec &spec) const
{
ValueType type = ValueType::from_spec(spec.type());
bool is_dense = false;
@@ -149,7 +143,7 @@ DefaultTensorEngine::create(const TensorSpec &spec) const
builder.addCell(cell.second);
}
return builder.build();
- } else { // sparse
+ } else if (is_sparse) {
DefaultTensor::builder builder;
std::map<vespalib::string,DefaultTensor::builder::Dimension> dimension_map;
for (const auto &dimension: type.dimensions()) {
@@ -163,6 +157,12 @@ DefaultTensorEngine::create(const TensorSpec &spec) const
builder.add_cell(cell.second);
}
return builder.build();
+ } else if (type.is_double()) {
+ double value = spec.cells().empty() ? 0.0 : spec.cells().begin()->second.value;
+ return std::make_unique<DoubleValue>(value);
+ } else {
+ assert(type.is_error());
+ return std::make_unique<ErrorValue>();
}
}
@@ -189,7 +189,7 @@ struct CellFunctionBindRightAdapter : tensor::CellFunction {
//-----------------------------------------------------------------------------
void
-DefaultTensorEngine::encode(const Value &value, nbostream &output, Stash &) const
+DefaultTensorEngine::encode(const Value &value, nbostream &output) const
{
if (auto tensor = value.as_tensor()) {
TypedBinaryFormat::serialize(output, static_cast<const tensor::Tensor &>(*tensor));
@@ -198,10 +198,10 @@ DefaultTensorEngine::encode(const Value &value, nbostream &output, Stash &) cons
}
}
-const Value &
-DefaultTensorEngine::decode(nbostream &input, Stash &stash) const
+Value::UP
+DefaultTensorEngine::decode(nbostream &input) const
{
- return to_value(TypedBinaryFormat::deserialize(input), stash);
+ return to_value(TypedBinaryFormat::deserialize(input));
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.h b/eval/src/vespa/eval/tensor/default_tensor_engine.h
index bbb03aceb1f..86ee4459902 100644
--- a/eval/src/vespa/eval/tensor/default_tensor_engine.h
+++ b/eval/src/vespa/eval/tensor/default_tensor_engine.h
@@ -19,16 +19,14 @@ private:
public:
static const TensorEngine &ref() { return _engine; };
- ValueType type_of(const Tensor &tensor) const override;
- vespalib::string to_string(const Tensor &tensor) const override;
- TensorSpec to_spec(const Tensor &tensor) const override;
-
virtual eval::TensorFunction::UP compile(eval::tensor_function::Node_UP expr) const override;
- std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
+ TensorSpec to_spec(const Value &value) const override;
+ Value::UP from_spec(const TensorSpec &spec) const override;
+
+ void encode(const Value &value, nbostream &output) const override;
+ Value::UP decode(nbostream &input) const override;
- void encode(const Value &value, nbostream &output, Stash &stash) const override;
- const Value &decode(nbostream &input, Stash &stash) const override;
const Value &map(const Value &a, map_fun_t function, Stash &stash) const override;
const Value &join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const override;
const Value &reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
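
An illustrative round trip (not part of the patch; round_trip is a hypothetical helper) through the value-based engine API introduced here, showing that encode()/decode() no longer take a Stash:

#include <vespa/eval/tensor/default_tensor_engine.h>
#include <vespa/vespalib/objects/nbostream.h>

using vespalib::nbostream;
using vespalib::eval::TensorSpec;
using vespalib::eval::Value;
using vespalib::tensor::DefaultTensorEngine;

TensorSpec round_trip(const TensorSpec &spec) {
    const auto &engine = DefaultTensorEngine::ref();
    Value::UP value = engine.from_spec(spec);   // TensorSpec -> owned Value (tensor, double or error)
    nbostream buffer;
    engine.encode(*value, buffer);              // serialize; no Stash parameter anymore
    Value::UP copy = engine.decode(buffer);     // deserialize into a freshly owned Value
    return engine.to_spec(*copy);               // and back to a TensorSpec
}
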
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp
index 354c0a2f466..5d7e0c83267 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp
@@ -26,7 +26,7 @@ calcCellsSize(const eval::ValueType &type)
void
checkCellsSize(const DenseTensor &arg)
{
- auto cellsSize = calcCellsSize(arg.type());
+ auto cellsSize = calcCellsSize(arg.fast_type());
if (arg.cells().size() != cellsSize) {
throw IllegalStateException(make_string("Wrong cell size, "
"expected=%zu, "
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
index 10651b59468..65fee767690 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
@@ -14,8 +14,8 @@ template <typename Function>
std::unique_ptr<Tensor>
apply(const DenseTensorView &lhs, const DenseTensorView &rhs, Function &&func)
{
- DenseTensorAddressCombiner combiner(lhs.type(), rhs.type());
- DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.type(), rhs.type()));
+ DenseTensorAddressCombiner combiner(lhs.fast_type(), rhs.fast_type());
+ DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.fast_type(), rhs.fast_type()));
for (DenseTensorCellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
for (DenseTensorCellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) {
bool combineSuccess = combiner.combine(lhsItr, rhsItr);
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h b/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h
index 2d5257f018b..f77517bfdc5 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h
@@ -34,7 +34,7 @@ public:
void next();
double cell() const { return _cells[_cellIdx]; }
const std::vector<size_t> &address() const { return _address; }
- const eval::ValueType &type() const { return _type; }
+ const eval::ValueType &fast_type() const { return _type; }
};
} // namespace vespalib::tensor
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp
index c6fc04bb27b..9f608921c05 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp
@@ -94,7 +94,7 @@ template <typename Function>
DenseTensor::UP
reduce(const DenseTensorView &tensor, const vespalib::string &dimensionToRemove, Function &&func)
{
- DimensionReducer reducer(tensor.type(), dimensionToRemove);
+ DimensionReducer reducer(tensor.fast_type(), dimensionToRemove);
return reducer.reduceCells(tensor.cellsRef(), func);
}
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp
index 4f3e49f8ec1..4402b5b0ae0 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp
@@ -49,7 +49,7 @@ calcCellsSize(const eval::ValueType &type)
void
checkCellsSize(const DenseTensorView &arg)
{
- auto cellsSize = calcCellsSize(arg.type());
+ auto cellsSize = calcCellsSize(arg.fast_type());
if (arg.cellsRef().size() != cellsSize) {
throw IllegalStateException(make_string("wrong cell size, "
"expected=%zu, "
@@ -63,14 +63,14 @@ void
checkDimensions(const DenseTensorView &lhs, const DenseTensorView &rhs,
vespalib::stringref operation)
{
- if (lhs.type() != rhs.type()) {
+ if (lhs.fast_type() != rhs.fast_type()) {
throw IllegalStateException(make_string("mismatching dimensions for "
"dense tensor %s, "
"lhs dimensions = '%s', "
"rhs dimensions = '%s'",
operation.c_str(),
- dimensionsAsString(lhs.type()).c_str(),
- dimensionsAsString(rhs.type()).c_str()));
+ dimensionsAsString(lhs.fast_type()).c_str(),
+ dimensionsAsString(rhs.fast_type()).c_str()));
}
checkCellsSize(lhs);
checkCellsSize(rhs);
@@ -96,7 +96,7 @@ joinDenseTensors(const DenseTensorView &lhs, const DenseTensorView &rhs,
++rhsCellItr;
}
assert(rhsCellItr == rhs.cellsRef().cend());
- return std::make_unique<DenseTensor>(lhs.type(),
+ return std::make_unique<DenseTensor>(lhs.fast_type(),
std::move(cells));
}
@@ -132,7 +132,7 @@ bool sameCells(DenseTensorView::CellsRef lhs, DenseTensorView::CellsRef rhs)
DenseTensorView::DenseTensorView(const DenseTensor &rhs)
- : _typeRef(rhs.type()),
+ : _typeRef(rhs.fast_type()),
_cellsRef(rhs.cellsRef())
{
}
@@ -260,7 +260,7 @@ void
buildAddress(const DenseTensorCellsIterator &itr, TensorSpec::Address &address)
{
auto addressItr = itr.address().begin();
- for (const auto &dim : itr.type().dimensions()) {
+ for (const auto &dim : itr.fast_type().dimensions()) {
address.emplace(std::make_pair(dim.name, TensorSpec::Label(*addressItr++)));
}
assert(addressItr == itr.address().end());
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h
index aa447eb42af..472cc58ad6b 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h
@@ -42,7 +42,7 @@ public:
: _typeRef(type_in),
_cellsRef()
{}
- const eval::ValueType &type() const { return _typeRef; }
+ const eval::ValueType &fast_type() const { return _typeRef; }
const CellsRef &cellsRef() const { return _cellsRef; }
bool operator==(const DenseTensorView &rhs) const;
CellsIterator cellsIterator() const { return CellsIterator(_typeRef, _cellsRef); }
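
Sketch of the naming split (intent inferred from the rename; dump_dimensions is a hypothetical helper): fast_type() is the concrete accessor kept on the tensor implementation classes, while type() is left to the virtual accessor inherited from eval::Value:

#include <vespa/eval/tensor/dense/dense_tensor_view.h>
#include <cstdio>

void dump_dimensions(const vespalib::tensor::DenseTensorView &view) {
    // fast_type() returns the ValueType held directly by the view, without
    // going through the virtual Value::type() interface
    for (const auto &dim : view.fast_type().dimensions()) {
        fprintf(stderr, "dimension: %s\n", dim.name.c_str());
    }
}
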
diff --git a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp
index 5da7165af2a..71b7824ee5d 100644
--- a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp
+++ b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp
@@ -22,13 +22,13 @@ MutableDenseTensorView::MutableValueType::MutableValueType(ValueType type_in)
MutableDenseTensorView::MutableValueType::~MutableValueType() {}
MutableDenseTensorView::MutableDenseTensorView(ValueType type_in)
- : DenseTensorView(_concreteType.type(), CellsRef()),
+ : DenseTensorView(_concreteType.fast_type(), CellsRef()),
_concreteType(type_in)
{
}
MutableDenseTensorView::MutableDenseTensorView(ValueType type_in, CellsRef cells_in)
- : DenseTensorView(_concreteType.type(), cells_in),
+ : DenseTensorView(_concreteType.fast_type(), cells_in),
_concreteType(type_in)
{
}
diff --git a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h
index e856885d0fa..7eee3a9483c 100644
--- a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h
+++ b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h
@@ -23,7 +23,7 @@ private:
public:
MutableValueType(eval::ValueType type_in);
~MutableValueType();
- const eval::ValueType &type() const { return _type; }
+ const eval::ValueType &fast_type() const { return _type; }
void setUnboundDimensions(const uint32_t *unboundDimSizeBegin, const uint32_t *unboundDimSizeEnd) {
const uint32_t *unboundDimSizePtr = unboundDimSizeBegin;
for (auto unboundDimSize : _unboundDimSizes) {
diff --git a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
index bf522bcaed4..a2d600aa0c9 100644
--- a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
+++ b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
@@ -24,9 +24,9 @@ makeValueType(std::vector<eval::ValueType::Dimension> &&dimensions) {
void
DenseBinaryFormat::serialize(nbostream &stream, const DenseTensor &tensor)
{
- stream.putInt1_4Bytes(tensor.type().dimensions().size());
+ stream.putInt1_4Bytes(tensor.fast_type().dimensions().size());
size_t cellsSize = 1;
- for (const auto &dimension : tensor.type().dimensions()) {
+ for (const auto &dimension : tensor.fast_type().dimensions()) {
stream.writeSmallString(dimension.name);
stream.putInt1_4Bytes(dimension.size);
cellsSize *= dimension.size;
diff --git a/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h b/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h
index b0c75655ce5..33000d4889d 100644
--- a/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h
+++ b/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h
@@ -127,7 +127,7 @@ public:
insertCell(address.getAddressRef(), value, [](double, double) -> double { abort(); });
}
- eval::ValueType &type() { return _type; }
+ eval::ValueType &fast_type() { return _type; }
Cells &cells() { return _cells; }
};
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h
index ad460f4849c..8f5f8066352 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h
@@ -37,7 +37,7 @@ public:
SparseTensor(eval::ValueType &&type_in,
Cells &&cells_in, Stash &&stash_in);
const Cells &cells() const { return _cells; }
- const eval::ValueType &type() const { return _type; }
+ const eval::ValueType &fast_type() const { return _type; }
bool operator==(const SparseTensor &rhs) const;
eval::ValueType combineDimensionsWith(const SparseTensor &rhs) const;
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp
index cb22afc8fd5..4528c8ef1df 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp
@@ -16,7 +16,7 @@ std::unique_ptr<Tensor>
apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func)
{
DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs));
- TensorAddressCombiner addressCombiner(lhs.type(), rhs.type());
+ TensorAddressCombiner addressCombiner(lhs.fast_type(), rhs.fast_type());
for (const auto &lhsCell : lhs.cells()) {
for (const auto &rhsCell : rhs.cells()) {
bool combineSuccess = addressCombiner.combine(lhsCell.first,
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
index 35ae6b7544b..b4c9d511d09 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
@@ -90,9 +90,9 @@ SparseTensorMatch::slowMatch(const TensorImplType &lhs,
{
std::vector<AddressOp> ops;
SparseTensorAddressBuilder addressBuilder;
- SparseTensorAddressPadder addressPadder(_builder.type(),
- lhs.type());
- buildTransformOps(ops, lhs.type(), rhs.type());
+ SparseTensorAddressPadder addressPadder(_builder.fast_type(),
+ lhs.fast_type());
+ buildTransformOps(ops, lhs.fast_type(), rhs.fast_type());
for (const auto &lhsCell : lhs.cells()) {
if (!transformAddress(addressBuilder, lhsCell.first, ops)) {
continue;
@@ -110,8 +110,8 @@ SparseTensorMatch::SparseTensorMatch(const TensorImplType &lhs,
const TensorImplType &rhs)
: Parent(lhs.combineDimensionsWith(rhs))
{
- if ((lhs.type().dimensions().size() == rhs.type().dimensions().size()) &&
- (lhs.type().dimensions().size() == _builder.type().dimensions().size())) {
+ if ((lhs.fast_type().dimensions().size() == rhs.fast_type().dimensions().size()) &&
+ (lhs.fast_type().dimensions().size() == _builder.fast_type().dimensions().size())) {
// Ensure that first tensor to fastMatch has fewest cells.
if (lhs.cells().size() <= rhs.cells().size()) {
fastMatch(lhs, rhs);
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp
index 30b359eb73e..53ab8116255 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp
@@ -45,11 +45,11 @@ reduce(const SparseTensor &tensor,
if (dimensions.empty()) {
return reduceAll(tensor, func);
}
- DirectTensorBuilder<SparseTensor> builder(tensor.type().reduce(dimensions));
- if (builder.type().dimensions().empty()) {
+ DirectTensorBuilder<SparseTensor> builder(tensor.fast_type().reduce(dimensions));
+ if (builder.fast_type().dimensions().empty()) {
return reduceAll(tensor, builder, func);
}
- TensorAddressReducer addressReducer(tensor.type(), dimensions);
+ TensorAddressReducer addressReducer(tensor.fast_type(), dimensions);
for (const auto &cell : tensor.cells()) {
addressReducer.reduce(cell.first);
builder.insertCell(addressReducer.getAddressRef(), cell.second, func);
diff --git a/eval/src/vespa/eval/tensor/tensor.h b/eval/src/vespa/eval/tensor/tensor.h
index 3b3d7ce4a70..80afbbf52ff 100644
--- a/eval/src/vespa/eval/tensor/tensor.h
+++ b/eval/src/vespa/eval/tensor/tensor.h
@@ -31,6 +31,7 @@ struct Tensor : public eval::Tensor
Tensor();
virtual ~Tensor() {}
virtual const eval::ValueType &getType() const = 0;
+ virtual const eval::ValueType &type() const override { return getType(); }
virtual double sum() const = 0;
virtual double as_double() const final override { return sum(); }
virtual Tensor::UP add(const Tensor &arg) const = 0;
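
A short sketch of what the new inheritance buys (illustration only; cells_sum is a hypothetical helper): a concrete tensor can now be handed directly to code expecting an eval::Value:

#include <vespa/eval/tensor/tensor.h>

// as_double() is final on tensor::Tensor and forwards to sum(); type()
// forwards to getType(), so a tensor::Tensor already satisfies the
// eval::Value interface without a wrapper object.
double cells_sum(const vespalib::tensor::Tensor &tensor) {
    const vespalib::eval::Value &value = tensor;   // Tensor is-a Value after this change
    return value.as_double();
}
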
diff --git a/eval/src/vespa/eval/tensor/tensor_apply.cpp b/eval/src/vespa/eval/tensor/tensor_apply.cpp
index f6ee7492b05..7c518d0516f 100644
--- a/eval/src/vespa/eval/tensor/tensor_apply.cpp
+++ b/eval/src/vespa/eval/tensor/tensor_apply.cpp
@@ -8,7 +8,7 @@ namespace tensor {
template <class TensorT>
TensorApply<TensorT>::TensorApply(const TensorImplType &tensor,
const CellFunction &func)
- : Parent(tensor.type())
+ : Parent(tensor.fast_type())
{
for (const auto &cell : tensor.cells()) {
_builder.insertCell(cell.first, func.apply(cell.second));
diff --git a/eval/src/vespa/eval/tensor/tensor_mapper.cpp b/eval/src/vespa/eval/tensor/tensor_mapper.cpp
index 7c2c72abd46..25b369c246d 100644
--- a/eval/src/vespa/eval/tensor/tensor_mapper.cpp
+++ b/eval/src/vespa/eval/tensor/tensor_mapper.cpp
@@ -69,7 +69,7 @@ mapAddress(const TensorAddress &address)
{
_addressBuilder.clear();
TensorAddressElementIterator<TensorAddress> addressIterator(address);
- for (const auto &dimension : _builder.type().dimensions()) {
+ for (const auto &dimension : _builder.fast_type().dimensions()) {
if (addressIterator.skipToDimension(dimension.name)) {
_addressBuilder.add(addressIterator.label());
addressIterator.next();
diff --git a/eval/src/vespa/eval/tensor/tensor_operation.h b/eval/src/vespa/eval/tensor/tensor_operation.h
index abf58641549..6975c21c448 100644
--- a/eval/src/vespa/eval/tensor/tensor_operation.h
+++ b/eval/src/vespa/eval/tensor/tensor_operation.h
@@ -28,17 +28,17 @@ protected:
public:
TensorOperation()
: _builder(),
- _type(_builder.type()),
+ _type(_builder.fast_type()),
_cells(_builder.cells())
{}
TensorOperation(const eval::ValueType &type)
: _builder(type),
- _type(_builder.type()),
+ _type(_builder.fast_type()),
_cells(_builder.cells())
{}
TensorOperation(const eval::ValueType &type, const Cells &cells)
: _builder(type, cells),
- _type(_builder.type()),
+ _type(_builder.fast_type()),
_cells(_builder.cells())
{}
Tensor::UP result() {
diff --git a/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp b/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp
index 534854732c7..7ad97a6e84e 100644
--- a/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp
+++ b/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp
@@ -20,7 +20,7 @@ WrappedSimpleTensor::equals(const Tensor &arg) const
vespalib::string
WrappedSimpleTensor::toString() const
{
- return eval::SimpleTensorEngine::ref().to_string(_tensor);
+ return toSpec().to_string();
}
eval::TensorSpec