Diffstat (limited to 'eval')
-rw-r--r--  eval/src/apps/eval_expr/eval_expr.cpp | 2
-rw-r--r--  eval/src/apps/tensor_conformance/generate.cpp | 1
-rw-r--r--  eval/src/apps/tensor_conformance/tensor_conformance.cpp | 51
-rw-r--r--  eval/src/apps/tensor_conformance/test_spec.json | 20
-rw-r--r--  eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp | 15
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp | 1
-rw-r--r--  eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp | 18
-rw-r--r--  eval/src/tests/eval/tensor_function/tensor_function_test.cpp | 135
-rw-r--r--  eval/src/tests/eval/value_cache/tensor_loader_test.cpp | 61
-rw-r--r--  eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp | 28
-rw-r--r--  eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp | 27
-rw-r--r--  eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp | 5
-rw-r--r--  eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp | 16
-rw-r--r--  eval/src/vespa/eval/eval/call_nodes.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/call_nodes.h | 1
-rw-r--r--  eval/src/vespa/eval/eval/interpreted_function.cpp | 48
-rw-r--r--  eval/src/vespa/eval/eval/key_gen.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp | 6
-rw-r--r--  eval/src/vespa/eval/eval/llvm/llvm_wrapper.h | 1
-rw-r--r--  eval/src/vespa/eval/eval/node_types.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/node_visitor.h | 2
-rw-r--r--  eval/src/vespa/eval/eval/operation.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/operation.h | 1
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor.cpp | 6
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor.h | 2
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor_engine.cpp | 116
-rw-r--r--  eval/src/vespa/eval/eval/simple_tensor_engine.h | 10
-rw-r--r--  eval/src/vespa/eval/eval/tensor.cpp | 2
-rw-r--r--  eval/src/vespa/eval/eval/tensor.h | 6
-rw-r--r--  eval/src/vespa/eval/eval/tensor_engine.h | 20
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.cpp | 59
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.h | 100
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_spec.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/test/tensor_conformance.cpp | 156
-rw-r--r--  eval/src/vespa/eval/eval/value.cpp | 15
-rw-r--r--  eval/src/vespa/eval/eval/value.h | 33
-rw-r--r--  eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp | 9
-rw-r--r--  eval/src/vespa/eval/eval/value_cache/constant_value.h | 20
-rw-r--r--  eval/src/vespa/eval/tensor/default_tensor_engine.cpp | 100
-rw-r--r--  eval/src/vespa/eval/tensor/default_tensor_engine.h | 12
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp | 6
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor.cpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp | 4
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp | 21
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h | 5
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp | 14
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_view.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp | 4
-rw-r--r--  eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp | 4
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor.h | 2
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp | 10
-rw-r--r--  eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp | 6
-rw-r--r--  eval/src/vespa/eval/tensor/tensor.h | 1
-rw-r--r--  eval/src/vespa/eval/tensor/tensor_apply.cpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/tensor_mapper.cpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/tensor_operation.h | 6
-rw-r--r--  eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp | 2
63 files changed, 575 insertions(+), 640 deletions(-)
diff --git a/eval/src/apps/eval_expr/eval_expr.cpp b/eval/src/apps/eval_expr/eval_expr.cpp
index 2e1f7f7fdcb..71c808174b8 100644
--- a/eval/src/apps/eval_expr/eval_expr.cpp
+++ b/eval/src/apps/eval_expr/eval_expr.cpp
@@ -26,7 +26,7 @@ int main(int argc, char **argv) {
if (result.is_double()) {
fprintf(stdout, "%.32g\n", result.as_double());
} else if (result.is_tensor()) {
- vespalib::string str = SimpleTensorEngine::ref().to_spec(*result.as_tensor()).to_string();
+ vespalib::string str = SimpleTensorEngine::ref().to_spec(result).to_string();
fprintf(stdout, "%s\n", str.c_str());
} else {
fprintf(stdout, "error\n");
diff --git a/eval/src/apps/tensor_conformance/generate.cpp b/eval/src/apps/tensor_conformance/generate.cpp
index 993d226c3c6..0aba5276ace 100644
--- a/eval/src/apps/tensor_conformance/generate.cpp
+++ b/eval/src/apps/tensor_conformance/generate.cpp
@@ -93,6 +93,7 @@ void generate_tensor_map(TestBuilder &dst) {
generate_op1_map("isNan(a)", operation::IsNan::f, Mask2Seq(SkipNth(3), 1.0, my_nan), dst);
generate_op1_map("relu(a)", operation::Relu::f, Sub2(Div10(N())), dst);
generate_op1_map("sigmoid(a)", operation::Sigmoid::f, Sub2(Div10(N())), dst);
+ generate_op1_map("elu(a)", operation::Elu::f, Sub2(Div10(N())), dst);
generate_op1_map("a in [1,5,7,13,42]", MyIn::f, N(), dst);
generate_map_expr("map(a,f(a)((a+1)*2))", MyOp::f, Div10(N()), dst);
}
diff --git a/eval/src/apps/tensor_conformance/tensor_conformance.cpp b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
index d1163fb579d..616b98f0809 100644
--- a/eval/src/apps/tensor_conformance/tensor_conformance.cpp
+++ b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
@@ -60,69 +60,42 @@ nbostream extract_data(const Inspector &value) {
//-----------------------------------------------------------------------------
-TensorSpec to_spec(const Value &value) {
- if (value.is_error()) {
- return TensorSpec("error");
- } else if (value.is_double()) {
- return TensorSpec("double").add({}, value.as_double());
- } else {
- ASSERT_TRUE(value.is_tensor());
- auto tensor = value.as_tensor();
- return tensor->engine().to_spec(*tensor);
- }
-}
-
-const Value &to_value(const TensorSpec &spec, const TensorEngine &engine, Stash &stash) {
- if (spec.type() == "error") {
- return stash.create<ErrorValue>();
- } else if (spec.type() == "double") {
- double value = 0.0;
- for (const auto &cell: spec.cells()) {
- value += cell.second;
- }
- return stash.create<DoubleValue>(value);
- } else {
- ASSERT_TRUE(starts_with(spec.type(), "tensor("));
- return stash.create<TensorValue>(engine.create(spec));
- }
-}
-
void insert_value(Cursor &cursor, const vespalib::string &name, const TensorSpec &spec) {
- Stash stash;
nbostream data;
- const Value &value = to_value(spec, SimpleTensorEngine::ref(), stash);
- SimpleTensorEngine::ref().encode(value, data, stash);
+ Value::UP value = SimpleTensorEngine::ref().from_spec(spec);
+ SimpleTensorEngine::ref().encode(*value, data);
cursor.setData(name, Memory(data.peek(), data.size()));
}
TensorSpec extract_value(const Inspector &inspector) {
- Stash stash;
nbostream data = extract_data(inspector);
- return to_spec(SimpleTensorEngine::ref().decode(data, stash));
+ const auto &engine = SimpleTensorEngine::ref();
+ return engine.to_spec(*engine.decode(data));
}
//-----------------------------------------------------------------------------
-std::vector<ValueType> get_types(const std::vector<Value::CREF> &param_values) {
+std::vector<ValueType> get_types(const std::vector<Value::UP> &param_values) {
std::vector<ValueType> param_types;
for (size_t i = 0; i < param_values.size(); ++i) {
- param_types.emplace_back(param_values[i].get().type());
+ param_types.emplace_back(param_values[i]->type());
}
return param_types;
}
TensorSpec eval_expr(const Inspector &test, const TensorEngine &engine, bool typed) {
- Stash stash;
Function fun = Function::parse(test["expression"].asString().make_string());
- std::vector<Value::CREF> param_values;
+ std::vector<Value::UP> param_values;
+ std::vector<Value::CREF> param_refs;
for (size_t i = 0; i < fun.num_params(); ++i) {
- param_values.emplace_back(to_value(extract_value(test["inputs"][fun.param_name(i)]), engine, stash));
+ param_values.emplace_back(engine.from_spec(extract_value(test["inputs"][fun.param_name(i)])));
+ param_refs.emplace_back(*param_values.back());
}
NodeTypes types = typed ? NodeTypes(fun, get_types(param_values)) : NodeTypes();
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
- InterpretedFunction::SimpleObjectParams params(param_values);
- return to_spec(ifun.eval(ctx, params));
+ InterpretedFunction::SimpleObjectParams params(param_refs);
+ return engine.to_spec(ifun.eval(ctx, params));
}
//-----------------------------------------------------------------------------
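Note on the hunk above: the local Stash/TensorValue based to_value/to_spec helpers are replaced by the value-based TensorEngine API. The following is a minimal sketch of the new TensorSpec/Value round-trip, built only from the calls visible in this diff; the include paths are copied from the test files below and may need adjusting, and the example spec is illustrative.

#include <vespa/eval/eval/simple_tensor_engine.h>
#include <vespa/eval/eval/tensor_spec.h>

using namespace vespalib::eval;

void round_trip_sketch() {
    const auto &engine = SimpleTensorEngine::ref();
    TensorSpec spec = TensorSpec("tensor(x[2])")
                      .add({{"x", 0}}, 1.0)
                      .add({{"x", 1}}, 2.0);
    Value::UP value = engine.from_spec(spec);  // TensorSpec -> Value (owned; no Stash needed)
    TensorSpec back = engine.to_spec(*value);  // Value -> TensorSpec
    (void) back;
}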
diff --git a/eval/src/apps/tensor_conformance/test_spec.json b/eval/src/apps/tensor_conformance/test_spec.json
index 95439b0f104..24edc9a7ac7 100644
--- a/eval/src/apps/tensor_conformance/test_spec.json
+++ b/eval/src/apps/tensor_conformance/test_spec.json
@@ -532,6 +532,24 @@
{"expression":"map(a,f(a)(sigmoid(a)))","inputs":{"a":"0x010301780179017A180161036261720169BFF8000000000000016103626172016ABFF6666666666666016103626172016BBFF4CCCCCCCCCCCD016103626172016CBFF3333333333333016103666F6F0169BFFE666666666666016103666F6F016ABFFCCCCCCCCCCCCD016103666F6F016BBFFB333333333333016103666F6F016CBFF999999999999A0162036261720169BFE6666666666666016203626172016ABFE3333333333334016203626172016BBFE0000000000000016203626172016CBFD9999999999998016203666F6F0169BFF199999999999A016203666F6F016ABFF0000000000000016203666F6F016BBFECCCCCCCCCCCCC016203666F6F016CBFE999999999999A01630362617201693FB99999999999A0016303626172016A3FC99999999999A0016303626172016B3FD3333333333330016303626172016C3FD9999999999998016303666F6F0169BFD3333333333334016303666F6F016ABFC9999999999998016303666F6F016BBFB99999999999A0016303666F6F016C0000000000000000"},"result":{"expect":"0x010301780179017A1801610362617201693FC759B8355A1BB0016103626172016A3FC95209D0A1A2DE016103626172016B3FCB69C25FE3C688016103626172016C3FCDA0FADA5A6609016103666F6F01693FC0A764FD2927E7016103666F6F016A3FC2282CFA533F46016103666F6F016B3FC3C5848EF36C9E016103666F6F016C3FC5806BEB16EB7F01620362617201693FD53C695ABCD715016203626172016A3FD6AD912C137583016203626172016B3FD829A0565978DE016203626172016C3FD9AF19F3D3169C016203666F6F01693FCFF77A137CDBF9016203666F6F016A3FD136561454BA86016203666F6F016B3FD27FCDA8478FA3016203666F6F016C3FD3D775461EDE9501630362617201693FE0CCA12729AFB8016303626172016A3FE1983D7795F414016303626172016B3FE261D545E46A8A016303626172016C3FE32873061674B2016303666F6F01693FDB3C5574372AEB016303666F6F016A3FDCCF8510D417DA016303666F6F016B3FDE66BDB1ACA090016303666F6F016C3FE0000000000000"}}
{"expression":"map(a,f(a)(sigmoid(a)))","inputs":{"a":"0x0301017902017803017A070203626172BFF3333333333333BFF199999999999ABFF0000000000000BFECCCCCCCCCCCCCBFE999999999999ABFE6666666666666BFE33333333333343FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A03666F6FBFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333BFF999999999999ABFF8000000000000BFF6666666666666BFF4CCCCCCCCCCCDBFE0000000000000BFD9999999999998BFD3333333333334BFC9999999999998BFB99999999999A000000000000000003FB99999999999A03FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF8000000000000"},"result":{"expect":"0x0301017902017803017A0702036261723FCDA0FADA5A66093FCFF77A137CDBF93FD136561454BA863FD27FCDA8478FA33FD3D775461EDE953FD53C695ABCD7153FD6AD912C1375833FE1983D7795F4143FE261D545E46A8A3FE32873061674B23FE3EB2FD4D343913FE4A93769F6453E3FE561CB52A194763FE614455CF090B63FEA9FE5053A45203FEB0E9EDC4324D83FEB75F4C16B302F3FEBD626C0B5B6063FEC2F7D5A8A79C93FEC8247621BC0C83FECCED80FEF120203666F6F3FC0A764FD2927E73FC2282CFA533F463FC3C5848EF36C9E3FC5806BEB16EB7F3FC759B8355A1BB03FC95209D0A1A2DE3FCB69C25FE3C6883FD829A0565978DE3FD9AF19F3D3169C3FDB3C5574372AEB3FDCCF8510D417DA3FDE66BDB1ACA0903FE00000000000003FE0CCA12729AFB83FE6C0192BDC382E3FE764D4F5D5A2BD3FE802217B20C9023FE897C14969667F3FE9258F68070E5E3FE9AB7D8BD797483FEA2991F2A97914"}}
{"expression":"map(a,f(a)(sigmoid(a)))","inputs":{"a":"0x03020178017A010179050C01610169BFFE666666666666BFF8000000000000BFF199999999999ABFE6666666666666BFD33333333333340161016ABFFCCCCCCCCCCCCDBFF6666666666666BFF0000000000000BFE3333333333334BFC99999999999980161016BBFFB333333333333BFF4CCCCCCCCCCCDBFECCCCCCCCCCCCCBFE0000000000000BFB99999999999A00161016CBFF999999999999ABFF3333333333333BFE999999999999ABFD99999999999980000000000000000016201693FB99999999999A03FE00000000000003FECCCCCCCCCCCCC3FF4CCCCCCCCCCCC3FFB3333333333340162016A3FC99999999999A03FE33333333333343FF00000000000003FF66666666666663FFCCCCCCCCCCCCC0162016B3FD33333333333303FE66666666666683FF199999999999A3FF80000000000003FFE6666666666660162016C3FD99999999999983FE99999999999983FF33333333333343FF999999999999A4000000000000000016301694000CCCCCCCCCCCC40040000000000004007333333333334400A666666666666400D99999999999A0163016A400199999999999A4004CCCCCCCCCCCC4008000000000000400B333333333334400E6666666666660163016B4002666666666666400599999999999A4008CCCCCCCCCCCC400C000000000000400F3333333333340163016C40033333333333344006666666666666400999999999999A400CCCCCCCCCCCCC4010000000000000"},"result":{"expect":"0x03020178017A010179050C016101693FC0A764FD2927E73FC759B8355A1BB03FCFF77A137CDBF93FD53C695ABCD7153FDB3C5574372AEB0161016A3FC2282CFA533F463FC95209D0A1A2DE3FD136561454BA863FD6AD912C1375833FDCCF8510D417DA0161016B3FC3C5848EF36C9E3FCB69C25FE3C6883FD27FCDA8478FA33FD829A0565978DE3FDE66BDB1ACA0900161016C3FC5806BEB16EB7F3FCDA0FADA5A66093FD3D775461EDE953FD9AF19F3D3169C3FE0000000000000016201693FE0CCA12729AFB83FE3EB2FD4D343913FE6C0192BDC382E3FE9258F68070E5E3FEB0E9EDC4324D80162016A3FE1983D7795F4143FE4A93769F6453E3FE764D4F5D5A2BD3FE9AB7D8BD797483FEB75F4C16B302F0162016B3FE261D545E46A8A3FE561CB52A194763FE802217B20C9023FEA2991F2A979143FEBD626C0B5B6060162016C3FE32873061674B23FE614455CF090B63FE897C14969667F3FEA9FE5053A45203FEC2F7D5A8A79C9016301693FEC8247621BC0C83FED9291DDB596F83FEE54C20D06AA953FEEDC99CF2C9D4D3FEF3A59F801F5820163016A3FECCED80FEF12023FEDC99E39374D9C3FEE7B7CBC36FABD3FEEF76F8069F3FB3FEF4CBFA61DE6A30163016B3FED15854CD0D92B3FEDFC1F4CE6E8223FEE9EDD88B9D8AF3FEF0FDFCBF19A933FEF5D77DCF758080163016C3FED56A636946E583FEE2A667D67D08C3FEEBF2786AED6983FEF261E0FCD4B463FEF6CA82F0DE1EA"}}
+{"expression":"elu(a)","inputs":{"a":"0x0200BFFE666666666666"},"result":{"expect":"0x0200BFEB36BBDEFDC9FA"}}
+{"expression":"elu(a)","inputs":{"a":"0x0201017803BFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333"},"result":{"expect":"0x0201017803BFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7E"}}
+{"expression":"elu(a)","inputs":{"a":"0x0202017803017905BFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333BFF999999999999ABFF8000000000000BFF6666666666666BFF4CCCCCCCCCCCDBFF3333333333333BFF199999999999ABFF0000000000000BFECCCCCCCCCCCCCBFE999999999999ABFE6666666666666BFE3333333333334BFE0000000000000"},"result":{"expect":"0x0202017803017905BFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7EBFE98A1050412C7BBFE8DC1E236D28F9BFE81BE0AF127E3BBFE7476B67B98EC0BFE65C9DF4C2F690BFE5591EBDB77208BFE43A54E4E98864BFE2FD619FFBC8F0BFE19F18DD3F123CBFE01BF92311555FBFDCE04528D3F63ABFD92E9A0720D3EC"}}
+{"expression":"elu(a)","inputs":{"a":"0x0203017803017905017A07BFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333BFF999999999999ABFF8000000000000BFF6666666666666BFF4CCCCCCCCCCCDBFF3333333333333BFF199999999999ABFF0000000000000BFECCCCCCCCCCCCCBFE999999999999ABFE6666666666666BFE3333333333334BFE0000000000000BFD9999999999998BFD3333333333334BFC9999999999998BFB99999999999A000000000000000003FB99999999999A03FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF80000000000003FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A4002666666666666400333333333333440040000000000004004CCCCCCCCCCCC400599999999999A4006666666666666400733333333333440080000000000004008CCCCCCCCCCCC400999999999999A400A666666666666400B333333333334400C000000000000400CCCCCCCCCCCCC400D99999999999A400E666666666666400F333333333334401000000000000040106666666666664010CCCCCCCCCCCD4011333333333333401199999999999A401200000000000040126666666666664012CCCCCCCCCCCD4013333333333333401399999999999A401400000000000040146666666666664014CCCCCCCCCCCD4015333333333333401599999999999A401600000000000040166666666666664016CCCCCCCCCCCD4017333333333333401799999999999A401800000000000040186666666666664018CCCCCCCCCCCC4019333333333334401999999999999A401A000000000000401A666666666666401ACCCCCCCCCCCC401B333333333334401B99999999999A401C000000000000401C666666666666401CCCCCCCCCCCCC401D333333333334401D99999999999A401E000000000000401E666666666666401ECCCCCCCCCCCC401F333333333334401F99999999999A402000000000000040203333333333334020666666666666402099999999999A4020CCCCCCCCCCCD4021000000000000"},"result":{"expect":"0x0203017803017905017A07BFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7EBFE98A1050412C7BBFE8DC1E236D28F9BFE81BE0AF127E3BBFE7476B67B98EC0BFE65C9DF4C2F690BFE5591EBDB77208BFE43A54E4E98864BFE2FD619FFBC8F0BFE19F18DD3F123CBFE01BF92311555FBFDCE04528D3F63ABFD92E9A0720D3ECBFD51979F31B1E24BFD0966F2C7907F6BFC733D4A7A67A98BFB85C933156A63000000000000000003FB99999999999A03FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF80000000000003FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A4002666666666666400333333333333440040000000000004004CCCCCCCCCCCC400599999999999A4006666666666666400733333333333440080000000000004008CCCCCCCCCCCC400999999999999A400A666666666666400B333333333334400C000000000000400CCCCCCCCCCCCC400D99999999999A400E666666666666400F333333333334401000000000000040106666666666664010CCCCCCCCCCCD4011333333333333401199999999999A401200000000000040126666666666664012CCCCCCCCCCCD4013333333333333401399999999999A401400000000000040146666666666664014CCCCCCCCCCCD4015333333333333401599999999999A401600000000000040166666666666664016CCCCCCCCCCCD4017333333333333401799999999999A401800000000000040186666666666664018CCCCCCCCCCCC4019333333333334401999999999999A401A000000000000401A666666666666401ACCCCCCCCCCCC401B333333333334401B99999999999A401C000000000000401C666666666666401CCCCCCCCCCCCC401D333333333334401D99999999999A401E000000000000401E666666666666401ECCCCCCCCCCCC401F333333333334401F99999999999A402000000000000040203333333333334020666666666666402099999999999A4020CCCCCCCCCCCD4021000000000000"}}
+{"expression":"elu(a)","inputs":{"a":"0x01010178030161BFFE6666666666660162BFFCCCCCCCCCCCCD0163BFFB333333333333"},"result":{"expect":"0x01010178030161BFEB36BBDEFDC9FA0162BFEAB5DF1B20BD730163BFEA2774E1D59D7E"}}
+{"expression":"elu(a)","inputs":{"a":"0x01020178017906016103626172BFFCCCCCCCCCCCCD016103666F6FBFFE666666666666016203626172BFF999999999999A016203666F6FBFFB333333333333016303626172BFF6666666666666016303666F6FBFF8000000000000"},"result":{"expect":"0x01020178017906016103626172BFEAB5DF1B20BD73016103666F6FBFEB36BBDEFDC9FA016203626172BFE98A1050412C7B016203666F6FBFEA2774E1D59D7E016303626172BFE81BE0AF127E3B016303666F6FBFE8DC1E236D28F9"}}
+{"expression":"elu(a)","inputs":{"a":"0x010301780179017A180161036261720169BFF8000000000000016103626172016ABFF6666666666666016103626172016BBFF4CCCCCCCCCCCD016103626172016CBFF3333333333333016103666F6F0169BFFE666666666666016103666F6F016ABFFCCCCCCCCCCCCD016103666F6F016BBFFB333333333333016103666F6F016CBFF999999999999A0162036261720169BFE6666666666666016203626172016ABFE3333333333334016203626172016BBFE0000000000000016203626172016CBFD9999999999998016203666F6F0169BFF199999999999A016203666F6F016ABFF0000000000000016203666F6F016BBFECCCCCCCCCCCCC016203666F6F016CBFE999999999999A01630362617201693FB99999999999A0016303626172016A3FC99999999999A0016303626172016B3FD3333333333330016303626172016C3FD9999999999998016303666F6F0169BFD3333333333334016303666F6F016ABFC9999999999998016303666F6F016BBFB99999999999A0016303666F6F016C0000000000000000"},"result":{"expect":"0x010301780179017A180161036261720169BFE8DC1E236D28F9016103626172016ABFE81BE0AF127E3B016103626172016BBFE7476B67B98EC0016103626172016CBFE65C9DF4C2F690016103666F6F0169BFEB36BBDEFDC9FA016103666F6F016ABFEAB5DF1B20BD73016103666F6F016BBFEA2774E1D59D7E016103666F6F016CBFE98A1050412C7B0162036261720169BFE01BF92311555F016203626172016ABFDCE04528D3F63A016203626172016BBFD92E9A0720D3EC016203626172016CBFD51979F31B1E24016203666F6F0169BFE5591EBDB77208016203666F6F016ABFE43A54E4E98864016203666F6F016BBFE2FD619FFBC8F0016203666F6F016CBFE19F18DD3F123C01630362617201693FB99999999999A0016303626172016A3FC99999999999A0016303626172016B3FD3333333333330016303626172016C3FD9999999999998016303666F6F0169BFD0966F2C7907F6016303666F6F016ABFC733D4A7A67A98016303666F6F016BBFB85C933156A630016303666F6F016C0000000000000000"}}
+{"expression":"elu(a)","inputs":{"a":"0x0301017902017803017A070203626172BFF3333333333333BFF199999999999ABFF0000000000000BFECCCCCCCCCCCCCBFE999999999999ABFE6666666666666BFE33333333333343FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A03666F6FBFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333BFF999999999999ABFF8000000000000BFF6666666666666BFF4CCCCCCCCCCCDBFE0000000000000BFD9999999999998BFD3333333333334BFC9999999999998BFB99999999999A000000000000000003FB99999999999A03FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF8000000000000"},"result":{"expect":"0x0301017902017803017A070203626172BFE65C9DF4C2F690BFE5591EBDB77208BFE43A54E4E98864BFE2FD619FFBC8F0BFE19F18DD3F123CBFE01BF92311555FBFDCE04528D3F63A3FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A03666F6FBFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7EBFE98A1050412C7BBFE8DC1E236D28F9BFE81BE0AF127E3BBFE7476B67B98EC0BFD92E9A0720D3ECBFD51979F31B1E24BFD0966F2C7907F6BFC733D4A7A67A98BFB85C933156A63000000000000000003FB99999999999A03FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF8000000000000"}}
+{"expression":"elu(a)","inputs":{"a":"0x03020178017A010179050C01610169BFFE666666666666BFF8000000000000BFF199999999999ABFE6666666666666BFD33333333333340161016ABFFCCCCCCCCCCCCDBFF6666666666666BFF0000000000000BFE3333333333334BFC99999999999980161016BBFFB333333333333BFF4CCCCCCCCCCCDBFECCCCCCCCCCCCCBFE0000000000000BFB99999999999A00161016CBFF999999999999ABFF3333333333333BFE999999999999ABFD99999999999980000000000000000016201693FB99999999999A03FE00000000000003FECCCCCCCCCCCCC3FF4CCCCCCCCCCCC3FFB3333333333340162016A3FC99999999999A03FE33333333333343FF00000000000003FF66666666666663FFCCCCCCCCCCCCC0162016B3FD33333333333303FE66666666666683FF199999999999A3FF80000000000003FFE6666666666660162016C3FD99999999999983FE99999999999983FF33333333333343FF999999999999A4000000000000000016301694000CCCCCCCCCCCC40040000000000004007333333333334400A666666666666400D99999999999A0163016A400199999999999A4004CCCCCCCCCCCC4008000000000000400B333333333334400E6666666666660163016B4002666666666666400599999999999A4008CCCCCCCCCCCC400C000000000000400F3333333333340163016C40033333333333344006666666666666400999999999999A400CCCCCCCCCCCCC4010000000000000"},"result":{"expect":"0x03020178017A010179050C01610169BFEB36BBDEFDC9FABFE8DC1E236D28F9BFE5591EBDB77208BFE01BF92311555FBFD0966F2C7907F60161016ABFEAB5DF1B20BD73BFE81BE0AF127E3BBFE43A54E4E98864BFDCE04528D3F63ABFC733D4A7A67A980161016BBFEA2774E1D59D7EBFE7476B67B98EC0BFE2FD619FFBC8F0BFD92E9A0720D3ECBFB85C933156A6300161016CBFE98A1050412C7BBFE65C9DF4C2F690BFE19F18DD3F123CBFD51979F31B1E240000000000000000016201693FB99999999999A03FE00000000000003FECCCCCCCCCCCCC3FF4CCCCCCCCCCCC3FFB3333333333340162016A3FC99999999999A03FE33333333333343FF00000000000003FF66666666666663FFCCCCCCCCCCCCC0162016B3FD33333333333303FE66666666666683FF199999999999A3FF80000000000003FFE6666666666660162016C3FD99999999999983FE99999999999983FF33333333333343FF999999999999A4000000000000000016301694000CCCCCCCCCCCC40040000000000004007333333333334400A666666666666400D99999999999A0163016A400199999999999A4004CCCCCCCCCCCC4008000000000000400B333333333334400E6666666666660163016B4002666666666666400599999999999A4008CCCCCCCCCCCC400C000000000000400F3333333333340163016C40033333333333344006666666666666400999999999999A400CCCCCCCCCCCCC4010000000000000"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x0200BFFE666666666666"},"result":{"expect":"0x0200BFEB36BBDEFDC9FA"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x0201017803BFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333"},"result":{"expect":"0x0201017803BFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7E"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x0202017803017905BFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333BFF999999999999ABFF8000000000000BFF6666666666666BFF4CCCCCCCCCCCDBFF3333333333333BFF199999999999ABFF0000000000000BFECCCCCCCCCCCCCBFE999999999999ABFE6666666666666BFE3333333333334BFE0000000000000"},"result":{"expect":"0x0202017803017905BFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7EBFE98A1050412C7BBFE8DC1E236D28F9BFE81BE0AF127E3BBFE7476B67B98EC0BFE65C9DF4C2F690BFE5591EBDB77208BFE43A54E4E98864BFE2FD619FFBC8F0BFE19F18DD3F123CBFE01BF92311555FBFDCE04528D3F63ABFD92E9A0720D3EC"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x0203017803017905017A07BFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333BFF999999999999ABFF8000000000000BFF6666666666666BFF4CCCCCCCCCCCDBFF3333333333333BFF199999999999ABFF0000000000000BFECCCCCCCCCCCCCBFE999999999999ABFE6666666666666BFE3333333333334BFE0000000000000BFD9999999999998BFD3333333333334BFC9999999999998BFB99999999999A000000000000000003FB99999999999A03FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF80000000000003FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A4002666666666666400333333333333440040000000000004004CCCCCCCCCCCC400599999999999A4006666666666666400733333333333440080000000000004008CCCCCCCCCCCC400999999999999A400A666666666666400B333333333334400C000000000000400CCCCCCCCCCCCC400D99999999999A400E666666666666400F333333333334401000000000000040106666666666664010CCCCCCCCCCCD4011333333333333401199999999999A401200000000000040126666666666664012CCCCCCCCCCCD4013333333333333401399999999999A401400000000000040146666666666664014CCCCCCCCCCCD4015333333333333401599999999999A401600000000000040166666666666664016CCCCCCCCCCCD4017333333333333401799999999999A401800000000000040186666666666664018CCCCCCCCCCCC4019333333333334401999999999999A401A000000000000401A666666666666401ACCCCCCCCCCCC401B333333333334401B99999999999A401C000000000000401C666666666666401CCCCCCCCCCCCC401D333333333334401D99999999999A401E000000000000401E666666666666401ECCCCCCCCCCCC401F333333333334401F99999999999A402000000000000040203333333333334020666666666666402099999999999A4020CCCCCCCCCCCD4021000000000000"},"result":{"expect":"0x0203017803017905017A07BFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7EBFE98A1050412C7BBFE8DC1E236D28F9BFE81BE0AF127E3BBFE7476B67B98EC0BFE65C9DF4C2F690BFE5591EBDB77208BFE43A54E4E98864BFE2FD619FFBC8F0BFE19F18DD3F123CBFE01BF92311555FBFDCE04528D3F63ABFD92E9A0720D3ECBFD51979F31B1E24BFD0966F2C7907F6BFC733D4A7A67A98BFB85C933156A63000000000000000003FB99999999999A03FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF80000000000003FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A4002666666666666400333333333333440040000000000004004CCCCCCCCCCCC400599999999999A4006666666666666400733333333333440080000000000004008CCCCCCCCCCCC400999999999999A400A666666666666400B333333333334400C000000000000400CCCCCCCCCCCCC400D99999999999A400E666666666666400F333333333334401000000000000040106666666666664010CCCCCCCCCCCD4011333333333333401199999999999A401200000000000040126666666666664012CCCCCCCCCCCD4013333333333333401399999999999A401400000000000040146666666666664014CCCCCCCCCCCD4015333333333333401599999999999A401600000000000040166666666666664016CCCCCCCCCCCD4017333333333333401799999999999A401800000000000040186666666666664018CCCCCCCCCCCC4019333333333334401999999999999A401A000000000000401A666666666666401ACCCCCCCCCCCC401B333333333334401B99999999999A401C000000000000401C666666666666401CCCCCCCCCCCCC401D333333333334401D99999999999A401E000000000000401E666666666666401ECCCCCCCCCCCC401F333333333334401F99999999999A402000000000000040203333333333334020666666666666402099999999999A4020CCCCCCCCCCCD4021000000000000"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x01010178030161BFFE6666666666660162BFFCCCCCCCCCCCCD0163BFFB333333333333"},"result":{"expect":"0x01010178030161BFEB36BBDEFDC9FA0162BFEAB5DF1B20BD730163BFEA2774E1D59D7E"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x01020178017906016103626172BFFCCCCCCCCCCCCD016103666F6FBFFE666666666666016203626172BFF999999999999A016203666F6FBFFB333333333333016303626172BFF6666666666666016303666F6FBFF8000000000000"},"result":{"expect":"0x01020178017906016103626172BFEAB5DF1B20BD73016103666F6FBFEB36BBDEFDC9FA016203626172BFE98A1050412C7B016203666F6FBFEA2774E1D59D7E016303626172BFE81BE0AF127E3B016303666F6FBFE8DC1E236D28F9"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x010301780179017A180161036261720169BFF8000000000000016103626172016ABFF6666666666666016103626172016BBFF4CCCCCCCCCCCD016103626172016CBFF3333333333333016103666F6F0169BFFE666666666666016103666F6F016ABFFCCCCCCCCCCCCD016103666F6F016BBFFB333333333333016103666F6F016CBFF999999999999A0162036261720169BFE6666666666666016203626172016ABFE3333333333334016203626172016BBFE0000000000000016203626172016CBFD9999999999998016203666F6F0169BFF199999999999A016203666F6F016ABFF0000000000000016203666F6F016BBFECCCCCCCCCCCCC016203666F6F016CBFE999999999999A01630362617201693FB99999999999A0016303626172016A3FC99999999999A0016303626172016B3FD3333333333330016303626172016C3FD9999999999998016303666F6F0169BFD3333333333334016303666F6F016ABFC9999999999998016303666F6F016BBFB99999999999A0016303666F6F016C0000000000000000"},"result":{"expect":"0x010301780179017A180161036261720169BFE8DC1E236D28F9016103626172016ABFE81BE0AF127E3B016103626172016BBFE7476B67B98EC0016103626172016CBFE65C9DF4C2F690016103666F6F0169BFEB36BBDEFDC9FA016103666F6F016ABFEAB5DF1B20BD73016103666F6F016BBFEA2774E1D59D7E016103666F6F016CBFE98A1050412C7B0162036261720169BFE01BF92311555F016203626172016ABFDCE04528D3F63A016203626172016BBFD92E9A0720D3EC016203626172016CBFD51979F31B1E24016203666F6F0169BFE5591EBDB77208016203666F6F016ABFE43A54E4E98864016203666F6F016BBFE2FD619FFBC8F0016203666F6F016CBFE19F18DD3F123C01630362617201693FB99999999999A0016303626172016A3FC99999999999A0016303626172016B3FD3333333333330016303626172016C3FD9999999999998016303666F6F0169BFD0966F2C7907F6016303666F6F016ABFC733D4A7A67A98016303666F6F016BBFB85C933156A630016303666F6F016C0000000000000000"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x0301017902017803017A070203626172BFF3333333333333BFF199999999999ABFF0000000000000BFECCCCCCCCCCCCCBFE999999999999ABFE6666666666666BFE33333333333343FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A03666F6FBFFE666666666666BFFCCCCCCCCCCCCDBFFB333333333333BFF999999999999ABFF8000000000000BFF6666666666666BFF4CCCCCCCCCCCDBFE0000000000000BFD9999999999998BFD3333333333334BFC9999999999998BFB99999999999A000000000000000003FB99999999999A03FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF8000000000000"},"result":{"expect":"0x0301017902017803017A070203626172BFE65C9DF4C2F690BFE5591EBDB77208BFE43A54E4E98864BFE2FD619FFBC8F0BFE19F18DD3F123CBFE01BF92311555FBFDCE04528D3F63A3FC99999999999A03FD33333333333303FD99999999999983FE00000000000003FE33333333333343FE66666666666683FE99999999999983FF999999999999A3FFB3333333333343FFCCCCCCCCCCCCC3FFE66666666666640000000000000004000CCCCCCCCCCCC400199999999999A03666F6FBFEB36BBDEFDC9FABFEAB5DF1B20BD73BFEA2774E1D59D7EBFE98A1050412C7BBFE8DC1E236D28F9BFE81BE0AF127E3BBFE7476B67B98EC0BFD92E9A0720D3ECBFD51979F31B1E24BFD0966F2C7907F6BFC733D4A7A67A98BFB85C933156A63000000000000000003FB99999999999A03FECCCCCCCCCCCCC3FF00000000000003FF199999999999A3FF33333333333343FF4CCCCCCCCCCCC3FF66666666666663FF8000000000000"}}
+{"expression":"map(a,f(a)(elu(a)))","inputs":{"a":"0x03020178017A010179050C01610169BFFE666666666666BFF8000000000000BFF199999999999ABFE6666666666666BFD33333333333340161016ABFFCCCCCCCCCCCCDBFF6666666666666BFF0000000000000BFE3333333333334BFC99999999999980161016BBFFB333333333333BFF4CCCCCCCCCCCDBFECCCCCCCCCCCCCBFE0000000000000BFB99999999999A00161016CBFF999999999999ABFF3333333333333BFE999999999999ABFD99999999999980000000000000000016201693FB99999999999A03FE00000000000003FECCCCCCCCCCCCC3FF4CCCCCCCCCCCC3FFB3333333333340162016A3FC99999999999A03FE33333333333343FF00000000000003FF66666666666663FFCCCCCCCCCCCCC0162016B3FD33333333333303FE66666666666683FF199999999999A3FF80000000000003FFE6666666666660162016C3FD99999999999983FE99999999999983FF33333333333343FF999999999999A4000000000000000016301694000CCCCCCCCCCCC40040000000000004007333333333334400A666666666666400D99999999999A0163016A400199999999999A4004CCCCCCCCCCCC4008000000000000400B333333333334400E6666666666660163016B4002666666666666400599999999999A4008CCCCCCCCCCCC400C000000000000400F3333333333340163016C40033333333333344006666666666666400999999999999A400CCCCCCCCCCCCC4010000000000000"},"result":{"expect":"0x03020178017A010179050C01610169BFEB36BBDEFDC9FABFE8DC1E236D28F9BFE5591EBDB77208BFE01BF92311555FBFD0966F2C7907F60161016ABFEAB5DF1B20BD73BFE81BE0AF127E3BBFE43A54E4E98864BFDCE04528D3F63ABFC733D4A7A67A980161016BBFEA2774E1D59D7EBFE7476B67B98EC0BFE2FD619FFBC8F0BFD92E9A0720D3ECBFB85C933156A6300161016CBFE98A1050412C7BBFE65C9DF4C2F690BFE19F18DD3F123CBFD51979F31B1E240000000000000000016201693FB99999999999A03FE00000000000003FECCCCCCCCCCCCC3FF4CCCCCCCCCCCC3FFB3333333333340162016A3FC99999999999A03FE33333333333343FF00000000000003FF66666666666663FFCCCCCCCCCCCCC0162016B3FD33333333333303FE66666666666683FF199999999999A3FF80000000000003FFE6666666666660162016C3FD99999999999983FE99999999999983FF33333333333343FF999999999999A4000000000000000016301694000CCCCCCCCCCCC40040000000000004007333333333334400A666666666666400D99999999999A0163016A400199999999999A4004CCCCCCCCCCCC4008000000000000400B333333333334400E6666666666660163016B4002666666666666400599999999999A4008CCCCCCCCCCCC400C000000000000400F3333333333340163016C40033333333333344006666666666666400999999999999A400CCCCCCCCCCCCC4010000000000000"}}
{"expression":"a in [1,5,7,13,42]","inputs":{"a":"0x02003FF0000000000000"},"result":{"expect":"0x02003FF0000000000000"}}
{"expression":"a in [1,5,7,13,42]","inputs":{"a":"0x02010178033FF000000000000040000000000000004008000000000000"},"result":{"expect":"0x02010178033FF000000000000000000000000000000000000000000000"}}
{"expression":"a in [1,5,7,13,42]","inputs":{"a":"0x02020178030179053FF000000000000040000000000000004008000000000000401000000000000040140000000000004018000000000000401C00000000000040200000000000004022000000000000402400000000000040260000000000004028000000000000402A000000000000402C000000000000402E000000000000"},"result":{"expect":"0x02020178030179053FF00000000000000000000000000000000000000000000000000000000000003FF000000000000000000000000000003FF0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003FF000000000000000000000000000000000000000000000"}}
@@ -1224,4 +1242,4 @@
{"expression":"tensor(x[10])(x+1)","inputs":{},"result":{"expect":"0x020101780A3FF000000000000040000000000000004008000000000000401000000000000040140000000000004018000000000000401C000000000000402000000000000040220000000000004024000000000000"}}
{"expression":"tensor(x[5],y[4])(x*4+(y+1))","inputs":{},"result":{"expect":"0x02020178050179043FF000000000000040000000000000004008000000000000401000000000000040140000000000004018000000000000401C00000000000040200000000000004022000000000000402400000000000040260000000000004028000000000000402A000000000000402C000000000000402E00000000000040300000000000004031000000000000403200000000000040330000000000004034000000000000"}}
{"expression":"tensor(x[5],y[4])(x==y)","inputs":{},"result":{"expect":"0x02020178050179043FF000000000000000000000000000000000000000000000000000000000000000000000000000003FF000000000000000000000000000000000000000000000000000000000000000000000000000003FF000000000000000000000000000000000000000000000000000000000000000000000000000003FF00000000000000000000000000000000000000000000000000000000000000000000000000000"}}
-{"num_tests":1226}
+{"num_tests":1244}
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index a443ccb3d01..76f776df552 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -2,6 +2,7 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/eval/eval/function.h>
#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/operation.h>
#include <vespa/eval/eval/interpreted_function.h>
#include <vespa/eval/eval/test/eval_spec.h>
#include <vespa/eval/eval/basic_nodes.h>
@@ -181,9 +182,9 @@ TEST("require that dot product works with tensor function") {
InterpretedFunction interpreted(engine, function, types);
EXPECT_EQUAL(1u, interpreted.program_size());
InterpretedFunction::Context ctx(interpreted);
- TensorValue va(engine.create(a));
- TensorValue vb(engine.create(b));
- InterpretedFunction::SimpleObjectParams params({va,vb});
+ Value::UP va = engine.from_spec(a);
+ Value::UP vb = engine.from_spec(b);
+ InterpretedFunction::SimpleObjectParams params({*va,*vb});
const Value &result = interpreted.eval(ctx, params);
EXPECT_TRUE(result.is_double());
EXPECT_EQUAL(expect, result.as_double());
@@ -211,12 +212,12 @@ TEST("require that matrix multiplication works with tensor function") {
InterpretedFunction interpreted(engine, function, types);
EXPECT_EQUAL(1u, interpreted.program_size());
InterpretedFunction::Context ctx(interpreted);
- TensorValue va(engine.create(a));
- TensorValue vb(engine.create(b));
- InterpretedFunction::SimpleObjectParams params({va,vb});
+ Value::UP va = engine.from_spec(a);
+ Value::UP vb = engine.from_spec(b);
+ InterpretedFunction::SimpleObjectParams params({*va,*vb});
const Value &result = interpreted.eval(ctx, params);
ASSERT_TRUE(result.is_tensor());
- EXPECT_EQUAL(expect, engine.to_spec(*result.as_tensor()));
+ EXPECT_EQUAL(expect, engine.to_spec(result));
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index 0b6ea9e4d35..97b34f5be3c 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -221,6 +221,7 @@ TEST("require that various operations resolve appropriate type") {
TEST_DO(verify_op1("isNan(%s)")); // IsNan
TEST_DO(verify_op1("relu(%s)")); // Relu
TEST_DO(verify_op1("sigmoid(%s)")); // Sigmoid
+ TEST_DO(verify_op1("elu(%s)")); // Elu
}
TEST("require that map resolves correct type") {
diff --git a/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp b/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp
index 150b86f27ce..c3b42124155 100644
--- a/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp
+++ b/eval/src/tests/eval/simple_tensor/simple_tensor_test.cpp
@@ -13,7 +13,7 @@ using Cells = SimpleTensor::Cells;
using Address = SimpleTensor::Address;
using Stash = vespalib::Stash;
-TensorSpec to_spec(const Tensor &a) { return a.engine().to_spec(a); }
+TensorSpec to_spec(const Value &a) { return SimpleTensorEngine::ref().to_spec(a); }
const Tensor &unwrap(const Value &value) {
ASSERT_TRUE(value.is_tensor());
@@ -35,7 +35,7 @@ TEST("require that simple tensors can be built using tensor spec") {
.add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
- auto tensor = SimpleTensorEngine::ref().create(spec);
+ Value::UP tensor = SimpleTensorEngine::ref().from_spec(spec);
TensorSpec full_spec("tensor(w{},x[2],y{},z[2])");
full_spec
.add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
@@ -54,7 +54,7 @@ TEST("require that simple tensors can be built using tensor spec") {
.add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
- auto full_tensor = SimpleTensorEngine::ref().create(full_spec);
+ Value::UP full_tensor = SimpleTensorEngine::ref().from_spec(full_spec);
EXPECT_EQUAL(full_spec, to_spec(*tensor));
EXPECT_EQUAL(full_spec, to_spec(*full_tensor));
};
@@ -73,7 +73,7 @@ TEST("require that simple tensors can have their values negated") {
auto result = tensor->map([](double a){ return -a; });
EXPECT_EQUAL(to_spec(*expect), to_spec(*result));
Stash stash;
- const Value &result2 = SimpleTensorEngine::ref().map(TensorValue(*tensor), operation::Neg::f, stash);
+ const Value &result2 = SimpleTensorEngine::ref().map(*tensor, operation::Neg::f, stash);
EXPECT_EQUAL(to_spec(*expect), to_spec(unwrap(result2)));
}
@@ -98,7 +98,7 @@ TEST("require that simple tensors can be multiplied with each other") {
auto result = SimpleTensor::join(*lhs, *rhs, [](double a, double b){ return (a * b); });
EXPECT_EQUAL(to_spec(*expect), to_spec(*result));
Stash stash;
- const Value &result2 = SimpleTensorEngine::ref().join(TensorValue(*lhs), TensorValue(*rhs), operation::Mul::f, stash);
+ const Value &result2 = SimpleTensorEngine::ref().join(*lhs, *rhs, operation::Mul::f, stash);
EXPECT_EQUAL(to_spec(*expect), to_spec(unwrap(result2)));
}
@@ -129,10 +129,10 @@ TEST("require that simple tensors support dimension reduction") {
EXPECT_EQUAL(to_spec(*expect_sum_y), to_spec(*result_sum_y));
EXPECT_EQUAL(to_spec(*expect_sum_x), to_spec(*result_sum_x));
EXPECT_EQUAL(to_spec(*expect_sum_all), to_spec(*result_sum_all));
- const Value &result_sum_y_2 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {"y"}, stash);
- const Value &result_sum_x_2 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {"x"}, stash);
- const Value &result_sum_all_2 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {"x", "y"}, stash);
- const Value &result_sum_all_3 = SimpleTensorEngine::ref().reduce(TensorValue(*tensor), Aggr::SUM, {}, stash);
+ const Value &result_sum_y_2 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {"y"}, stash);
+ const Value &result_sum_x_2 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {"x"}, stash);
+ const Value &result_sum_all_2 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {"x", "y"}, stash);
+ const Value &result_sum_all_3 = SimpleTensorEngine::ref().reduce(*tensor, Aggr::SUM, {}, stash);
EXPECT_EQUAL(to_spec(*expect_sum_y), to_spec(unwrap(result_sum_y_2)));
EXPECT_EQUAL(to_spec(*expect_sum_x), to_spec(unwrap(result_sum_x_2)));
EXPECT_TRUE(result_sum_all_2.is_double());
diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
index 8bd86621bf6..681a4dabc19 100644
--- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
+++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
@@ -12,38 +12,37 @@ using namespace vespalib;
using namespace vespalib::eval;
using namespace vespalib::eval::tensor_function;
-struct EvalCtx : TensorFunction::Input {
+struct EvalCtx {
const TensorEngine &engine;
Stash stash;
ErrorValue error;
- std::map<size_t, Value::UP> tensors;
+ std::vector<Value::UP> tensors;
+ std::vector<Value::CREF> params;
EvalCtx(const TensorEngine &engine_in)
: engine(engine_in), stash(), error(), tensors() {}
- ~EvalCtx() { }
- void add_tensor(std::unique_ptr<Tensor> tensor, size_t id) {
- tensors.emplace(id, std::make_unique<TensorValue>(std::move(tensor)));
+ ~EvalCtx() {}
+ size_t add_tensor(Value::UP tensor) {
+ size_t id = params.size();
+ params.emplace_back(*tensor);
+ tensors.push_back(std::move(tensor));
+ return id;
}
- const Value &get_tensor(size_t id) const override {
- if (tensors.count(id) == 0) {
- return error;
- }
- return *tensors.find(id)->second;
+ const Value &eval(const TensorFunction &fun) {
+ return fun.eval(params, stash);
}
- const Value &eval(const TensorFunction &fun) { return fun.eval(*this, stash); }
- const ValueType type(const Tensor &tensor) const { return engine.type_of(tensor); }
- TensorFunction::UP compile(tensor_function::Node_UP expr) const {
- return engine.compile(std::move(expr));
+ const TensorFunction &compile(const tensor_function::Node &expr) {
+ return engine.compile(expr, stash);
}
- std::unique_ptr<Tensor> make_tensor_inject() {
- return engine.create(
+ Value::UP make_tensor_inject() {
+ return engine.from_spec(
TensorSpec("tensor(x[2],y[2])")
.add({{"x", 0}, {"y", 0}}, 1.0)
.add({{"x", 0}, {"y", 1}}, 2.0)
.add({{"x", 1}, {"y", 0}}, 3.0)
.add({{"x", 1}, {"y", 1}}, 4.0));
}
- std::unique_ptr<Tensor> make_tensor_reduce_input() {
- return engine.create(
+ Value::UP make_tensor_reduce_input() {
+ return engine.from_spec(
TensorSpec("tensor(x[3],y[2])")
.add({{"x",0},{"y",0}}, 1)
.add({{"x",1},{"y",0}}, 2)
@@ -52,43 +51,43 @@ struct EvalCtx : TensorFunction::Input {
.add({{"x",1},{"y",1}}, 5)
.add({{"x",2},{"y",1}}, 6));
}
- std::unique_ptr<Tensor> make_tensor_reduce_y_output() {
- return engine.create(
+ Value::UP make_tensor_reduce_y_output() {
+ return engine.from_spec(
TensorSpec("tensor(x[3])")
.add({{"x",0}}, 5)
.add({{"x",1}}, 7)
.add({{"x",2}}, 9));
}
- std::unique_ptr<Tensor> make_tensor_map_input() {
- return engine.create(
+ Value::UP make_tensor_map_input() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{})")
.add({{"x","1"},{"y","1"}}, 1)
.add({{"x","2"},{"y","1"}}, -3)
.add({{"x","1"},{"y","2"}}, 5));
}
- std::unique_ptr<Tensor> make_tensor_map_output() {
- return engine.create(
+ Value::UP make_tensor_map_output() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{})")
.add({{"x","1"},{"y","1"}}, -1)
.add({{"x","2"},{"y","1"}}, 3)
.add({{"x","1"},{"y","2"}}, -5));
}
- std::unique_ptr<Tensor> make_tensor_apply_lhs() {
- return engine.create(
+ Value::UP make_tensor_apply_lhs() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{})")
.add({{"x","1"},{"y","1"}}, 1)
.add({{"x","2"},{"y","1"}}, 3)
.add({{"x","1"},{"y","2"}}, 5));
}
- std::unique_ptr<Tensor> make_tensor_apply_rhs() {
- return engine.create(
+ Value::UP make_tensor_apply_rhs() {
+ return engine.from_spec(
TensorSpec("tensor(y{},z{})")
.add({{"y","1"},{"z","1"}}, 7)
.add({{"y","2"},{"z","1"}}, 11)
.add({{"y","1"},{"z","2"}}, 13));
}
- std::unique_ptr<Tensor> make_tensor_apply_output() {
- return engine.create(
+ Value::UP make_tensor_apply_output() {
+ return engine.from_spec(
TensorSpec("tensor(x{},y{},z{})")
.add({{"x","1"},{"y","1"},{"z","1"}}, 7)
.add({{"x","1"},{"y","1"},{"z","2"}}, 13)
@@ -98,65 +97,69 @@ struct EvalCtx : TensorFunction::Input {
}
};
-void verify_equal(const Tensor &expect, const Value &value) {
+void verify_equal(const Value &expect, const Value &value) {
const Tensor *tensor = value.as_tensor();
ASSERT_TRUE(tensor != nullptr);
- ASSERT_EQUAL(&expect.engine(), &tensor->engine());
- auto expect_spec = expect.engine().to_spec(expect);
- auto value_spec = tensor->engine().to_spec(*tensor);
+ const Tensor *expect_tensor = expect.as_tensor();
+ ASSERT_TRUE(expect_tensor != nullptr);
+ ASSERT_EQUAL(&expect_tensor->engine(), &tensor->engine());
+ auto expect_spec = expect_tensor->engine().to_spec(expect);
+ auto value_spec = tensor->engine().to_spec(value);
EXPECT_EQUAL(expect_spec, value_spec);
}
TEST("require that tensor injection works") {
EvalCtx ctx(SimpleTensorEngine::ref());
- ctx.add_tensor(ctx.make_tensor_inject(), 1);
- auto expect = ctx.make_tensor_inject();
- auto fun = inject(ValueType::from_spec("tensor(x[2],y[2])"), 1);
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
- auto prog = ctx.compile(std::move(fun));
- TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
+ size_t a_id = ctx.add_tensor(ctx.make_tensor_inject());
+ Value::UP expect = ctx.make_tensor_inject();
+ const auto &fun = inject(ValueType::from_spec("tensor(x[2],y[2])"), a_id, ctx.stash);
+ EXPECT_EQUAL(expect->type(), fun.result_type);
+ const auto &prog = ctx.compile(fun);
+ TEST_DO(verify_equal(*expect, ctx.eval(prog)));
}
TEST("require that partial tensor reduction works") {
EvalCtx ctx(SimpleTensorEngine::ref());
- ctx.add_tensor(ctx.make_tensor_reduce_input(), 1);
- auto expect = ctx.make_tensor_reduce_y_output();
- auto fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), 1), Aggr::SUM, {"y"});
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
- auto prog = ctx.compile(std::move(fun));
- TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
+ size_t a_id = ctx.add_tensor(ctx.make_tensor_reduce_input());
+ Value::UP expect = ctx.make_tensor_reduce_y_output();
+ const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {"y"}, ctx.stash);
+ EXPECT_EQUAL(expect->type(), fun.result_type);
+ const auto &prog = ctx.compile(fun);
+ TEST_DO(verify_equal(*expect, ctx.eval(prog)));
}
TEST("require that full tensor reduction works") {
EvalCtx ctx(SimpleTensorEngine::ref());
- ctx.add_tensor(ctx.make_tensor_reduce_input(), 1);
- auto fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), 1), Aggr::SUM, {});
- EXPECT_EQUAL(ValueType::from_spec("double"), fun->result_type);
- auto prog = ctx.compile(std::move(fun));
- EXPECT_EQUAL(21.0, ctx.eval(*prog).as_double());
+ size_t a_id = ctx.add_tensor(ctx.make_tensor_reduce_input());
+ const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {}, ctx.stash);
+ EXPECT_EQUAL(ValueType::from_spec("double"), fun.result_type);
+ const auto &prog = ctx.compile(fun);
+ const Value &result = ctx.eval(prog);
+ EXPECT_TRUE(result.is_double());
+ EXPECT_EQUAL(21.0, result.as_double());
}
TEST("require that tensor map works") {
EvalCtx ctx(SimpleTensorEngine::ref());
- ctx.add_tensor(ctx.make_tensor_map_input(), 1);
- auto expect = ctx.make_tensor_map_output();
- auto fun = map(inject(ValueType::from_spec("tensor(x{},y{})"), 1), operation::Neg::f);
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
- auto prog = ctx.compile(std::move(fun));
- TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
+ size_t a_id = ctx.add_tensor(ctx.make_tensor_map_input());
+ Value::UP expect = ctx.make_tensor_map_output();
+ const auto &fun = map(inject(ValueType::from_spec("tensor(x{},y{})"), a_id, ctx.stash), operation::Neg::f, ctx.stash);
+ EXPECT_EQUAL(expect->type(), fun.result_type);
+ const auto &prog = ctx.compile(fun);
+ TEST_DO(verify_equal(*expect, ctx.eval(prog)));
}
TEST("require that tensor join works") {
EvalCtx ctx(SimpleTensorEngine::ref());
- ctx.add_tensor(ctx.make_tensor_apply_lhs(), 1);
- ctx.add_tensor(ctx.make_tensor_apply_rhs(), 2);
- auto expect = ctx.make_tensor_apply_output();
- auto fun = join(inject(ValueType::from_spec("tensor(x{},y{})"), 1),
- inject(ValueType::from_spec("tensor(y{},z{})"), 2),
- operation::Mul::f);
- EXPECT_EQUAL(ctx.type(*expect), fun->result_type);
- auto prog = ctx.compile(std::move(fun));
- TEST_DO(verify_equal(*expect, ctx.eval(*prog)));
+ size_t a_id = ctx.add_tensor(ctx.make_tensor_apply_lhs());
+ size_t b_id = ctx.add_tensor(ctx.make_tensor_apply_rhs());
+ Value::UP expect = ctx.make_tensor_apply_output();
+ const auto &fun = join(inject(ValueType::from_spec("tensor(x{},y{})"), a_id, ctx.stash),
+ inject(ValueType::from_spec("tensor(y{},z{})"), b_id, ctx.stash),
+ operation::Mul::f, ctx.stash);
+ EXPECT_EQUAL(expect->type(), fun.result_type);
+ const auto &prog = ctx.compile(fun);
+ TEST_DO(verify_equal(*expect, ctx.eval(prog)));
}
TEST_MAIN() { TEST_RUN_ALL(); }
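The rewritten test above illustrates the new stash-based tensor_function builder API: inject/map/reduce/join take a Stash and return references, parameters are passed as Value::CREF, and compilation goes through engine.compile(expr, stash). Below is a condensed sketch assembled only from calls that appear in the updated test; the helper name and the negate example are illustrative, and the include paths are assumed to match the headers listed in the diffstat.

#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/tensor_engine.h>
#include <vespa/eval/eval/operation.h>

using namespace vespalib;
using namespace vespalib::eval;
using namespace vespalib::eval::tensor_function;

// Build map(neg, inject(param 0)), compile it, and evaluate it against one parameter.
const Value &eval_neg_sketch(const TensorEngine &engine, const Value &param, Stash &stash) {
    std::vector<Value::CREF> params({param});
    const auto &expr = map(inject(param.type(), 0, stash), operation::Neg::f, stash);
    const auto &prog = engine.compile(expr, stash);
    return prog.eval(params, stash);
}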
diff --git a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
index ee8e502815f..5dd8caa6e27 100644
--- a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
+++ b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
@@ -3,59 +3,56 @@
#include <vespa/eval/eval/value_cache/constant_tensor_loader.h>
#include <vespa/eval/eval/simple_tensor_engine.h>
#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/tensor.h>
using namespace vespalib::eval;
-std::unique_ptr<Tensor> dense_tensor_nocells() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x[2],y[2])"));
+TensorSpec sparse_tensor_nocells() {
+ return TensorSpec("tensor(x{},y{})");
}
-std::unique_ptr<Tensor> make_nodim_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("double"));
+TensorSpec make_dense_tensor() {
+ return TensorSpec("tensor(x[2],y[2])")
+ .add({{"x", 0}, {"y", 0}}, 1.0)
+ .add({{"x", 0}, {"y", 1}}, 2.0)
+ .add({{"x", 1}, {"y", 0}}, 3.0)
+ .add({{"x", 1}, {"y", 1}}, 4.0);
}
-std::unique_ptr<Tensor> make_dense_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x[2],y[2])")
- .add({{"x", 0}, {"y", 0}}, 1.0)
- .add({{"x", 0}, {"y", 1}}, 2.0)
- .add({{"x", 1}, {"y", 0}}, 3.0)
- .add({{"x", 1}, {"y", 1}}, 4.0));
+TensorSpec make_sparse_tensor() {
+ return TensorSpec("tensor(x{},y{})")
+ .add({{"x", "foo"}, {"y", "bar"}}, 1.0)
+ .add({{"x", "bar"}, {"y", "foo"}}, 2.0);
}
-std::unique_ptr<Tensor> make_sparse_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x{},y{})")
- .add({{"x", "foo"}, {"y", "bar"}}, 1.0)
- .add({{"x", "bar"}, {"y", "foo"}}, 2.0));
+TensorSpec make_mixed_tensor() {
+ return TensorSpec("tensor(x{},y[2])")
+ .add({{"x", "foo"}, {"y", 0}}, 1.0)
+ .add({{"x", "foo"}, {"y", 1}}, 2.0);
}
-std::unique_ptr<Tensor> make_mixed_tensor() {
- return SimpleTensorEngine::ref()
- .create(TensorSpec("tensor(x{},y[2])")
- .add({{"x", "foo"}, {"y", 0}}, 1.0)
- .add({{"x", "foo"}, {"y", 1}}, 2.0));
+void verify_tensor(const TensorSpec &expect, ConstantValue::UP actual) {
+ const auto &engine = SimpleTensorEngine::ref();
+ ASSERT_EQUAL(expect.type(), actual->type().to_spec());
+ ASSERT_TRUE(&engine == &actual->value().as_tensor()->engine());
+ EXPECT_EQUAL(expect, engine.to_spec(actual->value()));
}
-void verify_tensor(std::unique_ptr<Tensor> expect, ConstantValue::UP actual) {
- const auto &engine = expect->engine();
- ASSERT_EQUAL(engine.type_of(*expect), actual->type());
- ASSERT_TRUE(&engine == &actual->value().as_tensor()->engine());
- EXPECT_EQUAL(engine.to_spec(*expect), engine.to_spec(*actual->value().as_tensor()));
+void verify_invalid(ConstantValue::UP actual) {
+ EXPECT_EQUAL(actual->type(), ValueType::double_type());
+ EXPECT_EQUAL(actual->value().as_double(), 0.0);
}
TEST_F("require that invalid types loads an empty double", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(make_nodim_tensor(), f1.create(TEST_PATH("dense.json"), "invalid type spec")));
+ TEST_DO(verify_invalid(f1.create(TEST_PATH("dense.json"), "invalid type spec")));
}
TEST_F("require that invalid file name loads an empty tensor", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(dense_tensor_nocells(), f1.create(TEST_PATH("missing_file.json"), "tensor(x[2],y[2])")));
+ TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("missing_file.json"), "tensor(x{},y{})")));
}
TEST_F("require that invalid json loads an empty tensor", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(dense_tensor_nocells(), f1.create(TEST_PATH("invalid.json"), "tensor(x[2],y[2])")));
+ TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("invalid.json"), "tensor(x{},y{})")));
}
TEST_F("require that dense tensors can be loaded", ConstantTensorLoader(SimpleTensorEngine::ref())) {
@@ -75,7 +72,7 @@ TEST_F("require that lz4 compressed sparse tensor can be loaded", ConstantTensor
}
TEST_F("require that bad lz4 file fails to load creating empty result", ConstantTensorLoader(SimpleTensorEngine::ref())) {
- TEST_DO(verify_tensor(dense_tensor_nocells(), f1.create(TEST_PATH("bad_lz4.json.lz4"), "tensor(x[2],y[2])")));
+ TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("bad_lz4.json.lz4"), "tensor(x{},y{})")));
}
TEST_MAIN() { TEST_RUN_ALL(); }
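Note (not part of the patch): a minimal sketch of the fallback behavior these tests pin down, using only the fixture and helpers above. An unparsable type spec is expected to yield an empty double, while a missing or malformed file yields an empty tensor of the requested type.

// sketch only: expected loader fallbacks after this change
ConstantTensorLoader loader(SimpleTensorEngine::ref());
auto bad_type = loader.create(TEST_PATH("dense.json"), "invalid type spec");
// bad_type->type() is double, bad_type->value().as_double() is 0.0
auto bad_file = loader.create(TEST_PATH("missing_file.json"), "tensor(x{},y{})");
// bad_file->value() is an empty tensor of type tensor(x{},y{})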
diff --git a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
index 17df3d21d0c..ca77997bac7 100644
--- a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
+++ b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
@@ -50,34 +50,26 @@ asDenseTensor(const tensor::Tensor &tensor)
return dynamic_cast<const DenseTensor &>(tensor);
}
-class FunctionInput : public TensorFunction::Input
+class FunctionInput
{
private:
tensor::Tensor::UP _lhsTensor;
tensor::Tensor::UP _rhsTensor;
const DenseTensor &_lhsDenseTensor;
const DenseTensor &_rhsDenseTensor;
- TensorValue _lhsValue;
- TensorValue _rhsValue;
+ std::vector<Value::CREF> _params;
public:
FunctionInput(size_t lhsNumCells, size_t rhsNumCells)
: _lhsTensor(makeTensor(lhsNumCells, 3.0)),
_rhsTensor(makeTensor(rhsNumCells, 5.0)),
_lhsDenseTensor(asDenseTensor(*_lhsTensor)),
- _rhsDenseTensor(asDenseTensor(*_rhsTensor)),
- _lhsValue(std::make_unique<DenseTensor>(_lhsDenseTensor.type(),
- _lhsDenseTensor.cells())),
- _rhsValue(std::make_unique<DenseTensor>(_rhsDenseTensor.type(),
- _rhsDenseTensor.cells()))
- {}
- virtual const Value &get_tensor(size_t id) const override {
- if (id == 0) {
- return _lhsValue;
- } else {
- return _rhsValue;
- }
+ _rhsDenseTensor(asDenseTensor(*_rhsTensor))
+ {
+ _params.emplace_back(_lhsDenseTensor);
+ _params.emplace_back(_rhsDenseTensor);
}
+ ConstArrayRef<Value::CREF> get() const { return _params; }
double expectedDotProduct() const {
return calcDotProduct(_lhsDenseTensor, _rhsDenseTensor);
}
@@ -91,11 +83,11 @@ struct Fixture
~Fixture();
double eval() const {
Stash stash;
- const Value &result = function.eval(input, stash);
+ const Value &result = function.eval(input.get(), stash);
ASSERT_TRUE(result.is_double());
LOG(info, "eval(): (%s) * (%s) = %f",
- input.get_tensor(0).type().to_spec().c_str(),
- input.get_tensor(1).type().to_spec().c_str(),
+ input.get()[0].get().type().to_spec().c_str(),
+ input.get()[1].get().type().to_spec().c_str(),
result.as_double());
return result.as_double();
}
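Note (illustrative, not part of the patch): the fixture above follows the new calling convention where parameters are gathered as Value::CREF entries and handed to eval(); a minimal sketch with hypothetical lhs/rhs values owned elsewhere:

// sketch only: building parameters for the new eval() signature
std::vector<Value::CREF> params;
params.emplace_back(lhs);   // lhs, rhs: hypothetical Values kept alive by the caller
params.emplace_back(rhs);
Stash stash;
const Value &result = function.eval(params, stash);  // vector converts to ConstArrayRef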
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp b/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
index 6dcfc0791e7..63829650cc5 100644
--- a/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
+++ b/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
@@ -3,32 +3,36 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/eval/tensor/dense/dense_dot_product_function.h>
#include <vespa/eval/tensor/dense/dense_tensor_function_compiler.h>
+#include <vespa/eval/eval/operation.h>
using namespace vespalib::eval;
using namespace vespalib::eval::operation;
using namespace vespalib::eval::tensor_function;
using namespace vespalib::tensor;
+using vespalib::Stash;
template <typename T>
const T *as(const TensorFunction &function) { return dynamic_cast<const T *>(&function); }
-TensorFunction::UP
+const TensorFunction &
compileDotProduct(const vespalib::string &lhsType,
- const vespalib::string &rhsType)
+ const vespalib::string &rhsType,
+ Stash &stash)
{
- Node_UP reduceNode = reduce(join(inject(ValueType::from_spec(lhsType), 1),
- inject(ValueType::from_spec(rhsType), 3),
- Mul::f),
- Aggr::SUM, {});
- return DenseTensorFunctionCompiler::compile(std::move(reduceNode));
+ const Node &reduceNode = reduce(join(inject(ValueType::from_spec(lhsType), 1, stash),
+ inject(ValueType::from_spec(rhsType), 3, stash),
+ Mul::f, stash),
+ Aggr::SUM, {}, stash);
+ return DenseTensorFunctionCompiler::compile(reduceNode, stash);
}
void
assertCompiledDotProduct(const vespalib::string &lhsType,
const vespalib::string &rhsType)
{
- TensorFunction::UP func = compileDotProduct(lhsType, rhsType);
- const DenseDotProductFunction *dotProduct = as<DenseDotProductFunction>(*func);
+ Stash stash;
+ const TensorFunction &func = compileDotProduct(lhsType, rhsType, stash);
+ const DenseDotProductFunction *dotProduct = as<DenseDotProductFunction>(func);
ASSERT_TRUE(dotProduct);
EXPECT_EQUAL(1u, dotProduct->lhsTensorId());
EXPECT_EQUAL(3u, dotProduct->rhsTensorId());
@@ -38,8 +42,9 @@ void
assertNotCompiledDotProduct(const vespalib::string &lhsType,
const vespalib::string &rhsType)
{
- TensorFunction::UP func = compileDotProduct(lhsType, rhsType);
- const Reduce *reduce = as<Reduce>(*func);
+ Stash stash;
+ const TensorFunction &func = compileDotProduct(lhsType, rhsType, stash);
+ const Reduce *reduce = as<Reduce>(func);
EXPECT_TRUE(reduce);
}
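Note (sketch, assuming matching 1d dense operand types): how the stash-based builders used above assemble the dot-product IR and hand it to the compiler:

Stash stash;
const auto &ir = reduce(join(inject(ValueType::from_spec("tensor(x[3])"), 1, stash),
                             inject(ValueType::from_spec("tensor(x[3])"), 3, stash),
                             Mul::f, stash),
                        Aggr::SUM, {}, stash);
const TensorFunction &fun = DenseTensorFunctionCompiler::compile(ir, stash);
// as<DenseDotProductFunction>(fun) is expected to be non-null for this shape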
diff --git a/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
index c26429f47e4..e369e09b99a 100644
--- a/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
+++ b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
@@ -9,6 +9,7 @@
#include <vespa/eval/eval/simple_tensor.h>
using vespalib::eval::ValueType;
+using vespalib::eval::Value;
using vespalib::eval::TensorSpec;
using vespalib::eval::SimpleTensor;
using namespace vespalib::tensor;
@@ -21,8 +22,8 @@ void verify_wrapped(const TensorSpec &source, const vespalib::string &type, cons
}
void verify(const TensorSpec &source, const vespalib::string &type, const TensorSpec &expect) {
- auto tensor = DefaultTensorEngine::ref().create(source);
- const Tensor *tensor_impl = dynamic_cast<const Tensor *>(tensor.get());
+ Value::UP value = DefaultTensorEngine::ref().from_spec(source);
+ const Tensor *tensor_impl = dynamic_cast<const Tensor *>(value->as_tensor());
ASSERT_TRUE(tensor_impl);
TensorMapper mapper(ValueType::from_spec(type));
auto mapped = mapper.map(*tensor_impl);
diff --git a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
index 3daaa3f79b3..2ed0021b5c7 100644
--- a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
+++ b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
@@ -25,21 +25,12 @@ const vespalib::string matrix_product_expr = "reduce(reduce((query+documen
//-----------------------------------------------------------------------------
-Value::UP wrap(std::unique_ptr<eval::Tensor> tensor) {
- return Value::UP(new TensorValue(std::move(tensor)));
-}
-
-//-----------------------------------------------------------------------------
-
struct Params {
std::map<vespalib::string, Value::UP> map;
Params &add(const vespalib::string &name, Value::UP value) {
map.emplace(name, std::move(value));
return *this;
}
- Params &add(const vespalib::string &name, std::unique_ptr<eval::Tensor> value) {
- return add(name, wrap(std::move(value)));
- }
};
InterpretedFunction::SimpleObjectParams make_params(const Function &function, const Params &params)
@@ -49,7 +40,7 @@ InterpretedFunction::SimpleObjectParams make_params(const Function &function, co
for (size_t i = 0; i < function.num_params(); ++i) {
auto param = params.map.find(function.param_name(i));
ASSERT_TRUE(param != params.map.end());
- fun_params.params.push_back(*(param->second));
+ fun_params.params.push_back(*param->second);
}
return fun_params;
}
@@ -92,9 +83,8 @@ double benchmark_expression_us(const vespalib::string &expression, const Params
//-----------------------------------------------------------------------------
-tensor::Tensor::UP make_tensor(const TensorSpec &spec) {
- auto tensor = DefaultTensorEngine::ref().create(spec);
- return tensor::Tensor::UP(dynamic_cast<tensor::Tensor*>(tensor.release()));
+Value::UP make_tensor(TensorSpec spec) {
+ return DefaultTensorEngine::ref().from_spec(spec);
}
//-----------------------------------------------------------------------------
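Note (illustrative only, hypothetical parameter name): with the TensorValue wrapper gone, benchmark parameters are built directly from specs:

Params params;
params.add("query", make_tensor(TensorSpec("tensor(x[2])")
                                .add({{"x", 0}}, 1.0)
                                .add({{"x", 1}}, 2.0)));
// make_tensor() now returns Value::UP straight from DefaultTensorEngine::from_spec()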
diff --git a/eval/src/vespa/eval/eval/call_nodes.cpp b/eval/src/vespa/eval/eval/call_nodes.cpp
index 0e54ed183f4..69a9151a2bb 100644
--- a/eval/src/vespa/eval/eval/call_nodes.cpp
+++ b/eval/src/vespa/eval/eval/call_nodes.cpp
@@ -41,6 +41,7 @@ CallRepo::CallRepo() : _map() {
add(nodes::IsNan());
add(nodes::Relu());
add(nodes::Sigmoid());
+ add(nodes::Elu());
}
} // namespace vespalib::eval::nodes
diff --git a/eval/src/vespa/eval/eval/call_nodes.h b/eval/src/vespa/eval/eval/call_nodes.h
index 4c5611a863a..8210616750e 100644
--- a/eval/src/vespa/eval/eval/call_nodes.h
+++ b/eval/src/vespa/eval/eval/call_nodes.h
@@ -137,6 +137,7 @@ struct Max : CallHelper<Max> { Max() : Helper("max", 2) {} };
struct IsNan : CallHelper<IsNan> { IsNan() : Helper("isNan", 1) {} };
struct Relu : CallHelper<Relu> { Relu() : Helper("relu", 1) {} };
struct Sigmoid : CallHelper<Sigmoid> { Sigmoid() : Helper("sigmoid", 1) {} };
+struct Elu : CallHelper<Elu> { Elu() : Helper("elu", 1) {} };
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp
index f99c4ace2dd..cfe989e95f8 100644
--- a/eval/src/vespa/eval/eval/interpreted_function.cpp
+++ b/eval/src/vespa/eval/eval/interpreted_function.cpp
@@ -5,6 +5,7 @@
#include "node_traverser.h"
#include "check_type.h"
#include "tensor_spec.h"
+#include "operation.h"
#include <vespa/vespalib/util/classname.h>
#include <vespa/eval/eval/llvm/compile_cache.h>
#include <vespa/vespalib/util/benchmark_timer.h>
@@ -111,39 +112,26 @@ void op_tensor_concat(State &state, uint64_t param) {
//-----------------------------------------------------------------------------
template <typename T>
-const T &undef_cref() {
+const T &undef_cref() {
const T *undef = nullptr;
assert(undef);
return *undef;
}
struct TensorFunctionArgArgMeta {
- TensorFunction::UP function;
+ const TensorFunction &function;
size_t param1;
size_t param2;
- TensorFunctionArgArgMeta(TensorFunction::UP function_in, size_t param1_in, size_t param2_in)
- : function(std::move(function_in)), param1(param1_in), param2(param2_in) {}
-};
-
-struct ArgArgInput : TensorFunction::Input {
- const TensorFunctionArgArgMeta &meta;
- State &state;
- ArgArgInput(const TensorFunctionArgArgMeta &meta_in, State &state_in)
- : meta(meta_in), state(state_in) {}
- const Value &get_tensor(size_t id) const override {
- if (id == 0) {
- return state.params->resolve(meta.param1, state.stash);
- } else if (id == 1) {
- return state.params->resolve(meta.param2, state.stash);
- }
- return undef_cref<Value>();
- }
+ TensorFunctionArgArgMeta(const TensorFunction &function_in, size_t param1_in, size_t param2_in)
+ : function(function_in), param1(param1_in), param2(param2_in) {}
};
void op_tensor_function_arg_arg(State &state, uint64_t param) {
const TensorFunctionArgArgMeta &meta = unwrap_param<TensorFunctionArgArgMeta>(param);
- ArgArgInput input(meta, state);
- state.stack.push_back(meta.function->eval(input, state.stash));
+ Value::CREF params[2] =
+ {state.params->resolve(meta.param1, state.stash),
+ state.params->resolve(meta.param2, state.stash)};
+ state.stack.push_back(meta.function.eval(ConstArrayRef<Value::CREF>(params, 2), state.stash));
}
//-----------------------------------------------------------------------------
@@ -279,12 +267,12 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser {
program.pop_back(); // load
auto a = as<Symbol>(node.get_child(0).get_child(0));
auto b = as<Symbol>(node.get_child(0).get_child(1));
- auto ir = tensor_function::reduce(tensor_function::join(
- tensor_function::inject(types.get_type(*a), 0),
- tensor_function::inject(types.get_type(*b), 1),
- operation::Mul::f), node.aggr(), node.dimensions());
- auto fun = tensor_engine.compile(std::move(ir));
- const auto &meta = stash.create<TensorFunctionArgArgMeta>(std::move(fun), a->id(), b->id());
+ const auto &ir = tensor_function::reduce(tensor_function::join(
+ tensor_function::inject(types.get_type(*a), 0, stash),
+ tensor_function::inject(types.get_type(*b), 1, stash),
+ operation::Mul::f, stash), node.aggr(), node.dimensions(), stash);
+ const auto &fun = tensor_engine.compile(ir, stash);
+ const auto &meta = stash.create<TensorFunctionArgArgMeta>(fun, a->id(), b->id());
program.emplace_back(op_tensor_function_arg_arg, wrap_param<TensorFunctionArgArgMeta>(meta));
} else {
ReduceParams &params = stash.create<ReduceParams>(node.aggr(), node.dimensions());
@@ -309,8 +297,7 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser {
}
spec.add(addr, fun(&params[0]));
} while (step_labels(params, type));
- auto tensor = tensor_engine.create(spec);
- make_const_op(node, stash.create<TensorValue>(std::move(tensor)));
+ make_const_op(node, *stash.create<Value::UP>(tensor_engine.from_spec(spec)));
}
void visit(const TensorConcat &node) override {
vespalib::string &dimension = stash.create<vespalib::string>(node.dimension());
@@ -436,6 +423,9 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser {
void visit(const Sigmoid &node) override {
make_map_op(node, operation::Sigmoid::f);
}
+ void visit(const Elu &node) override {
+ make_map_op(node, operation::Elu::f);
+ }
//-------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/key_gen.cpp b/eval/src/vespa/eval/eval/key_gen.cpp
index e0494e1fe11..86908b331ba 100644
--- a/eval/src/vespa/eval/eval/key_gen.cpp
+++ b/eval/src/vespa/eval/eval/key_gen.cpp
@@ -81,6 +81,7 @@ struct KeyGen : public NodeVisitor, public NodeTraverser {
void visit(const IsNan &) override { add_byte(58); }
void visit(const Relu &) override { add_byte(59); }
void visit(const Sigmoid &) override { add_byte(60); }
+ void visit(const Elu &) override { add_byte(61); }
// traverse
bool open(const Node &node) override { node.accept(*this); return true; }
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
index 9355cf7a4e4..f314f8a69cb 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
@@ -25,6 +25,7 @@ double vespalib_eval_isnan(double a) { return (std::isnan(a) ? 1.0 : 0.0); }
double vespalib_eval_approx(double a, double b) { return (vespalib::approx_equal(a, b) ? 1.0 : 0.0); }
double vespalib_eval_relu(double a) { return std::max(a, 0.0); }
double vespalib_eval_sigmoid(double a) { return 1.0 / (1.0 + std::exp(-1.0 * a)); }
+double vespalib_eval_elu(double a) { return (a < 0) ? std::exp(a) - 1.0 : a; }
using vespalib::eval::gbdt::Forest;
using resolve_function = double (*)(void *ctx, size_t idx);
@@ -586,6 +587,9 @@ struct FunctionBuilder : public NodeVisitor, public NodeTraverser {
void visit(const Sigmoid &) override {
make_call_1("vespalib_eval_sigmoid");
}
+ void visit(const Elu &) override {
+ make_call_1("vespalib_eval_elu");
+ }
};
FunctionBuilder::~FunctionBuilder() { }
@@ -628,7 +632,7 @@ LLVMWrapper::LLVMWrapper()
size_t
LLVMWrapper::make_function(size_t num_params, PassParams pass_params, const Node &root,
const gbdt::Optimize::Chain &forest_optimizers)
-{
+{
std::lock_guard<std::recursive_mutex> guard(_global_llvm_lock);
size_t function_id = _functions.size();
FunctionBuilder builder(*_context, *_module,
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
index d3011a54ec0..6860be922f4 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
@@ -18,6 +18,7 @@ extern "C" {
double vespalib_eval_approx(double a, double b);
double vespalib_eval_relu(double a);
double vespalib_eval_sigmoid(double a);
+ double vespalib_eval_elu(double a);
};
namespace vespalib {
diff --git a/eval/src/vespa/eval/eval/node_types.cpp b/eval/src/vespa/eval/eval/node_types.cpp
index f86c3e1a84a..0cbc30667f0 100644
--- a/eval/src/vespa/eval/eval/node_types.cpp
+++ b/eval/src/vespa/eval/eval/node_types.cpp
@@ -164,6 +164,7 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser {
void visit(const IsNan &node) override { resolve_op1(node); }
void visit(const Relu &node) override { resolve_op1(node); }
void visit(const Sigmoid &node) override { resolve_op1(node); }
+ void visit(const Elu &node) override { resolve_op1(node); }
//-------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/node_visitor.h b/eval/src/vespa/eval/eval/node_visitor.h
index c5a6fd51373..10b389db792 100644
--- a/eval/src/vespa/eval/eval/node_visitor.h
+++ b/eval/src/vespa/eval/eval/node_visitor.h
@@ -79,6 +79,7 @@ struct NodeVisitor {
virtual void visit(const nodes::IsNan &) = 0;
virtual void visit(const nodes::Relu &) = 0;
virtual void visit(const nodes::Sigmoid &) = 0;
+ virtual void visit(const nodes::Elu &) = 0;
virtual ~NodeVisitor() {}
};
@@ -142,6 +143,7 @@ struct EmptyNodeVisitor : NodeVisitor {
void visit(const nodes::IsNan &) override {}
void visit(const nodes::Relu &) override {}
void visit(const nodes::Sigmoid &) override {}
+ void visit(const nodes::Elu &) override {}
};
} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/operation.cpp b/eval/src/vespa/eval/eval/operation.cpp
index 42b1a110497..d697db40e7b 100644
--- a/eval/src/vespa/eval/eval/operation.cpp
+++ b/eval/src/vespa/eval/eval/operation.cpp
@@ -49,6 +49,7 @@ double Max::f(double a, double b) { return std::max(a, b); }
double IsNan::f(double a) { return std::isnan(a) ? 1.0 : 0.0; }
double Relu::f(double a) { return std::max(a, 0.0); }
double Sigmoid::f(double a) { return 1.0 / (1.0 + std::exp(-1.0 * a)); }
+double Elu::f(double a) { return (a < 0) ? std::exp(a) - 1 : a; }
} // namespace vespalib::eval::operation
} // namespace vespalib::eval
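Note (for context, not part of the patch): the elu activation added here follows the standard definition and matches the LLVM helper above:

// elu(a) = a            for a >= 0
//        = exp(a) - 1   for a <  0
double elu(double a) { return (a < 0) ? std::exp(a) - 1.0 : a; }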
diff --git a/eval/src/vespa/eval/eval/operation.h b/eval/src/vespa/eval/eval/operation.h
index 12de7c3deb7..52a0fbabd22 100644
--- a/eval/src/vespa/eval/eval/operation.h
+++ b/eval/src/vespa/eval/eval/operation.h
@@ -51,6 +51,7 @@ struct Max { static double f(double a, double b); };
struct IsNan { static double f(double a); };
struct Relu { static double f(double a); };
struct Sigmoid { static double f(double a); };
+struct Elu { static double f(double a); };
} // namespace vespalib::eval::operation
} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/simple_tensor.cpp b/eval/src/vespa/eval/eval/simple_tensor.cpp
index e39e926708d..0e58d292334 100644
--- a/eval/src/vespa/eval/eval/simple_tensor.cpp
+++ b/eval/src/vespa/eval/eval/simple_tensor.cpp
@@ -604,7 +604,11 @@ SimpleTensor::rename(const std::vector<vespalib::string> &from, const std::vecto
std::unique_ptr<SimpleTensor>
SimpleTensor::create(const TensorSpec &spec)
{
- Builder builder(ValueType::from_spec(spec.type()));
+ ValueType my_type = ValueType::from_spec(spec.type());
+ if (my_type.is_error()) {
+ return std::make_unique<SimpleTensor>();
+ }
+ Builder builder(my_type);
for (const auto &cell: spec.cells()) {
builder.set(cell.first, cell.second);
}
diff --git a/eval/src/vespa/eval/eval/simple_tensor.h b/eval/src/vespa/eval/eval/simple_tensor.h
index 366796f00d8..45d1853824d 100644
--- a/eval/src/vespa/eval/eval/simple_tensor.h
+++ b/eval/src/vespa/eval/eval/simple_tensor.h
@@ -82,7 +82,7 @@ public:
explicit SimpleTensor(double value);
SimpleTensor(const ValueType &type_in, Cells cells_in);
double as_double() const final override;
- const ValueType &type() const { return _type; }
+ const ValueType &type() const override { return _type; }
const Cells &cells() const { return _cells; }
std::unique_ptr<SimpleTensor> map(map_fun_t function) const;
std::unique_ptr<SimpleTensor> reduce(Aggregator &aggr, const std::vector<vespalib::string> &dimensions) const;
diff --git a/eval/src/vespa/eval/eval/simple_tensor_engine.cpp b/eval/src/vespa/eval/eval/simple_tensor_engine.cpp
index 21498ca2ff1..2b3c5679488 100644
--- a/eval/src/vespa/eval/eval/simple_tensor_engine.cpp
+++ b/eval/src/vespa/eval/eval/simple_tensor_engine.cpp
@@ -10,7 +10,7 @@ namespace eval {
namespace {
-const SimpleTensor &to_simple(const Tensor &tensor) {
+const SimpleTensor &as_simple(const Tensor &tensor) {
assert(&tensor.engine() == &SimpleTensorEngine::ref());
return static_cast<const SimpleTensor&>(tensor);
}
@@ -20,98 +20,92 @@ const SimpleTensor &to_simple(const Value &value, Stash &stash) {
return stash.create<SimpleTensor>(value.as_double());
}
if (auto tensor = value.as_tensor()) {
- return to_simple(*tensor);
+ return as_simple(*tensor);
}
return stash.create<SimpleTensor>(); // error
}
+template <typename F>
+void with_simple(const Value &value, const F &f) {
+ if (value.is_double()) {
+ f(SimpleTensor(value.as_double()));
+ } else if (auto tensor = value.as_tensor()) {
+ f(as_simple(*tensor));
+ } else {
+ f(SimpleTensor());
+ }
+}
+
const Value &to_value(std::unique_ptr<SimpleTensor> tensor, Stash &stash) {
+ if (tensor->type().is_tensor()) {
+ return *stash.create<Value::UP>(std::move(tensor));
+ }
if (tensor->type().is_double()) {
- assert(tensor->cells().size() == 1u);
- return stash.create<DoubleValue>(tensor->cells()[0].value);
+ return stash.create<DoubleValue>(tensor->as_double());
}
+ assert(tensor->type().is_error());
+ return ErrorValue::instance;
+}
+
+Value::UP to_value(std::unique_ptr<SimpleTensor> tensor) {
if (tensor->type().is_tensor()) {
- return stash.create<TensorValue>(std::move(tensor));
+ return std::move(tensor);
+ }
+ if (tensor->type().is_double()) {
+ return std::make_unique<DoubleValue>(tensor->as_double());
}
assert(tensor->type().is_error());
- return stash.create<ErrorValue>();
+ return std::make_unique<ErrorValue>();
}
} // namespace vespalib::eval::<unnamed>
const SimpleTensorEngine SimpleTensorEngine::_engine;
-ValueType
-SimpleTensorEngine::type_of(const Tensor &tensor) const
-{
- return to_simple(tensor).type();
-}
-
-vespalib::string
-SimpleTensorEngine::to_string(const Tensor &tensor) const
-{
- const SimpleTensor &simple_tensor = to_simple(tensor);
- vespalib::string out = vespalib::make_string("simple(%s) {\n", simple_tensor.type().to_spec().c_str());
- for (const auto &cell: simple_tensor.cells()) {
- size_t n = 0;
- out.append(" [");
- for (const auto &label: cell.address) {
- if (n++) {
- out.append(",");
- }
- if (label.is_mapped()) {
- out.append(label.name);
- } else {
- out.append(vespalib::make_string("%zu", label.index));
- }
- }
- out.append(vespalib::make_string("]: %g\n", cell.value));
- }
- out.append("}");
- return out;
-}
+//-----------------------------------------------------------------------------
TensorSpec
-SimpleTensorEngine::to_spec(const Tensor &tensor) const
+SimpleTensorEngine::to_spec(const Value &value) const
{
- const SimpleTensor &simple_tensor = to_simple(tensor);
- ValueType type = simple_tensor.type();
- const auto &dimensions = type.dimensions();
- TensorSpec spec(type.to_spec());
- for (const auto &cell: simple_tensor.cells()) {
- TensorSpec::Address addr;
- assert(cell.address.size() == dimensions.size());
- for (size_t i = 0; i < cell.address.size(); ++i) {
- const auto &label = cell.address[i];
- if (label.is_mapped()) {
- addr.emplace(dimensions[i].name, TensorSpec::Label(label.name));
- } else {
- addr.emplace(dimensions[i].name, TensorSpec::Label(label.index));
- }
- }
- spec.add(addr, cell.value);
- }
+ TensorSpec spec(value.type().to_spec());
+ const auto &dimensions = value.type().dimensions();
+ with_simple(value, [&spec,&dimensions](const SimpleTensor &simple_tensor)
+ {
+ for (const auto &cell: simple_tensor.cells()) {
+ TensorSpec::Address addr;
+ assert(cell.address.size() == dimensions.size());
+ for (size_t i = 0; i < cell.address.size(); ++i) {
+ const auto &label = cell.address[i];
+ if (label.is_mapped()) {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.name));
+ } else {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.index));
+ }
+ }
+ spec.add(addr, cell.value);
+ }
+ });
return spec;
}
-std::unique_ptr<eval::Tensor>
-SimpleTensorEngine::create(const TensorSpec &spec) const
+Value::UP
+SimpleTensorEngine::from_spec(const TensorSpec &spec) const
{
- return SimpleTensor::create(spec);
+ return to_value(SimpleTensor::create(spec));
}
//-----------------------------------------------------------------------------
void
-SimpleTensorEngine::encode(const Value &value, nbostream &output, Stash &stash) const
+SimpleTensorEngine::encode(const Value &value, nbostream &output) const
{
- SimpleTensor::encode(to_simple(value, stash), output);
+ with_simple(value, [&output](const SimpleTensor &tensor) { SimpleTensor::encode(tensor, output); });
}
-const Value &
-SimpleTensorEngine::decode(nbostream &input, Stash &stash) const
+Value::UP
+SimpleTensorEngine::decode(nbostream &input) const
{
- return to_value(SimpleTensor::decode(input), stash);
+ return to_value(SimpleTensor::decode(input));
}
//-----------------------------------------------------------------------------
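Note (minimal sketch, assuming a sparse spec): the stash-free encode/decode pair introduced above round-trips a value like this:

const TensorEngine &engine = SimpleTensorEngine::ref();
Value::UP value = engine.from_spec(TensorSpec("tensor(x{})").add({{"x", "a"}}, 1.0));
nbostream data;
engine.encode(*value, data);
Value::UP copy = engine.decode(data);
// engine.to_spec(*copy) should equal engine.to_spec(*value)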
diff --git a/eval/src/vespa/eval/eval/simple_tensor_engine.h b/eval/src/vespa/eval/eval/simple_tensor_engine.h
index c751f2f6b49..4cfd389dfa9 100644
--- a/eval/src/vespa/eval/eval/simple_tensor_engine.h
+++ b/eval/src/vespa/eval/eval/simple_tensor_engine.h
@@ -19,14 +19,12 @@ private:
public:
static const TensorEngine &ref() { return _engine; };
- ValueType type_of(const Tensor &tensor) const override;
- vespalib::string to_string(const Tensor &tensor) const override;
- TensorSpec to_spec(const Tensor &tensor) const override;
+ TensorSpec to_spec(const Value &value) const override;
+ Value::UP from_spec(const TensorSpec &spec) const override;
- std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
+ void encode(const Value &value, nbostream &output) const override;
+ Value::UP decode(nbostream &input) const override;
- void encode(const Value &value, nbostream &output, Stash &stash) const override;
- const Value &decode(nbostream &input, Stash &stash) const override;
const Value &map(const Value &a, map_fun_t function, Stash &stash) const override;
const Value &join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const override;
const Value &reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
diff --git a/eval/src/vespa/eval/eval/tensor.cpp b/eval/src/vespa/eval/eval/tensor.cpp
index 926606f8e26..645208ba8fb 100644
--- a/eval/src/vespa/eval/eval/tensor.cpp
+++ b/eval/src/vespa/eval/eval/tensor.cpp
@@ -18,7 +18,7 @@ operator==(const Tensor &lhs, const Tensor &rhs)
std::ostream &
operator<<(std::ostream &out, const Tensor &tensor)
{
- out << tensor.engine().to_string(tensor);
+ out << tensor.engine().to_spec(tensor).to_string();
return out;
}
diff --git a/eval/src/vespa/eval/eval/tensor.h b/eval/src/vespa/eval/eval/tensor.h
index 57cd9abe1f5..149e2774bfb 100644
--- a/eval/src/vespa/eval/eval/tensor.h
+++ b/eval/src/vespa/eval/eval/tensor.h
@@ -3,6 +3,7 @@
#pragma once
#include "value_type.h"
+#include "value.h"
namespace vespalib {
namespace eval {
@@ -18,7 +19,7 @@ class TensorEngine;
* engine. TensorEngines should only have a single static instance per
* implementation.
**/
-class Tensor
+class Tensor : public Value
{
private:
const TensorEngine &_engine;
@@ -30,7 +31,8 @@ public:
Tensor(Tensor &&) = delete;
Tensor &operator=(const Tensor &) = delete;
Tensor &operator=(Tensor &&) = delete;
- virtual double as_double() const = 0;
+ bool is_tensor() const override { return true; }
+ const Tensor *as_tensor() const override { return this; }
const TensorEngine &engine() const { return _engine; }
virtual ~Tensor() {}
};
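Note (illustrative): with Tensor now being a Value, a tensor produced by from_spec() can be used directly wherever a Value is expected:

Value::UP value = SimpleTensorEngine::ref().from_spec(TensorSpec("tensor(x[2])"));
const Tensor *tensor = value->as_tensor();    // non-null for tensor-typed values
const TensorEngine &engine = tensor->engine();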
diff --git a/eval/src/vespa/eval/eval/tensor_engine.h b/eval/src/vespa/eval/eval/tensor_engine.h
index 00927f0c1b1..02a7f0c655a 100644
--- a/eval/src/vespa/eval/eval/tensor_engine.h
+++ b/eval/src/vespa/eval/eval/tensor_engine.h
@@ -32,25 +32,23 @@ class TensorSpec;
**/
struct TensorEngine
{
- using ValueType = eval::ValueType;
+ using Aggr = eval::Aggr;
using Tensor = eval::Tensor;
+ using TensorFunction = eval::TensorFunction;
using TensorSpec = eval::TensorSpec;
using Value = eval::Value;
- using map_fun_t = double (*)(double);
+ using ValueType = eval::ValueType;
using join_fun_t = double (*)(double, double);
- using Aggr = eval::Aggr;
+ using map_fun_t = double (*)(double);
- virtual ValueType type_of(const Tensor &tensor) const = 0;
- virtual vespalib::string to_string(const Tensor &tensor) const = 0;
- virtual TensorSpec to_spec(const Tensor &tensor) const = 0;
+ virtual TensorSpec to_spec(const Value &value) const = 0;
+ virtual Value::UP from_spec(const TensorSpec &spec) const = 0;
- virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); }
+ virtual void encode(const Value &value, nbostream &output) const = 0;
+ virtual Value::UP decode(nbostream &input) const = 0;
- virtual std::unique_ptr<Tensor> create(const TensorSpec &spec) const = 0;
+ virtual const TensorFunction &compile(const tensor_function::Node &expr, Stash &) const { return expr; }
- // havardpe: new API, WIP
- virtual void encode(const Value &value, nbostream &output, Stash &stash) const = 0;
- virtual const Value &decode(nbostream &input, Stash &stash) const = 0;
virtual const Value &map(const Value &a, map_fun_t function, Stash &stash) const = 0;
virtual const Value &join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const = 0;
virtual const Value &reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const = 0;
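Note (sketch only): the default compile() returns the IR node itself, so an engine without a specialized backend just evaluates the stash-backed function tree:

Stash stash;
const auto &ir = tensor_function::inject(ValueType::from_spec("tensor(x[3])"), 0, stash);
const TensorFunction &fun = engine.compile(ir, stash);   // engine: any TensorEngine
// &fun == &ir unless the engine overrides compile()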
diff --git a/eval/src/vespa/eval/eval/tensor_function.cpp b/eval/src/vespa/eval/eval/tensor_function.cpp
index 0dcc930087f..9cd7c7fc9c2 100644
--- a/eval/src/vespa/eval/eval/tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/tensor_function.cpp
@@ -5,68 +5,73 @@
#include "operation.h"
#include "tensor.h"
#include "tensor_engine.h"
+#include "simple_tensor_engine.h"
namespace vespalib {
namespace eval {
namespace tensor_function {
-void Inject::accept(TensorFunctionVisitor &visitor) const { visitor.visit(*this); }
-void Reduce::accept(TensorFunctionVisitor &visitor) const { visitor.visit(*this); }
-void Map ::accept(TensorFunctionVisitor &visitor) const { visitor.visit(*this); }
-void Join ::accept(TensorFunctionVisitor &visitor) const { visitor.visit(*this); }
+const TensorEngine &infer_engine(const std::initializer_list<Value::CREF> &values) {
+ for (const Value &value: values) {
+ if (auto tensor = value.as_tensor()) {
+ return tensor->engine();
+ }
+ }
+ return SimpleTensorEngine::ref();
+}
//-----------------------------------------------------------------------------
const Value &
-Inject::eval(const Input &input, Stash &) const
+Inject::eval(ConstArrayRef<Value::CREF> params, Stash &) const
{
- return input.get_tensor(tensor_id);
+ return params[tensor_id];
}
const Value &
-Reduce::eval(const Input &input, Stash &stash) const
+Reduce::eval(ConstArrayRef<Value::CREF> params, Stash &stash) const
{
- const Value &a = tensor->eval(input, stash);
- const TensorEngine &engine = a.as_tensor()->engine();
+ const Value &a = tensor.eval(params, stash);
+ const TensorEngine &engine = infer_engine({a});
return engine.reduce(a, aggr, dimensions, stash);
}
const Value &
-Map::eval(const Input &input, Stash &stash) const
+Map::eval(ConstArrayRef<Value::CREF> params, Stash &stash) const
{
- const Value &a = tensor->eval(input, stash);
- const TensorEngine &engine = a.as_tensor()->engine();
+ const Value &a = tensor.eval(params, stash);
+ const TensorEngine &engine = infer_engine({a});
return engine.map(a, function, stash);
}
const Value &
-Join::eval(const Input &input, Stash &stash) const
+Join::eval(ConstArrayRef<Value::CREF> params, Stash &stash) const
{
- const Value &a = lhs_tensor->eval(input, stash);
- const Value &b = rhs_tensor->eval(input, stash);
- const TensorEngine &engine = a.as_tensor()->engine();
+ const Value &a = lhs_tensor.eval(params, stash);
+ const Value &b = rhs_tensor.eval(params, stash);
+ const TensorEngine &engine = infer_engine({a,b});
return engine.join(a, b, function, stash);
}
//-----------------------------------------------------------------------------
-Node_UP inject(const ValueType &type, size_t tensor_id) {
- return std::make_unique<Inject>(type, tensor_id);
+const Node &inject(const ValueType &type, size_t tensor_id, Stash &stash) {
+ return stash.create<Inject>(type, tensor_id);
}
-Node_UP reduce(Node_UP tensor, Aggr aggr, const std::vector<vespalib::string> &dimensions) {
- ValueType result_type = tensor->result_type.reduce(dimensions);
- return std::make_unique<Reduce>(result_type, std::move(tensor), aggr, dimensions);
+const Node &reduce(const Node &tensor, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) {
+ ValueType result_type = tensor.result_type.reduce(dimensions);
+ return stash.create<Reduce>(result_type, tensor, aggr, dimensions);
}
-Node_UP map(Node_UP tensor, map_fun_t function) {
- ValueType result_type = tensor->result_type;
- return std::make_unique<Map>(result_type, std::move(tensor), function);
+const Node &map(const Node &tensor, map_fun_t function, Stash &stash) {
+ ValueType result_type = tensor.result_type;
+ return stash.create<Map>(result_type, tensor, function);
}
-Node_UP join(Node_UP lhs_tensor, Node_UP rhs_tensor, join_fun_t function) {
- ValueType result_type = ValueType::join(lhs_tensor->result_type, rhs_tensor->result_type);
- return std::make_unique<Join>(result_type, std::move(lhs_tensor), std::move(rhs_tensor), function);
+const Node &join(const Node &lhs_tensor, const Node &rhs_tensor, join_fun_t function, Stash &stash) {
+ ValueType result_type = ValueType::join(lhs_tensor.result_type, rhs_tensor.result_type);
+ return stash.create<Join>(result_type, lhs_tensor, rhs_tensor, function);
}
} // namespace vespalib::eval::tensor_function
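Note (minimal sketch, hypothetical argument): building and evaluating a stash-backed IR with the new builder signatures and the ConstArrayRef-based eval():

Stash stash;
const auto &ir = map(inject(ValueType::from_spec("tensor(x[2])"), 0, stash),
                     operation::Relu::f, stash);
Value::UP arg = SimpleTensorEngine::ref().from_spec(
        TensorSpec("tensor(x[2])").add({{"x", 0}}, -1.0).add({{"x", 1}}, 2.0));
Value::CREF params[1] = {*arg};
const Value &result = ir.eval(ConstArrayRef<Value::CREF>(params, 1), stash);
// infer_engine() picks the engine from the tensor argument, SimpleTensorEngine here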
diff --git a/eval/src/vespa/eval/eval/tensor_function.h b/eval/src/vespa/eval/eval/tensor_function.h
index cff21b7b9aa..359cabc18a0 100644
--- a/eval/src/vespa/eval/eval/tensor_function.h
+++ b/eval/src/vespa/eval/eval/tensor_function.h
@@ -5,8 +5,9 @@
#include <memory>
#include <vector>
#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/util/arrayref.h>
#include "value_type.h"
-#include "operation.h"
+#include "value.h"
#include "aggr.h"
namespace vespalib {
@@ -15,7 +16,6 @@ class Stash;
namespace eval {
-class Value;
class Tensor;
//-----------------------------------------------------------------------------
@@ -29,28 +29,19 @@ class Tensor;
**/
struct TensorFunction
{
- typedef std::unique_ptr<TensorFunction> UP;
-
/**
- * Interface used to obtain input to a tensor function.
- **/
- struct Input {
- virtual const Value &get_tensor(size_t id) const = 0;
- virtual ~Input() {}
- };
-
- /**
- * Evaluate this tensor function based on the given input. The
- * given stash can be used to store temporary objects that need to
- * be kept alive for the return value to be valid. The return
- * value must conform to the result type indicated by the
- * intermediate representation describing this tensor function.
+ * Evaluate this tensor function based on the given
+ * parameters. The given stash can be used to store temporary
+ * objects that need to be kept alive for the return value to be
+ * valid. The return value must conform to the result type
+ * indicated by the intermediate representation describing this
+ * tensor function.
*
* @return result of evaluating this tensor function
- * @param input external stuff needed to evaluate this function
+ * @param params external values needed to evaluate this function
+ * @param stash heterogeneous object store
**/
- virtual const Value &eval(const Input &input, Stash &stash) const = 0;
-
+ virtual const Value &eval(ConstArrayRef<Value::CREF> params, Stash &stash) const = 0;
virtual ~TensorFunction() {}
};
@@ -78,15 +69,13 @@ using join_fun_t = double (*)(double, double);
**/
struct Node : public TensorFunction
{
- ValueType result_type;
+ const ValueType result_type;
Node(const ValueType &result_type_in) : result_type(result_type_in) {}
- virtual void accept(TensorFunctionVisitor &visitor) const = 0;
Node(const Node &) = delete;
Node &operator=(const Node &) = delete;
Node(Node &&) = delete;
Node &operator=(Node &&) = delete;
};
-using Node_UP = std::unique_ptr<Node>;
/**
* Simple typecasting utility.
@@ -95,68 +84,53 @@ template <typename T>
const T *as(const Node &node) { return dynamic_cast<const T *>(&node); }
struct Inject : Node {
- size_t tensor_id;
+ const size_t tensor_id;
Inject(const ValueType &result_type_in,
size_t tensor_id_in)
: Node(result_type_in), tensor_id(tensor_id_in) {}
- void accept(TensorFunctionVisitor &visitor) const override;
- const Value &eval(const Input &input, Stash &) const override;
+ const Value &eval(ConstArrayRef<Value::CREF> params, Stash &) const override;
};
struct Reduce : Node {
- Node_UP tensor;
- Aggr aggr;
- std::vector<vespalib::string> dimensions;
+ const Node &tensor;
+ const Aggr aggr;
+ const std::vector<vespalib::string> dimensions;
Reduce(const ValueType &result_type_in,
- Node_UP tensor_in,
+ const Node &tensor_in,
Aggr aggr_in,
const std::vector<vespalib::string> &dimensions_in)
- : Node(result_type_in), tensor(std::move(tensor_in)), aggr(aggr_in), dimensions(dimensions_in) {}
- void accept(TensorFunctionVisitor &visitor) const override;
- const Value &eval(const Input &input, Stash &stash) const override;
+ : Node(result_type_in), tensor(tensor_in), aggr(aggr_in), dimensions(dimensions_in) {}
+ const Value &eval(ConstArrayRef<Value::CREF> params, Stash &stash) const override;
};
struct Map : Node {
- Node_UP tensor;
- map_fun_t function;
+ const Node &tensor;
+ const map_fun_t function;
Map(const ValueType &result_type_in,
- Node_UP tensor_in,
+ const Node &tensor_in,
map_fun_t function_in)
- : Node(result_type_in), tensor(std::move(tensor_in)), function(function_in) {}
- void accept(TensorFunctionVisitor &visitor) const override;
- const Value &eval(const Input &input, Stash &stash) const override;
+ : Node(result_type_in), tensor(tensor_in), function(function_in) {}
+ const Value &eval(ConstArrayRef<Value::CREF> params, Stash &stash) const override;
};
struct Join : Node {
- Node_UP lhs_tensor;
- Node_UP rhs_tensor;
- join_fun_t function;
+ const Node &lhs_tensor;
+ const Node &rhs_tensor;
+ const join_fun_t function;
Join(const ValueType &result_type_in,
- Node_UP lhs_tensor_in,
- Node_UP rhs_tensor_in,
+ const Node &lhs_tensor_in,
+ const Node &rhs_tensor_in,
join_fun_t function_in)
- : Node(result_type_in), lhs_tensor(std::move(lhs_tensor_in)),
- rhs_tensor(std::move(rhs_tensor_in)), function(function_in) {}
- void accept(TensorFunctionVisitor &visitor) const override;
- const Value &eval(const Input &input, Stash &stash) const override;
+ : Node(result_type_in), lhs_tensor(lhs_tensor_in),
+ rhs_tensor(rhs_tensor_in), function(function_in) {}
+ const Value &eval(ConstArrayRef<Value::CREF> params, Stash &stash) const override;
};
-Node_UP inject(const ValueType &type, size_t tensor_id);
-Node_UP reduce(Node_UP tensor, Aggr aggr, const std::vector<vespalib::string> &dimensions);
-Node_UP map(Node_UP tensor, map_fun_t function);
-Node_UP join(Node_UP lhs_tensor, Node_UP rhs_tensor, join_fun_t function);
+const Node &inject(const ValueType &type, size_t tensor_id, Stash &stash);
+const Node &reduce(const Node &tensor, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash);
+const Node &map(const Node &tensor, map_fun_t function, Stash &stash);
+const Node &join(const Node &lhs_tensor, const Node &rhs_tensor, join_fun_t function, Stash &stash);
} // namespace vespalib::eval::tensor_function
-
-struct TensorFunctionVisitor {
- virtual void visit(const tensor_function::Inject &) = 0;
- virtual void visit(const tensor_function::Reduce &) = 0;
- virtual void visit(const tensor_function::Map &) = 0;
- virtual void visit(const tensor_function::Join &) = 0;
- virtual ~TensorFunctionVisitor() {}
-};
-
-//-----------------------------------------------------------------------------
-
} // namespace vespalib::eval
} // namespace vespalib
diff --git a/eval/src/vespa/eval/eval/test/eval_spec.cpp b/eval/src/vespa/eval/eval/test/eval_spec.cpp
index d214486cf21..baa3ee989d4 100644
--- a/eval/src/vespa/eval/eval/test/eval_spec.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_spec.cpp
@@ -150,6 +150,7 @@ EvalSpec::add_function_call_cases() {
.add_case({my_nan}, 1.0).add_case({my_inf}, 0.0).add_case({-my_inf}, 0.0);
add_rule({"a", -1.0, 1.0}, "relu(a)", [](double a){ return std::max(a, 0.0); });
add_rule({"a", -1.0, 1.0}, "sigmoid(a)", [](double a){ return 1.0 / (1.0 + std::exp(-1.0 * a)); });
+ add_rule({"a", -1.0, 1.0}, "elu(a)", [](double a){ return (a < 0) ? std::exp(a)-1 : a; });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "atan2(a,b)", [](double a, double b){ return std::atan2(a, b); });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "ldexp(a,b)", [](double a, double b){ return std::ldexp(a, b); });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "pow(a,b)", [](double a, double b){ return std::pow(a, b); });
diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
index e0a9f731804..23562f4a186 100644
--- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
+++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
@@ -38,18 +38,17 @@ struct Eval {
double _number;
TensorSpec _tensor;
public:
- Result(const Value &value) : _type(Type::ERROR), _number(error_value), _tensor("error") {
+ Result() : _type(Type::ERROR), _number(error_value), _tensor("error") {}
+ Result(const TensorEngine &engine, const Value &value) : _type(Type::ERROR), _number(error_value), _tensor("error") {
if (value.is_double()) {
_type = Type::NUMBER;
- _number = value.as_double();
- _tensor = TensorSpec("double").add({}, _number);
- } else if (value.is_tensor()) {
+ }
+ if (value.is_tensor()) {
+ EXPECT_TRUE(_type == Type::ERROR);
_type = Type::TENSOR;
- _tensor = value.as_tensor()->engine().to_spec(*value.as_tensor());
- if (_tensor.type() == "double") {
- _number = as_double(_tensor);
- }
}
+ _number = value.as_double();
+ _tensor = engine.to_spec(value);
}
bool is_error() const { return (_type == Type::ERROR); }
bool is_number() const { return (_type == Type::NUMBER); }
@@ -60,20 +59,20 @@ struct Eval {
}
const TensorSpec &tensor() const {
EXPECT_TRUE(is_tensor());
- return _tensor;
+ return _tensor;
}
};
virtual Result eval(const TensorEngine &) const {
TEST_ERROR("wrong signature");
- return Result(ErrorValue());
+ return Result();
}
virtual Result eval(const TensorEngine &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
- return Result(ErrorValue());
+ return Result();
}
virtual Result eval(const TensorEngine &, const TensorSpec &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
- return Result(ErrorValue());
+ return Result();
}
virtual ~Eval() {}
};
@@ -87,7 +86,7 @@ struct SafeEval : Eval {
return unsafe.eval(engine);
} catch (std::exception &e) {
TEST_ERROR(e.what());
- return Result(ErrorValue());
+ return Result();
}
}
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
@@ -95,7 +94,7 @@ struct SafeEval : Eval {
return unsafe.eval(engine, a);
} catch (std::exception &e) {
TEST_ERROR(e.what());
- return Result(ErrorValue());
+ return Result();
}
}
@@ -104,7 +103,7 @@ struct SafeEval : Eval {
return unsafe.eval(engine, a, b);
} catch (std::exception &e) {
TEST_ERROR(e.what());
- return Result(ErrorValue());
+ return Result();
}
}
};
@@ -125,7 +124,7 @@ struct Expr_V : Eval {
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
InterpretedFunction::SimpleObjectParams params({});
- return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
+ return Result(engine, check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
}
};
@@ -139,9 +138,9 @@ struct Expr_T : Eval {
NodeTypes types(fun, {a_type});
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
- TensorValue va(engine.create(a));
- InterpretedFunction::SimpleObjectParams params({va});
- return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
+ Value::UP va = engine.from_spec(a);
+ InterpretedFunction::SimpleObjectParams params({*va});
+ return Result(engine, check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
}
};
@@ -156,19 +155,15 @@ struct Expr_TT : Eval {
NodeTypes types(fun, {a_type, b_type});
InterpretedFunction ifun(engine, fun, types);
InterpretedFunction::Context ctx(ifun);
- TensorValue va(engine.create(a));
- TensorValue vb(engine.create(b));
- InterpretedFunction::SimpleObjectParams params({va,vb});
- return Result(check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
+ Value::UP va = engine.from_spec(a);
+ Value::UP vb = engine.from_spec(b);
+ InterpretedFunction::SimpleObjectParams params({*va,*vb});
+ return Result(engine, check_type(ifun.eval(ctx, params), types.get_type(fun.root())));
}
};
const Value &make_value(const TensorEngine &engine, const TensorSpec &spec, Stash &stash) {
- if (spec.type() == "double") {
- double number = as_double(spec);
- return stash.create<DoubleValue>(number);
- }
- return stash.create<TensorValue>(engine.create(spec));
+ return *stash.create<Value::UP>(engine.from_spec(spec));
}
//-----------------------------------------------------------------------------
@@ -179,11 +174,11 @@ struct ImmediateReduce : Eval {
std::vector<vespalib::string> dimensions;
ImmediateReduce(Aggr aggr_in) : aggr(aggr_in), dimensions() {}
ImmediateReduce(Aggr aggr_in, const vespalib::string &dimension)
- : aggr(aggr_in), dimensions({dimension}) {}
+ : aggr(aggr_in), dimensions({dimension}) {}
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
- return Result(engine.reduce(lhs, aggr, dimensions, stash));
+ return Result(engine, engine.reduce(lhs, aggr, dimensions, stash));
}
};
@@ -195,7 +190,7 @@ struct ImmediateMap : Eval {
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
- return Result(engine.map(lhs, function, stash));
+ return Result(engine, engine.map(lhs, function, stash));
}
};
@@ -208,7 +203,7 @@ struct ImmediateJoin : Eval {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
const auto &rhs = make_value(engine, b, stash);
- return Result(engine.join(lhs, rhs, function, stash));
+ return Result(engine, engine.join(lhs, rhs, function, stash));
}
};
@@ -220,7 +215,7 @@ struct ImmediateConcat : Eval {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
const auto &rhs = make_value(engine, b, stash);
- return Result(engine.concat(lhs, rhs, dimension, stash));
+ return Result(engine, engine.concat(lhs, rhs, dimension, stash));
}
};
@@ -233,7 +228,7 @@ struct ImmediateRename : Eval {
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
const auto &lhs = make_value(engine, a, stash);
- return Result(engine.rename(lhs, from, to, stash));
+ return Result(engine, engine.rename(lhs, from, to, stash));
}
};
@@ -243,20 +238,28 @@ const size_t tensor_id_a = 11;
const size_t tensor_id_b = 12;
// input used when evaluating in retained mode
-struct Input : TensorFunction::Input {
- std::vector<TensorValue> tensors;
- Input(std::unique_ptr<Tensor> a) : tensors() {
- tensors.emplace_back(std::move(a));
+struct Input {
+ std::vector<Value::UP> tensors;
+ std::vector<Value::CREF> params;
+ ~Input() {}
+ void pad_params() {
+ for (size_t i = 0; i < tensor_id_a; ++i) {
+ params.push_back(ErrorValue::instance);
+ }
}
- Input(std::unique_ptr<Tensor> a, std::unique_ptr<Tensor> b) : tensors() {
- tensors.emplace_back(std::move(a));
- tensors.emplace_back(std::move(b));
+ Input(Value::UP a) : tensors() {
+ pad_params();
+ tensors.push_back(std::move(a));
+ params.emplace_back(*tensors.back());
}
- const Value &get_tensor(size_t id) const override {
- size_t offset = (id - tensor_id_a);
- ASSERT_GREATER(tensors.size(), offset);
- return tensors[offset];
+ Input(Value::UP a, Value::UP b) : tensors() {
+ pad_params();
+ tensors.push_back(std::move(a));
+ params.emplace_back(*tensors.back());
+ tensors.push_back(std::move(b));
+ params.emplace_back(*tensors.back());
}
+ ConstArrayRef<Value::CREF> get() const { return params; }
};
// evaluate tensor reduce operation using tensor engine retained api
@@ -267,13 +270,13 @@ struct RetainedReduce : Eval {
RetainedReduce(Aggr aggr_in, const vespalib::string &dimension)
: aggr(aggr_in), dimensions({dimension}) {}
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
- auto a_type = ValueType::from_spec(a.type());
- auto ir = tensor_function::reduce(tensor_function::inject(a_type, tensor_id_a), aggr, dimensions);
- ValueType expect_type = ir->result_type;
- auto fun = engine.compile(std::move(ir));
- Input input(engine.create(a));
Stash stash;
- return Result(check_type(fun->eval(input, stash), expect_type));
+ auto a_type = ValueType::from_spec(a.type());
+ const auto &ir = tensor_function::reduce(tensor_function::inject(a_type, tensor_id_a, stash), aggr, dimensions, stash);
+ ValueType expect_type = ir.result_type;
+ const auto &fun = engine.compile(ir, stash);
+ Input input(engine.from_spec(a));
+ return Result(engine, check_type(fun.eval(input.get(), stash), expect_type));
}
};
@@ -282,13 +285,13 @@ struct RetainedMap : Eval {
map_fun_t function;
RetainedMap(map_fun_t function_in) : function(function_in) {}
Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
- auto a_type = ValueType::from_spec(a.type());
- auto ir = tensor_function::map(tensor_function::inject(a_type, tensor_id_a), function);
- ValueType expect_type = ir->result_type;
- auto fun = engine.compile(std::move(ir));
- Input input(engine.create(a));
Stash stash;
- return Result(check_type(fun->eval(input, stash), expect_type));
+ auto a_type = ValueType::from_spec(a.type());
+ const auto &ir = tensor_function::map(tensor_function::inject(a_type, tensor_id_a, stash), function, stash);
+ ValueType expect_type = ir.result_type;
+ const auto &fun = engine.compile(ir, stash);
+ Input input(engine.from_spec(a));
+ return Result(engine, check_type(fun.eval(input.get(), stash), expect_type));
}
};
@@ -297,16 +300,16 @@ struct RetainedJoin : Eval {
join_fun_t function;
RetainedJoin(join_fun_t function_in) : function(function_in) {}
Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ Stash stash;
auto a_type = ValueType::from_spec(a.type());
auto b_type = ValueType::from_spec(b.type());
- auto ir = tensor_function::join(tensor_function::inject(a_type, tensor_id_a),
- tensor_function::inject(b_type, tensor_id_b),
- function);
- ValueType expect_type = ir->result_type;
- auto fun = engine.compile(std::move(ir));
- Input input(engine.create(a), engine.create(b));
- Stash stash;
- return Result(check_type(fun->eval(input, stash), expect_type));
+ const auto &ir = tensor_function::join(tensor_function::inject(a_type, tensor_id_a, stash),
+ tensor_function::inject(b_type, tensor_id_b, stash),
+ function, stash);
+ ValueType expect_type = ir.result_type;
+ const auto &fun = engine.compile(ir, stash);
+ Input input(engine.from_spec(a), engine.from_spec(b));
+ return Result(engine, check_type(fun.eval(input.get(), stash), expect_type));
}
};
@@ -369,21 +372,15 @@ struct TestContext {
TestContext(const vespalib::string &module_path_in, const TensorEngine &engine_in)
: module_path(module_path_in), ref_engine(SimpleTensorEngine::ref()), engine(engine_in) {}
- std::unique_ptr<Tensor> tensor(const TensorSpec &spec) {
- auto result = engine.create(spec);
- EXPECT_EQUAL(spec.type(), engine.type_of(*result).to_spec());
- return result;
- }
-
//-------------------------------------------------------------------------
void verify_create_type(const vespalib::string &type_spec) {
- auto tensor = engine.create(TensorSpec(type_spec));
- EXPECT_TRUE(&engine == &tensor->engine());
- EXPECT_EQUAL(type_spec, engine.type_of(*tensor).to_spec());
+ Value::UP value = engine.from_spec(TensorSpec(type_spec));
+ EXPECT_EQUAL(type_spec, value->type().to_spec());
}
void test_tensor_create_type() {
+ TEST_DO(verify_create_type("error"));
TEST_DO(verify_create_type("double"));
TEST_DO(verify_create_type("tensor(x{})"));
TEST_DO(verify_create_type("tensor(x{},y{})"));
@@ -491,6 +488,7 @@ struct TestContext {
TEST_DO(test_map_op("isNan(a)", operation::IsNan::f, Mask2Seq(SkipNth(3), 1.0, my_nan)));
TEST_DO(test_map_op("relu(a)", operation::Relu::f, Sub2(Div10(N()))));
TEST_DO(test_map_op("sigmoid(a)", operation::Sigmoid::f, Sub2(Div10(N()))));
+ TEST_DO(test_map_op("elu(a)", operation::Elu::f, Sub2(Div10(N()))));
TEST_DO(test_map_op("a in [1,5,7,13,42]", MyIn::f, N()));
TEST_DO(test_map_op("(a+1)*2", MyOp::f, Div10(N())));
}
@@ -731,7 +729,7 @@ struct TestContext {
TEST_STATE(make_string("lhs shape: %s, rhs shape: %s",
lhs_input.type().c_str(),
rhs_input.type().c_str()).c_str());
- Eval::Result expect = ImmediateJoin(op).eval(ref_engine, lhs_input, rhs_input);
+ Eval::Result expect = ImmediateJoin(op).eval(ref_engine, lhs_input, rhs_input);
TEST_DO(verify_result(safe(eval).eval(engine, lhs_input, rhs_input), expect));
}
TEST_DO(test_fixed_sparse_cases_apply_op(eval, op));
@@ -867,8 +865,8 @@ struct TestContext {
{
Stash stash;
nbostream data;
- encode_engine.encode(make_value(encode_engine, spec, stash), data, stash);
- TEST_DO(verify_result(Eval::Result(decode_engine.decode(data, stash)), spec));
+ encode_engine.encode(make_value(encode_engine, spec, stash), data);
+ TEST_DO(verify_result(Eval::Result(decode_engine, *decode_engine.decode(data)), spec));
}
void verify_encode_decode(const TensorSpec &spec) {
@@ -884,13 +882,13 @@ struct TestContext {
const Inspector &binary = test["binary"];
EXPECT_GREATER(binary.entries(), 0u);
nbostream encoded;
- engine.encode(make_value(engine, spec, stash), encoded, stash);
+ engine.encode(make_value(engine, spec, stash), encoded);
test.setData("encoded", Memory(encoded.peek(), encoded.size()));
bool matched_encode = false;
for (size_t i = 0; i < binary.entries(); ++i) {
nbostream data = extract_data(binary[i].asString());
matched_encode = (matched_encode || is_same(encoded, data));
- TEST_DO(verify_result(Eval::Result(engine.decode(data, stash)), spec));
+ TEST_DO(verify_result(Eval::Result(engine, *engine.decode(data)), spec));
EXPECT_EQUAL(data.size(), 0u);
}
EXPECT_TRUE(matched_encode);
diff --git a/eval/src/vespa/eval/eval/value.cpp b/eval/src/vespa/eval/eval/value.cpp
index 456d80c0ff0..4bfd758f9cd 100644
--- a/eval/src/vespa/eval/eval/value.cpp
+++ b/eval/src/vespa/eval/eval/value.cpp
@@ -6,19 +6,10 @@
namespace vespalib {
namespace eval {
-ErrorValue ErrorValue::instance;
+ValueType ErrorValue::_type = ValueType::error_type();
+const ErrorValue ErrorValue::instance;
-double
-TensorValue::as_double() const
-{
- return _tensor->as_double();
-}
-
-ValueType
-TensorValue::type() const
-{
- return _tensor->engine().type_of(*_tensor);
-}
+ValueType DoubleValue::_type = ValueType::double_type();
} // namespace vespalib::eval
} // namespace vespalib
diff --git a/eval/src/vespa/eval/eval/value.h b/eval/src/vespa/eval/eval/value.h
index 8826faed140..08ca9792739 100644
--- a/eval/src/vespa/eval/eval/value.h
+++ b/eval/src/vespa/eval/eval/value.h
@@ -5,7 +5,6 @@
#include <vespa/vespalib/stllike/string.h>
#include <memory>
#include <vespa/vespalib/util/stash.h>
-#include "tensor.h"
#include "value_type.h"
namespace vespalib {
@@ -25,43 +24,33 @@ struct Value {
virtual bool is_double() const { return false; }
virtual bool is_tensor() const { return false; }
virtual double as_double() const { return 0.0; }
- virtual bool as_bool() const { return false; }
+ bool as_bool() const { return (as_double() != 0.0); }
virtual const Tensor *as_tensor() const { return nullptr; }
- virtual ValueType type() const = 0;
+ virtual const ValueType &type() const = 0;
virtual ~Value() {}
};
-struct ErrorValue : public Value {
- static ErrorValue instance;
+class ErrorValue : public Value
+{
+private:
+ static ValueType _type;
+public:
+ static const ErrorValue instance;
bool is_error() const override { return true; }
double as_double() const override { return error_value; }
- ValueType type() const override { return ValueType::error_type(); }
+ const ValueType &type() const override { return _type; }
};
class DoubleValue : public Value
{
private:
double _value;
+ static ValueType _type;
public:
DoubleValue(double value) : _value(value) {}
bool is_double() const override { return true; }
double as_double() const override { return _value; }
- bool as_bool() const override { return (_value != 0.0); }
- ValueType type() const override { return ValueType::double_type(); }
-};
-
-class TensorValue : public Value
-{
-private:
- const Tensor *_tensor;
- std::unique_ptr<Tensor> _stored;
-public:
- TensorValue(const Tensor &value) : _tensor(&value), _stored() {}
- TensorValue(std::unique_ptr<Tensor> value) : _tensor(value.get()), _stored(std::move(value)) {}
- bool is_tensor() const override { return true; }
- double as_double() const override;
- const Tensor *as_tensor() const override { return _tensor; }
- ValueType type() const override;
+ const ValueType &type() const override { return _type; }
};
} // namespace vespalib::eval
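
With TensorValue removed, the only concrete Value subclasses left in value.h are ErrorValue and DoubleValue; type() now returns a const reference to a shared ValueType and as_bool() is derived from as_double(). A minimal sketch of the slimmed-down interface, assuming nothing beyond the declarations shown above:

    #include <vespa/eval/eval/value.h>
    #include <cassert>

    using vespalib::eval::DoubleValue;
    using vespalib::eval::Value;

    void value_interface_sketch() {
        DoubleValue three(3.0);
        const Value &value = three;
        assert(value.is_double());
        assert(value.as_double() == 3.0);
        assert(value.as_bool());              // now non-virtual, derived from as_double() != 0.0
        assert(value.type().is_double());     // type() returns a reference to a shared static ValueType
        assert(value.as_tensor() == nullptr); // default implementation; tensor values override this
    }
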
diff --git a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
index f026ca060c6..38d5bbc643b 100644
--- a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
+++ b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
@@ -68,17 +68,13 @@ void decode_json(const vespalib::string &path, Slime &slime) {
} // namespace vespalib::eval::<unnamed>
-using ErrorConstant = SimpleConstantValue<ErrorValue>;
-using TensorConstant = SimpleConstantValue<TensorValue>;
-
ConstantValue::UP
ConstantTensorLoader::create(const vespalib::string &path, const vespalib::string &type) const
{
ValueType value_type = ValueType::from_spec(type);
if (value_type.is_error()) {
LOG(warning, "invalid type specification: %s", type.c_str());
- auto tensor = _engine.create(TensorSpec("double"));
- return std::make_unique<TensorConstant>(_engine.type_of(*tensor), std::move(tensor));
+ return std::make_unique<SimpleConstantValue>(_engine.from_spec(TensorSpec("double")));
}
Slime slime;
decode_json(path, slime);
@@ -96,8 +92,7 @@ ConstantTensorLoader::create(const vespalib::string &path, const vespalib::strin
cells[i]["address"].traverse(extractor);
spec.add(address, cells[i]["value"].asDouble());
}
- auto tensor = _engine.create(spec);
- return std::make_unique<TensorConstant>(_engine.type_of(*tensor), std::move(tensor));
+ return std::make_unique<SimpleConstantValue>(_engine.from_spec(spec));
}
} // namespace vespalib::eval
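
On the loader side, the engine now hands back an owned Value directly, and the no-longer-templated SimpleConstantValue (redefined in the next file) simply wraps it, deriving its type from the value itself. A hedged construction sketch for the fallback "double" case handled above; the use of DefaultTensorEngine::ref() here is illustrative:

    #include <vespa/eval/eval/tensor_spec.h>
    #include <vespa/eval/eval/value_cache/constant_value.h>
    #include <vespa/eval/tensor/default_tensor_engine.h>
    #include <memory>

    using vespalib::eval::ConstantValue;
    using vespalib::eval::SimpleConstantValue;
    using vespalib::eval::TensorSpec;
    using vespalib::tensor::DefaultTensorEngine;

    ConstantValue::UP make_double_constant(double v) {
        const auto &engine = DefaultTensorEngine::ref();
        auto value = engine.from_spec(TensorSpec("double").add({}, v));  // yields a DoubleValue
        return std::make_unique<SimpleConstantValue>(std::move(value));  // type() is forwarded to the wrapped value
    }
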
diff --git a/eval/src/vespa/eval/eval/value_cache/constant_value.h b/eval/src/vespa/eval/eval/value_cache/constant_value.h
index 462dc3ad9b4..ba7fe6fcf3d 100644
--- a/eval/src/vespa/eval/eval/value_cache/constant_value.h
+++ b/eval/src/vespa/eval/eval/value_cache/constant_value.h
@@ -21,19 +21,13 @@ struct ConstantValue {
virtual ~ConstantValue() {}
};
-/**
- * A simple implementation of a constant value that bundles together a
- * ValueType instance with a specific Value subclass instance.
- **/
-template <typename VALUE>
-struct SimpleConstantValue : ConstantValue {
- ValueType my_type;
- VALUE my_value;
- template <typename... Args>
- SimpleConstantValue(const ValueType &type_in, Args &&...args)
- : my_type(type_in), my_value(std::forward<Args>(args)...) {}
- const ValueType &type() const override { return my_type; }
- const Value &value() const override { return my_value; }
+class SimpleConstantValue : public ConstantValue {
+private:
+ const Value::UP _value;
+public:
+ SimpleConstantValue(Value::UP value) : _value(std::move(value)) {}
+ const ValueType &type() const override { return _value->type(); }
+ const Value &value() const override { return *_value; }
};
/**
diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
index 7adb95f69ca..2506e6fcf0e 100644
--- a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
+++ b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
@@ -11,6 +11,7 @@
#include <vespa/eval/eval/value.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/simple_tensor_engine.h>
+#include <vespa/eval/eval/operation.h>
#include <cassert>
@@ -21,8 +22,8 @@ using eval::Aggr;
using eval::Aggregator;
using eval::DoubleValue;
using eval::ErrorValue;
+using eval::TensorFunction;
using eval::TensorSpec;
-using eval::TensorValue;
using eval::Value;
using eval::ValueType;
@@ -36,18 +37,13 @@ const eval::TensorEngine &default_engine() { return DefaultTensorEngine::ref();
// map tensors to simple tensors before fall-back evaluation
-const eval::SimpleTensor &to_simple(const eval::Tensor &tensor, Stash &stash) {
- if (auto wrapped = dynamic_cast<const WrappedSimpleTensor *>(&tensor)) {
- return wrapped->get();
- }
- TensorSpec spec = tensor.engine().to_spec(tensor);
- using PTR = std::unique_ptr<eval::SimpleTensor>;
- return *stash.create<PTR>(eval::SimpleTensor::create(spec));
-}
-
const Value &to_simple(const Value &value, Stash &stash) {
if (auto tensor = value.as_tensor()) {
- return stash.create<TensorValue>(to_simple(*tensor, stash));
+ if (auto wrapped = dynamic_cast<const WrappedSimpleTensor *>(tensor)) {
+ return wrapped->get();
+ }
+ TensorSpec spec = tensor->engine().to_spec(*tensor);
+ return *stash.create<Value::UP>(eval::SimpleTensor::create(spec));
}
return value;
}
@@ -58,11 +54,11 @@ const Value &to_default(const Value &value, Stash &stash) {
if (auto tensor = value.as_tensor()) {
if (auto simple = dynamic_cast<const eval::SimpleTensor *>(tensor)) {
if (!Tensor::supported({simple->type()})) {
- return stash.create<TensorValue>(std::make_unique<WrappedSimpleTensor>(*simple));
+ return stash.create<WrappedSimpleTensor>(*simple);
}
}
TensorSpec spec = tensor->engine().to_spec(*tensor);
- return stash.create<TensorValue>(default_engine().create(spec));
+ return *stash.create<Value::UP>(default_engine().from_spec(spec));
}
return value;
}
@@ -72,9 +68,19 @@ const Value &to_value(std::unique_ptr<Tensor> tensor, Stash &stash) {
return ErrorValue::instance;
}
if (tensor->getType().is_tensor()) {
- return stash.create<TensorValue>(std::move(tensor));
+ return *stash.create<Value::UP>(std::move(tensor));
}
- return stash.create<DoubleValue>(tensor->sum());
+ return stash.create<DoubleValue>(tensor->as_double());
+}
+
+Value::UP to_value(std::unique_ptr<Tensor> tensor) {
+ if (!tensor) {
+ return std::make_unique<ErrorValue>();
+ }
+ if (tensor->type().is_tensor()) {
+ return std::move(tensor);
+ }
+ return std::make_unique<DoubleValue>(tensor->as_double());
}
const Value &fallback_join(const Value &a, const Value &b, join_fun_t function, Stash &stash) {
@@ -89,38 +95,22 @@ const Value &fallback_reduce(const Value &a, eval::Aggr aggr, const std::vector<
const DefaultTensorEngine DefaultTensorEngine::_engine;
-eval::ValueType
-DefaultTensorEngine::type_of(const Tensor &tensor) const
-{
- assert(&tensor.engine() == this);
- const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
- return my_tensor.getType();
-}
-
-vespalib::string
-DefaultTensorEngine::to_string(const Tensor &tensor) const
-{
- assert(&tensor.engine() == this);
- const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
- return my_tensor.toString();
-}
-
TensorSpec
-DefaultTensorEngine::to_spec(const Tensor &tensor) const
-{
- assert(&tensor.engine() == this);
- const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
- return my_tensor.toSpec();
-}
-
-eval::TensorFunction::UP
-DefaultTensorEngine::compile(eval::tensor_function::Node_UP expr) const
+DefaultTensorEngine::to_spec(const Value &value) const
{
- return DenseTensorFunctionCompiler::compile(std::move(expr));
+ if (value.is_double()) {
+ return TensorSpec("double").add({}, value.as_double());
+ } else if (auto tensor = value.as_tensor()) {
+ assert(&tensor->engine() == this);
+ const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(*tensor);
+ return my_tensor.toSpec();
+ } else {
+ return TensorSpec("error");
+ }
}
-std::unique_ptr<eval::Tensor>
-DefaultTensorEngine::create(const TensorSpec &spec) const
+Value::UP
+DefaultTensorEngine::from_spec(const TensorSpec &spec) const
{
ValueType type = ValueType::from_spec(spec.type());
bool is_dense = false;
@@ -149,7 +139,7 @@ DefaultTensorEngine::create(const TensorSpec &spec) const
builder.addCell(cell.second);
}
return builder.build();
- } else { // sparse
+ } else if (is_sparse) {
DefaultTensor::builder builder;
std::map<vespalib::string,DefaultTensor::builder::Dimension> dimension_map;
for (const auto &dimension: type.dimensions()) {
@@ -163,6 +153,12 @@ DefaultTensorEngine::create(const TensorSpec &spec) const
builder.add_cell(cell.second);
}
return builder.build();
+ } else if (type.is_double()) {
+ double value = spec.cells().empty() ? 0.0 : spec.cells().begin()->second.value;
+ return std::make_unique<DoubleValue>(value);
+ } else {
+ assert(type.is_error());
+ return std::make_unique<ErrorValue>();
}
}
@@ -189,7 +185,7 @@ struct CellFunctionBindRightAdapter : tensor::CellFunction {
//-----------------------------------------------------------------------------
void
-DefaultTensorEngine::encode(const Value &value, nbostream &output, Stash &) const
+DefaultTensorEngine::encode(const Value &value, nbostream &output) const
{
if (auto tensor = value.as_tensor()) {
TypedBinaryFormat::serialize(output, static_cast<const tensor::Tensor &>(*tensor));
@@ -198,10 +194,18 @@ DefaultTensorEngine::encode(const Value &value, nbostream &output, Stash &) cons
}
}
-const Value &
-DefaultTensorEngine::decode(nbostream &input, Stash &stash) const
+Value::UP
+DefaultTensorEngine::decode(nbostream &input) const
+{
+ return to_value(TypedBinaryFormat::deserialize(input));
+}
+
+//-----------------------------------------------------------------------------
+
+const TensorFunction &
+DefaultTensorEngine::compile(const eval::tensor_function::Node &expr, Stash &stash) const
{
- return to_value(TypedBinaryFormat::deserialize(input), stash);
+ return DenseTensorFunctionCompiler::compile(expr, stash);
}
//-----------------------------------------------------------------------------
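
Taken together, these changes replace create()/type_of()/to_string()/to_spec(Tensor) with a Value-centric API: from_spec() produces an owned Value, to_spec() accepts any Value (double, tensor, or error), and encode()/decode() no longer thread a Stash through. A round-trip sketch under those signatures; the mapped-tensor spec, address syntax, and nbostream include path are illustrative assumptions:

    #include <vespa/eval/eval/tensor_spec.h>
    #include <vespa/eval/eval/value.h>
    #include <vespa/eval/tensor/default_tensor_engine.h>
    #include <vespa/vespalib/objects/nbostream.h>

    using vespalib::nbostream;
    using vespalib::eval::TensorSpec;
    using vespalib::eval::Value;
    using vespalib::tensor::DefaultTensorEngine;

    void engine_round_trip_sketch() {
        const auto &engine = DefaultTensorEngine::ref();
        TensorSpec spec = TensorSpec("tensor(x{})").add({{"x", "a"}}, 1.0);
        Value::UP value = engine.from_spec(spec);       // replaces create(spec)
        nbostream data;
        engine.encode(*value, data);                    // Stash parameter dropped
        Value::UP copy = engine.decode(data);           // returns an owned Value instead of a stashed reference
        TensorSpec round_trip = engine.to_spec(*copy);  // to_spec() now takes a Value
        (void) round_trip;                              // expected to match the original spec
    }
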
diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.h b/eval/src/vespa/eval/tensor/default_tensor_engine.h
index bbb03aceb1f..1cef4ba2d35 100644
--- a/eval/src/vespa/eval/tensor/default_tensor_engine.h
+++ b/eval/src/vespa/eval/tensor/default_tensor_engine.h
@@ -19,16 +19,14 @@ private:
public:
static const TensorEngine &ref() { return _engine; };
- ValueType type_of(const Tensor &tensor) const override;
- vespalib::string to_string(const Tensor &tensor) const override;
- TensorSpec to_spec(const Tensor &tensor) const override;
+ TensorSpec to_spec(const Value &value) const override;
+ Value::UP from_spec(const TensorSpec &spec) const override;
- virtual eval::TensorFunction::UP compile(eval::tensor_function::Node_UP expr) const override;
+ void encode(const Value &value, nbostream &output) const override;
+ Value::UP decode(nbostream &input) const override;
- std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
+ const TensorFunction &compile(const eval::tensor_function::Node &expr, Stash &stash) const override;
- void encode(const Value &value, nbostream &output, Stash &stash) const override;
- const Value &decode(nbostream &input, Stash &stash) const override;
const Value &map(const Value &a, map_fun_t function, Stash &stash) const override;
const Value &join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const override;
const Value &reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
diff --git a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp
index 530eaed9aa9..705496714fa 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.cpp
@@ -31,10 +31,10 @@ getCellsRef(const eval::Value &value)
}
const eval::Value &
-DenseDotProductFunction::eval(const Input &input, Stash &stash) const
+DenseDotProductFunction::eval(ConstArrayRef<eval::Value::CREF> params, Stash &stash) const
{
- DenseTensorView::CellsRef lhsCells = getCellsRef(input.get_tensor(_lhsTensorId));
- DenseTensorView::CellsRef rhsCells = getCellsRef(input.get_tensor(_rhsTensorId));
+ DenseTensorView::CellsRef lhsCells = getCellsRef(params[_lhsTensorId]);
+ DenseTensorView::CellsRef rhsCells = getCellsRef(params[_rhsTensorId]);
size_t numCells = std::min(lhsCells.size(), rhsCells.size());
double result = _hwAccelerator->dotProduct(lhsCells.cbegin(), rhsCells.cbegin(), numCells);
return stash.create<eval::DoubleValue>(result);
diff --git a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h
index 905939cc781..8ad57d69524 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h
+++ b/eval/src/vespa/eval/tensor/dense/dense_dot_product_function.h
@@ -24,7 +24,7 @@ public:
DenseDotProductFunction(size_t lhsTensorId_, size_t rhsTensorId_);
size_t lhsTensorId() const { return _lhsTensorId; }
size_t rhsTensorId() const { return _rhsTensorId; }
- virtual const eval::Value &eval(const Input &input, Stash &stash) const override;
+ const eval::Value &eval(ConstArrayRef<eval::Value::CREF> params, Stash &stash) const override;
};
} // namespace tensor
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp
index 354c0a2f466..5d7e0c83267 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor.cpp
@@ -26,7 +26,7 @@ calcCellsSize(const eval::ValueType &type)
void
checkCellsSize(const DenseTensor &arg)
{
- auto cellsSize = calcCellsSize(arg.type());
+ auto cellsSize = calcCellsSize(arg.fast_type());
if (arg.cells().size() != cellsSize) {
throw IllegalStateException(make_string("Wrong cell size, "
"expected=%zu, "
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
index 10651b59468..65fee767690 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
@@ -14,8 +14,8 @@ template <typename Function>
std::unique_ptr<Tensor>
apply(const DenseTensorView &lhs, const DenseTensorView &rhs, Function &&func)
{
- DenseTensorAddressCombiner combiner(lhs.type(), rhs.type());
- DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.type(), rhs.type()));
+ DenseTensorAddressCombiner combiner(lhs.fast_type(), rhs.fast_type());
+ DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.fast_type(), rhs.fast_type()));
for (DenseTensorCellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
for (DenseTensorCellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) {
bool combineSuccess = combiner.combine(lhsItr, rhsItr);
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h b/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h
index 2d5257f018b..f77517bfdc5 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_cells_iterator.h
@@ -34,7 +34,7 @@ public:
void next();
double cell() const { return _cells[_cellIdx]; }
const std::vector<size_t> &address() const { return _address; }
- const eval::ValueType &type() const { return _type; }
+ const eval::ValueType &fast_type() const { return _type; }
};
} // namespace vespalib::tensor
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp
index a22307c25ad..e9ee7d30692 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp
@@ -2,6 +2,7 @@
#include "dense_dot_product_function.h"
#include "dense_tensor_function_compiler.h"
+#include <vespa/eval/eval/operation.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <iostream>
@@ -36,30 +37,30 @@ isCompatibleTensorsForDotProduct(const ValueType &lhsType, const ValueType &rhsT
struct DotProductFunctionCompiler
{
- static TensorFunction::UP compile(Node_UP expr) {
- const Reduce *reduce = as<Reduce>(*expr);
+ static const TensorFunction &compile(const Node &expr, Stash &stash) {
+ const Reduce *reduce = as<Reduce>(expr);
if (reduce && (reduce->aggr == Aggr::SUM) && willReduceAllDimensions(reduce->dimensions)) {
- const Join *join = as<Join>(*reduce->tensor);
+ const Join *join = as<Join>(reduce->tensor);
if (join && (join->function == Mul::f)) {
- const Inject *lhsTensor = as<Inject>(*join->lhs_tensor);
- const Inject *rhsTensor = as<Inject>(*join->rhs_tensor);
+ const Inject *lhsTensor = as<Inject>(join->lhs_tensor);
+ const Inject *rhsTensor = as<Inject>(join->rhs_tensor);
if (lhsTensor && rhsTensor &&
isCompatibleTensorsForDotProduct(lhsTensor->result_type, rhsTensor->result_type))
{
- return std::make_unique<DenseDotProductFunction>(lhsTensor->tensor_id, rhsTensor->tensor_id);
+ return stash.create<DenseDotProductFunction>(lhsTensor->tensor_id, rhsTensor->tensor_id);
}
}
}
- return std::move(expr);
+ return expr;
}
};
}
-TensorFunction::UP
-DenseTensorFunctionCompiler::compile(Node_UP expr)
+const TensorFunction &
+DenseTensorFunctionCompiler::compile(const eval::tensor_function::Node &expr, Stash &stash)
{
- return DotProductFunctionCompiler::compile(std::move(expr));
+ return DotProductFunctionCompiler::compile(expr, stash);
}
} // namespace tensor
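
The compiler no longer consumes the expression tree: it either returns the input node unchanged or creates the optimized DenseDotProductFunction in the caller-supplied Stash, so the result is always a reference with no ownership transfer. A minimal sketch of the calling convention, assuming only the signatures shown above:

    #include <vespa/eval/eval/tensor_function.h>
    #include <vespa/eval/tensor/dense/dense_tensor_function_compiler.h>
    #include <vespa/vespalib/util/stash.h>

    using vespalib::Stash;
    using vespalib::eval::TensorFunction;
    using vespalib::tensor::DenseTensorFunctionCompiler;

    // The returned reference points either into the expression tree itself or
    // into the stash, so both must outlive any use of the compiled function.
    const TensorFunction &optimize(const vespalib::eval::tensor_function::Node &expr, Stash &stash) {
        return DenseTensorFunctionCompiler::compile(expr, stash);
    }
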
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h
index ef940bf38f9..d5ba4e4f7a7 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h
@@ -5,6 +5,9 @@
#include <vespa/eval/eval/tensor_function.h>
namespace vespalib {
+
+class Stash;
+
namespace tensor {
/**
@@ -13,7 +16,7 @@ namespace tensor {
*/
struct DenseTensorFunctionCompiler
{
- static eval::TensorFunction::UP compile(eval::tensor_function::Node_UP expr);
+ static const eval::TensorFunction &compile(const eval::tensor_function::Node &expr, Stash &stash);
};
} // namespace tensor
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp
index c6fc04bb27b..9f608921c05 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_reduce.hpp
@@ -94,7 +94,7 @@ template <typename Function>
DenseTensor::UP
reduce(const DenseTensorView &tensor, const vespalib::string &dimensionToRemove, Function &&func)
{
- DimensionReducer reducer(tensor.type(), dimensionToRemove);
+ DimensionReducer reducer(tensor.fast_type(), dimensionToRemove);
return reducer.reduceCells(tensor.cellsRef(), func);
}
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp
index 4f3e49f8ec1..4402b5b0ae0 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.cpp
@@ -49,7 +49,7 @@ calcCellsSize(const eval::ValueType &type)
void
checkCellsSize(const DenseTensorView &arg)
{
- auto cellsSize = calcCellsSize(arg.type());
+ auto cellsSize = calcCellsSize(arg.fast_type());
if (arg.cellsRef().size() != cellsSize) {
throw IllegalStateException(make_string("wrong cell size, "
"expected=%zu, "
@@ -63,14 +63,14 @@ void
checkDimensions(const DenseTensorView &lhs, const DenseTensorView &rhs,
vespalib::stringref operation)
{
- if (lhs.type() != rhs.type()) {
+ if (lhs.fast_type() != rhs.fast_type()) {
throw IllegalStateException(make_string("mismatching dimensions for "
"dense tensor %s, "
"lhs dimensions = '%s', "
"rhs dimensions = '%s'",
operation.c_str(),
- dimensionsAsString(lhs.type()).c_str(),
- dimensionsAsString(rhs.type()).c_str()));
+ dimensionsAsString(lhs.fast_type()).c_str(),
+ dimensionsAsString(rhs.fast_type()).c_str()));
}
checkCellsSize(lhs);
checkCellsSize(rhs);
@@ -96,7 +96,7 @@ joinDenseTensors(const DenseTensorView &lhs, const DenseTensorView &rhs,
++rhsCellItr;
}
assert(rhsCellItr == rhs.cellsRef().cend());
- return std::make_unique<DenseTensor>(lhs.type(),
+ return std::make_unique<DenseTensor>(lhs.fast_type(),
std::move(cells));
}
@@ -132,7 +132,7 @@ bool sameCells(DenseTensorView::CellsRef lhs, DenseTensorView::CellsRef rhs)
DenseTensorView::DenseTensorView(const DenseTensor &rhs)
- : _typeRef(rhs.type()),
+ : _typeRef(rhs.fast_type()),
_cellsRef(rhs.cellsRef())
{
}
@@ -260,7 +260,7 @@ void
buildAddress(const DenseTensorCellsIterator &itr, TensorSpec::Address &address)
{
auto addressItr = itr.address().begin();
- for (const auto &dim : itr.type().dimensions()) {
+ for (const auto &dim : itr.fast_type().dimensions()) {
address.emplace(std::make_pair(dim.name, TensorSpec::Label(*addressItr++)));
}
assert(addressItr == itr.address().end());
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h
index aa447eb42af..472cc58ad6b 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_view.h
@@ -42,7 +42,7 @@ public:
: _typeRef(type_in),
_cellsRef()
{}
- const eval::ValueType &type() const { return _typeRef; }
+ const eval::ValueType &fast_type() const { return _typeRef; }
const CellsRef &cellsRef() const { return _cellsRef; }
bool operator==(const DenseTensorView &rhs) const;
CellsIterator cellsIterator() const { return CellsIterator(_typeRef, _cellsRef); }
diff --git a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp
index 5da7165af2a..71b7824ee5d 100644
--- a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp
+++ b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.cpp
@@ -22,13 +22,13 @@ MutableDenseTensorView::MutableValueType::MutableValueType(ValueType type_in)
MutableDenseTensorView::MutableValueType::~MutableValueType() {}
MutableDenseTensorView::MutableDenseTensorView(ValueType type_in)
- : DenseTensorView(_concreteType.type(), CellsRef()),
+ : DenseTensorView(_concreteType.fast_type(), CellsRef()),
_concreteType(type_in)
{
}
MutableDenseTensorView::MutableDenseTensorView(ValueType type_in, CellsRef cells_in)
- : DenseTensorView(_concreteType.type(), cells_in),
+ : DenseTensorView(_concreteType.fast_type(), cells_in),
_concreteType(type_in)
{
}
diff --git a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h
index e856885d0fa..7eee3a9483c 100644
--- a/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h
+++ b/eval/src/vespa/eval/tensor/dense/mutable_dense_tensor_view.h
@@ -23,7 +23,7 @@ private:
public:
MutableValueType(eval::ValueType type_in);
~MutableValueType();
- const eval::ValueType &type() const { return _type; }
+ const eval::ValueType &fast_type() const { return _type; }
void setUnboundDimensions(const uint32_t *unboundDimSizeBegin, const uint32_t *unboundDimSizeEnd) {
const uint32_t *unboundDimSizePtr = unboundDimSizeBegin;
for (auto unboundDimSize : _unboundDimSizes) {
diff --git a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
index bf522bcaed4..a2d600aa0c9 100644
--- a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
+++ b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
@@ -24,9 +24,9 @@ makeValueType(std::vector<eval::ValueType::Dimension> &&dimensions) {
void
DenseBinaryFormat::serialize(nbostream &stream, const DenseTensor &tensor)
{
- stream.putInt1_4Bytes(tensor.type().dimensions().size());
+ stream.putInt1_4Bytes(tensor.fast_type().dimensions().size());
size_t cellsSize = 1;
- for (const auto &dimension : tensor.type().dimensions()) {
+ for (const auto &dimension : tensor.fast_type().dimensions()) {
stream.writeSmallString(dimension.name);
stream.putInt1_4Bytes(dimension.size);
cellsSize *= dimension.size;
diff --git a/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h b/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h
index b0c75655ce5..33000d4889d 100644
--- a/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h
+++ b/eval/src/vespa/eval/tensor/sparse/direct_sparse_tensor_builder.h
@@ -127,7 +127,7 @@ public:
insertCell(address.getAddressRef(), value, [](double, double) -> double { abort(); });
}
- eval::ValueType &type() { return _type; }
+ eval::ValueType &fast_type() { return _type; }
Cells &cells() { return _cells; }
};
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h
index ad460f4849c..8f5f8066352 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.h
@@ -37,7 +37,7 @@ public:
SparseTensor(eval::ValueType &&type_in,
Cells &&cells_in, Stash &&stash_in);
const Cells &cells() const { return _cells; }
- const eval::ValueType &type() const { return _type; }
+ const eval::ValueType &fast_type() const { return _type; }
bool operator==(const SparseTensor &rhs) const;
eval::ValueType combineDimensionsWith(const SparseTensor &rhs) const;
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp
index cb22afc8fd5..4528c8ef1df 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_apply.hpp
@@ -16,7 +16,7 @@ std::unique_ptr<Tensor>
apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func)
{
DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs));
- TensorAddressCombiner addressCombiner(lhs.type(), rhs.type());
+ TensorAddressCombiner addressCombiner(lhs.fast_type(), rhs.fast_type());
for (const auto &lhsCell : lhs.cells()) {
for (const auto &rhsCell : rhs.cells()) {
bool combineSuccess = addressCombiner.combine(lhsCell.first,
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
index 35ae6b7544b..b4c9d511d09 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
@@ -90,9 +90,9 @@ SparseTensorMatch::slowMatch(const TensorImplType &lhs,
{
std::vector<AddressOp> ops;
SparseTensorAddressBuilder addressBuilder;
- SparseTensorAddressPadder addressPadder(_builder.type(),
- lhs.type());
- buildTransformOps(ops, lhs.type(), rhs.type());
+ SparseTensorAddressPadder addressPadder(_builder.fast_type(),
+ lhs.fast_type());
+ buildTransformOps(ops, lhs.fast_type(), rhs.fast_type());
for (const auto &lhsCell : lhs.cells()) {
if (!transformAddress(addressBuilder, lhsCell.first, ops)) {
continue;
@@ -110,8 +110,8 @@ SparseTensorMatch::SparseTensorMatch(const TensorImplType &lhs,
const TensorImplType &rhs)
: Parent(lhs.combineDimensionsWith(rhs))
{
- if ((lhs.type().dimensions().size() == rhs.type().dimensions().size()) &&
- (lhs.type().dimensions().size() == _builder.type().dimensions().size())) {
+ if ((lhs.fast_type().dimensions().size() == rhs.fast_type().dimensions().size()) &&
+ (lhs.fast_type().dimensions().size() == _builder.fast_type().dimensions().size())) {
// Ensure that first tensor to fastMatch has fewest cells.
if (lhs.cells().size() <= rhs.cells().size()) {
fastMatch(lhs, rhs);
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp
index 30b359eb73e..53ab8116255 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_reduce.hpp
@@ -45,11 +45,11 @@ reduce(const SparseTensor &tensor,
if (dimensions.empty()) {
return reduceAll(tensor, func);
}
- DirectTensorBuilder<SparseTensor> builder(tensor.type().reduce(dimensions));
- if (builder.type().dimensions().empty()) {
+ DirectTensorBuilder<SparseTensor> builder(tensor.fast_type().reduce(dimensions));
+ if (builder.fast_type().dimensions().empty()) {
return reduceAll(tensor, builder, func);
}
- TensorAddressReducer addressReducer(tensor.type(), dimensions);
+ TensorAddressReducer addressReducer(tensor.fast_type(), dimensions);
for (const auto &cell : tensor.cells()) {
addressReducer.reduce(cell.first);
builder.insertCell(addressReducer.getAddressRef(), cell.second, func);
diff --git a/eval/src/vespa/eval/tensor/tensor.h b/eval/src/vespa/eval/tensor/tensor.h
index 3b3d7ce4a70..80afbbf52ff 100644
--- a/eval/src/vespa/eval/tensor/tensor.h
+++ b/eval/src/vespa/eval/tensor/tensor.h
@@ -31,6 +31,7 @@ struct Tensor : public eval::Tensor
Tensor();
virtual ~Tensor() {}
virtual const eval::ValueType &getType() const = 0;
+ virtual const eval::ValueType &type() const override { return getType(); }
virtual double sum() const = 0;
virtual double as_double() const final override { return sum(); }
virtual Tensor::UP add(const Tensor &arg) const = 0;
diff --git a/eval/src/vespa/eval/tensor/tensor_apply.cpp b/eval/src/vespa/eval/tensor/tensor_apply.cpp
index f6ee7492b05..7c518d0516f 100644
--- a/eval/src/vespa/eval/tensor/tensor_apply.cpp
+++ b/eval/src/vespa/eval/tensor/tensor_apply.cpp
@@ -8,7 +8,7 @@ namespace tensor {
template <class TensorT>
TensorApply<TensorT>::TensorApply(const TensorImplType &tensor,
const CellFunction &func)
- : Parent(tensor.type())
+ : Parent(tensor.fast_type())
{
for (const auto &cell : tensor.cells()) {
_builder.insertCell(cell.first, func.apply(cell.second));
diff --git a/eval/src/vespa/eval/tensor/tensor_mapper.cpp b/eval/src/vespa/eval/tensor/tensor_mapper.cpp
index 7c2c72abd46..25b369c246d 100644
--- a/eval/src/vespa/eval/tensor/tensor_mapper.cpp
+++ b/eval/src/vespa/eval/tensor/tensor_mapper.cpp
@@ -69,7 +69,7 @@ mapAddress(const TensorAddress &address)
{
_addressBuilder.clear();
TensorAddressElementIterator<TensorAddress> addressIterator(address);
- for (const auto &dimension : _builder.type().dimensions()) {
+ for (const auto &dimension : _builder.fast_type().dimensions()) {
if (addressIterator.skipToDimension(dimension.name)) {
_addressBuilder.add(addressIterator.label());
addressIterator.next();
diff --git a/eval/src/vespa/eval/tensor/tensor_operation.h b/eval/src/vespa/eval/tensor/tensor_operation.h
index abf58641549..6975c21c448 100644
--- a/eval/src/vespa/eval/tensor/tensor_operation.h
+++ b/eval/src/vespa/eval/tensor/tensor_operation.h
@@ -28,17 +28,17 @@ protected:
public:
TensorOperation()
: _builder(),
- _type(_builder.type()),
+ _type(_builder.fast_type()),
_cells(_builder.cells())
{}
TensorOperation(const eval::ValueType &type)
: _builder(type),
- _type(_builder.type()),
+ _type(_builder.fast_type()),
_cells(_builder.cells())
{}
TensorOperation(const eval::ValueType &type, const Cells &cells)
: _builder(type, cells),
- _type(_builder.type()),
+ _type(_builder.fast_type()),
_cells(_builder.cells())
{}
Tensor::UP result() {
diff --git a/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp b/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp
index 534854732c7..7ad97a6e84e 100644
--- a/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp
+++ b/eval/src/vespa/eval/tensor/wrapped_simple_tensor.cpp
@@ -20,7 +20,7 @@ WrappedSimpleTensor::equals(const Tensor &arg) const
vespalib::string
WrappedSimpleTensor::toString() const
{
- return eval::SimpleTensorEngine::ref().to_string(_tensor);
+ return toSpec().to_string();
}
eval::TensorSpec