-rw-r--r--  document/src/vespa/document/update/tensor_modify_update.cpp |   3
-rw-r--r--  document/src/vespa/document/update/tensor_remove_update.cpp |   3
-rw-r--r--  eval/src/apps/eval_expr/eval_expr.cpp |   2
-rw-r--r--  eval/src/tests/eval/fast_value/fast_value_test.cpp |   3
-rw-r--r--  eval/src/tests/eval/gen_spec/gen_spec_test.cpp |   6
-rw-r--r--  eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp |   2
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp |   6
-rw-r--r--  eval/src/tests/eval/reference_operations/reference_operations_test.cpp |   5
-rw-r--r--  eval/src/tests/eval/simple_value/simple_value_test.cpp |  14
-rw-r--r--  eval/src/tests/eval/tensor_function/tensor_function_test.cpp |  16
-rw-r--r--  eval/src/tests/eval/value_codec/value_codec_test.cpp |   3
-rw-r--r--  eval/src/tests/eval/value_type/value_type_test.cpp | 135
-rw-r--r--  eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp |   7
-rw-r--r--  eval/src/tests/instruction/generic_concat/generic_concat_test.cpp |   8
-rw-r--r--  eval/src/tests/instruction/generic_create/generic_create_test.cpp |   2
-rw-r--r--  eval/src/tests/instruction/generic_join/generic_join_test.cpp |   8
-rw-r--r--  eval/src/tests/instruction/generic_map/generic_map_test.cpp |   8
-rw-r--r--  eval/src/tests/instruction/generic_merge/generic_merge_test.cpp |   8
-rw-r--r--  eval/src/tests/instruction/generic_peek/generic_peek_test.cpp |   6
-rw-r--r--  eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp |   5
-rw-r--r--  eval/src/tests/instruction/generic_rename/generic_rename_test.cpp |   6
-rw-r--r--  eval/src/tests/streamed/value/streamed_value_test.cpp |  14
-rw-r--r--  eval/src/vespa/eval/eval/fast_value.cpp |   4
-rw-r--r--  eval/src/vespa/eval/eval/fast_value.hpp |  11
-rw-r--r--  eval/src/vespa/eval/eval/function.cpp |   2
-rw-r--r--  eval/src/vespa/eval/eval/node_types.cpp |   4
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.cpp |   4
-rw-r--r--  eval/src/vespa/eval/eval/test/gen_spec.cpp |   9
-rw-r--r--  eval/src/vespa/eval/eval/test/gen_spec.h |   1
-rw-r--r--  eval/src/vespa/eval/eval/test/tensor_conformance.cpp |   6
-rw-r--r--  eval/src/vespa/eval/eval/value.cpp |   6
-rw-r--r--  eval/src/vespa/eval/eval/value.h |  28
-rw-r--r--  eval/src/vespa/eval/eval/value_codec.cpp |   2
-rw-r--r--  eval/src/vespa/eval/eval/value_type.cpp | 207
-rw-r--r--  eval/src/vespa/eval/eval/value_type.h |  29
-rw-r--r--  eval/src/vespa/eval/eval/value_type_spec.cpp |  10
-rw-r--r--  eval/src/vespa/eval/instruction/generic_join.cpp |  15
-rw-r--r--  eval/src/vespa/eval/instruction/generic_map.cpp |  17
-rw-r--r--  eval/src/vespa/eval/instruction/generic_reduce.cpp |  13
-rw-r--r--  eval/src/vespa/eval/instruction/join_with_number_function.cpp |   2
-rw-r--r--  eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp |   4
-rw-r--r--  eval/src/vespa/eval/instruction/mixed_map_function.cpp |   2
-rw-r--r--  eval/src/vespa/eval/instruction/pow_as_map_optimizer.cpp |   2
-rw-r--r--  eval/src/vespa/eval/instruction/remove_trivial_dimension_optimizer.cpp |   2
-rw-r--r--  eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp |   6
-rw-r--r--  eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp |   4
-rw-r--r--  eval/src/vespa/eval/onnx/onnx_wrapper.cpp |   2
-rw-r--r--  searchcore/src/tests/proton/matching/request_context/request_context_test.cpp |   2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp |   3
-rw-r--r--  searchlib/src/tests/features/constant/constant_test.cpp |   4
-rw-r--r--  searchlib/src/tests/features/tensor/tensor_test.cpp |   2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/configconverter.cpp |   3
-rw-r--r--  searchlib/src/vespa/searchlib/features/attributefeature.cpp |   4
-rw-r--r--  searchlib/src/vespa/searchlib/features/queryfeature.cpp |   2
-rw-r--r--  searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h |   3
-rw-r--r--  searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp |  11
-rw-r--r--  searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp |  11
-rw-r--r--  streamingvisitors/src/vespa/searchvisitor/hitcollector.cpp |   2
58 files changed, 368 insertions, 341 deletions
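
The core API change in this commit: ValueType::tensor_type(dims, cell_type) is replaced by ValueType::make_type(cell_type, dims), and dimension-less types with non-double cells become error types instead of being normalized to 'double'. A minimal sketch of the new behavior, based on the tests below (illustrative, not part of the commit):

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    int main() {
        using namespace vespalib::eval;
        // make_type takes the cell type first, then the dimension list
        ValueType dense = ValueType::make_type(CellType::FLOAT, {{"x", 10}});
        assert(dense.to_spec() == "tensor<float>(x[10])");
        // a dimension-less type is only valid with double cells ...
        assert(ValueType::make_type(CellType::DOUBLE, {}).is_double());
        // ... scalar float no longer decays to double; it is now an error
        assert(ValueType::make_type(CellType::FLOAT, {}).is_error());
    }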
diff --git a/document/src/vespa/document/update/tensor_modify_update.cpp b/document/src/vespa/document/update/tensor_modify_update.cpp
index 791c3efe872..1358f2c8e46 100644
--- a/document/src/vespa/document/update/tensor_modify_update.cpp
+++ b/document/src/vespa/document/update/tensor_modify_update.cpp
@@ -22,6 +22,7 @@ using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
using vespalib::make_string;
using vespalib::eval::ValueType;
+using vespalib::eval::CellType;
using vespalib::eval::FastValueBuilderFactory;
using join_fun_t = double (*)(double, double);
@@ -77,7 +78,7 @@ convertToCompatibleType(const TensorDataType &tensorType)
for (const auto &dim : tensorType.getTensorType().dimensions()) {
list.emplace_back(dim.name);
}
- return std::make_unique<const TensorDataType>(ValueType::tensor_type(std::move(list), tensorType.getTensorType().cell_type()));
+ return std::make_unique<const TensorDataType>(ValueType::make_type(tensorType.getTensorType().cell_type(), std::move(list)));
}
}
diff --git a/document/src/vespa/document/update/tensor_remove_update.cpp b/document/src/vespa/document/update/tensor_remove_update.cpp
index 5c8c5c07116..0b1096fce0e 100644
--- a/document/src/vespa/document/update/tensor_remove_update.cpp
+++ b/document/src/vespa/document/update/tensor_remove_update.cpp
@@ -19,6 +19,7 @@ using vespalib::IllegalStateException;
using vespalib::make_string;
using vespalib::eval::Value;
using vespalib::eval::ValueType;
+using vespalib::eval::CellType;
using vespalib::eval::FastValueBuilderFactory;
namespace document {
@@ -34,7 +35,7 @@ convertToCompatibleType(const TensorDataType &tensorType)
list.emplace_back(dim.name);
}
}
- return std::make_unique<const TensorDataType>(ValueType::tensor_type(std::move(list), tensorType.getTensorType().cell_type()));
+ return std::make_unique<const TensorDataType>(ValueType::make_type(tensorType.getTensorType().cell_type(), std::move(list)));
}
}
diff --git a/eval/src/apps/eval_expr/eval_expr.cpp b/eval/src/apps/eval_expr/eval_expr.cpp
index f5e5c5d0dfd..12c94c6e68e 100644
--- a/eval/src/apps/eval_expr/eval_expr.cpp
+++ b/eval/src/apps/eval_expr/eval_expr.cpp
@@ -22,7 +22,7 @@ int main(int argc, char **argv) {
auto type = ValueType::from_spec(result.type());
if (type.is_error()) {
fprintf(stdout, "error\n");
- } else if (type.is_scalar()) {
+ } else if (type.is_double()) {
fprintf(stdout, "%.32g\n", result.as_double());
} else {
fprintf(stdout, "%s\n", result.to_string().c_str());
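
The old Value-level virtuals are retired in the same spirit; scalar checks now go through the type. A hedged sketch of the predicate migration applied throughout this commit:

    #include <vespa/eval/eval/value.h>

    // was: result.is_double() (a virtual) and type().is_tensor()
    bool is_scalar_result(const vespalib::eval::Value &result) {
        return result.type().is_double();       // scalar <=> dimension-less double
    }
    bool is_tensor_result(const vespalib::eval::Value &result) {
        return result.type().has_dimensions();  // replaces type().is_tensor()
    }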
diff --git a/eval/src/tests/eval/fast_value/fast_value_test.cpp b/eval/src/tests/eval/fast_value/fast_value_test.cpp
index 70c534b2010..2acb6c448c9 100644
--- a/eval/src/tests/eval/fast_value/fast_value_test.cpp
+++ b/eval/src/tests/eval/fast_value/fast_value_test.cpp
@@ -160,7 +160,8 @@ TEST(FastValueBuilderFactoryTest, fast_values_can_be_copied) {
auto factory = FastValueBuilderFactory::get();
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, factory);
std::unique_ptr<Value> copy = factory.copy(*value);
TensorSpec actual = spec_from_value(*copy);
diff --git a/eval/src/tests/eval/gen_spec/gen_spec_test.cpp b/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
index ba169b72489..9d8eb419a67 100644
--- a/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
+++ b/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
@@ -61,9 +61,8 @@ TEST(GenSpecTest, scalar_double) {
EXPECT_EQ(GenSpec(5.0).gen(), scalar_5);
}
-TEST(GenSpecTest, not_scalar_float_just_yet) {
- EXPECT_EQ(GenSpec().cells_float().gen(), scalar_1);
- EXPECT_EQ(GenSpec(5.0).cells_float().gen(), scalar_5);
+TEST(GenSpecTest, scalar_float_is_bad_scalar) {
+ EXPECT_TRUE(GenSpec().cells_float().bad_scalar());
}
//-----------------------------------------------------------------------------
@@ -126,7 +125,6 @@ GenSpec dbl() { return GenSpec().cells_double(); }
TEST(GenSpecTest, value_type) {
EXPECT_EQ(dbl().type().to_spec(), "double");
- EXPECT_EQ(flt().type().to_spec(), "double"); // NB
EXPECT_EQ(dbl().idx("x", 10).type().to_spec(), "tensor(x[10])");
EXPECT_EQ(flt().idx("x", 10).type().to_spec(), "tensor<float>(x[10])");
EXPECT_EQ(dbl().map("y", {}).type().to_spec(), "tensor(y{})");
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index 871a564bfa4..ba73a578f6f 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -80,7 +80,7 @@ struct MyEvalTest : test::EvalSpec::EvalTest {
InterpretedFunction ifun(factory, function, node_types);
InterpretedFunction::Context ictx(ifun);
const Value &result_value = ifun.eval(ictx, params);
- report_result(result_value.is_double(), result_value.as_double(), expected_result, description);
+ report_result(result_value.type().is_double(), result_value.as_double(), expected_result, description);
}
};
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index 89c37af0a83..a5a17ea15a0 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -293,8 +293,8 @@ TEST("require that tensor concat resolves correct type") {
}
TEST("require that tensor cell_cast resolves correct type") {
- TEST_DO(verify("cell_cast(double,float)", "double")); // NB
- TEST_DO(verify("cell_cast(float,double)", "double"));
+ TEST_DO(verify("cell_cast(double,double)", "double"));
+ TEST_DO(verify("cell_cast(double,float)", "error"));
TEST_DO(verify("cell_cast(tensor<double>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),double)", "tensor<double>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
@@ -304,7 +304,7 @@ TEST("require that double only expressions can be detected") {
auto plain_fun = Function::parse("1+2");
auto complex_fun = Function::parse("reduce(a,sum)");
NodeTypes plain_types(*plain_fun, {});
- NodeTypes complex_types(*complex_fun, {ValueType::tensor_type({{"x"}})});
+ NodeTypes complex_types(*complex_fun, {ValueType::make_type(CellType::DOUBLE, {{"x"}})});
EXPECT_TRUE(plain_types.get_type(plain_fun->root()).is_double());
EXPECT_TRUE(complex_types.get_type(complex_fun->root()).is_double());
EXPECT_TRUE(plain_types.all_types_are_double());
diff --git a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
index fdbf375fa3a..2edbefc7717 100644
--- a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
+++ b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
@@ -144,8 +144,9 @@ TEST(ReferenceCellCastTest, cell_cast_works) {
for (CellType from_type: CellTypeUtils::list_types()) {
for (CellType to_type: CellTypeUtils::list_types()) {
for (const auto &gen: gen_list) {
- TensorSpec input = gen.cpy().cells(from_type);
- TensorSpec expect = gen.cpy().cells(to_type);
+ auto input = gen.cpy().cells(from_type);
+ auto expect = gen.cpy().cells(to_type);
+ if (input.bad_scalar() || expect.bad_scalar()) continue;
auto actual = ReferenceOperations::cell_cast(input, to_type);
EXPECT_EQ(actual, expect);
}
diff --git a/eval/src/tests/eval/simple_value/simple_value_test.cpp b/eval/src/tests/eval/simple_value/simple_value_test.cpp
index 974f87a6055..57c71903bf1 100644
--- a/eval/src/tests/eval/simple_value/simple_value_test.cpp
+++ b/eval/src/tests/eval/simple_value/simple_value_test.cpp
@@ -69,7 +69,8 @@ TensorSpec simple_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_
TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
TensorSpec actual = spec_from_value(*value);
EXPECT_EQ(actual, expect);
@@ -80,7 +81,8 @@ TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
TEST(SimpleValueTest, simple_values_can_be_copied) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
std::unique_ptr<Value> copy = SimpleValueBuilderFactory::get().copy(*value);
TensorSpec actual = spec_from_value(*copy);
@@ -131,11 +133,13 @@ TEST(SimpleValueTest, new_generic_join_works_for_simple_values) {
const auto l = join_layouts[i].cpy().seq(N_16ths);
const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::join(lhs, rhs, fun);
auto actual = simple_value_join(lhs, rhs, fun);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
index f1bd900b350..c457f68a614 100644
--- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
+++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
@@ -229,9 +229,9 @@ TEST("require that full tensor reduction works") {
size_t a_id = ctx.add_tensor(ctx.make_tensor_reduce_input());
const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
- EXPECT_EQUAL(ValueType::from_spec("double"), fun.result_type());
+ EXPECT_EQUAL(ValueType::double_type(), fun.result_type());
const Value &result = ctx.eval(fun);
- EXPECT_TRUE(result.is_double());
+ EXPECT_TRUE(result.type().is_double());
EXPECT_EQUAL(21.0, result.as_double());
}
@@ -300,8 +300,8 @@ TEST("require that tensor create works") {
size_t b_id = ctx.add_tensor(ctx.make_double(2.0));
Value::UP my_const = ctx.make_double(3.0);
Value::UP expect = ctx.make_vector({1.0, 2.0, 3.0});
- const auto &a = inject(ValueType::from_spec("double"), a_id, ctx.stash);
- const auto &b = inject(ValueType::from_spec("double"), b_id, ctx.stash);
+ const auto &a = inject(ValueType::double_type(), a_id, ctx.stash);
+ const auto &b = inject(ValueType::double_type(), b_id, ctx.stash);
const auto &c = const_value(*my_const, ctx.stash);
const auto &fun = create(ValueType::from_spec("tensor(x[3])"),
{
@@ -321,8 +321,8 @@ TEST("require that single value tensor peek works") {
size_t b_id = ctx.add_tensor(ctx.make_double(1000.0));
Value::UP my_const = ctx.make_mixed_tensor(1.0, 2.0, 3.0, 4.0);
Value::UP expect = ctx.make_vector({2.0, 3.0, 0.0});
- const auto &a = inject(ValueType::from_spec("double"), a_id, ctx.stash);
- const auto &b = inject(ValueType::from_spec("double"), b_id, ctx.stash);
+ const auto &a = inject(ValueType::double_type(), a_id, ctx.stash);
+ const auto &b = inject(ValueType::double_type(), b_id, ctx.stash);
const auto &t = const_value(*my_const, ctx.stash);
const auto &peek1 = peek(t, {{"x", "foo"}, {"y", a}}, ctx.stash);
const auto &peek2 = peek(t, {{"x", "bar"}, {"y", size_t(0)}}, ctx.stash);
@@ -354,13 +354,13 @@ TEST("require that automatic string conversion tensor peek works") {
EvalCtx ctx(simple_factory);
size_t a_id = ctx.add_tensor(ctx.make_double(1.0));
Value::UP my_const = ctx.make_vector({1.0, 2.0, 3.0}, "x", true);
- const auto &a = inject(ValueType::from_spec("double"), a_id, ctx.stash);
+ const auto &a = inject(ValueType::double_type(), a_id, ctx.stash);
const auto &t = const_value(*my_const, ctx.stash);
const auto &fun = peek(t, {{"x", a}}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_TRUE(fun.result_type().is_double());
const Value &result = ctx.eval(fun);
- EXPECT_TRUE(result.is_double());
+ EXPECT_TRUE(result.type().is_double());
EXPECT_EQUAL(2.0, result.as_double());
}
diff --git a/eval/src/tests/eval/value_codec/value_codec_test.cpp b/eval/src/tests/eval/value_codec/value_codec_test.cpp
index 434ad0b2a53..0bb1bcfb337 100644
--- a/eval/src/tests/eval/value_codec/value_codec_test.cpp
+++ b/eval/src/tests/eval/value_codec/value_codec_test.cpp
@@ -33,7 +33,8 @@ const std::vector<GenSpec> layouts = {
TEST(ValueCodecTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, factory);
TensorSpec actual = spec_from_value(*value);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index c1b25d48bf7..e0c90166fa2 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -36,15 +36,8 @@ TEST("require that DOUBLE value type can be created") {
EXPECT_EQUAL(t.dimensions().size(), 0u);
}
-TEST("require that FLOAT value type can be created") {
- ValueType t = ValueType::make_type(CellType::FLOAT, {});
- EXPECT_FALSE(t.is_error());
- EXPECT_TRUE(t.cell_type() == CellType::FLOAT);
- EXPECT_EQUAL(t.dimensions().size(), 0u);
-}
-
TEST("require that TENSOR value type can be created") {
- ValueType t = ValueType::tensor_type({{"x", 10},{"y"}});
+ ValueType t = ValueType::make_type(CellType::DOUBLE, {{"x", 10},{"y"}});
EXPECT_FALSE(t.is_error());
EXPECT_TRUE(t.cell_type() == CellType::DOUBLE);
ASSERT_EQUAL(t.dimensions().size(), 2u);
@@ -55,7 +48,7 @@ TEST("require that TENSOR value type can be created") {
}
TEST("require that float TENSOR value type can be created") {
- ValueType t = ValueType::tensor_type({{"x", 10},{"y"}}, CellType::FLOAT);
+ ValueType t = ValueType::make_type(CellType::FLOAT, {{"x", 10},{"y"}});
EXPECT_FALSE(t.is_error());
EXPECT_TRUE(t.cell_type() == CellType::FLOAT);
ASSERT_EQUAL(t.dimensions().size(), 2u);
@@ -66,7 +59,7 @@ TEST("require that float TENSOR value type can be created") {
}
TEST("require that TENSOR value type sorts dimensions") {
- ValueType t = ValueType::tensor_type({{"x", 10}, {"z", 30}, {"y"}});
+ ValueType t = ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"z", 30}, {"y"}});
EXPECT_FALSE(t.is_error());
EXPECT_TRUE(t.cell_type() == CellType::DOUBLE);
ASSERT_EQUAL(t.dimensions().size(), 3u);
@@ -78,19 +71,16 @@ TEST("require that TENSOR value type sorts dimensions") {
EXPECT_EQUAL(t.dimensions()[2].size, 30u);
}
-TEST("require that 'tensor<float>()' is normalized to 'double'") {
- ValueType t = ValueType::tensor_type({}, CellType::FLOAT);
- EXPECT_FALSE(t.is_error());
- EXPECT_TRUE(t.cell_type() == CellType::DOUBLE);
- EXPECT_EQUAL(t.dimensions().size(), 0u);
+TEST("require that non-double scalar values are not allowed") {
+ EXPECT_TRUE(ValueType::make_type(CellType::FLOAT, {}).is_error());
}
TEST("require that use of zero-size dimensions result in error types") {
- EXPECT_TRUE(ValueType::tensor_type({{"x", 0}}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::DOUBLE, {{"x", 0}}).is_error());
}
TEST("require that duplicate dimension names result in error types") {
- EXPECT_TRUE(ValueType::tensor_type({{"x"}, {"x"}}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"x"}}).is_error());
}
//-----------------------------------------------------------------------------
@@ -116,18 +106,17 @@ void verify_not_equal(const ValueType &a, const ValueType &b) {
TEST("require that value types can be compared") {
TEST_DO(verify_equal(ValueType::error_type(), ValueType::error_type()));
TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::double_type()));
- TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::tensor_type({{"x"}})));
+ TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
TEST_DO(verify_equal(ValueType::double_type(), ValueType::double_type()));
- TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::make_type(CellType::FLOAT, {})));
- TEST_DO(verify_equal(ValueType::double_type(), ValueType::tensor_type({})));
- TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::tensor_type({{"x"}})));
- TEST_DO(verify_equal(ValueType::tensor_type({{"x"}, {"y"}}), ValueType::tensor_type({{"y"}, {"x"}})));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x"}, {"y"}}), ValueType::tensor_type({{"x"}, {"y"}, {"z"}})));
- TEST_DO(verify_equal(ValueType::tensor_type({{"x", 10}, {"y", 20}}), ValueType::tensor_type({{"y", 20}, {"x", 10}})));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x", 10}, {"y", 20}}), ValueType::tensor_type({{"x", 10}, {"y", 10}})));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x", 10}}), ValueType::tensor_type({{"x"}})));
- TEST_DO(verify_equal(ValueType::tensor_type({{"x", 10}}, CellType::FLOAT), ValueType::tensor_type({{"x", 10}}, CellType::FLOAT)));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x", 10}}, CellType::DOUBLE), ValueType::tensor_type({{"x", 10}}, CellType::FLOAT)));
+ TEST_DO(verify_equal(ValueType::double_type(), ValueType::make_type(CellType::DOUBLE, {})));
+ TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y"}}), ValueType::make_type(CellType::DOUBLE, {{"y"}, {"x"}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y"}}), ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y"}, {"z"}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 20}}), ValueType::make_type(CellType::DOUBLE, {{"y", 20}, {"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 20}}), ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
}
//-----------------------------------------------------------------------------
@@ -135,46 +124,45 @@ TEST("require that value types can be compared") {
TEST("require that value type can make spec") {
EXPECT_EQUAL("error", ValueType::error_type().to_spec());
EXPECT_EQUAL("double", ValueType::double_type().to_spec());
- EXPECT_EQUAL("float", ValueType::make_type(CellType::FLOAT, {}).to_spec());
- EXPECT_EQUAL("double", ValueType::tensor_type({}).to_spec());
- EXPECT_EQUAL("double", ValueType::tensor_type({}, CellType::FLOAT).to_spec());
- EXPECT_EQUAL("tensor(x{})", ValueType::tensor_type({{"x"}}).to_spec());
- EXPECT_EQUAL("tensor(y[10])", ValueType::tensor_type({{"y", 10}}).to_spec());
- EXPECT_EQUAL("tensor(x{},y[10],z[5])", ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}).to_spec());
- EXPECT_EQUAL("tensor<float>(x{})", ValueType::tensor_type({{"x"}}, CellType::FLOAT).to_spec());
- EXPECT_EQUAL("tensor<float>(y[10])", ValueType::tensor_type({{"y", 10}}, CellType::FLOAT).to_spec());
- EXPECT_EQUAL("tensor<float>(x{},y[10],z[5])", ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}, CellType::FLOAT).to_spec());
+ EXPECT_EQUAL("error", ValueType::make_type(CellType::FLOAT, {}).to_spec());
+ EXPECT_EQUAL("double", ValueType::make_type(CellType::DOUBLE, {}).to_spec());
+ EXPECT_EQUAL("tensor(x{})", ValueType::make_type(CellType::DOUBLE, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor(y[10])", ValueType::make_type(CellType::DOUBLE, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor(x{},y[10],z[5])", ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
+ EXPECT_EQUAL("tensor<float>(x{})", ValueType::make_type(CellType::FLOAT, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor<float>(y[10])", ValueType::make_type(CellType::FLOAT, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor<float>(x{},y[10],z[5])", ValueType::make_type(CellType::FLOAT, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
}
//-----------------------------------------------------------------------------
TEST("require that value type spec can be parsed") {
- EXPECT_EQUAL(ValueType::double_type(), ValueType::from_spec("double"));
- EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {}), ValueType::from_spec("float"));
- EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec("tensor()"));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}}), ValueType::from_spec("tensor(x{})"));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec("tensor(y[10])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}), ValueType::from_spec("tensor(x{},y[10],z[5])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec("tensor<double>(y[10])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}, CellType::FLOAT), ValueType::from_spec("tensor<float>(y[10])"));
+ EXPECT_EQUAL(ValueType::double_type(), type("double"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type("tensor()"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type("tensor<double>()"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}}), type("tensor(x{})"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type("tensor(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}), type("tensor(x{},y[10],z[5])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type("tensor<double>(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {{"y", 10}}), type("tensor<float>(y[10])"));
}
TEST("require that value type spec can be parsed with extra whitespace") {
- EXPECT_EQUAL(ValueType::double_type(), ValueType::from_spec(" double "));
- EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {}), ValueType::from_spec(" float "));
- EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec(" tensor ( ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}}), ValueType::from_spec(" tensor ( x { } ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec(" tensor ( y [ 10 ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}),
- ValueType::from_spec(" tensor ( x { } , y [ 10 ] , z [ 5 ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec(" tensor < double > ( y [ 10 ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}, CellType::FLOAT), ValueType::from_spec(" tensor < float > ( y [ 10 ] ) "));
+ EXPECT_EQUAL(ValueType::double_type(), type(" double "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type(" tensor ( ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type(" tensor < double > ( ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}}), type(" tensor ( x { } ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type(" tensor ( y [ 10 ] ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}),
+ type(" tensor ( x { } , y [ 10 ] , z [ 5 ] ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type(" tensor < double > ( y [ 10 ] ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {{"y", 10}}), type(" tensor < float > ( y [ 10 ] ) "));
}
TEST("require that the unsorted dimension list can be obtained when parsing type spec") {
std::vector<ValueType::Dimension> unsorted;
auto type = ValueType::from_spec("tensor(y[10],z[5],x{})", unsorted);
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}), type);
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}), type);
ASSERT_EQUAL(unsorted.size(), 3u);
EXPECT_EQUAL(unsorted[0].name, "y");
EXPECT_EQUAL(unsorted[0].size, 10u);
@@ -207,6 +195,7 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec(" ").is_error());
EXPECT_TRUE(ValueType::from_spec("error").is_error());
EXPECT_TRUE(ValueType::from_spec("any").is_error());
+ EXPECT_TRUE(ValueType::from_spec("float").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<double>").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor() tensor()").is_error());
@@ -224,7 +213,8 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[10])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(z[])").is_error());
- EXPECT_TRUE(ValueType::from_spec("tensor<float16>(x[10])").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<float>()").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<int7>(x[10])").is_error());
}
struct ParseResult {
@@ -247,7 +237,7 @@ ParseResult::~ParseResult() = default;
TEST("require that we can parse a partial string into a type with the low-level API") {
ParseResult result("tensor(a[5]) , ");
- EXPECT_EQUAL(result.type, ValueType::tensor_type({{"a", 5}}));
+ EXPECT_EQUAL(result.type, ValueType::make_type(CellType::DOUBLE, {{"a", 5}}));
ASSERT_TRUE(result.after_inside());
EXPECT_EQUAL(*result.after, ',');
}
@@ -315,7 +305,7 @@ void verify_predicates(const ValueType &type,
{
EXPECT_EQUAL(type.is_error(), expect_error);
EXPECT_EQUAL(type.is_double(), expect_double);
- EXPECT_EQUAL(type.is_tensor(), expect_tensor);
+ EXPECT_EQUAL(type.has_dimensions(), expect_tensor);
EXPECT_EQUAL(type.is_sparse(), expect_sparse);
EXPECT_EQUAL(type.is_dense(), expect_dense);
}
@@ -507,8 +497,12 @@ void verify_cell_cast(const ValueType &type) {
if (type.is_error()) {
EXPECT_TRUE(res_type.is_error());
EXPECT_EQUAL(res_type, type);
- } else if (type.is_scalar()) {
- EXPECT_TRUE(res_type.is_double()); // NB
+ } else if (type.is_double()) {
+ if (cell_type == CellType::DOUBLE) {
+ EXPECT_TRUE(res_type.is_double());
+ } else {
+ EXPECT_TRUE(res_type.is_error());
+ }
} else {
EXPECT_FALSE(res_type.is_error());
EXPECT_EQUAL(int(res_type.cell_type()), int(cell_type));
@@ -519,7 +513,6 @@ void verify_cell_cast(const ValueType &type) {
TEST("require that value type cell cast works correctly") {
TEST_DO(verify_cell_cast(type("error")));
- TEST_DO(verify_cell_cast(type("float")));
TEST_DO(verify_cell_cast(type("double")));
TEST_DO(verify_cell_cast(type("tensor<float>(x[10])")));
TEST_DO(verify_cell_cast(type("tensor<double>(x[10])")));
@@ -548,4 +541,24 @@ TEST("require that cell type name recognition is strict") {
EXPECT_FALSE(value_type::cell_type_from_name("").has_value());
}
+TEST("require that map type inference works as expected") {
+ EXPECT_EQUAL(type("error").map(), type("error"));
+ EXPECT_EQUAL(type("double").map(), type("double"));
+ EXPECT_EQUAL(type("tensor(x[10])").map(), type("tensor(x[10])"));
+ EXPECT_EQUAL(type("tensor<float>(x{})").map(), type("tensor<float>(x{})"));
+}
+
+TEST("require that peek type inference works as expected") {
+ auto input1 = type("tensor(a[2],b{},c[3],d{},e[5])");
+ auto input2 = type("tensor<float>(a[2],b{},c[3],d{},e[5])");
+ EXPECT_EQUAL(type("error").peek({}), type("error"));
+ EXPECT_EQUAL(type("double").peek({}), type("error"));
+ EXPECT_EQUAL(input1.peek({}), type("error"));
+ EXPECT_EQUAL(input1.peek({"x"}), type("error"));
+ EXPECT_EQUAL(input1.peek({"a", "c", "e"}), type("tensor(b{},d{})"));
+ EXPECT_EQUAL(input2.peek({"b", "d"}), type("tensor<float>(a[2],c[3],e[5])"));
+ EXPECT_EQUAL(input1.peek({"a", "b", "c", "d", "e"}), type("double"));
+ EXPECT_EQUAL(input2.peek({"a", "b", "c", "d", "e"}), type("double"));
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp b/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp
index eb156bbe531..95db66a3b13 100644
--- a/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp
+++ b/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp
@@ -47,10 +47,13 @@ void test_generic_cell_cast_with(const ValueBuilderFactory &factory) {
for (const auto &layout : layouts) {
for (CellType in_type: CellTypeUtils::list_types()) {
for (CellType out_type: CellTypeUtils::list_types()) {
- TensorSpec lhs = layout.cpy().cells(in_type);
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ auto lhs = layout.cpy().cells(in_type);
+ auto gen_expect = layout.cpy().cells(out_type);
+ if (lhs.bad_scalar() || gen_expect.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::cell_cast(lhs, out_type);
auto actual = perform_generic_cell_cast(lhs, out_type, factory);
+ EXPECT_EQ(expect, gen_expect);
EXPECT_EQ(actual, expect);
}
}
diff --git a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
index 6b6a803a4b1..a74b0f99841 100644
--- a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
+++ b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
@@ -80,10 +80,12 @@ void test_generic_concat_with(const ValueBuilderFactory &factory) {
const auto l = concat_layouts[i];
const auto r = concat_layouts[i+1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
- SCOPED_TRACE(fmt("\n===\nin LHS: %s\nin RHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("\n===\nin LHS: %s\nin RHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto actual = perform_generic_concat(lhs, rhs, "y", factory);
auto expect = ReferenceOperations::concat(lhs, rhs, "y");
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_create/generic_create_test.cpp b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
index 843a292612d..9389c8401e9 100644
--- a/eval/src/tests/instruction/generic_create/generic_create_test.cpp
+++ b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
@@ -92,7 +92,7 @@ TensorSpec perform_generic_create(const TensorSpec &a, const ValueBuilderFactory
void test_generic_create_with(const ValueBuilderFactory &factory) {
for (const auto &layout : create_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec full = layout.cpy().cells(ct);
+ auto full = layout.cpy().cells(ct);
auto actual = perform_generic_create(full, factory);
auto expect = reference_create(full);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_join/generic_join_test.cpp b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
index 114881e6bee..a4f645c5dee 100644
--- a/eval/src/tests/instruction/generic_join/generic_join_test.cpp
+++ b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
@@ -109,11 +109,13 @@ TEST(GenericJoinTest, generic_join_works_for_simple_and_fast_values) {
const auto &l = join_layouts[i];
const auto &r = join_layouts[i+1];
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::join(lhs, rhs, fun);
auto simple = perform_generic_join(lhs, rhs, fun, SimpleValueBuilderFactory::get());
auto fast = perform_generic_join(lhs, rhs, fun, FastValueBuilderFactory::get());
diff --git a/eval/src/tests/instruction/generic_map/generic_map_test.cpp b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
index 56405eefdde..bfa7154968d 100644
--- a/eval/src/tests/instruction/generic_map/generic_map_test.cpp
+++ b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
@@ -36,8 +36,7 @@ const std::vector<GenSpec> map_layouts = {
TensorSpec perform_generic_map(const TensorSpec &a, map_fun_t func, const ValueBuilderFactory &factory)
{
auto lhs = value_from_spec(a, factory);
- // XXX for now:
- auto res_type = lhs->type();
+ auto res_type = lhs->type().map();
auto my_op = GenericMap::make_instruction(res_type, lhs->type(), func);
InterpretedFunction::EvalSingle single(factory, my_op);
return spec_from_value(single.eval(std::vector<Value::CREF>({*lhs})));
@@ -46,9 +45,10 @@ TensorSpec perform_generic_map(const TensorSpec &a, map_fun_t func, const ValueB
void test_generic_map_with(const ValueBuilderFactory &factory) {
for (const auto &layout : map_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec lhs = layout.cpy().cells(ct);
+ auto lhs = layout.cpy().cells(ct);
+ if (lhs.bad_scalar()) continue;
for (auto func : {operation::Floor::f, operation::Fabs::f, operation::Square::f, operation::Inv::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::map(lhs, func);
auto actual = perform_generic_map(lhs, func, factory);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
index 9fde59a7c86..701fb26d3ff 100644
--- a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
+++ b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
@@ -52,10 +52,12 @@ void test_generic_merge_with(const ValueBuilderFactory &factory) {
const auto l = merge_layouts[i];
const auto r = merge_layouts[i+1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
for (auto fun: {operation::Add::f, operation::Mul::f, operation::Sub::f, operation::Max::f}) {
auto expect = ReferenceOperations::merge(lhs, rhs, fun);
auto actual = perform_generic_merge(lhs, rhs, fun, factory);
diff --git a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
index 073df8be7e9..4b773b07734 100644
--- a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
+++ b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
@@ -150,7 +150,7 @@ void verify_peek_equal(const TensorSpec &input,
reduce_dims.push_back(dim_name);
}
if (reduce_dims.empty()) return;
- ValueType result_type = param_type.reduce(reduce_dims);
+ ValueType result_type = param_type.peek(reduce_dims);
auto expect = reference_peek(input, spec);
SCOPED_TRACE(fmt("peek input: %s\n peek spec: %s\n peek result %s\n",
input.to_string().c_str(),
@@ -195,8 +195,8 @@ void fill_dims_and_check(const TensorSpec &input,
void test_generic_peek_with(const ValueBuilderFactory &factory) {
for (const auto &layout : peek_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec input = layout.cpy().cells(ct);
- ValueType input_type = ValueType::from_spec(input.type());
+ auto input = layout.cpy().cells(ct);
+ ValueType input_type = input.type();
const auto &dims = input_type.dimensions();
PeekSpec spec;
fill_dims_and_check(input, spec, dims, factory);
diff --git a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
index 3babe80766a..e3eea84fdea 100644
--- a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
+++ b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
@@ -72,8 +72,9 @@ TEST(GenericReduceTest, sparse_reduce_plan_can_be_created) {
void test_generic_reduce_with(const ValueBuilderFactory &factory) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec input = layout.cpy().cells(ct);
- SCOPED_TRACE(fmt("tensor type: %s, num_cells: %zu", input.type().c_str(), input.cells().size()));
+ auto input = layout.cpy().cells(ct);
+ if (input.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("tensor type: %s, num_cells: %zu", input.gen().type().c_str(), input.gen().cells().size()));
for (Aggr aggr: {Aggr::SUM, Aggr::AVG, Aggr::MIN, Aggr::MAX}) {
SCOPED_TRACE(fmt("aggregator: %s", AggrNames::name_of(aggr)->c_str()));
auto t = layout.type();
diff --git a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
index 4edf2a0ca87..ca14149f1ff 100644
--- a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
+++ b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
@@ -112,13 +112,13 @@ TensorSpec perform_generic_rename(const TensorSpec &a,
void test_generic_rename_with(const ValueBuilderFactory &factory) {
for (const auto &layout : rename_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec lhs = layout.cpy().cells(ct);
- ValueType lhs_type = ValueType::from_spec(lhs.type());
+ auto lhs = layout.cpy().cells(ct);
+ ValueType lhs_type = lhs.type();
for (const auto & from_to : rename_from_to) {
ValueType renamed_type = lhs_type.rename(from_to.from, from_to.to);
if (renamed_type.is_error()) continue;
// printf("type %s -> %s\n", lhs_type.to_spec().c_str(), renamed_type.to_spec().c_str());
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::rename(lhs, from_to.from, from_to.to);
auto actual = perform_generic_rename(lhs, from_to, factory);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/streamed/value/streamed_value_test.cpp b/eval/src/tests/streamed/value/streamed_value_test.cpp
index a750ee88667..bb286dbfdc8 100644
--- a/eval/src/tests/streamed/value/streamed_value_test.cpp
+++ b/eval/src/tests/streamed/value/streamed_value_test.cpp
@@ -69,7 +69,8 @@ TensorSpec streamed_value_join(const TensorSpec &a, const TensorSpec &b, join_fu
TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
TensorSpec actual = spec_from_value(*value);
EXPECT_EQ(actual, expect);
@@ -80,7 +81,8 @@ TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec
TEST(StreamedValueTest, streamed_values_can_be_copied) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
std::unique_ptr<Value> copy = StreamedValueBuilderFactory::get().copy(*value);
TensorSpec actual = spec_from_value(*copy);
@@ -131,11 +133,13 @@ TEST(StreamedValueTest, new_generic_join_works_for_streamed_values) {
const auto l = join_layouts[i].cpy().seq(N_16ths);
const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Max::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::join(lhs, rhs, fun);
auto actual = streamed_value_join(lhs, rhs, fun);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/vespa/eval/eval/fast_value.cpp b/eval/src/vespa/eval/eval/fast_value.cpp
index 4b39fc48d9b..07d76bb6c97 100644
--- a/eval/src/vespa/eval/eval/fast_value.cpp
+++ b/eval/src/vespa/eval/eval/fast_value.cpp
@@ -15,8 +15,8 @@ struct CreateFastValueBuilderBase {
size_t num_mapped_dims, size_t subspace_size, size_t expected_subspaces)
{
assert(check_cell_type<T>(type.cell_type()));
- if (type.is_scalar()) {
- return std::make_unique<FastScalarBuilder<T>>();
+ if (type.is_double()) {
+ return std::make_unique<FastDoubleValueBuilder>();
} else if (num_mapped_dims == 0) {
return std::make_unique<FastDenseValue<T>>(type, subspace_size);
} else {
diff --git a/eval/src/vespa/eval/eval/fast_value.hpp b/eval/src/vespa/eval/eval/fast_value.hpp
index 69a496e9bff..d944377efc1 100644
--- a/eval/src/vespa/eval/eval/fast_value.hpp
+++ b/eval/src/vespa/eval/eval/fast_value.hpp
@@ -332,12 +332,11 @@ template <typename T> FastDenseValue<T>::~FastDenseValue() = default;
//-----------------------------------------------------------------------------
-template <typename T>
-struct FastScalarBuilder final : ValueBuilder<T> {
- T _value;
- ArrayRef<T> add_subspace(ConstArrayRef<vespalib::stringref>) final override { return ArrayRef<T>(&_value, 1); }
- ArrayRef<T> add_subspace(ConstArrayRef<string_id>) final override { return ArrayRef<T>(&_value, 1); };
- std::unique_ptr<Value> build(std::unique_ptr<ValueBuilder<T>>) final override { return std::make_unique<ScalarValue<T>>(_value); }
+struct FastDoubleValueBuilder final : ValueBuilder<double> {
+ double _value;
+ ArrayRef<double> add_subspace(ConstArrayRef<vespalib::stringref>) final override { return ArrayRef<double>(&_value, 1); }
+ ArrayRef<double> add_subspace(ConstArrayRef<string_id>) final override { return ArrayRef<double>(&_value, 1); };
+ std::unique_ptr<Value> build(std::unique_ptr<ValueBuilder<double>>) final override { return std::make_unique<DoubleValue>(_value); }
};
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/function.cpp b/eval/src/vespa/eval/eval/function.cpp
index b03c2c1ed24..580a9b120d5 100644
--- a/eval/src/vespa/eval/eval/function.cpp
+++ b/eval/src/vespa/eval/eval/function.cpp
@@ -777,7 +777,7 @@ bool maybe_parse_tensor_generator(ParseContext &ctx) {
ctx.restore_input_mark(my_mark);
return false;
}
- bool is_create = (type.is_tensor() && (ctx.get() == ':'));
+ bool is_create = (type.has_dimensions() && (ctx.get() == ':'));
bool is_lambda = (type.is_dense() && (ctx.get() == '('));
if (is_create) {
parse_tensor_create(ctx, type, dim_list);
diff --git a/eval/src/vespa/eval/eval/node_types.cpp b/eval/src/vespa/eval/eval/node_types.cpp
index 2df22d5433c..63da6d79c6f 100644
--- a/eval/src/vespa/eval/eval/node_types.cpp
+++ b/eval/src/vespa/eval/eval/node_types.cpp
@@ -109,7 +109,7 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser {
}
void resolve_op1(const Node &node) {
- bind(type(node.get_child(0)), node);
+ bind(type(node.get_child(0)).map(), node);
}
void resolve_op2(const Node &node) {
@@ -234,7 +234,7 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser {
}
}
}
- bind(param_type.reduce(dimensions), node);
+ bind(param_type.peek(dimensions), node);
}
void visit(const Add &node) override { resolve_op2(node); }
void visit(const Sub &node) override { resolve_op2(node); }
diff --git a/eval/src/vespa/eval/eval/tensor_function.cpp b/eval/src/vespa/eval/eval/tensor_function.cpp
index a93c1dea83e..5b8a99d25a0 100644
--- a/eval/src/vespa/eval/eval/tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/tensor_function.cpp
@@ -447,7 +447,7 @@ const TensorFunction &reduce(const TensorFunction &child, Aggr aggr, const std::
}
const TensorFunction &map(const TensorFunction &child, map_fun_t function, Stash &stash) {
- ValueType result_type = child.result_type();
+ ValueType result_type = child.result_type().map();
return stash.create<Map>(result_type, child, function);
}
@@ -485,7 +485,7 @@ const TensorFunction &peek(const TensorFunction &param, const std::map<vespalib:
dimensions.push_back(dim_spec.first);
}
assert(!dimensions.empty());
- ValueType result_type = param.result_type().reduce(dimensions);
+ ValueType result_type = param.result_type().peek(dimensions);
return stash.create<Peek>(result_type, param, spec);
}
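
Result types for map and peek are now derived via the new ValueType::map() and ValueType::peek() helpers instead of reusing the input type or reduce(). A sketch of their behavior as exercised by the value_type tests above (illustrative):

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    int main() {
        using vespalib::eval::ValueType;
        ValueType t = ValueType::from_spec("tensor<float>(x[10],y{})");
        assert(t.map() == t);                   // float is a valid result cell type
        // peek() removes the named dimensions; removing all of them yields double
        assert(t.peek({"x"}) == ValueType::from_spec("tensor<float>(y{})"));
        assert(t.peek({"x", "y"}).is_double());
        // an empty or unknown dimension list is an error
        assert(t.peek({}).is_error());
        assert(t.peek({"z"}).is_error());
    }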
diff --git a/eval/src/vespa/eval/eval/test/gen_spec.cpp b/eval/src/vespa/eval/eval/test/gen_spec.cpp
index 2f28f559051..a308cbeff46 100644
--- a/eval/src/vespa/eval/eval/test/gen_spec.cpp
+++ b/eval/src/vespa/eval/eval/test/gen_spec.cpp
@@ -60,6 +60,12 @@ GenSpec &GenSpec::operator=(const GenSpec &other) = default;
GenSpec::~GenSpec() = default;
+bool
+GenSpec::bad_scalar() const
+{
+ return (_dims.empty() && (_cells != CellType::DOUBLE));
+}
+
ValueType
GenSpec::type() const
{
@@ -67,7 +73,7 @@ GenSpec::type() const
for (const auto &dim: _dims) {
dim_types.push_back(dim.type());
}
- auto type = ValueType::tensor_type(dim_types, _cells);
+ auto type = ValueType::make_type(_cells, dim_types);
assert(!type.is_error());
return type;
}
@@ -77,6 +83,7 @@ GenSpec::gen() const
{
size_t idx = 0;
TensorSpec::Address addr;
+ assert(!bad_scalar());
TensorSpec result(type().to_spec());
std::function<void(size_t)> add_cells = [&](size_t dim_idx) {
if (dim_idx == _dims.size()) {
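
GenSpec::bad_scalar() is the helper the test loops above use to skip the now-invalid dimension-less float combination. A usage sketch, assuming the test helper API shown in this diff:

    using namespace vespalib::eval;
    using namespace vespalib::eval::test;

    GenSpec spec = GenSpec().cells(CellType::FLOAT);  // no dimensions, float cells
    if (spec.bad_scalar()) {
        // true exactly when there are no dimensions and the cell type
        // is not DOUBLE; gen() would assert on such a spec
    } else {
        TensorSpec ts = spec.gen();
    }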
diff --git a/eval/src/vespa/eval/eval/test/gen_spec.h b/eval/src/vespa/eval/eval/test/gen_spec.h
index b3a3916967b..bbc1663a11f 100644
--- a/eval/src/vespa/eval/eval/test/gen_spec.h
+++ b/eval/src/vespa/eval/eval/test/gen_spec.h
@@ -128,6 +128,7 @@ public:
_seq = seq_in;
return *this;
}
+ bool bad_scalar() const;
ValueType type() const;
TensorSpec gen() const;
operator TensorSpec() const { return gen(); }
diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
index 117e8c9b149..17ad75ae455 100644
--- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
+++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
@@ -560,8 +560,10 @@ struct TestContext {
void test_cell_cast(const GenSpec &a) {
for (CellType cell_type: CellTypeUtils::list_types()) {
+ auto expect = a.cpy().cells(cell_type);
+ if (expect.bad_scalar()) continue;
vespalib::string expr = fmt("cell_cast(a,%s)", value_type::cell_type_to_name(cell_type).c_str());
- TEST_DO(verify_result(factory, expr, {a}, a.cpy().cells(cell_type)));
+ TEST_DO(verify_result(factory, expr, {a}, expect));
}
}
@@ -570,8 +572,8 @@ struct TestContext {
for (CellType cell_type: CellTypeUtils::list_types()) {
gen_list.push_back(GenSpec(-3).cells(cell_type));
}
+ TEST_DO(test_cell_cast(GenSpec(42)));
for (const auto &gen: gen_list) {
- TEST_DO(test_cell_cast(gen));
TEST_DO(test_cell_cast(gen.cpy().idx("x", 10)));
TEST_DO(test_cell_cast(gen.cpy().map("x", 10, 1)));
TEST_DO(test_cell_cast(gen.cpy().map("x", 4, 1).idx("y", 4)));
diff --git a/eval/src/vespa/eval/eval/value.cpp b/eval/src/vespa/eval/eval/value.cpp
index 9cd5ef45765..b799658cfae 100644
--- a/eval/src/vespa/eval/eval/value.cpp
+++ b/eval/src/vespa/eval/eval/value.cpp
@@ -58,11 +58,7 @@ Value::as_double() const
return typify_invoke<1,TypifyCellType,MySum>(type().cell_type(), cells());
}
-template <typename T>
-ValueType ScalarValue<T>::_type = ValueType::make_type(get_cell_type<T>(), {});
-
-template class ScalarValue<double>;
-template class ScalarValue<float>;
+ValueType DoubleValue::_type = ValueType::double_type();
namespace {
diff --git a/eval/src/vespa/eval/eval/value.h b/eval/src/vespa/eval/eval/value.h
index ee850cdd47e..fcdaa7131c7 100644
--- a/eval/src/vespa/eval/eval/value.h
+++ b/eval/src/vespa/eval/eval/value.h
@@ -22,7 +22,6 @@ struct Value {
virtual const ValueType &type() const = 0;
virtual ~Value() {}
-// ---- new interface enabling separation of values and operations
// Root lookup structure for mapping labels to dense subspace indexes
struct Index {
@@ -59,16 +58,9 @@ struct Value {
};
virtual TypedCells cells() const = 0;
virtual const Index &index() const = 0;
-// --- end of new interface
-
virtual MemoryUsage get_memory_usage() const = 0;
-
-// --- old interface that may be (partially) removed in the future
- virtual bool is_double() const { return type().is_double(); }
- virtual bool is_tensor() const { return type().is_tensor(); }
virtual double as_double() const;
bool as_bool() const { return (as_double() != 0.0); }
-// --- end of old interface
};
/**
@@ -84,28 +76,21 @@ public:
std::unique_ptr<View> create_view(ConstArrayRef<size_t> dims) const override;
};
-template <typename T>
-class ScalarValue final : public Value
+class DoubleValue final : public Value
{
private:
- T _value;
+ double _value;
static ValueType _type;
public:
- ScalarValue(T value) : _value(value) {}
- TypedCells cells() const final override { return TypedCells(ConstArrayRef<T>(&_value, 1)); }
+ DoubleValue(double value) : _value(value) {}
+ TypedCells cells() const final override { return TypedCells(ConstArrayRef<double>(&_value, 1)); }
const Index &index() const final override { return TrivialIndex::get(); }
- MemoryUsage get_memory_usage() const final override { return self_memory_usage<ScalarValue<T>>(); }
- bool is_double() const final override { return std::is_same_v<T,double>; }
+ MemoryUsage get_memory_usage() const final override { return self_memory_usage<DoubleValue>(); }
double as_double() const final override { return _value; }
const ValueType &type() const final override { return _type; }
static const ValueType &shared_type() { return _type; }
};
-extern template class ScalarValue<double>;
-extern template class ScalarValue<float>;
-
-using DoubleValue = ScalarValue<double>;
-
/**
* A generic value without any mapped dimensions referencing its
* components without owning anything.
@@ -227,7 +212,6 @@ protected:
}
-VESPA_CAN_SKIP_DESTRUCTION(::vespalib::eval::ScalarValue<double>);
-VESPA_CAN_SKIP_DESTRUCTION(::vespalib::eval::ScalarValue<float>);
+VESPA_CAN_SKIP_DESTRUCTION(::vespalib::eval::DoubleValue);
VESPA_CAN_SKIP_DESTRUCTION(::vespalib::eval::DenseValueView);
VESPA_CAN_SKIP_DESTRUCTION(::vespalib::eval::ValueView);
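
With ScalarValue<T> removed, DoubleValue is now a concrete class and the only dimension-less Value implementation. A minimal sketch, assuming the headers as changed in this diff:

    #include <vespa/eval/eval/value.h>
    #include <cassert>

    int main() {
        using namespace vespalib::eval;
        DoubleValue v(42.0);
        assert(v.type().is_double());   // shared, dimension-less double type
        assert(v.as_double() == 42.0);
        assert(v.cells().size == 1);    // a single double cell backs the value
    }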
diff --git a/eval/src/vespa/eval/eval/value_codec.cpp b/eval/src/vespa/eval/eval/value_codec.cpp
index 0016dfc694f..0c71d0335ce 100644
--- a/eval/src/vespa/eval/eval/value_codec.cpp
+++ b/eval/src/vespa/eval/eval/value_codec.cpp
@@ -119,7 +119,7 @@ ValueType decode_type(nbostream &input, const Format &format) {
if (dim_list.empty()) {
assert(cell_type == CellType::DOUBLE);
}
- return ValueType::tensor_type(std::move(dim_list), cell_type);
+ return ValueType::make_type(cell_type, std::move(dim_list));
}
size_t maybe_decode_num_blocks(nbostream &input, bool has_mapped_dims, const Format &format) {
diff --git a/eval/src/vespa/eval/eval/value_type.cpp b/eval/src/vespa/eval/eval/value_type.cpp
index b70edef7153..b6b17ff73b2 100644
--- a/eval/src/vespa/eval/eval/value_type.cpp
+++ b/eval/src/vespa/eval/eval/value_type.cpp
@@ -2,7 +2,9 @@
#include "value_type.h"
#include "value_type_spec.h"
+#include <vespa/vespalib/util/typify.h>
#include <algorithm>
+#include <cassert>
namespace vespalib::eval {
@@ -11,28 +13,13 @@ namespace {
using Dimension = ValueType::Dimension;
using DimensionList = std::vector<Dimension>;
-template <typename A, typename B>
-CellType unify() {
- using type = typename UnifyCellTypes<A,B>::type;
- return get_cell_type<type>();
-}
-
-template <typename A>
-CellType unify(CellType b) {
- switch (b) {
- case CellType::DOUBLE: return unify<A,double>();
- case CellType::FLOAT: return unify<A,float>();
- }
- abort();
-}
-
-CellType unify(CellType a, CellType b) {
- switch (a) {
- case CellType::DOUBLE: return unify<double>(b);
- case CellType::FLOAT: return unify<float>(b);
+struct Unify {
+ template <typename A, typename B>
+ static CellType invoke() {
+ using type = typename UnifyCellTypes<A,B>::type;
+ return get_cell_type<type>();
}
- abort();
-}
+};
size_t my_dimension_index(const std::vector<Dimension> &list, const vespalib::string &name) {
for (size_t idx = 0; idx < list.size(); ++idx) {
@@ -65,6 +52,28 @@ bool verify_dimensions(const DimensionList &dimensions) {
return true;
}
+struct MyReduce {
+ bool has_error;
+ std::vector<Dimension> dimensions;
+ MyReduce(const std::vector<Dimension> &dim_list, const std::vector<vespalib::string> &rem_list)
+ : has_error(false), dimensions()
+ {
+ if (!rem_list.empty()) {
+ size_t removed = 0;
+ for (const Dimension &dim: dim_list) {
+ if (std::find(rem_list.begin(), rem_list.end(), dim.name) == rem_list.end()) {
+ dimensions.push_back(dim);
+ } else {
+ ++removed;
+ }
+ }
+ if (removed != rem_list.size()) {
+ has_error = true;
+ }
+ }
+ }
+};
+
struct MyJoin {
bool mismatch;
DimensionList dimensions;
@@ -142,9 +151,54 @@ struct Renamer {
constexpr ValueType::Dimension::size_type ValueType::Dimension::npos;
+ValueType
+ValueType::normalize_type(CellType cell_type, std::vector<Dimension> dimensions, bool decay)
+{
+ if (decay) {
+ if ((cell_type != CellType::FLOAT) && (cell_type != CellType::DOUBLE)) {
+ // The result of any calculation should be 'at least' float
+ cell_type = CellType::FLOAT;
+ }
+ }
+ if (dimensions.empty()) {
+ // all scalar results should be double
+ cell_type = CellType::DOUBLE;
+ }
+ return make_type(cell_type, std::move(dimensions));
+}
+
+ValueType
+ValueType::error_if(bool has_error, ValueType else_type)
+{
+ if (has_error) {
+ return error_type();
+ } else {
+ return else_type;
+ }
+}
+
+CellType
+ValueType::unify_cell_types(const ValueType &a, const ValueType &b) {
+ if (a.is_double()) {
+ return b.cell_type();
+ } else if (b.is_double()) {
+ return a.cell_type();
+ }
+ return typify_invoke<2,TypifyCellType,Unify>(a.cell_type(), b.cell_type());
+}
+
ValueType::~ValueType() = default;
bool
+ValueType::is_double() const {
+ if (!_error && _dimensions.empty()) {
+ assert(_cell_type == CellType::DOUBLE);
+ return true;
+ }
+ return false;
+}
+
+bool
ValueType::is_sparse() const
{
if (dimensions().empty()) {
@@ -246,26 +300,25 @@ ValueType::dimension_names() const
}
ValueType
+ValueType::map() const
+{
+ return error_if(_error, normalize_type(_cell_type, _dimensions, true));
+}
+
+ValueType
ValueType::reduce(const std::vector<vespalib::string> &dimensions_in) const
{
- if (is_error()) {
- return error_type();
- } else if (dimensions_in.empty()) {
- return double_type();
- }
- size_t removed = 0;
- std::vector<Dimension> result;
- for (const Dimension &d: _dimensions) {
- if (std::find(dimensions_in.begin(), dimensions_in.end(), d.name) == dimensions_in.end()) {
- result.push_back(d);
- } else {
- ++removed;
- }
- }
- if (removed != dimensions_in.size()) {
- return error_type();
- }
- return tensor_type(std::move(result), _cell_type);
+ MyReduce result(_dimensions, dimensions_in);
+ return error_if(_error || result.has_error,
+ normalize_type(_cell_type, std::move(result.dimensions), true));
+}
+
+ValueType
+ValueType::peek(const std::vector<vespalib::string> &dimensions_in) const
+{
+ MyReduce result(_dimensions, dimensions_in);
+ return error_if(_error || result.has_error || dimensions_in.empty(),
+ normalize_type(_cell_type, std::move(result.dimensions), false));
}
ValueType
@@ -280,25 +333,23 @@ ValueType::rename(const std::vector<vespalib::string> &from,
for (const auto &dim: _dimensions) {
dim_list.emplace_back(renamer.rename(dim.name), dim.size);
}
- if (!renamer.matched_all()) {
- return error_type();
- }
- return tensor_type(dim_list, _cell_type);
+ return error_if(!renamer.matched_all(),
+ make_type(_cell_type, std::move(dim_list)));
}
ValueType
ValueType::cell_cast(CellType to_cell_type) const
{
- if (is_error()) {
- return error_type();
- }
- // TODO: return make_type(to_cell_type, _dimensions);
- return tensor_type(_dimensions, to_cell_type);
+ return error_if(_error, make_type(to_cell_type, _dimensions));
}
ValueType
ValueType::make_type(CellType cell_type, std::vector<Dimension> dimensions_in)
{
+ if (dimensions_in.empty() && (cell_type != CellType::DOUBLE)) {
+ // Note: all scalar values must have cell_type double
+ return error_type();
+ }
sort_dimensions(dimensions_in);
if (!verify_dimensions(dimensions_in)) {
return error_type();
@@ -307,15 +358,6 @@ ValueType::make_type(CellType cell_type, std::vector<Dimension> dimensions_in)
}
ValueType
-ValueType::tensor_type(std::vector<Dimension> dimensions_in, CellType cell_type)
-{
- if (dimensions_in.empty()) {
- return double_type();
- }
- return make_type(cell_type, std::move(dimensions_in));
-}
-
-ValueType
ValueType::from_spec(const vespalib::string &spec)
{
return value_type::from_spec(spec);
@@ -336,66 +378,35 @@ ValueType::to_spec() const
ValueType
ValueType::join(const ValueType &lhs, const ValueType &rhs)
{
- if (lhs.is_error() || rhs.is_error()) {
- return error_type();
- } else if (lhs.is_double()) {
- return rhs;
- } else if (rhs.is_double()) {
- return lhs;
- }
+ auto cell_type = unify_cell_types(lhs, rhs);
MyJoin result(lhs._dimensions, rhs._dimensions);
- if (result.mismatch) {
- return error_type();
- }
- return tensor_type(std::move(result.dimensions), unify(lhs._cell_type, rhs._cell_type));
+ return error_if(lhs._error || rhs._error || result.mismatch,
+ normalize_type(cell_type, std::move(result.dimensions), true));
}
ValueType
ValueType::merge(const ValueType &lhs, const ValueType &rhs)
{
- if ((lhs.is_error() != rhs.is_error()) ||
- (lhs.dimensions() != rhs.dimensions()))
- {
- return error_type();
- }
- if (lhs.dimensions().empty()) {
- return lhs;
- }
- return tensor_type(lhs.dimensions(), unify(lhs._cell_type, rhs._cell_type));
-}
-
-CellType
-ValueType::unify_cell_types(const ValueType &a, const ValueType &b) {
- if (a.is_double()) {
- return b.cell_type();
- } else if (b.is_double()) {
- return a.cell_type();
- }
- return unify(a.cell_type(), b.cell_type());
+ auto cell_type = unify_cell_types(lhs, rhs);
+ return error_if(lhs._error || rhs._error || (lhs._dimensions != rhs._dimensions),
+ normalize_type(cell_type, lhs._dimensions, true));
}
ValueType
ValueType::concat(const ValueType &lhs, const ValueType &rhs, const vespalib::string &dimension)
{
- if (lhs.is_error() || rhs.is_error()) {
- return error_type();
- }
+ auto cell_type = unify_cell_types(lhs, rhs);
MyJoin result(lhs._dimensions, rhs._dimensions, dimension);
- if (result.mismatch) {
- return error_type();
- }
if (!find_dimension(result.dimensions, dimension)) {
result.dimensions.emplace_back(dimension, 2);
}
- return tensor_type(std::move(result.dimensions), unify_cell_types(lhs, rhs));
+ return error_if(lhs._error || rhs._error || result.mismatch,
+ make_type(cell_type, std::move(result.dimensions)));
}
ValueType
ValueType::either(const ValueType &one, const ValueType &other) {
- if (one != other) {
- return error_type();
- }
- return one;
+ return error_if(one != other, one);
}
std::ostream &
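Taken together, normalize_type and error_if give the type algebra three invariants: errors propagate, results decay to at least float (a no-op while only float and double exist), and dimensionless results are always double. A hedged sketch of the observable behavior, assuming the from_spec/to_spec round-trip shown below in value_type_spec.cpp:

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    using vespalib::eval::ValueType;

    void type_algebra_sketch() {
        auto t = ValueType::from_spec("tensor<float>(x[3],y{})");
        assert(t.map().to_spec() == "tensor<float>(x[3],y{})");   // map keeps dimensions
        assert(t.reduce({"y"}).to_spec() == "tensor<float>(x[3])");
        assert(t.reduce({"x","y"}).to_spec() == "double");        // full reduce -> double
        assert(t.reduce({"z"}).is_error());                       // unknown dimension
        assert(t.peek({}).is_error());                            // peek needs dimensions
        assert(t.peek({"y"}).to_spec() == "tensor<float>(x[3])"); // peek does not decay
    }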
diff --git a/eval/src/vespa/eval/eval/value_type.h b/eval/src/vespa/eval/eval/value_type.h
index 247912b274a..a42e83b2e97 100644
--- a/eval/src/vespa/eval/eval/value_type.h
+++ b/eval/src/vespa/eval/eval/value_type.h
@@ -45,6 +45,10 @@ private:
ValueType(CellType cell_type_in, std::vector<Dimension> &&dimensions_in)
: _error(false), _cell_type(cell_type_in), _dimensions(std::move(dimensions_in)) {}
+ static ValueType normalize_type(CellType cell_type, std::vector<Dimension> dimensions, bool decay);
+ static ValueType error_if(bool has_error, ValueType else_type);
+ static CellType unify_cell_types(const ValueType &a, const ValueType &b);
+
public:
ValueType(ValueType &&) noexcept = default;
ValueType(const ValueType &) = default;
@@ -53,21 +57,10 @@ public:
~ValueType();
CellType cell_type() const { return _cell_type; }
bool is_error() const { return _error; }
- bool is_scalar() const { return _dimensions.empty(); }
+ bool is_double() const;
+ bool has_dimensions() const { return !_dimensions.empty(); }
bool is_sparse() const;
bool is_dense() const;
-
- // TODO: remove is_double and is_tensor
- // is_tensor should no longer be useful
- // is_double should be replaced with is_scalar where you also
- // handle cell type correctly (free float values will
- // not be introduced by type-resolving just yet, so
- // is_double and is_scalar will be interchangeable in
- // most cases for a while)
-
- bool is_double() const { return (!_error && is_scalar() && (_cell_type == CellType::DOUBLE)); }
- bool is_tensor() const { return (!_dimensions.empty()); }
-
size_t count_indexed_dimensions() const;
size_t count_mapped_dimensions() const;
size_t dense_subspace_size() const;
@@ -83,27 +76,21 @@ public:
}
bool operator!=(const ValueType &rhs) const { return !(*this == rhs); }
+ ValueType map() const;
ValueType reduce(const std::vector<vespalib::string> &dimensions_in) const;
+ ValueType peek(const std::vector<vespalib::string> &dimensions_in) const;
ValueType rename(const std::vector<vespalib::string> &from,
const std::vector<vespalib::string> &to) const;
ValueType cell_cast(CellType to_cell_type) const;
static ValueType error_type() { return ValueType(); }
static ValueType make_type(CellType cell_type, std::vector<Dimension> dimensions_in);
-
- // TODO: remove double_type and tensor_type and use make_type
- // directly. Currently the tensor_type function contains
- // protection against ending up with scalar float values.
-
static ValueType double_type() { return make_type(CellType::DOUBLE, {}); }
- static ValueType tensor_type(std::vector<Dimension> dimensions_in, CellType cell_type = CellType::DOUBLE);
-
static ValueType from_spec(const vespalib::string &spec);
static ValueType from_spec(const vespalib::string &spec, std::vector<ValueType::Dimension> &unsorted);
vespalib::string to_spec() const;
static ValueType join(const ValueType &lhs, const ValueType &rhs);
static ValueType merge(const ValueType &lhs, const ValueType &rhs);
- static CellType unify_cell_types(const ValueType &a, const ValueType &b);
static ValueType concat(const ValueType &lhs, const ValueType &rhs, const vespalib::string &dimension);
static ValueType either(const ValueType &one, const ValueType &other);
};
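With tensor_type removed, make_type is the single factory and now carries the scalar protection itself. A hedged sketch of the slimmed-down surface:

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    using vespalib::eval::CellType;
    using vespalib::eval::ValueType;

    void factory_sketch() {
        auto d = ValueType::double_type();            // the one remaining scalar type
        assert(d.is_double() && !d.has_dimensions());
        assert(ValueType::make_type(CellType::FLOAT, {}).is_error()); // no float scalars
        auto t = ValueType::make_type(CellType::FLOAT, {{"x", 2}});
        assert(t.to_spec() == "tensor<float>(x[2])");
    }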
diff --git a/eval/src/vespa/eval/eval/value_type_spec.cpp b/eval/src/vespa/eval/eval/value_type_spec.cpp
index 470da4f63a3..b518ccd1b30 100644
--- a/eval/src/vespa/eval/eval/value_type_spec.cpp
+++ b/eval/src/vespa/eval/eval/value_type_spec.cpp
@@ -192,9 +192,7 @@ parse_spec(const char *pos_in, const char *end_in, const char *&pos_out,
if (type_name == "error") {
return ValueType::error_type();
} else if (type_name == "double") {
- return ValueType::make_type(CellType::DOUBLE, {});
- } else if (type_name == "float") {
- return ValueType::make_type(CellType::FLOAT, {});
+ return ValueType::double_type();
} else if (type_name == "tensor") {
CellType cell_type = parse_cell_type(ctx);
std::vector<ValueType::Dimension> list = parse_dimension_list(ctx);
@@ -202,7 +200,7 @@ parse_spec(const char *pos_in, const char *end_in, const char *&pos_out,
if (unsorted != nullptr) {
*unsorted = list;
}
- return ValueType::tensor_type(std::move(list), cell_type);
+ return ValueType::make_type(cell_type, std::move(list));
}
} else {
ctx.fail();
@@ -241,8 +239,8 @@ to_spec(const ValueType &type)
size_t cnt = 0;
if (type.is_error()) {
os << "error";
- } else if (type.is_scalar()) {
- os << cell_type_to_name(type.cell_type());
+ } else if (type.is_double()) {
+ os << "double";
} else {
os << "tensor";
if (type.cell_type() != CellType::DOUBLE) {
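One consequence of dropping the standalone "float" branch, assuming the fall-through ends in ctx.fail() as elsewhere in this parser: scalar specs can only be spelled "double" now, while float remains available as a tensor cell type. A hedged sketch:

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    using vespalib::eval::CellType;
    using vespalib::eval::ValueType;

    void spec_parsing_sketch() {
        assert(ValueType::from_spec("double").is_double());
        assert(ValueType::from_spec("float").is_error());   // no longer a valid spec
        assert(ValueType::from_spec("tensor<float>(x[2])").cell_type() == CellType::FLOAT);
    }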
diff --git a/eval/src/vespa/eval/instruction/generic_join.cpp b/eval/src/vespa/eval/instruction/generic_join.cpp
index 313aa38f753..4d528057f6f 100644
--- a/eval/src/vespa/eval/instruction/generic_join.cpp
+++ b/eval/src/vespa/eval/instruction/generic_join.cpp
@@ -113,19 +113,22 @@ void my_dense_join_op(State &state, uint64_t param_in) {
//-----------------------------------------------------------------------------
-template <typename LCT, typename RCT, typename OCT, typename Fun>
-void my_scalar_join_op(State &state, uint64_t param_in) {
+template <typename Fun>
+void my_double_join_op(State &state, uint64_t param_in) {
Fun fun(unwrap_param<JoinParam>(param_in).function);
- state.pop_pop_push(state.stash.create<ScalarValue<OCT>>(fun(state.peek(1).cells().typify<LCT>()[0],
- state.peek(0).cells().typify<RCT>()[0])));
+ state.pop_pop_push(state.stash.create<DoubleValue>(fun(state.peek(1).as_double(),
+ state.peek(0).as_double())));
};
//-----------------------------------------------------------------------------
struct SelectGenericJoinOp {
template <typename LCT, typename RCT, typename OCT, typename Fun> static auto invoke(const JoinParam &param) {
- if (param.res_type.is_scalar()) {
- return my_scalar_join_op<LCT,RCT,OCT,Fun>;
+ if (param.res_type.is_double()) {
+ assert((std::is_same_v<LCT,double>));
+ assert((std::is_same_v<RCT,double>));
+ assert((std::is_same_v<OCT,double>));
+ return my_double_join_op<Fun>;
}
if (param.sparse_plan.sources.empty()) {
return my_dense_join_op<LCT,RCT,OCT,Fun>;
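The new asserts encode a type-resolver invariant rather than a local check: join only yields a double result when both inputs are double, since scalars are always double and joining with a tensor keeps its dimensions. A hedged sketch using the algebra from value_type.cpp:

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    using vespalib::eval::ValueType;

    void double_join_invariant_sketch() {
        auto d = ValueType::double_type();
        auto t = ValueType::from_spec("tensor<float>(x[2])");
        assert(ValueType::join(d, d).is_double());
        assert(ValueType::join(d, t).to_spec() == "tensor<float>(x[2])"); // never scalar
    }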
diff --git a/eval/src/vespa/eval/instruction/generic_map.cpp b/eval/src/vespa/eval/instruction/generic_map.cpp
index 0144a39a58a..4f6780c2276 100644
--- a/eval/src/vespa/eval/instruction/generic_map.cpp
+++ b/eval/src/vespa/eval/instruction/generic_map.cpp
@@ -32,17 +32,17 @@ void my_generic_map_op(State &state, uint64_t param_in) {
state.pop_push(result_ref);
}
-template <typename CT, typename Func>
-void my_scalar_map_op(State &state, uint64_t param_in) {
- Func function(to_map_fun(param_in));
- const Value &a = state.peek(0);
- state.pop_push(state.stash.create<ScalarValue<CT>>(function(a.cells().typify<CT>()[0])));
+template <typename Func>
+void my_double_map_op(State &state, uint64_t param_in) {
+ Func fun(to_map_fun(param_in));
+ state.pop_push(state.stash.create<DoubleValue>(fun(state.peek(0).as_double())));
}
struct SelectGenericMapOp {
template <typename CT, typename Func> static auto invoke(const ValueType &type) {
- if (type.is_scalar()) {
- return my_scalar_map_op<CT, Func>;
+ if (type.is_double()) {
+ assert((std::is_same_v<CT,double>));
+ return my_double_map_op<Func>;
}
return my_generic_map_op<CT, Func>;
}
@@ -56,8 +56,7 @@ InterpretedFunction::Instruction
GenericMap::make_instruction(const ValueType &result_type,
const ValueType &input_type, map_fun_t function)
{
- // for now:
- assert(result_type == input_type);
+ assert(result_type == input_type.map());
auto op = typify_invoke<2,MapTypify,SelectGenericMapOp>(input_type.cell_type(), function, input_type);
return Instruction(op, to_param(function));
}
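The tightened assert says the result type must be the map() of the input type. While only float and double cell types exist, map() is an identity on well-formed types, so this is equivalent to the old check today and only diverges once smaller cell types need decay. A hedged sketch:

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    using vespalib::eval::ValueType;

    void map_type_sketch() {
        auto t = ValueType::from_spec("tensor<float>(x[2])");
        assert(t.map() == t);                           // identity for float/double
        assert(ValueType::double_type().map().is_double());
    }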
diff --git a/eval/src/vespa/eval/instruction/generic_reduce.cpp b/eval/src/vespa/eval/instruction/generic_reduce.cpp
index 7f3cf7ef0c6..b8aa84e5f88 100644
--- a/eval/src/vespa/eval/instruction/generic_reduce.cpp
+++ b/eval/src/vespa/eval/instruction/generic_reduce.cpp
@@ -154,7 +154,7 @@ void my_generic_dense_reduce_op(State &state, uint64_t param_in) {
}
};
-template <typename ICT, typename OCT, typename AGGR>
+template <typename ICT, typename AGGR>
void my_full_reduce_op(State &state, uint64_t) {
auto cells = state.peek(0).cells().typify<ICT>();
if (cells.size() >= 8) {
@@ -176,23 +176,24 @@ void my_full_reduce_op(State &state, uint64_t) {
aggrs[0].merge(aggrs[2]);
aggrs[1].merge(aggrs[3]);
aggrs[0].merge(aggrs[1]);
- state.pop_push(state.stash.create<ScalarValue<OCT>>(aggrs[0].result()));
+ state.pop_push(state.stash.create<DoubleValue>(aggrs[0].result()));
} else if (cells.size() > 0) {
AGGR aggr;
for (ICT value: cells) {
aggr.sample(value);
}
- state.pop_push(state.stash.create<ScalarValue<OCT>>(aggr.result()));
+ state.pop_push(state.stash.create<DoubleValue>(aggr.result()));
} else {
- state.pop_push(state.stash.create<ScalarValue<OCT>>(OCT{0}));
+ state.pop_push(state.stash.create<DoubleValue>(0.0));
}
};
struct SelectGenericReduceOp {
template <typename ICT, typename OCT, typename AGGR> static auto invoke(const ReduceParam &param) {
using AggrType = typename AGGR::template templ<OCT>;
- if (param.res_type.is_scalar()) {
- return my_full_reduce_op<ICT, OCT, AggrType>;
+ if (param.res_type.is_double()) {
+ assert((std::is_same_v<OCT,double>));
+ return my_full_reduce_op<ICT, AggrType>;
}
if (param.sparse_plan.should_forward_index()) {
return my_generic_dense_reduce_op<ICT, OCT, AggrType, true>;
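my_full_reduce_op keeps four aggregators live so independent additions can overlap in the CPU instead of serializing on a single accumulator's dependency chain. A self-contained sketch of the same technique for a plain sum (hypothetical helper, not the real AGGR machinery):

    #include <cstddef>

    double sum_unrolled(const float *cells, size_t n) {
        double acc[4] = {0.0, 0.0, 0.0, 0.0};
        size_t i = 0;
        for (; i + 4 <= n; i += 4) {   // four independent dependency chains
            acc[0] += cells[i + 0];
            acc[1] += cells[i + 1];
            acc[2] += cells[i + 2];
            acc[3] += cells[i + 3];
        }
        for (; i < n; ++i) {           // at most three leftovers
            acc[0] += cells[i];
        }
        return (acc[0] + acc[1]) + (acc[2] + acc[3]); // merge, like aggrs[0].merge(...)
    }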
diff --git a/eval/src/vespa/eval/instruction/join_with_number_function.cpp b/eval/src/vespa/eval/instruction/join_with_number_function.cpp
index cd95a109e60..c574e3f8ad9 100644
--- a/eval/src/vespa/eval/instruction/join_with_number_function.cpp
+++ b/eval/src/vespa/eval/instruction/join_with_number_function.cpp
@@ -93,7 +93,7 @@ JoinWithNumberFunction::visit_self(vespalib::ObjectVisitor &visitor) const
const TensorFunction &
JoinWithNumberFunction::optimize(const TensorFunction &expr, Stash &stash)
{
- if (! expr.result_type().is_scalar()) {
+ if (! expr.result_type().is_double()) {
if (const auto *join = as<Join>(expr)) {
const ValueType &result_type = join->result_type();
const TensorFunction &lhs = join->lhs();
diff --git a/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp b/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
index c8a4df2b82d..a223463240a 100644
--- a/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
@@ -105,7 +105,7 @@ MixedInnerProductFunction::compile_self(const ValueBuilderFactory &, Stash &stas
bool
MixedInnerProductFunction::compatible_types(const ValueType &res, const ValueType &mixed, const ValueType &vector)
{
- if (vector.is_dense() && ! res.is_scalar()) {
+ if (vector.is_dense() && ! res.is_double()) {
auto dense_dims = vector.nontrivial_indexed_dimensions();
auto mixed_dims = mixed.nontrivial_indexed_dimensions();
while (! dense_dims.empty()) {
@@ -139,7 +139,7 @@ MixedInnerProductFunction::optimize(const TensorFunction &expr, Stash &stash)
{
const auto & res_type = expr.result_type();
auto reduce = as<Reduce>(expr);
- if ((! res_type.is_scalar()) && reduce && (reduce->aggr() == Aggr::SUM)) {
+ if ((! res_type.is_double()) && reduce && (reduce->aggr() == Aggr::SUM)) {
auto join = as<Join>(reduce->child());
if (join && (join->function() == Mul::f)) {
const TensorFunction &lhs = join->lhs();
diff --git a/eval/src/vespa/eval/instruction/mixed_map_function.cpp b/eval/src/vespa/eval/instruction/mixed_map_function.cpp
index 06b53006952..69917ae94e0 100644
--- a/eval/src/vespa/eval/instruction/mixed_map_function.cpp
+++ b/eval/src/vespa/eval/instruction/mixed_map_function.cpp
@@ -75,7 +75,7 @@ const TensorFunction &
MixedMapFunction::optimize(const TensorFunction &expr, Stash &stash)
{
if (auto map = as<Map>(expr)) {
- if (! map->child().result_type().is_scalar()) {
+ if (! map->child().result_type().is_double()) {
return stash.create<MixedMapFunction>(map->result_type(), map->child(), map->function());
}
}
diff --git a/eval/src/vespa/eval/instruction/pow_as_map_optimizer.cpp b/eval/src/vespa/eval/instruction/pow_as_map_optimizer.cpp
index aa33e98c939..5c09ba2c8cc 100644
--- a/eval/src/vespa/eval/instruction/pow_as_map_optimizer.cpp
+++ b/eval/src/vespa/eval/instruction/pow_as_map_optimizer.cpp
@@ -15,7 +15,7 @@ PowAsMapOptimizer::optimize(const TensorFunction &expr, Stash &stash)
const TensorFunction &lhs = join->lhs();
const TensorFunction &rhs = join->rhs();
if ((join->function() == Pow::f) &&
- rhs.result_type().is_scalar())
+ rhs.result_type().is_double())
{
if (auto const_value = as<ConstValue>(rhs)) {
if (const_value->value().as_double() == 2.0) {
diff --git a/eval/src/vespa/eval/instruction/remove_trivial_dimension_optimizer.cpp b/eval/src/vespa/eval/instruction/remove_trivial_dimension_optimizer.cpp
index 77f5247aaaa..bd0534e1d7d 100644
--- a/eval/src/vespa/eval/instruction/remove_trivial_dimension_optimizer.cpp
+++ b/eval/src/vespa/eval/instruction/remove_trivial_dimension_optimizer.cpp
@@ -28,7 +28,7 @@ RemoveTrivialDimensionOptimizer::optimize(const TensorFunction &expr, Stash &sta
{
if (auto reduce = as<Reduce>(expr)) {
const TensorFunction &child = reduce->child();
- if ((! expr.result_type().dimensions().empty()) &&
+ if (expr.result_type().has_dimensions() &&
aggr::is_ident(reduce->aggr()) &&
is_trivial_dim_list(child.result_type(), reduce->dimensions()))
{
diff --git a/eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp b/eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp
index 7cc4417bdbb..4da3dbe4f5b 100644
--- a/eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/sparse_dot_product_function.cpp
@@ -73,7 +73,7 @@ void my_sparse_dot_product_op(InterpretedFunction::State &state, uint64_t num_ma
double result = __builtin_expect(are_fast(lhs_idx, rhs_idx), true)
? my_fast_sparse_dot_product<CT,single_dim>(&as_fast(lhs_idx).map, &as_fast(rhs_idx).map, lhs_cells, rhs_cells)
: my_sparse_dot_product_fallback<CT>(lhs_idx, rhs_idx, lhs_cells, rhs_cells, num_mapped_dims);
- state.pop_pop_push(state.stash.create<ScalarValue<double>>(result));
+ state.pop_pop_push(state.stash.create<DoubleValue>(result));
}
struct MyGetFun {
@@ -87,7 +87,7 @@ using MyTypify = TypifyValue<TypifyCellType,TypifyBool>;
SparseDotProductFunction::SparseDotProductFunction(const TensorFunction &lhs_in,
const TensorFunction &rhs_in)
- : tensor_function::Op2(ValueType::make_type(CellType::DOUBLE, {}), lhs_in, rhs_in)
+ : tensor_function::Op2(ValueType::double_type(), lhs_in, rhs_in)
{
}
@@ -103,7 +103,7 @@ SparseDotProductFunction::compile_self(const ValueBuilderFactory &, Stash &) con
bool
SparseDotProductFunction::compatible_types(const ValueType &res, const ValueType &lhs, const ValueType &rhs)
{
- return (res.is_scalar() && (res.cell_type() == CellType::DOUBLE) &&
+ return (res.is_double() &&
lhs.is_sparse() && (rhs.dimensions() == lhs.dimensions()) &&
lhs.cell_type() == rhs.cell_type());
}
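Dropping the explicit cell-type check in compatible_types is safe because the reworked is_double() guarantees (and asserts) that any non-error, dimensionless type has cell type DOUBLE. A hedged sketch:

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    using vespalib::eval::CellType;
    using vespalib::eval::ValueType;

    void is_double_implies_double_cells() {
        auto res = ValueType::double_type();
        assert(res.is_double() && res.cell_type() == CellType::DOUBLE);
    }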
diff --git a/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp b/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
index 4b541062007..bdf1682cccd 100644
--- a/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
@@ -25,7 +25,7 @@ void my_sum_max_dot_product_op(InterpretedFunction::State &state, uint64_t dp_si
result += max_dp;
}
}
- state.pop_pop_push(state.stash.create<ScalarValue<double>>(result));
+ state.pop_pop_push(state.stash.create<DoubleValue>(result));
}
const Reduce *check_reduce(const TensorFunction &expr, Aggr aggr) {
@@ -49,7 +49,7 @@ const Join *check_mul(const TensorFunction &expr) {
bool check_params(const ValueType &res_type, const ValueType &query, const ValueType &document,
const vespalib::string &sum_dim, const vespalib::string &max_dim, const vespalib::string &dp_dim)
{
- if (res_type.is_scalar() && (res_type.cell_type() == CellType::DOUBLE) &&
+ if (res_type.is_double() &&
(query.dimensions().size() == 2) && (query.cell_type() == CellType::FLOAT) &&
(document.dimensions().size() == 2) && (document.cell_type() == CellType::FLOAT))
{
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
index d9c0d659b1e..2891b37ebe8 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
@@ -297,7 +297,7 @@ Onnx::WirePlanner::make_output_type(const TensorInfo &onnx_out) const
}
dim_list.emplace_back(fmt("d%zu", dim_list.size()), dim_size);
}
- return ValueType::tensor_type(std::move(dim_list), to_cell_type(elements));
+ return ValueType::make_type(to_cell_type(elements), std::move(dim_list));
}
Onnx::WireInfo
diff --git a/searchcore/src/tests/proton/matching/request_context/request_context_test.cpp b/searchcore/src/tests/proton/matching/request_context/request_context_test.cpp
index 7b545344e9b..d8aa7e0ffa8 100644
--- a/searchcore/src/tests/proton/matching/request_context/request_context_test.cpp
+++ b/searchcore/src/tests/proton/matching/request_context/request_context_test.cpp
@@ -67,7 +67,7 @@ TEST_F(RequestContextTest, query_tensor_can_be_retrieved)
{
auto tensor = get_query_tensor("my_tensor");
ASSERT_TRUE(tensor);
- EXPECT_TRUE(tensor->is_tensor());
+ EXPECT_TRUE(tensor->type().has_dimensions());
EXPECT_EQ(expected_query_tensor(), spec_from_value(*tensor));
}
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp b/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp
index 956e9a1b430..ae3edc93f6d 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp
@@ -168,7 +168,8 @@ FlushableAttribute::FlushableAttribute(const AttributeVectorSP attr,
_lastStats.setPathElementsToLog(8);
auto &config = attr->getConfig();
if (config.basicType() == search::attribute::BasicType::Type::TENSOR &&
- config.tensorType().is_tensor() && config.tensorType().is_dense() && config.hnsw_index_params().has_value()) {
+ config.tensorType().is_dense() && config.hnsw_index_params().has_value())
+ {
_replay_operation_cost = 100.0; // replaying operations to hnsw index is 100 times more expensive than reading from tls
}
}
diff --git a/searchlib/src/tests/features/constant/constant_test.cpp b/searchlib/src/tests/features/constant/constant_test.cpp
index 140c93125b0..9c8480c1da2 100644
--- a/searchlib/src/tests/features/constant/constant_test.cpp
+++ b/searchlib/src/tests/features/constant/constant_test.cpp
@@ -45,7 +45,7 @@ struct ExecFixture
bool setup() { return test.setup(); }
const Value &extractTensor(uint32_t docid) {
Value::CREF value = test.resolveObjectFeature(docid);
- ASSERT_TRUE(value.get().is_tensor());
+ ASSERT_TRUE(value.get().type().has_dimensions());
return value.get();
}
const Value &executeTensor(uint32_t docId = 1) {
@@ -53,7 +53,7 @@ struct ExecFixture
}
double extractDouble(uint32_t docid) {
Value::CREF value = test.resolveObjectFeature(docid);
- ASSERT_TRUE(value.get().is_double());
+ ASSERT_TRUE(value.get().type().is_double());
return value.get().as_double();
}
double executeDouble(uint32_t docId = 1) {
diff --git a/searchlib/src/tests/features/tensor/tensor_test.cpp b/searchlib/src/tests/features/tensor/tensor_test.cpp
index 53049c4a385..5d7698822eb 100644
--- a/searchlib/src/tests/features/tensor/tensor_test.cpp
+++ b/searchlib/src/tests/features/tensor/tensor_test.cpp
@@ -152,7 +152,7 @@ struct ExecFixture
}
const Value &extractTensor(uint32_t docid) {
Value::CREF value = test.resolveObjectFeature(docid);
- ASSERT_TRUE(value.get().is_tensor());
+ ASSERT_TRUE(value.get().type().has_dimensions());
return value.get();
}
const Value &execute(uint32_t docId = 1) {
diff --git a/searchlib/src/vespa/searchlib/attribute/configconverter.cpp b/searchlib/src/vespa/searchlib/attribute/configconverter.cpp
index f2e2f8271de..991f1f03ee7 100644
--- a/searchlib/src/vespa/searchlib/attribute/configconverter.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/configconverter.cpp
@@ -11,6 +11,7 @@ namespace {
using search::attribute::CollectionType;
using search::attribute::BasicType;
using vespalib::eval::ValueType;
+using vespalib::eval::CellType;
typedef std::map<AttributesConfig::Attribute::Datatype, BasicType::Type> DataTypeMap;
typedef std::map<AttributesConfig::Attribute::Collectiontype, CollectionType::Type> CollectionTypeMap;
@@ -102,7 +103,7 @@ ConfigConverter::convert(const AttributesConfig::Attribute & cfg)
if (!cfg.tensortype.empty()) {
retval.setTensorType(ValueType::from_spec(cfg.tensortype));
} else {
- retval.setTensorType(ValueType::tensor_type({}));
+ retval.setTensorType(ValueType::double_type());
}
}
return retval;
diff --git a/searchlib/src/vespa/searchlib/features/attributefeature.cpp b/searchlib/src/vespa/searchlib/features/attributefeature.cpp
index 80d9a305ef4..2c1ae7c557a 100644
--- a/searchlib/src/vespa/searchlib/features/attributefeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/attributefeature.cpp
@@ -530,7 +530,7 @@ AttributeBlueprint::setup(const fef::IIndexEnvironment & env,
"the given key of a weighted set attribute, or"
"the tensor of a tensor attribute", output_type);
const fef::FieldInfo * fInfo = env.getFieldByName(_attrName);
- if (_tensorType.is_tensor() || isSingleValueBoolField(*fInfo)) {
+ if (_tensorType.has_dimensions() || isSingleValueBoolField(*fInfo)) {
_numOutputs = 1;
} else {
describeOutput("weight", "The weight associated with the given key in a weighted set attribute.");
@@ -558,7 +558,7 @@ fef::FeatureExecutor &
AttributeBlueprint::createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const
{
const IAttributeVector * attribute = lookupAttribute(_attrKey, _attrName, env);
- if (_tensorType.is_tensor()) {
+ if (_tensorType.has_dimensions()) {
return createTensorAttributeExecutor(attribute, _attrName, _tensorType, stash);
} else {
return createAttributeExecutor(_numOutputs, attribute, _attrName, _extra, stash);
diff --git a/searchlib/src/vespa/searchlib/features/queryfeature.cpp b/searchlib/src/vespa/searchlib/features/queryfeature.cpp
index c6196fcbc7f..60bd77e4883 100644
--- a/searchlib/src/vespa/searchlib/features/queryfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/queryfeature.cpp
@@ -137,7 +137,7 @@ createTensorExecutor(const IQueryEnvironment &env,
FeatureExecutor &
QueryBlueprint::createExecutor(const IQueryEnvironment &env, vespalib::Stash &stash) const
{
- if (_valueType.is_tensor()) {
+ if (_valueType.has_dimensions()) {
return createTensorExecutor(env, _key, _valueType, stash);
} else {
std::vector<feature_t> values;
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h b/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h
index f4a5b0b8d0a..475075671cd 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h
@@ -8,6 +8,7 @@
#include <vespa/vespalib/stllike/string.h>
using vespalib::eval::FastValueBuilderFactory;
+using vespalib::eval::CellType;
namespace search::features {
@@ -29,7 +30,7 @@ public:
TensorFromAttributeExecutor(const search::attribute::IAttributeVector *attribute,
const vespalib::string &dimension)
: _attribute(attribute),
- _type(vespalib::eval::ValueType::tensor_type({{dimension}})),
+ _type(vespalib::eval::ValueType::make_type(CellType::DOUBLE, {{dimension}})),
_attrBuffer(),
_addr_ref(),
_tensor()
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
index e4f0a010ae2..76a6e908fcb 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
@@ -20,6 +20,7 @@ using search::attribute::WeightedConstCharContent;
using search::attribute::WeightedStringContent;
using vespalib::eval::FastValueBuilderFactory;
using vespalib::eval::ValueType;
+using vespalib::eval::CellType;
using search::fef::FeatureType;
namespace search {
@@ -45,7 +46,7 @@ TensorFromLabelsBlueprint::setup(const search::fef::IIndexEnvironment &env,
}
describeOutput("tensor",
"The tensor created from the given array source (attribute field or query parameter)",
- FeatureType::object(ValueType::tensor_type({{_dimension}})));
+ FeatureType::object(ValueType::make_type(CellType::DOUBLE, {{_dimension}})));
return validSource;
}
@@ -60,13 +61,13 @@ createAttributeExecutor(const search::fef::IQueryEnvironment &env,
if (attribute == NULL) {
LOG(warning, "The attribute vector '%s' was not found in the attribute manager."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::tensor_type({{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
if (attribute->getCollectionType() != search::attribute::CollectionType::ARRAY ||
attribute->isFloatingPointType()) {
LOG(warning, "The attribute vector '%s' is NOT of type array of string or integer."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::tensor_type({{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
// Note that for array attribute vectors the default weight is 1.0 for all values.
// This means we can get the attribute content as weighted content and build
@@ -86,7 +87,7 @@ createQueryExecutor(const search::fef::IQueryEnvironment &env,
const vespalib::string &queryKey,
const vespalib::string &dimension, vespalib::Stash &stash)
{
- ValueType type = ValueType::tensor_type({{dimension}});
+ ValueType type = ValueType::make_type(CellType::DOUBLE, {{dimension}});
search::fef::Property prop = env.getProperties().lookup(queryKey);
if (prop.found() && !prop.get().empty()) {
std::vector<vespalib::string> vector;
@@ -115,7 +116,7 @@ TensorFromLabelsBlueprint::createExecutor(const search::fef::IQueryEnvironment &
} else if (_sourceType == QUERY_SOURCE) {
return createQueryExecutor(env, _sourceParam, _dimension, stash);
}
- return ConstantTensorExecutor::createEmpty(ValueType::tensor_type({{_dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{_dimension}}), stash);
}
} // namespace features
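The rewritten calls construct the same sparse type that tensor_type({{dimension}}) used to produce: a name-only dimension becomes a mapped dimension, with the double cell type now spelled out explicitly. A hedged sketch with a hypothetical dimension name:

    #include <vespa/eval/eval/value_type.h>
    #include <cassert>

    using vespalib::eval::CellType;
    using vespalib::eval::ValueType;

    void sparse_type_sketch() {
        auto t = ValueType::make_type(CellType::DOUBLE, {{"tag"}}); // "tag" is hypothetical
        assert(t.to_spec() == "tensor(tag{})");   // mapped dimension, double cells
        assert(t.count_mapped_dimensions() == 1);
    }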
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp b/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp
index 88309120882..50fab518402 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp
@@ -21,6 +21,7 @@ using search::attribute::WeightedConstCharContent;
using search::attribute::WeightedStringContent;
using vespalib::eval::FastValueBuilderFactory;
using vespalib::eval::ValueType;
+using vespalib::eval::CellType;
using search::fef::FeatureType;
namespace search {
@@ -58,7 +59,7 @@ TensorFromWeightedSetBlueprint::setup(const search::fef::IIndexEnvironment &env,
}
describeOutput("tensor",
"The tensor created from the given weighted set source (attribute field or query parameter)",
- FeatureType::object(ValueType::tensor_type({{_dimension}})));
+ FeatureType::object(ValueType::make_type(CellType::DOUBLE, {{_dimension}})));
return validSource;
}
@@ -74,13 +75,13 @@ createAttributeExecutor(const search::fef::IQueryEnvironment &env,
if (attribute == NULL) {
LOG(warning, "The attribute vector '%s' was not found in the attribute manager."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::tensor_type({{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
if (attribute->getCollectionType() != search::attribute::CollectionType::WSET ||
attribute->isFloatingPointType()) {
LOG(warning, "The attribute vector '%s' is NOT of type weighted set of string or integer."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::tensor_type({{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
if (attribute->isIntegerType()) {
// Using WeightedStringContent ensures that the integer values are converted
@@ -97,7 +98,7 @@ createQueryExecutor(const search::fef::IQueryEnvironment &env,
const vespalib::string &queryKey,
const vespalib::string &dimension, vespalib::Stash &stash)
{
- ValueType type = ValueType::tensor_type({{dimension}});
+ ValueType type = ValueType::make_type(CellType::DOUBLE, {{dimension}});
search::fef::Property prop = env.getProperties().lookup(queryKey);
if (prop.found() && !prop.get().empty()) {
WeightedStringVector vector;
@@ -127,7 +128,7 @@ TensorFromWeightedSetBlueprint::createExecutor(const search::fef::IQueryEnvironm
} else if (_sourceType == QUERY_SOURCE) {
return createQueryExecutor(env, _sourceParam, _dimension, stash);
}
- return ConstantTensorExecutor::createEmpty(ValueType::tensor_type({{_dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{_dimension}}), stash);
}
} // namespace features
diff --git a/streamingvisitors/src/vespa/searchvisitor/hitcollector.cpp b/streamingvisitors/src/vespa/searchvisitor/hitcollector.cpp
index b0cb058d762..de8a6c707e5 100644
--- a/streamingvisitors/src/vespa/searchvisitor/hitcollector.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/hitcollector.cpp
@@ -162,7 +162,7 @@ HitCollector::getFeatureSet(IRankProgram &rankProgram,
for (uint32_t j = 0; j < names.size(); ++j) {
if (resolver.is_object(j)) {
auto obj = resolver.resolve(j).as_object(docId);
- if (! obj.get().is_double()) {
+ if (! obj.get().type().is_double()) {
vespalib::nbostream buf;
encode_value(obj.get(), buf);
f[j].set_data(vespalib::Memory(buf.peek(), buf.size()));