diff options
author | Håvard Pettersen <3535158+havardpe@users.noreply.github.com> | 2019-11-13 15:15:35 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-11-13 15:15:35 +0100 |
commit | 3c3a1bc15d4ba6c0b71e065cb1b7371ebe62e3a4 (patch) | |
tree | d607b02f798de2bb6c66d92b09a29d36ec1a361b | |
parent | 25d00feb031fab5a34fbea78f9eeb9b6ed7cf7f8 (diff) | |
parent | 713126938b27e784b9b11013a9dbaac179602fad (diff) |
Merge pull request #11287 from vespa-engine/havardpe/convenient-tensor-create-parsing
Havardpe/convenient tensor create parsing
-rw-r--r-- | eval/src/tests/eval/function/function_test.cpp | 69 | ||||
-rw-r--r-- | eval/src/tests/eval/node_types/node_types_test.cpp | 57 | ||||
-rw-r--r-- | eval/src/tests/eval/value_type/value_type_test.cpp | 45 | ||||
-rw-r--r-- | eval/src/vespa/eval/eval/function.cpp | 111 | ||||
-rw-r--r-- | eval/src/vespa/eval/eval/value_type.cpp | 6 | ||||
-rw-r--r-- | eval/src/vespa/eval/eval/value_type.h | 1 | ||||
-rw-r--r-- | eval/src/vespa/eval/eval/value_type_spec.cpp | 18 | ||||
-rw-r--r-- | eval/src/vespa/eval/eval/value_type_spec.h | 6 |
8 files changed, 246 insertions, 67 deletions
diff --git a/eval/src/tests/eval/function/function_test.cpp b/eval/src/tests/eval/function/function_test.cpp index 49793a62958..0e3100ae425 100644 --- a/eval/src/tests/eval/function/function_test.cpp +++ b/eval/src/tests/eval/function/function_test.cpp @@ -842,8 +842,8 @@ TEST("require that verbose tensor create handles spaces and reordering of variou } TEST("require that verbose tensor create detects invalid tensor type") { - TEST_DO(verify_error("tensor(x[,y}):ignored", - "[tensor(x[,y}):]...[invalid tensor type]...[ignored]")); + TEST_DO(verify_error("tensor(x[,y}):{{ignored}}", + "[tensor(x[,y})]...[invalid tensor type]...[:{{ignored}}]")); } TEST("require that verbose tensor create detects incomplete addresses") { @@ -868,6 +868,71 @@ TEST("require that verbose tensor create detects non-numeric indexes for indexed //----------------------------------------------------------------------------- +TEST("require that convenient tensor create can be parsed") { + auto dense = Function::parse("tensor(x[3]):[1,2,3]"); + auto sparse = Function::parse("tensor(x{}):{a:1,b:2,c:3}"); + auto mixed = Function::parse("tensor(x{},y[2]):{a:[1,2]}"); + EXPECT_EQUAL("tensor(x[3]):{{x:0}:1,{x:1}:2,{x:2}:3}", dense.dump()); + EXPECT_EQUAL("tensor(x{}):{{x:a}:1,{x:b}:2,{x:c}:3}", sparse.dump()); + EXPECT_EQUAL("tensor(x{},y[2]):{{x:a,y:0}:1,{x:a,y:1}:2}", mixed.dump()); +} + +TEST("require that convenient tensor create can contain expressions") { + auto fun = Function::parse("tensor(x[2]):[1,2+a]"); + EXPECT_EQUAL("tensor(x[2]):{{x:0}:1,{x:1}:(2+a)}", fun.dump()); + ASSERT_EQUAL(fun.num_params(), 1u); + EXPECT_EQUAL(fun.param_name(0), "a"); +} + +TEST("require that convenient tensor create handles dimension order") { + auto mixed = Function::parse("tensor(y{},x[2]):{a:[1,2]}"); + EXPECT_EQUAL("tensor(x[2],y{}):{{x:0,y:a}:1,{x:1,y:a}:2}", mixed.dump()); +} + +TEST("require that convenient tensor create can be highly nested") { + vespalib::string 
expect("tensor(a{},b{},c[1],d[1]):{{a:x,b:y,c:0,d:0}:5}"); + auto nested1 = Function::parse("tensor(a{},b{},c[1],d[1]):{x:{y:[[5]]}}"); + auto nested2 = Function::parse("tensor(c[1],d[1],a{},b{}):[[{x:{y:5}}]]"); + auto nested3 = Function::parse("tensor(a{},c[1],b{},d[1]): { x : [ { y : [ 5 ] } ] } "); + EXPECT_EQUAL(expect, nested1.dump()); + EXPECT_EQUAL(expect, nested2.dump()); + EXPECT_EQUAL(expect, nested3.dump()); +} + +TEST("require that convenient tensor create can have multiple values on multiple levels") { + vespalib::string expect("tensor(x{},y[2]):{{x:a,y:0}:1,{x:a,y:1}:2,{x:b,y:0}:3,{x:b,y:1}:4}"); + auto fun1 = Function::parse("tensor(x{},y[2]):{a:[1,2],b:[3,4]}"); + auto fun2 = Function::parse("tensor(y[2],x{}):[{a:1,b:3},{a:2,b:4}]"); + auto fun3 = Function::parse("tensor(x{},y[2]): { a : [ 1 , 2 ] , b : [ 3 , 4 ] } "); + auto fun4 = Function::parse("tensor(y[2],x{}): [ { a : 1 , b : 3 } , { a : 2 , b : 4 } ] "); + EXPECT_EQUAL(expect, fun1.dump()); + EXPECT_EQUAL(expect, fun2.dump()); + EXPECT_EQUAL(expect, fun3.dump()); + EXPECT_EQUAL(expect, fun4.dump()); +} + +TEST("require that convenient tensor create allows under-specified tensors") { + auto fun = Function::parse("tensor(x[2],y[2]):[[],[5]]"); + EXPECT_EQUAL("tensor(x[2],y[2]):{{x:1,y:0}:5}", fun.dump()); +} + +TEST("require that convenient tensor create detects invalid tensor type") { + TEST_DO(verify_error("tensor(x[,y}):ignored", + "[tensor(x[,y})]...[invalid tensor type]...[:ignored]")); +} + +TEST("require that convenient tensor create detects too large indexed dimensions") { + TEST_DO(verify_error("tensor(x[1]):[1,2]", + "[tensor(x[1]):[1,]...[dimension too large: 'x']...[2]]")); +} + +TEST("require that convenient tensor create detects under-specified cells") { + TEST_DO(verify_error("tensor(x[1],y[1]):[1]", + "[tensor(x[1],y[1]):[]...[expected '[', but got '1']...[1]]")); +} + +//----------------------------------------------------------------------------- + TEST("require that tensor 
concat can be parsed") { EXPECT_EQUAL("concat(a,b,d)", Function::parse({"a", "b"}, "concat(a,b,d)").dump()); EXPECT_EQUAL("concat(a,b,d)", Function::parse({"a", "b"}, " concat ( a , b , d ) ").dump()); diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp index 5504bb33137..b2ad107f2aa 100644 --- a/eval/src/tests/eval/node_types/node_types_test.cpp +++ b/eval/src/tests/eval/node_types/node_types_test.cpp @@ -7,25 +7,10 @@ using namespace vespalib::eval; -/** - * Hack to avoid parse-conflict between tensor type expressions and - * lambda-generated tensors. This will patch leading identifier 'T' to - * 't' directly in the input stream after we have concluded that this - * is not a lambda-generated tensor in order to parse it out as a - * valid tensor type. This may be reverted later if we add support for - * parser rollback when we fail to parse a lambda-generated tensor. - **/ -void tensor_type_hack(const char *pos_in, const char *end_in) { - if ((pos_in < end_in) && (*pos_in == 'T')) { - const_cast<char *>(pos_in)[0] = 't'; - } -} - struct TypeSpecExtractor : public vespalib::eval::SymbolExtractor { void extract_symbol(const char *pos_in, const char *end_in, const char *&pos_out, vespalib::string &symbol_out) const override { - tensor_type_hack(pos_in, end_in); ValueType type = value_type::parse_spec(pos_in, end_in, pos_out); if (pos_out != nullptr) { symbol_out = type.to_spec(); @@ -33,21 +18,7 @@ struct TypeSpecExtractor : public vespalib::eval::SymbolExtractor { } }; -void verify(const vespalib::string &type_expr_in, const vespalib::string &type_spec, bool replace_first = true) { - vespalib::string type_expr = type_expr_in; - // replace 'tensor' with 'Tensor' in type expression, see hack above - size_t tensor_cnt = 0; - for (size_t idx = type_expr.find("tensor"); - idx != type_expr.npos; - idx = type_expr.find("tensor", idx + 1)) - { - // setting 'replace_first' to false will avoid replacing the - // 
first 'tensor' instance to let the parser handle it as an - // actual tensor generator. - if ((tensor_cnt++ > 0) || replace_first) { - type_expr[idx] = 'T'; - } - } +void verify(const vespalib::string &type_expr, const vespalib::string &type_spec) { Function function = Function::parse(type_expr, TypeSpecExtractor()); if (!EXPECT_TRUE(!function.has_error())) { fprintf(stderr, "parse error: %s\n", function.get_error().c_str()); @@ -225,22 +196,22 @@ TEST("require that join resolves correct type") { } TEST("require that lambda tensor resolves correct type") { - TEST_DO(verify("tensor(x[5])(1.0)", "tensor(x[5])", false)); - TEST_DO(verify("tensor(x[5],y[10])(1.0)", "tensor(x[5],y[10])", false)); - TEST_DO(verify("tensor(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])", false)); - TEST_DO(verify("tensor<double>(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])", false)); - TEST_DO(verify("tensor<float>(x[5],y[10],z[15])(1.0)", "tensor<float>(x[5],y[10],z[15])", false)); + TEST_DO(verify("tensor(x[5])(1.0)", "tensor(x[5])")); + TEST_DO(verify("tensor(x[5],y[10])(1.0)", "tensor(x[5],y[10])")); + TEST_DO(verify("tensor(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])")); + TEST_DO(verify("tensor<double>(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])")); + TEST_DO(verify("tensor<float>(x[5],y[10],z[15])(1.0)", "tensor<float>(x[5],y[10],z[15])")); } TEST("require that tensor create resolves correct type") { - TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor(x[3])", false)); - TEST_DO(verify("tensor(x{}):{{x:a}:double,{x:b}:double,{x:c}:double}", "tensor(x{})", false)); - TEST_DO(verify("tensor(x{},y[2]):{{x:a,y:0}:double,{x:a,y:1}:double}", "tensor(x{},y[2])", false)); - TEST_DO(verify("tensor<float>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<float>(x[3])", false)); - TEST_DO(verify("tensor(x[3]):{{x:0}:double+double,{x:1}:double-double,{x:2}:double/double}", "tensor(x[3])", false)); - 
TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:reduce(tensor(x[2]),sum),{x:2}:double}", "tensor(x[3])", false)); - TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:tensor(x[2]),{x:2}:double}", "error", false)); - TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:error,{x:2}:double}", "error", false)); + TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor(x[3])")); + TEST_DO(verify("tensor(x{}):{{x:a}:double,{x:b}:double,{x:c}:double}", "tensor(x{})")); + TEST_DO(verify("tensor(x{},y[2]):{{x:a,y:0}:double,{x:a,y:1}:double}", "tensor(x{},y[2])")); + TEST_DO(verify("tensor<float>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<float>(x[3])")); + TEST_DO(verify("tensor(x[3]):{{x:0}:double+double,{x:1}:double-double,{x:2}:double/double}", "tensor(x[3])")); + TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:reduce(tensor(x[2]),sum),{x:2}:double}", "tensor(x[3])")); + TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:tensor(x[2]),{x:2}:double}", "error")); + TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:error,{x:2}:double}", "error")); } TEST("require that tensor concat resolves correct type") { diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp index 42b34c0d3ea..85ff7613775 100644 --- a/eval/src/tests/eval/value_type/value_type_test.cpp +++ b/eval/src/tests/eval/value_type/value_type_test.cpp @@ -162,6 +162,37 @@ TEST("require that value type spec can be parsed with extra whitespace") { EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}, CellType::FLOAT), ValueType::from_spec(" tensor < float > ( y [ 10 ] ) ")); } +TEST("require that the unsorted dimension list can be obtained when parsing type spec") { + std::vector<ValueType::Dimension> unsorted; + auto type = ValueType::from_spec("tensor(y[10],z[5],x{})", unsorted); + EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}), type); + ASSERT_EQUAL(unsorted.size(), 3u); + EXPECT_EQUAL(unsorted[0].name, "y"); 
+ EXPECT_EQUAL(unsorted[0].size, 10u); + EXPECT_EQUAL(unsorted[1].name, "z"); + EXPECT_EQUAL(unsorted[1].size, 5u); + EXPECT_EQUAL(unsorted[2].name, "x"); + EXPECT_EQUAL(unsorted[2].size, npos); +} + +TEST("require that the unsorted dimension list can be obtained also when the type spec is invalid") { + std::vector<ValueType::Dimension> unsorted; + auto type = ValueType::from_spec("tensor(x[10],x[5])...", unsorted); + EXPECT_TRUE(type.is_error()); + ASSERT_EQUAL(unsorted.size(), 2u); + EXPECT_EQUAL(unsorted[0].name, "x"); + EXPECT_EQUAL(unsorted[0].size, 10u); + EXPECT_EQUAL(unsorted[1].name, "x"); + EXPECT_EQUAL(unsorted[1].size, 5u); +} + +TEST("require that the unsorted dimension list can not be obtained if the parse itself fails") { + std::vector<ValueType::Dimension> unsorted; + auto type = ValueType::from_spec("tensor(x[10],x[5]", unsorted); + EXPECT_TRUE(type.is_error()); + EXPECT_EQUAL(unsorted.size(), 0u); +} + TEST("require that malformed value type spec is parsed as error") { EXPECT_TRUE(ValueType::from_spec("").is_error()); EXPECT_TRUE(ValueType::from_spec(" ").is_error()); @@ -270,6 +301,20 @@ TEST("require that type-related predicate functions work as expected") { TEST_DO(verify_predicates(type("tensor<float>(x[5],y{})"), false, false, true, false, false)); } +TEST("require that dense subspace size calculation works as expected") { + EXPECT_EQUAL(type("error").dense_subspace_size(), 1u); + EXPECT_EQUAL(type("double").dense_subspace_size(), 1u); + EXPECT_EQUAL(type("tensor()").dense_subspace_size(), 1u); + EXPECT_EQUAL(type("tensor(x{})").dense_subspace_size(), 1u); + EXPECT_EQUAL(type("tensor(x{},y{})").dense_subspace_size(), 1u); + EXPECT_EQUAL(type("tensor(x[5])").dense_subspace_size(), 5u); + EXPECT_EQUAL(type("tensor(x[5],y[10])").dense_subspace_size(), 50u); + EXPECT_EQUAL(type("tensor(x[5],y{})").dense_subspace_size(), 5u); + EXPECT_EQUAL(type("tensor<float>(x{})").dense_subspace_size(), 1u); + 
EXPECT_EQUAL(type("tensor<float>(x[5])").dense_subspace_size(), 5u); + EXPECT_EQUAL(type("tensor<float>(x[5],y{})").dense_subspace_size(), 5u); +} + TEST("require that dimension predicates work as expected") { ValueType::Dimension x("x"); ValueType::Dimension y("y", 10); diff --git a/eval/src/vespa/eval/eval/function.cpp b/eval/src/vespa/eval/eval/function.cpp index 1cdd0478181..8b49278a8f0 100644 --- a/eval/src/vespa/eval/eval/function.cpp +++ b/eval/src/vespa/eval/eval/function.cpp @@ -613,11 +613,7 @@ TensorSpec::Address get_tensor_address(ParseContext &ctx, const ValueType &type) // pre: 'tensor<float>(a{},x[3]):' -> type // expect: '{{a:w,x:0}:1,{a:w,x:1}:2,{a:w,x:2}:3}' -void parse_tensor_create(ParseContext &ctx, const ValueType &type) { - if (type.is_error()) { - ctx.fail("invalid tensor type"); - return; - } +void parse_tensor_create_verbose(ParseContext &ctx, const ValueType &type) { ctx.skip_spaces(); ctx.eat('{'); nodes::TensorCreate::Spec create_spec; @@ -634,11 +630,78 @@ void parse_tensor_create(ParseContext &ctx, const ValueType &type) { ctx.push_expression(std::make_unique<nodes::TensorCreate>(type, std::move(create_spec))); } -void parse_tensor_lambda(ParseContext &ctx, const ValueType &type) { - if (!type.is_dense()) { - ctx.fail("invalid tensor type"); - return; +// pre: 'tensor<float>(a{},x[3]):' -> type +// expect: '{w:[0,1,2]}' +void parse_tensor_create_convenient(ParseContext &ctx, const ValueType &type, + const std::vector<ValueType::Dimension> &dim_list) +{ + nodes::TensorCreate::Spec create_spec; + using Label = TensorSpec::Label; + std::vector<Label> addr; + for (;;) { + if (addr.size() == dim_list.size()) { + TensorSpec::Address address; + for (size_t i = 0; i < addr.size(); ++i) { + if (addr[i].is_mapped()) { + address.emplace(dim_list[i].name, addr[i]); + } else { + address.emplace(dim_list[i].name, Label(addr[i].index-1)); + } + } + create_spec.emplace(std::move(address), get_expression(ctx)); + } else { + bool mapped = 
dim_list[addr.size()].is_mapped(); + addr.push_back(mapped ? Label("") : Label(size_t(0))); + ctx.skip_spaces(); + ctx.eat(mapped ? '{' : '['); + } + while (ctx.find_list_end()) { + bool mapped = addr.back().is_mapped(); + ctx.eat(mapped ? '}' : ']'); + addr.pop_back(); + if (addr.empty()) { + return ctx.push_expression(std::make_unique<nodes::TensorCreate>(type, std::move(create_spec))); + } + } + if (addr.back().is_mapped()) { + if (addr.back().name != "") { + ctx.eat(','); + } + addr.back().name = get_ident(ctx, false); + ctx.skip_spaces(); + ctx.eat(':'); + } else { + if (addr.back().index != 0) { + ctx.eat(','); + } + if (++addr.back().index > dim_list[addr.size()-1].size) { + return ctx.fail(make_string("dimension too large: '%s'", + dim_list[addr.size()-1].name.c_str())); + } + } + } +} + +void parse_tensor_create(ParseContext &ctx, const ValueType &type, + const std::vector<ValueType::Dimension> &dim_list) +{ + ctx.skip_spaces(); + ctx.eat(':'); + ParseContext::InputMark before_cells = ctx.get_input_mark(); + ctx.skip_spaces(); + ctx.eat('{'); + ctx.skip_spaces(); + ctx.eat('{'); + bool is_verbose = !ctx.failed(); + ctx.restore_input_mark(before_cells); + if (is_verbose) { + parse_tensor_create_verbose(ctx, type); + } else { + parse_tensor_create_convenient(ctx, type, dim_list); } +} + +void parse_tensor_lambda(ParseContext &ctx, const ValueType &type) { auto param_names = type.dimension_names(); ExplicitParams params(param_names); ctx.push_resolve_context(params, nullptr); @@ -650,7 +713,8 @@ void parse_tensor_lambda(ParseContext &ctx, const ValueType &type) { ctx.push_expression(std::make_unique<nodes::TensorLambda>(std::move(type), std::move(lambda))); } -void parse_tensor_generator(ParseContext &ctx) { +bool maybe_parse_tensor_generator(ParseContext &ctx) { + ParseContext::InputMark my_mark = ctx.get_input_mark(); vespalib::string type_spec("tensor"); while(!ctx.eos() && (ctx.get() != ')')) { type_spec.push_back(ctx.get()); @@ -658,14 +722,24 @@ void 
parse_tensor_generator(ParseContext &ctx) { } ctx.eat(')'); type_spec.push_back(')'); - ValueType type = ValueType::from_spec(type_spec); + std::vector<ValueType::Dimension> dim_list; + ValueType type = ValueType::from_spec(type_spec, dim_list); ctx.skip_spaces(); - if (ctx.get() == ':') { - ctx.eat(':'); - parse_tensor_create(ctx, type); - } else { + bool is_tensor_generate = ((ctx.get() == ':') || (ctx.get() == '(')); + if (!is_tensor_generate) { + ctx.restore_input_mark(my_mark); + return false; + } + bool is_create = (type.is_tensor() && (ctx.get() == ':')); + bool is_lambda = (type.is_dense() && (ctx.get() == '(')); + if (is_create) { + parse_tensor_create(ctx, type, dim_list); + } else if (is_lambda) { parse_tensor_lambda(ctx, type); + } else { + ctx.fail("invalid tensor type"); } + return true; } void parse_tensor_concat(ParseContext &ctx) { @@ -678,7 +752,7 @@ void parse_tensor_concat(ParseContext &ctx) { ctx.push_expression(std::make_unique<nodes::TensorConcat>(std::move(lhs), std::move(rhs), dimension)); } -bool try_parse_call(ParseContext &ctx, const vespalib::string &name) { +bool maybe_parse_call(ParseContext &ctx, const vespalib::string &name) { ctx.skip_spaces(); if (ctx.get() == '(') { ctx.eat('('); @@ -717,9 +791,8 @@ size_t parse_symbol(ParseContext &ctx, vespalib::string &name, ParseContext::Inp void parse_symbol_or_call(ParseContext &ctx) { ParseContext::InputMark before_name = ctx.get_input_mark(); vespalib::string name = get_ident(ctx, true); - if (name == "tensor") { - parse_tensor_generator(ctx); - } else if (!try_parse_call(ctx, name)) { + bool was_tensor_generate = ((name == "tensor") && maybe_parse_tensor_generator(ctx)); + if (!was_tensor_generate && !maybe_parse_call(ctx, name)) { size_t id = parse_symbol(ctx, name, before_name); if (name.empty()) { ctx.fail("missing value"); diff --git a/eval/src/vespa/eval/eval/value_type.cpp b/eval/src/vespa/eval/eval/value_type.cpp index 688112bbb8a..211a9c305a3 100644 --- 
a/eval/src/vespa/eval/eval/value_type.cpp +++ b/eval/src/vespa/eval/eval/value_type.cpp @@ -260,6 +260,12 @@ ValueType::from_spec(const vespalib::string &spec) return value_type::from_spec(spec); } +ValueType +ValueType::from_spec(const vespalib::string &spec, std::vector<ValueType::Dimension> &unsorted) +{ + return value_type::from_spec(spec, unsorted); +} + vespalib::string ValueType::to_spec() const { diff --git a/eval/src/vespa/eval/eval/value_type.h b/eval/src/vespa/eval/eval/value_type.h index df9c582aee8..b02053be3cb 100644 --- a/eval/src/vespa/eval/eval/value_type.h +++ b/eval/src/vespa/eval/eval/value_type.h @@ -77,6 +77,7 @@ public: static ValueType double_type() { return ValueType(Type::DOUBLE); } static ValueType tensor_type(std::vector<Dimension> dimensions_in, CellType cell_type = CellType::DOUBLE); static ValueType from_spec(const vespalib::string &spec); + static ValueType from_spec(const vespalib::string &spec, std::vector<ValueType::Dimension> &unsorted); vespalib::string to_spec() const; static ValueType join(const ValueType &lhs, const ValueType &rhs); static CellType unify_cell_types(const ValueType &a, const ValueType &b); diff --git a/eval/src/vespa/eval/eval/value_type_spec.cpp b/eval/src/vespa/eval/eval/value_type_spec.cpp index bbfa6f4fa28..0246800ca2a 100644 --- a/eval/src/vespa/eval/eval/value_type_spec.cpp +++ b/eval/src/vespa/eval/eval/value_type_spec.cpp @@ -176,7 +176,8 @@ CellType parse_cell_type(ParseContext &ctx) { } // namespace vespalib::eval::value_type::<anonymous> ValueType -parse_spec(const char *pos_in, const char *end_in, const char *&pos_out) +parse_spec(const char *pos_in, const char *end_in, const char *&pos_out, + std::vector<ValueType::Dimension> *unsorted) { ParseContext ctx(pos_in, end_in, pos_out); vespalib::string type_name = parse_ident(ctx); @@ -188,6 +189,9 @@ parse_spec(const char *pos_in, const char *end_in, const char *&pos_out) ValueType::CellType cell_type = parse_cell_type(ctx); 
std::vector<ValueType::Dimension> list = parse_dimension_list(ctx); if (!ctx.failed()) { + if (unsorted != nullptr) { + *unsorted = list; + } return ValueType::tensor_type(std::move(list), cell_type); } } else { @@ -208,6 +212,18 @@ from_spec(const vespalib::string &spec) return type; } +ValueType +from_spec(const vespalib::string &spec, std::vector<ValueType::Dimension> &unsorted) +{ + const char *after = nullptr; + const char *end = spec.data() + spec.size(); + ValueType type = parse_spec(spec.data(), end, after, &unsorted); + if (after != end) { + return ValueType::error_type(); + } + return type; +} + vespalib::string to_spec(const ValueType &type) { diff --git a/eval/src/vespa/eval/eval/value_type_spec.h b/eval/src/vespa/eval/eval/value_type_spec.h index f2609f59f32..ff5113c769a 100644 --- a/eval/src/vespa/eval/eval/value_type_spec.h +++ b/eval/src/vespa/eval/eval/value_type_spec.h @@ -6,9 +6,11 @@ namespace vespalib::eval::value_type { -ValueType parse_spec(const char *pos_in, const char *end_in, const char *&pos_out); +ValueType parse_spec(const char *pos_in, const char *end_in, const char *&pos_out, + std::vector<ValueType::Dimension> *unsorted = nullptr); -ValueType from_spec(const vespalib::string &str); +ValueType from_spec(const vespalib::string &spec); +ValueType from_spec(const vespalib::string &spec, std::vector<ValueType::Dimension> &unsorted); vespalib::string to_spec(const ValueType &type); } |