author      Håvard Pettersen <havardpe@oath.com>   2021-03-03 14:38:29 +0000
committer   Håvard Pettersen <havardpe@oath.com>   2021-03-04 18:34:31 +0000
commit      d48fc6fd919be1a21ca19165b6ddfa6171791725 (patch)
tree        a915ae22221dc955b98914496f43a80eb4ff62cf /eval/src/tests
parent      311e77aad06f187c70864a80a0703082f72bb3d8 (diff)
all scalars must be double
and all operation results must be at least float
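
Illustration (not part of this patch): a minimal sketch of the new type rule, using only the ValueType calls exercised by the updated tests below; the header path and the vespalib::eval namespace are assumptions.

// Hypothetical sketch; header path and namespace are assumed, not taken from this commit.
#include <vespa/eval/eval/value_type.h>
#include <cassert>

using vespalib::eval::ValueType;
using vespalib::eval::CellType;

int main() {
    // all scalars must be double: a dimension-less float type is now an error
    assert(ValueType::make_type(CellType::FLOAT, {}).is_error());
    assert(ValueType::from_spec("float").is_error());
    // double scalars and float tensors remain valid
    assert(!ValueType::double_type().is_error());
    assert(!ValueType::make_type(CellType::FLOAT, {{"x", 10}}).is_error());
    return 0;
}

The updated tests below use GenSpec::bad_scalar() to skip cell-type combinations that would otherwise produce such a non-double scalar.
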
Diffstat (limited to 'eval/src/tests')
-rw-r--r--  eval/src/tests/eval/fast_value/fast_value_test.cpp                       |   3
-rw-r--r--  eval/src/tests/eval/gen_spec/gen_spec_test.cpp                           |   6
-rw-r--r--  eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp   |   2
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp                       |   6
-rw-r--r--  eval/src/tests/eval/reference_operations/reference_operations_test.cpp   |   5
-rw-r--r--  eval/src/tests/eval/simple_value/simple_value_test.cpp                   |  14
-rw-r--r--  eval/src/tests/eval/tensor_function/tensor_function_test.cpp             |  16
-rw-r--r--  eval/src/tests/eval/value_codec/value_codec_test.cpp                     |   3
-rw-r--r--  eval/src/tests/eval/value_type/value_type_test.cpp                       | 135
-rw-r--r--  eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp  |   7
-rw-r--r--  eval/src/tests/instruction/generic_concat/generic_concat_test.cpp        |   8
-rw-r--r--  eval/src/tests/instruction/generic_create/generic_create_test.cpp        |   2
-rw-r--r--  eval/src/tests/instruction/generic_join/generic_join_test.cpp            |   8
-rw-r--r--  eval/src/tests/instruction/generic_map/generic_map_test.cpp              |   8
-rw-r--r--  eval/src/tests/instruction/generic_merge/generic_merge_test.cpp          |   8
-rw-r--r--  eval/src/tests/instruction/generic_peek/generic_peek_test.cpp            |   6
-rw-r--r--  eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp        |   5
-rw-r--r--  eval/src/tests/instruction/generic_rename/generic_rename_test.cpp        |   6
-rw-r--r--  eval/src/tests/streamed/value/streamed_value_test.cpp                    |  14
19 files changed, 147 insertions(+), 115 deletions(-)
diff --git a/eval/src/tests/eval/fast_value/fast_value_test.cpp b/eval/src/tests/eval/fast_value/fast_value_test.cpp
index 70c534b2010..2acb6c448c9 100644
--- a/eval/src/tests/eval/fast_value/fast_value_test.cpp
+++ b/eval/src/tests/eval/fast_value/fast_value_test.cpp
@@ -160,7 +160,8 @@ TEST(FastValueBuilderFactoryTest, fast_values_can_be_copied) {
auto factory = FastValueBuilderFactory::get();
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, factory);
std::unique_ptr<Value> copy = factory.copy(*value);
TensorSpec actual = spec_from_value(*copy);
diff --git a/eval/src/tests/eval/gen_spec/gen_spec_test.cpp b/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
index ba169b72489..9d8eb419a67 100644
--- a/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
+++ b/eval/src/tests/eval/gen_spec/gen_spec_test.cpp
@@ -61,9 +61,8 @@ TEST(GenSpecTest, scalar_double) {
EXPECT_EQ(GenSpec(5.0).gen(), scalar_5);
}
-TEST(GenSpecTest, not_scalar_float_just_yet) {
- EXPECT_EQ(GenSpec().cells_float().gen(), scalar_1);
- EXPECT_EQ(GenSpec(5.0).cells_float().gen(), scalar_5);
+TEST(GenSpecTest, scalar_float_is_bad_scalar) {
+ EXPECT_TRUE(GenSpec().cells_float().bad_scalar());
}
//-----------------------------------------------------------------------------
@@ -126,7 +125,6 @@ GenSpec dbl() { return GenSpec().cells_double(); }
TEST(GenSpecTest, value_type) {
EXPECT_EQ(dbl().type().to_spec(), "double");
- EXPECT_EQ(flt().type().to_spec(), "double"); // NB
EXPECT_EQ(dbl().idx("x", 10).type().to_spec(), "tensor(x[10])");
EXPECT_EQ(flt().idx("x", 10).type().to_spec(), "tensor<float>(x[10])");
EXPECT_EQ(dbl().map("y", {}).type().to_spec(), "tensor(y{})");
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index 871a564bfa4..ba73a578f6f 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -80,7 +80,7 @@ struct MyEvalTest : test::EvalSpec::EvalTest {
InterpretedFunction ifun(factory, function, node_types);
InterpretedFunction::Context ictx(ifun);
const Value &result_value = ifun.eval(ictx, params);
- report_result(result_value.is_double(), result_value.as_double(), expected_result, description);
+ report_result(result_value.type().is_double(), result_value.as_double(), expected_result, description);
}
};
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index 89c37af0a83..a5a17ea15a0 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -293,8 +293,8 @@ TEST("require that tensor concat resolves correct type") {
}
TEST("require that tensor cell_cast resolves correct type") {
- TEST_DO(verify("cell_cast(double,float)", "double")); // NB
- TEST_DO(verify("cell_cast(float,double)", "double"));
+ TEST_DO(verify("cell_cast(double,double)", "double"));
+ TEST_DO(verify("cell_cast(double,float)", "error"));
TEST_DO(verify("cell_cast(tensor<double>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),double)", "tensor<double>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
@@ -304,7 +304,7 @@ TEST("require that double only expressions can be detected") {
auto plain_fun = Function::parse("1+2");
auto complex_fun = Function::parse("reduce(a,sum)");
NodeTypes plain_types(*plain_fun, {});
- NodeTypes complex_types(*complex_fun, {ValueType::tensor_type({{"x"}})});
+ NodeTypes complex_types(*complex_fun, {ValueType::make_type(CellType::DOUBLE, {{"x"}})});
EXPECT_TRUE(plain_types.get_type(plain_fun->root()).is_double());
EXPECT_TRUE(complex_types.get_type(complex_fun->root()).is_double());
EXPECT_TRUE(plain_types.all_types_are_double());
diff --git a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
index fdbf375fa3a..2edbefc7717 100644
--- a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
+++ b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
@@ -144,8 +144,9 @@ TEST(ReferenceCellCastTest, cell_cast_works) {
for (CellType from_type: CellTypeUtils::list_types()) {
for (CellType to_type: CellTypeUtils::list_types()) {
for (const auto &gen: gen_list) {
- TensorSpec input = gen.cpy().cells(from_type);
- TensorSpec expect = gen.cpy().cells(to_type);
+ auto input = gen.cpy().cells(from_type);
+ auto expect = gen.cpy().cells(to_type);
+ if (input.bad_scalar() || expect.bad_scalar()) continue;
auto actual = ReferenceOperations::cell_cast(input, to_type);
EXPECT_EQ(actual, expect);
}
diff --git a/eval/src/tests/eval/simple_value/simple_value_test.cpp b/eval/src/tests/eval/simple_value/simple_value_test.cpp
index 974f87a6055..57c71903bf1 100644
--- a/eval/src/tests/eval/simple_value/simple_value_test.cpp
+++ b/eval/src/tests/eval/simple_value/simple_value_test.cpp
@@ -69,7 +69,8 @@ TensorSpec simple_value_join(const TensorSpec &a, const TensorSpec &b, join_fun_
TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
TensorSpec actual = spec_from_value(*value);
EXPECT_EQ(actual, expect);
@@ -80,7 +81,8 @@ TEST(SimpleValueTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
TEST(SimpleValueTest, simple_values_can_be_copied) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, SimpleValueBuilderFactory::get());
std::unique_ptr<Value> copy = SimpleValueBuilderFactory::get().copy(*value);
TensorSpec actual = spec_from_value(*copy);
@@ -131,11 +133,13 @@ TEST(SimpleValueTest, new_generic_join_works_for_simple_values) {
const auto l = join_layouts[i].cpy().seq(N_16ths);
const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::join(lhs, rhs, fun);
auto actual = simple_value_join(lhs, rhs, fun);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
index f1bd900b350..c457f68a614 100644
--- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
+++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
@@ -229,9 +229,9 @@ TEST("require that full tensor reduction works") {
size_t a_id = ctx.add_tensor(ctx.make_tensor_reduce_input());
const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
- EXPECT_EQUAL(ValueType::from_spec("double"), fun.result_type());
+ EXPECT_EQUAL(ValueType::double_type(), fun.result_type());
const Value &result = ctx.eval(fun);
- EXPECT_TRUE(result.is_double());
+ EXPECT_TRUE(result.type().is_double());
EXPECT_EQUAL(21.0, result.as_double());
}
@@ -300,8 +300,8 @@ TEST("require that tensor create works") {
size_t b_id = ctx.add_tensor(ctx.make_double(2.0));
Value::UP my_const = ctx.make_double(3.0);
Value::UP expect = ctx.make_vector({1.0, 2.0, 3.0});
- const auto &a = inject(ValueType::from_spec("double"), a_id, ctx.stash);
- const auto &b = inject(ValueType::from_spec("double"), b_id, ctx.stash);
+ const auto &a = inject(ValueType::double_type(), a_id, ctx.stash);
+ const auto &b = inject(ValueType::double_type(), b_id, ctx.stash);
const auto &c = const_value(*my_const, ctx.stash);
const auto &fun = create(ValueType::from_spec("tensor(x[3])"),
{
@@ -321,8 +321,8 @@ TEST("require that single value tensor peek works") {
size_t b_id = ctx.add_tensor(ctx.make_double(1000.0));
Value::UP my_const = ctx.make_mixed_tensor(1.0, 2.0, 3.0, 4.0);
Value::UP expect = ctx.make_vector({2.0, 3.0, 0.0});
- const auto &a = inject(ValueType::from_spec("double"), a_id, ctx.stash);
- const auto &b = inject(ValueType::from_spec("double"), b_id, ctx.stash);
+ const auto &a = inject(ValueType::double_type(), a_id, ctx.stash);
+ const auto &b = inject(ValueType::double_type(), b_id, ctx.stash);
const auto &t = const_value(*my_const, ctx.stash);
const auto &peek1 = peek(t, {{"x", "foo"}, {"y", a}}, ctx.stash);
const auto &peek2 = peek(t, {{"x", "bar"}, {"y", size_t(0)}}, ctx.stash);
@@ -354,13 +354,13 @@ TEST("require that automatic string conversion tensor peek works") {
EvalCtx ctx(simple_factory);
size_t a_id = ctx.add_tensor(ctx.make_double(1.0));
Value::UP my_const = ctx.make_vector({1.0, 2.0, 3.0}, "x", true);
- const auto &a = inject(ValueType::from_spec("double"), a_id, ctx.stash);
+ const auto &a = inject(ValueType::double_type(), a_id, ctx.stash);
const auto &t = const_value(*my_const, ctx.stash);
const auto &fun = peek(t, {{"x", a}}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_TRUE(fun.result_type().is_double());
const Value &result = ctx.eval(fun);
- EXPECT_TRUE(result.is_double());
+ EXPECT_TRUE(result.type().is_double());
EXPECT_EQUAL(2.0, result.as_double());
}
diff --git a/eval/src/tests/eval/value_codec/value_codec_test.cpp b/eval/src/tests/eval/value_codec/value_codec_test.cpp
index 434ad0b2a53..0bb1bcfb337 100644
--- a/eval/src/tests/eval/value_codec/value_codec_test.cpp
+++ b/eval/src/tests/eval/value_codec/value_codec_test.cpp
@@ -33,7 +33,8 @@ const std::vector<GenSpec> layouts = {
TEST(ValueCodecTest, simple_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, factory);
TensorSpec actual = spec_from_value(*value);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index c1b25d48bf7..e0c90166fa2 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -36,15 +36,8 @@ TEST("require that DOUBLE value type can be created") {
EXPECT_EQUAL(t.dimensions().size(), 0u);
}
-TEST("require that FLOAT value type can be created") {
- ValueType t = ValueType::make_type(CellType::FLOAT, {});
- EXPECT_FALSE(t.is_error());
- EXPECT_TRUE(t.cell_type() == CellType::FLOAT);
- EXPECT_EQUAL(t.dimensions().size(), 0u);
-}
-
TEST("require that TENSOR value type can be created") {
- ValueType t = ValueType::tensor_type({{"x", 10},{"y"}});
+ ValueType t = ValueType::make_type(CellType::DOUBLE, {{"x", 10},{"y"}});
EXPECT_FALSE(t.is_error());
EXPECT_TRUE(t.cell_type() == CellType::DOUBLE);
ASSERT_EQUAL(t.dimensions().size(), 2u);
@@ -55,7 +48,7 @@ TEST("require that TENSOR value type can be created") {
}
TEST("require that float TENSOR value type can be created") {
- ValueType t = ValueType::tensor_type({{"x", 10},{"y"}}, CellType::FLOAT);
+ ValueType t = ValueType::make_type(CellType::FLOAT, {{"x", 10},{"y"}});
EXPECT_FALSE(t.is_error());
EXPECT_TRUE(t.cell_type() == CellType::FLOAT);
ASSERT_EQUAL(t.dimensions().size(), 2u);
@@ -66,7 +59,7 @@ TEST("require that float TENSOR value type can be created") {
}
TEST("require that TENSOR value type sorts dimensions") {
- ValueType t = ValueType::tensor_type({{"x", 10}, {"z", 30}, {"y"}});
+ ValueType t = ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"z", 30}, {"y"}});
EXPECT_FALSE(t.is_error());
EXPECT_TRUE(t.cell_type() == CellType::DOUBLE);
ASSERT_EQUAL(t.dimensions().size(), 3u);
@@ -78,19 +71,16 @@ TEST("require that TENSOR value type sorts dimensions") {
EXPECT_EQUAL(t.dimensions()[2].size, 30u);
}
-TEST("require that 'tensor<float>()' is normalized to 'double'") {
- ValueType t = ValueType::tensor_type({}, CellType::FLOAT);
- EXPECT_FALSE(t.is_error());
- EXPECT_TRUE(t.cell_type() == CellType::DOUBLE);
- EXPECT_EQUAL(t.dimensions().size(), 0u);
+TEST("require that non-double scalar values are not allowed") {
+ EXPECT_TRUE(ValueType::make_type(CellType::FLOAT, {}).is_error());
}
TEST("require that use of zero-size dimensions result in error types") {
- EXPECT_TRUE(ValueType::tensor_type({{"x", 0}}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::DOUBLE, {{"x", 0}}).is_error());
}
TEST("require that duplicate dimension names result in error types") {
- EXPECT_TRUE(ValueType::tensor_type({{"x"}, {"x"}}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"x"}}).is_error());
}
//-----------------------------------------------------------------------------
@@ -116,18 +106,17 @@ void verify_not_equal(const ValueType &a, const ValueType &b) {
TEST("require that value types can be compared") {
TEST_DO(verify_equal(ValueType::error_type(), ValueType::error_type()));
TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::double_type()));
- TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::tensor_type({{"x"}})));
+ TEST_DO(verify_not_equal(ValueType::error_type(), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
TEST_DO(verify_equal(ValueType::double_type(), ValueType::double_type()));
- TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::make_type(CellType::FLOAT, {})));
- TEST_DO(verify_equal(ValueType::double_type(), ValueType::tensor_type({})));
- TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::tensor_type({{"x"}})));
- TEST_DO(verify_equal(ValueType::tensor_type({{"x"}, {"y"}}), ValueType::tensor_type({{"y"}, {"x"}})));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x"}, {"y"}}), ValueType::tensor_type({{"x"}, {"y"}, {"z"}})));
- TEST_DO(verify_equal(ValueType::tensor_type({{"x", 10}, {"y", 20}}), ValueType::tensor_type({{"y", 20}, {"x", 10}})));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x", 10}, {"y", 20}}), ValueType::tensor_type({{"x", 10}, {"y", 10}})));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x", 10}}), ValueType::tensor_type({{"x"}})));
- TEST_DO(verify_equal(ValueType::tensor_type({{"x", 10}}, CellType::FLOAT), ValueType::tensor_type({{"x", 10}}, CellType::FLOAT)));
- TEST_DO(verify_not_equal(ValueType::tensor_type({{"x", 10}}, CellType::DOUBLE), ValueType::tensor_type({{"x", 10}}, CellType::FLOAT)));
+ TEST_DO(verify_equal(ValueType::double_type(), ValueType::make_type(CellType::DOUBLE, {})));
+ TEST_DO(verify_not_equal(ValueType::double_type(), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y"}}), ValueType::make_type(CellType::DOUBLE, {{"y"}, {"x"}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y"}}), ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y"}, {"z"}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 20}}), ValueType::make_type(CellType::DOUBLE, {{"y", 20}, {"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 20}}), ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
}
//-----------------------------------------------------------------------------
@@ -135,46 +124,45 @@ TEST("require that value types can be compared") {
TEST("require that value type can make spec") {
EXPECT_EQUAL("error", ValueType::error_type().to_spec());
EXPECT_EQUAL("double", ValueType::double_type().to_spec());
- EXPECT_EQUAL("float", ValueType::make_type(CellType::FLOAT, {}).to_spec());
- EXPECT_EQUAL("double", ValueType::tensor_type({}).to_spec());
- EXPECT_EQUAL("double", ValueType::tensor_type({}, CellType::FLOAT).to_spec());
- EXPECT_EQUAL("tensor(x{})", ValueType::tensor_type({{"x"}}).to_spec());
- EXPECT_EQUAL("tensor(y[10])", ValueType::tensor_type({{"y", 10}}).to_spec());
- EXPECT_EQUAL("tensor(x{},y[10],z[5])", ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}).to_spec());
- EXPECT_EQUAL("tensor<float>(x{})", ValueType::tensor_type({{"x"}}, CellType::FLOAT).to_spec());
- EXPECT_EQUAL("tensor<float>(y[10])", ValueType::tensor_type({{"y", 10}}, CellType::FLOAT).to_spec());
- EXPECT_EQUAL("tensor<float>(x{},y[10],z[5])", ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}, CellType::FLOAT).to_spec());
+ EXPECT_EQUAL("error", ValueType::make_type(CellType::FLOAT, {}).to_spec());
+ EXPECT_EQUAL("double", ValueType::make_type(CellType::DOUBLE, {}).to_spec());
+ EXPECT_EQUAL("tensor(x{})", ValueType::make_type(CellType::DOUBLE, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor(y[10])", ValueType::make_type(CellType::DOUBLE, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor(x{},y[10],z[5])", ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
+ EXPECT_EQUAL("tensor<float>(x{})", ValueType::make_type(CellType::FLOAT, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor<float>(y[10])", ValueType::make_type(CellType::FLOAT, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor<float>(x{},y[10],z[5])", ValueType::make_type(CellType::FLOAT, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
}
//-----------------------------------------------------------------------------
TEST("require that value type spec can be parsed") {
- EXPECT_EQUAL(ValueType::double_type(), ValueType::from_spec("double"));
- EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {}), ValueType::from_spec("float"));
- EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec("tensor()"));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}}), ValueType::from_spec("tensor(x{})"));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec("tensor(y[10])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}), ValueType::from_spec("tensor(x{},y[10],z[5])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec("tensor<double>(y[10])"));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}, CellType::FLOAT), ValueType::from_spec("tensor<float>(y[10])"));
+ EXPECT_EQUAL(ValueType::double_type(), type("double"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type("tensor()"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type("tensor<double>()"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}}), type("tensor(x{})"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type("tensor(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}), type("tensor(x{},y[10],z[5])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type("tensor<double>(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {{"y", 10}}), type("tensor<float>(y[10])"));
}
TEST("require that value type spec can be parsed with extra whitespace") {
- EXPECT_EQUAL(ValueType::double_type(), ValueType::from_spec(" double "));
- EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {}), ValueType::from_spec(" float "));
- EXPECT_EQUAL(ValueType::tensor_type({}), ValueType::from_spec(" tensor ( ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}}), ValueType::from_spec(" tensor ( x { } ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec(" tensor ( y [ 10 ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}),
- ValueType::from_spec(" tensor ( x { } , y [ 10 ] , z [ 5 ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}), ValueType::from_spec(" tensor < double > ( y [ 10 ] ) "));
- EXPECT_EQUAL(ValueType::tensor_type({{"y", 10}}, CellType::FLOAT), ValueType::from_spec(" tensor < float > ( y [ 10 ] ) "));
+ EXPECT_EQUAL(ValueType::double_type(), type(" double "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type(" tensor ( ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {}), type(" tensor < double > ( ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}}), type(" tensor ( x { } ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type(" tensor ( y [ 10 ] ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}),
+ type(" tensor ( x { } , y [ 10 ] , z [ 5 ] ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type(" tensor < double > ( y [ 10 ] ) "));
+ EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {{"y", 10}}), type(" tensor < float > ( y [ 10 ] ) "));
}
TEST("require that the unsorted dimension list can be obtained when parsing type spec") {
std::vector<ValueType::Dimension> unsorted;
auto type = ValueType::from_spec("tensor(y[10],z[5],x{})", unsorted);
- EXPECT_EQUAL(ValueType::tensor_type({{"x"}, {"y", 10}, {"z", 5}}), type);
+ EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}), type);
ASSERT_EQUAL(unsorted.size(), 3u);
EXPECT_EQUAL(unsorted[0].name, "y");
EXPECT_EQUAL(unsorted[0].size, 10u);
@@ -207,6 +195,7 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec(" ").is_error());
EXPECT_TRUE(ValueType::from_spec("error").is_error());
EXPECT_TRUE(ValueType::from_spec("any").is_error());
+ EXPECT_TRUE(ValueType::from_spec("float").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<double>").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor() tensor()").is_error());
@@ -224,7 +213,8 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[10])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(z[])").is_error());
- EXPECT_TRUE(ValueType::from_spec("tensor<float16>(x[10])").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<float>()").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<int7>(x[10])").is_error());
}
struct ParseResult {
@@ -247,7 +237,7 @@ ParseResult::~ParseResult() = default;
TEST("require that we can parse a partial string into a type with the low-level API") {
ParseResult result("tensor(a[5]) , ");
- EXPECT_EQUAL(result.type, ValueType::tensor_type({{"a", 5}}));
+ EXPECT_EQUAL(result.type, ValueType::make_type(CellType::DOUBLE, {{"a", 5}}));
ASSERT_TRUE(result.after_inside());
EXPECT_EQUAL(*result.after, ',');
}
@@ -315,7 +305,7 @@ void verify_predicates(const ValueType &type,
{
EXPECT_EQUAL(type.is_error(), expect_error);
EXPECT_EQUAL(type.is_double(), expect_double);
- EXPECT_EQUAL(type.is_tensor(), expect_tensor);
+ EXPECT_EQUAL(type.has_dimensions(), expect_tensor);
EXPECT_EQUAL(type.is_sparse(), expect_sparse);
EXPECT_EQUAL(type.is_dense(), expect_dense);
}
@@ -507,8 +497,12 @@ void verify_cell_cast(const ValueType &type) {
if (type.is_error()) {
EXPECT_TRUE(res_type.is_error());
EXPECT_EQUAL(res_type, type);
- } else if (type.is_scalar()) {
- EXPECT_TRUE(res_type.is_double()); // NB
+ } else if (type.is_double()) {
+ if (cell_type == CellType::DOUBLE) {
+ EXPECT_TRUE(res_type.is_double());
+ } else {
+ EXPECT_TRUE(res_type.is_error());
+ }
} else {
EXPECT_FALSE(res_type.is_error());
EXPECT_EQUAL(int(res_type.cell_type()), int(cell_type));
@@ -519,7 +513,6 @@ void verify_cell_cast(const ValueType &type) {
TEST("require that value type cell cast works correctly") {
TEST_DO(verify_cell_cast(type("error")));
- TEST_DO(verify_cell_cast(type("float")));
TEST_DO(verify_cell_cast(type("double")));
TEST_DO(verify_cell_cast(type("tensor<float>(x[10])")));
TEST_DO(verify_cell_cast(type("tensor<double>(x[10])")));
@@ -548,4 +541,24 @@ TEST("require that cell type name recognition is strict") {
EXPECT_FALSE(value_type::cell_type_from_name("").has_value());
}
+TEST("require that map type inference works as expected") {
+ EXPECT_EQUAL(type("error").map(), type("error"));
+ EXPECT_EQUAL(type("double").map(), type("double"));
+ EXPECT_EQUAL(type("tensor(x[10])").map(), type("tensor(x[10])"));
+ EXPECT_EQUAL(type("tensor<float>(x{})").map(), type("tensor<float>(x{})"));
+}
+
+TEST("require that peek type inference works as expected") {
+ auto input1 = type("tensor(a[2],b{},c[3],d{},e[5])");
+ auto input2 = type("tensor<float>(a[2],b{},c[3],d{},e[5])");
+ EXPECT_EQUAL(type("error").peek({}), type("error"));
+ EXPECT_EQUAL(type("double").peek({}), type("error"));
+ EXPECT_EQUAL(input1.peek({}), type("error"));
+ EXPECT_EQUAL(input1.peek({"x"}), type("error"));
+ EXPECT_EQUAL(input1.peek({"a", "c", "e"}), type("tensor(b{},d{})"));
+ EXPECT_EQUAL(input2.peek({"b", "d"}), type("tensor<float>(a[2],c[3],e[5])"));
+ EXPECT_EQUAL(input1.peek({"a", "b", "c", "d", "e"}), type("double"));
+ EXPECT_EQUAL(input2.peek({"a", "b", "c", "d", "e"}), type("double"));
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp b/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp
index eb156bbe531..95db66a3b13 100644
--- a/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp
+++ b/eval/src/tests/instruction/generic_cell_cast/generic_cell_cast_test.cpp
@@ -47,10 +47,13 @@ void test_generic_cell_cast_with(const ValueBuilderFactory &factory) {
for (const auto &layout : layouts) {
for (CellType in_type: CellTypeUtils::list_types()) {
for (CellType out_type: CellTypeUtils::list_types()) {
- TensorSpec lhs = layout.cpy().cells(in_type);
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ auto lhs = layout.cpy().cells(in_type);
+ auto gen_expect = layout.cpy().cells(out_type);
+ if (lhs.bad_scalar() || gen_expect.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::cell_cast(lhs, out_type);
auto actual = perform_generic_cell_cast(lhs, out_type, factory);
+ EXPECT_EQ(expect, gen_expect);
EXPECT_EQ(actual, expect);
}
}
diff --git a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
index 6b6a803a4b1..a74b0f99841 100644
--- a/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
+++ b/eval/src/tests/instruction/generic_concat/generic_concat_test.cpp
@@ -80,10 +80,12 @@ void test_generic_concat_with(const ValueBuilderFactory &factory) {
const auto l = concat_layouts[i];
const auto r = concat_layouts[i+1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
- SCOPED_TRACE(fmt("\n===\nin LHS: %s\nin RHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("\n===\nin LHS: %s\nin RHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto actual = perform_generic_concat(lhs, rhs, "y", factory);
auto expect = ReferenceOperations::concat(lhs, rhs, "y");
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_create/generic_create_test.cpp b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
index 843a292612d..9389c8401e9 100644
--- a/eval/src/tests/instruction/generic_create/generic_create_test.cpp
+++ b/eval/src/tests/instruction/generic_create/generic_create_test.cpp
@@ -92,7 +92,7 @@ TensorSpec perform_generic_create(const TensorSpec &a, const ValueBuilderFactory
void test_generic_create_with(const ValueBuilderFactory &factory) {
for (const auto &layout : create_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec full = layout.cpy().cells(ct);
+ auto full = layout.cpy().cells(ct);
auto actual = perform_generic_create(full, factory);
auto expect = reference_create(full);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_join/generic_join_test.cpp b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
index 114881e6bee..a4f645c5dee 100644
--- a/eval/src/tests/instruction/generic_join/generic_join_test.cpp
+++ b/eval/src/tests/instruction/generic_join/generic_join_test.cpp
@@ -109,11 +109,13 @@ TEST(GenericJoinTest, generic_join_works_for_simple_and_fast_values) {
const auto &l = join_layouts[i];
const auto &r = join_layouts[i+1];
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Div::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::join(lhs, rhs, fun);
auto simple = perform_generic_join(lhs, rhs, fun, SimpleValueBuilderFactory::get());
auto fast = perform_generic_join(lhs, rhs, fun, FastValueBuilderFactory::get());
diff --git a/eval/src/tests/instruction/generic_map/generic_map_test.cpp b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
index 56405eefdde..bfa7154968d 100644
--- a/eval/src/tests/instruction/generic_map/generic_map_test.cpp
+++ b/eval/src/tests/instruction/generic_map/generic_map_test.cpp
@@ -36,8 +36,7 @@ const std::vector<GenSpec> map_layouts = {
TensorSpec perform_generic_map(const TensorSpec &a, map_fun_t func, const ValueBuilderFactory &factory)
{
auto lhs = value_from_spec(a, factory);
- // XXX for now:
- auto res_type = lhs->type();
+ auto res_type = lhs->type().map();
auto my_op = GenericMap::make_instruction(res_type, lhs->type(), func);
InterpretedFunction::EvalSingle single(factory, my_op);
return spec_from_value(single.eval(std::vector<Value::CREF>({*lhs})));
@@ -46,9 +45,10 @@ TensorSpec perform_generic_map(const TensorSpec &a, map_fun_t func, const ValueB
void test_generic_map_with(const ValueBuilderFactory &factory) {
for (const auto &layout : map_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec lhs = layout.cpy().cells(ct);
+ auto lhs = layout.cpy().cells(ct);
+ if (lhs.bad_scalar()) continue;
for (auto func : {operation::Floor::f, operation::Fabs::f, operation::Square::f, operation::Inv::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::map(lhs, func);
auto actual = perform_generic_map(lhs, func, factory);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
index 9fde59a7c86..701fb26d3ff 100644
--- a/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
+++ b/eval/src/tests/instruction/generic_merge/generic_merge_test.cpp
@@ -52,10 +52,12 @@ void test_generic_merge_with(const ValueBuilderFactory &factory) {
const auto l = merge_layouts[i];
const auto r = merge_layouts[i+1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
for (auto fun: {operation::Add::f, operation::Mul::f, operation::Sub::f, operation::Max::f}) {
auto expect = ReferenceOperations::merge(lhs, rhs, fun);
auto actual = perform_generic_merge(lhs, rhs, fun, factory);
diff --git a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
index 073df8be7e9..4b773b07734 100644
--- a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
+++ b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
@@ -150,7 +150,7 @@ void verify_peek_equal(const TensorSpec &input,
reduce_dims.push_back(dim_name);
}
if (reduce_dims.empty()) return;
- ValueType result_type = param_type.reduce(reduce_dims);
+ ValueType result_type = param_type.peek(reduce_dims);
auto expect = reference_peek(input, spec);
SCOPED_TRACE(fmt("peek input: %s\n peek spec: %s\n peek result %s\n",
input.to_string().c_str(),
@@ -195,8 +195,8 @@ void fill_dims_and_check(const TensorSpec &input,
void test_generic_peek_with(const ValueBuilderFactory &factory) {
for (const auto &layout : peek_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec input = layout.cpy().cells(ct);
- ValueType input_type = ValueType::from_spec(input.type());
+ auto input = layout.cpy().cells(ct);
+ ValueType input_type = input.type();
const auto &dims = input_type.dimensions();
PeekSpec spec;
fill_dims_and_check(input, spec, dims, factory);
diff --git a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
index 3babe80766a..e3eea84fdea 100644
--- a/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
+++ b/eval/src/tests/instruction/generic_reduce/generic_reduce_test.cpp
@@ -72,8 +72,9 @@ TEST(GenericReduceTest, sparse_reduce_plan_can_be_created) {
void test_generic_reduce_with(const ValueBuilderFactory &factory) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec input = layout.cpy().cells(ct);
- SCOPED_TRACE(fmt("tensor type: %s, num_cells: %zu", input.type().c_str(), input.cells().size()));
+ auto input = layout.cpy().cells(ct);
+ if (input.bad_scalar()) continue;
+ SCOPED_TRACE(fmt("tensor type: %s, num_cells: %zu", input.gen().type().c_str(), input.gen().cells().size()));
for (Aggr aggr: {Aggr::SUM, Aggr::AVG, Aggr::MIN, Aggr::MAX}) {
SCOPED_TRACE(fmt("aggregator: %s", AggrNames::name_of(aggr)->c_str()));
auto t = layout.type();
diff --git a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
index 4edf2a0ca87..ca14149f1ff 100644
--- a/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
+++ b/eval/src/tests/instruction/generic_rename/generic_rename_test.cpp
@@ -112,13 +112,13 @@ TensorSpec perform_generic_rename(const TensorSpec &a,
void test_generic_rename_with(const ValueBuilderFactory &factory) {
for (const auto &layout : rename_layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec lhs = layout.cpy().cells(ct);
- ValueType lhs_type = ValueType::from_spec(lhs.type());
+ auto lhs = layout.cpy().cells(ct);
+ ValueType lhs_type = lhs.type();
for (const auto & from_to : rename_from_to) {
ValueType renamed_type = lhs_type.rename(from_to.from, from_to.to);
if (renamed_type.is_error()) continue;
// printf("type %s -> %s\n", lhs_type.to_spec().c_str(), renamed_type.to_spec().c_str());
- SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\n===\n", lhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::rename(lhs, from_to.from, from_to.to);
auto actual = perform_generic_rename(lhs, from_to, factory);
EXPECT_EQ(actual, expect);
diff --git a/eval/src/tests/streamed/value/streamed_value_test.cpp b/eval/src/tests/streamed/value/streamed_value_test.cpp
index a750ee88667..bb286dbfdc8 100644
--- a/eval/src/tests/streamed/value/streamed_value_test.cpp
+++ b/eval/src/tests/streamed/value/streamed_value_test.cpp
@@ -69,7 +69,8 @@ TensorSpec streamed_value_join(const TensorSpec &a, const TensorSpec &b, join_fu
TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
TensorSpec actual = spec_from_value(*value);
EXPECT_EQ(actual, expect);
@@ -80,7 +81,8 @@ TEST(StreamedValueTest, streamed_values_can_be_converted_from_and_to_tensor_spec
TEST(StreamedValueTest, streamed_values_can_be_copied) {
for (const auto &layout: layouts) {
for (CellType ct : CellTypeUtils::list_types()) {
- TensorSpec expect = layout.cpy().cells(ct);
+ auto expect = layout.cpy().cells(ct);
+ if (expect.bad_scalar()) continue;
std::unique_ptr<Value> value = value_from_spec(expect, StreamedValueBuilderFactory::get());
std::unique_ptr<Value> copy = StreamedValueBuilderFactory::get().copy(*value);
TensorSpec actual = spec_from_value(*copy);
@@ -131,11 +133,13 @@ TEST(StreamedValueTest, new_generic_join_works_for_streamed_values) {
const auto l = join_layouts[i].cpy().seq(N_16ths);
const auto r = join_layouts[i + 1].cpy().seq(N_16ths);
for (CellType lct : CellTypeUtils::list_types()) {
- TensorSpec lhs = l.cpy().cells(lct);
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
for (CellType rct : CellTypeUtils::list_types()) {
- TensorSpec rhs = r.cpy().cells(rct);
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
for (auto fun: {operation::Add::f, operation::Sub::f, operation::Mul::f, operation::Max::f}) {
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
auto expect = ReferenceOperations::join(lhs, rhs, fun);
auto actual = streamed_value_join(lhs, rhs, fun);
EXPECT_EQ(actual, expect);