author    Håvard Pettersen <havardpe@oath.com>  2021-04-07 13:25:10 +0000
committer Håvard Pettersen <havardpe@oath.com>  2021-04-09 08:53:27 +0000
commit    030586a5052baac537bc152b9ede47ad0fd44009 (patch)
tree      1d7d8f440b4ebad9b3e5af2641b6dee037b76ea1 /eval
parent    248b9d5278345c4932b6cc9f598e3943be44b850 (diff)
add more type resolving tests with new cell types
Diffstat (limited to 'eval')
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp   51
-rw-r--r--  eval/src/tests/eval/value_type/value_type_test.cpp   190
2 files changed, 221 insertions(+), 20 deletions(-)
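
Note: the expectations added below follow the cell-type decay rule used by value type resolution: the small cell types bfloat16 and int8 decay to float whenever cell values are computed (map/reduce/join/merge), while operations that only move or cast cells (rename, partial peek, cell_cast, tensor create/lambda, concat of equal cell types) preserve the cell type. The following is a minimal sketch of that rule, not part of this patch; the helper names decay and unify and the local CellType enum are hypothetical stand-ins for the real ValueType machinery.

#include <cassert>

enum class CellType { DOUBLE, FLOAT, BFLOAT16, INT8 };

// Small cell types decay to float when cell values are computed.
CellType decay(CellType ct) {
    return (ct == CellType::BFLOAT16 || ct == CellType::INT8) ? CellType::FLOAT : ct;
}

// Two tensor operands resolve to the largest decayed cell type (double wins
// over float). A scalar double operand is handled separately: the result
// keeps the decayed cell type of the tensor operand.
CellType unify(CellType a, CellType b) {
    a = decay(a);
    b = decay(b);
    return (a == CellType::DOUBLE || b == CellType::DOUBLE) ? CellType::DOUBLE
                                                            : CellType::FLOAT;
}

int main() {
    // matches: reduce(tensor<int8>(x{}),sum,x) -> double-ish float decay
    assert(decay(CellType::INT8) == CellType::FLOAT);
    // matches: tensor<bfloat16>(x[5]) op tensor(x[5]) -> tensor(x[5])
    assert(unify(CellType::BFLOAT16, CellType::DOUBLE) == CellType::DOUBLE);
    // matches: tensor<bfloat16>(x[5]) op tensor<int8>(x[5]) -> tensor<float>(x[5])
    assert(unify(CellType::BFLOAT16, CellType::INT8) == CellType::FLOAT);
    return 0;
}
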
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index a5a17ea15a0..504f66ac717 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -63,6 +63,8 @@ TEST("require that input parameters preserve their type") {
TEST_DO(verify("tensor()", "double"));
TEST_DO(verify("tensor(x{},y[10],z[5])", "tensor(x{},y[10],z[5])"));
TEST_DO(verify("tensor<float>(x{},y[10],z[5])", "tensor<float>(x{},y[10],z[5])"));
+ TEST_DO(verify("tensor<bfloat16>(x{},y[10],z[5])", "tensor<bfloat16>(x{},y[10],z[5])"));
+ TEST_DO(verify("tensor<int8>(x{},y[10],z[5])", "tensor<int8>(x{},y[10],z[5])"));
}
TEST("require that if resolves to the appropriate type") {
@@ -74,7 +76,12 @@ TEST("require that if resolves to the appropriate type") {
TEST_DO(verify("if(double,tensor(a{}),tensor(a{}))", "tensor(a{})"));
TEST_DO(verify("if(double,tensor(a[2]),tensor(a[2]))", "tensor(a[2])"));
TEST_DO(verify("if(double,tensor<float>(a[2]),tensor<float>(a[2]))", "tensor<float>(a[2])"));
+ TEST_DO(verify("if(double,tensor<bfloat16>(a[2]),tensor<bfloat16>(a[2]))", "tensor<bfloat16>(a[2])"));
+ TEST_DO(verify("if(double,tensor<int8>(a[2]),tensor<int8>(a[2]))", "tensor<int8>(a[2])"));
TEST_DO(verify("if(double,tensor(a[2]),tensor<float>(a[2]))", "error"));
+ TEST_DO(verify("if(double,tensor<float>(a[2]),tensor<bfloat16>(a[2]))", "error"));
+ TEST_DO(verify("if(double,tensor<float>(a[2]),tensor<int8>(a[2]))", "error"));
+ TEST_DO(verify("if(double,tensor<bfloat16>(a[2]),tensor<int8>(a[2]))", "error"));
TEST_DO(verify("if(double,tensor(a[2]),tensor(a[3]))", "error"));
TEST_DO(verify("if(double,tensor(a[2]),tensor(a{}))", "error"));
TEST_DO(verify("if(double,tensor(a{}),tensor(b{}))", "error"));
@@ -94,8 +101,14 @@ TEST("require that reduce resolves correct type") {
TEST_DO(verify("reduce(tensor(x{},y{},z{}),sum,a,b,c)", "error"));
TEST_DO(verify("reduce(tensor(x{}),sum,x)", "double"));
TEST_DO(verify("reduce(tensor<float>(x{},y{},z{}),sum,x,z)", "tensor<float>(y{})"));
+ TEST_DO(verify("reduce(tensor<bfloat16>(x{},y{},z{}),sum,x,z)", "tensor<float>(y{})"));
+ TEST_DO(verify("reduce(tensor<int8>(x{},y{},z{}),sum,x,z)", "tensor<float>(y{})"));
TEST_DO(verify("reduce(tensor<float>(x{}),sum,x)", "double"));
TEST_DO(verify("reduce(tensor<float>(x{}),sum)", "double"));
+ TEST_DO(verify("reduce(tensor<bfloat16>(x{}),sum,x)", "double"));
+ TEST_DO(verify("reduce(tensor<bfloat16>(x{}),sum)", "double"));
+ TEST_DO(verify("reduce(tensor<int8>(x{}),sum,x)", "double"));
+ TEST_DO(verify("reduce(tensor<int8>(x{}),sum)", "double"));
}
TEST("require that rename resolves correct type") {
@@ -111,6 +124,8 @@ TEST("require that rename resolves correct type") {
TEST_DO(verify("rename(tensor(x{},y[1],z[5]),(x,z),(z,x))", "tensor(z{},y[1],x[5])"));
TEST_DO(verify("rename(tensor(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor(a{},b[1],c[5])"));
TEST_DO(verify("rename(tensor<float>(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor<float>(a{},b[1],c[5])"));
+ TEST_DO(verify("rename(tensor<bfloat16>(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor<bfloat16>(a{},b[1],c[5])"));
+ TEST_DO(verify("rename(tensor<int8>(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor<int8>(a{},b[1],c[5])"));
}
vespalib::string strfmt(const char *pattern, const char *a) {
@@ -126,6 +141,8 @@ void verify_op1(const char *pattern) {
TEST_DO(verify(strfmt(pattern, "double"), "double"));
TEST_DO(verify(strfmt(pattern, "tensor(x{},y[10],z[1])"), "tensor(x{},y[10],z[1])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x{},y[10],z[1])"), "tensor<float>(x{},y[10],z[1])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x{},y[10],z[1])"), "tensor<float>(x{},y[10],z[1])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x{},y[10],z[1])"), "tensor<float>(x{},y[10],z[1])"));
}
void verify_op2(const char *pattern) {
@@ -146,6 +163,15 @@ void verify_op2(const char *pattern) {
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor<float>(x[5])"), "tensor<float>(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "double"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "double"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor<int8>(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "double", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
}
TEST("require that various operations resolve appropriate type") {
@@ -223,7 +249,11 @@ TEST("require that merge resolves to the appropriate type") {
TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor(x[3])"), "error"));
TEST_DO(verify(strfmt(pattern, "tensor(x{})", "tensor(x[5])"), "error"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor<float>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor<float>(x[5])"), "tensor(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "double"), "error"));
}
@@ -234,6 +264,8 @@ TEST("require that static tensor lambda resolves correct type") {
TEST_DO(verify("tensor(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])"));
TEST_DO(verify("tensor<double>(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])"));
TEST_DO(verify("tensor<float>(x[5],y[10],z[15])(1.0)", "tensor<float>(x[5],y[10],z[15])"));
+ TEST_DO(verify("tensor<bfloat16>(x[5],y[10],z[15])(1.0)", "tensor<bfloat16>(x[5],y[10],z[15])"));
+ TEST_DO(verify("tensor<int8>(x[5],y[10],z[15])(1.0)", "tensor<int8>(x[5],y[10],z[15])"));
}
TEST("require that tensor create resolves correct type") {
@@ -241,6 +273,8 @@ TEST("require that tensor create resolves correct type") {
TEST_DO(verify("tensor(x{}):{{x:a}:double,{x:b}:double,{x:c}:double}", "tensor(x{})"));
TEST_DO(verify("tensor(x{},y[2]):{{x:a,y:0}:double,{x:a,y:1}:double}", "tensor(x{},y[2])"));
TEST_DO(verify("tensor<float>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<float>(x[3])"));
+ TEST_DO(verify("tensor<bfloat16>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<bfloat16>(x[3])"));
+ TEST_DO(verify("tensor<int8>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<int8>(x[3])"));
TEST_DO(verify("tensor(x[3]):{{x:0}:double+double,{x:1}:double-double,{x:2}:double/double}", "tensor(x[3])"));
TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:reduce(tensor(x[2]),sum),{x:2}:double}", "tensor(x[3])"));
TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:tensor(x[2]),{x:2}:double}", "error"));
@@ -251,6 +285,8 @@ TEST("require that dynamic tensor lambda resolves correct type") {
TEST_DO(verify("tensor(x[3])(error)", "error"));
TEST_DO(verify("tensor(x[3])(double)", "tensor(x[3])"));
TEST_DO(verify("tensor<float>(x[3])(double)", "tensor<float>(x[3])"));
+ TEST_DO(verify("tensor<bfloat16>(x[3])(double)", "tensor<bfloat16>(x[3])"));
+ TEST_DO(verify("tensor<int8>(x[3])(double)", "tensor<int8>(x[3])"));
TEST_DO(verify("tensor(x[3])(tensor(x[2]))", "error"));
TEST_DO(verify("tensor(x[3])(reduce(tensor(x[2])+tensor(x[4]),sum))", "error"));
}
@@ -273,11 +309,15 @@ TEST("require that tensor peek resolves correct type") {
TEST_DO(verify("tensor<float>(x[3]){x:3}", "error"));
TEST_DO(verify("tensor<float>(x{}){x:1}", "double"));
TEST_DO(verify("tensor<float>(x{}){x:foo}", "double"));
+ TEST_DO(verify("tensor<bfloat16>(x{}){x:foo}", "double"));
+ TEST_DO(verify("tensor<int8>(x{}){x:foo}", "double"));
TEST_DO(verify("tensor<float>(x{}){x:(double)}", "double"));
TEST_DO(verify("tensor<float>(x{}){x:(tensor(x[3]))}", "error"));
TEST_DO(verify("tensor<float>(x{},y[3]){x:foo,y:2}", "double"));
TEST_DO(verify("tensor<float>(x{},y[3]){x:foo}", "tensor<float>(y[3])"));
TEST_DO(verify("tensor<float>(x{},y[3]){y:2}", "tensor<float>(x{})"));
+ TEST_DO(verify("tensor<bfloat16>(x{},y[3]){y:2}", "tensor<bfloat16>(x{})"));
+ TEST_DO(verify("tensor<int8>(x{},y[3]){y:2}", "tensor<int8>(x{})"));
}
TEST("require that tensor concat resolves correct type") {
@@ -290,6 +330,15 @@ TEST("require that tensor concat resolves correct type") {
TEST_DO(verify("concat(tensor<float>(x[2]),tensor<float>(x[3]),x)", "tensor<float>(x[5])"));
TEST_DO(verify("concat(tensor<float>(x[2]),tensor(x[3]),x)", "tensor(x[5])"));
TEST_DO(verify("concat(tensor<float>(x[2]),double,x)", "tensor<float>(x[3])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),tensor<bfloat16>(x[3]),x)", "tensor<bfloat16>(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),tensor<float>(x[3]),x)", "tensor<float>(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),tensor(x[3]),x)", "tensor(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),double,x)", "tensor<bfloat16>(x[3])"));
+ TEST_DO(verify("concat(tensor<int8>(x[3]),tensor<int8>(x[2]),x)", "tensor<int8>(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[3]),tensor<int8>(x[2]),x)", "tensor<float>(x[5])"));
+ TEST_DO(verify("concat(tensor<float>(x[3]),tensor<int8>(x[2]),x)", "tensor<float>(x[5])"));
+ TEST_DO(verify("concat(tensor(x[3]),tensor<int8>(x[2]),x)", "tensor(x[5])"));
+ TEST_DO(verify("concat(double,tensor<int8>(x[2]),x)", "tensor<int8>(x[3])"));
}
TEST("require that tensor cell_cast resolves correct type") {
@@ -298,6 +347,8 @@ TEST("require that tensor cell_cast resolves correct type") {
TEST_DO(verify("cell_cast(tensor<double>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),double)", "tensor<double>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
+ TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),bfloat16)", "tensor<bfloat16>(x{},y[5])"));
+ TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),int8)", "tensor<int8>(x{},y[5])"));
}
TEST("require that double only expressions can be detected") {
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index a2b25a12b4b..9aec613f507 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -2,10 +2,13 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/eval/eval/value_type.h>
#include <vespa/eval/eval/value_type_spec.h>
+#include <vespa/eval/eval/int8float.h>
+#include <vespa/vespalib/util/bfloat16.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <ostream>
+using vespalib::BFloat16;
using namespace vespalib::eval;
const size_t npos = ValueType::Dimension::npos;
@@ -58,6 +61,28 @@ TEST("require that float TENSOR value type can be created") {
EXPECT_EQUAL(t.dimensions()[1].size, npos);
}
+TEST("require that bfloat16 TENSOR value type can be created") {
+ ValueType t = ValueType::make_type(CellType::BFLOAT16, {{"x", 10},{"y"}});
+ EXPECT_FALSE(t.is_error());
+ EXPECT_TRUE(t.cell_type() == CellType::BFLOAT16);
+ ASSERT_EQUAL(t.dimensions().size(), 2u);
+ EXPECT_EQUAL(t.dimensions()[0].name, "x");
+ EXPECT_EQUAL(t.dimensions()[0].size, 10u);
+ EXPECT_EQUAL(t.dimensions()[1].name, "y");
+ EXPECT_EQUAL(t.dimensions()[1].size, npos);
+}
+
+TEST("require that int8 TENSOR value type can be created") {
+ ValueType t = ValueType::make_type(CellType::INT8, {{"x", 10},{"y"}});
+ EXPECT_FALSE(t.is_error());
+ EXPECT_TRUE(t.cell_type() == CellType::INT8);
+ ASSERT_EQUAL(t.dimensions().size(), 2u);
+ EXPECT_EQUAL(t.dimensions()[0].name, "x");
+ EXPECT_EQUAL(t.dimensions()[0].size, 10u);
+ EXPECT_EQUAL(t.dimensions()[1].name, "y");
+ EXPECT_EQUAL(t.dimensions()[1].size, npos);
+}
+
TEST("require that TENSOR value type sorts dimensions") {
ValueType t = ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"z", 30}, {"y"}});
EXPECT_FALSE(t.is_error());
@@ -73,6 +98,8 @@ TEST("require that TENSOR value type sorts dimensions") {
TEST("require that non-double scalar values are not allowed") {
EXPECT_TRUE(ValueType::make_type(CellType::FLOAT, {}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::BFLOAT16, {}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::INT8, {}).is_error());
}
TEST("require that use of zero-size dimensions result in error types") {
@@ -116,7 +143,12 @@ TEST("require that value types can be compared") {
TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 20}}), ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 10}})));
TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
TEST_DO(verify_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::BFLOAT16, {{"x", 10}}), ValueType::make_type(CellType::BFLOAT16, {{"x", 10}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::INT8, {{"x", 10}}), ValueType::make_type(CellType::INT8, {{"x", 10}})));
TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::BFLOAT16, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::INT8, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::BFLOAT16, {{"x", 10}}), ValueType::make_type(CellType::INT8, {{"x", 10}})));
}
//-----------------------------------------------------------------------------
@@ -125,6 +157,8 @@ TEST("require that value type can make spec") {
EXPECT_EQUAL("error", ValueType::error_type().to_spec());
EXPECT_EQUAL("double", ValueType::double_type().to_spec());
EXPECT_EQUAL("error", ValueType::make_type(CellType::FLOAT, {}).to_spec());
+ EXPECT_EQUAL("error", ValueType::make_type(CellType::BFLOAT16, {}).to_spec());
+ EXPECT_EQUAL("error", ValueType::make_type(CellType::INT8, {}).to_spec());
EXPECT_EQUAL("double", ValueType::make_type(CellType::DOUBLE, {}).to_spec());
EXPECT_EQUAL("tensor(x{})", ValueType::make_type(CellType::DOUBLE, {{"x"}}).to_spec());
EXPECT_EQUAL("tensor(y[10])", ValueType::make_type(CellType::DOUBLE, {{"y", 10}}).to_spec());
@@ -132,6 +166,12 @@ TEST("require that value type can make spec") {
EXPECT_EQUAL("tensor<float>(x{})", ValueType::make_type(CellType::FLOAT, {{"x"}}).to_spec());
EXPECT_EQUAL("tensor<float>(y[10])", ValueType::make_type(CellType::FLOAT, {{"y", 10}}).to_spec());
EXPECT_EQUAL("tensor<float>(x{},y[10],z[5])", ValueType::make_type(CellType::FLOAT, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
+ EXPECT_EQUAL("tensor<bfloat16>(x{})", ValueType::make_type(CellType::BFLOAT16, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor<bfloat16>(y[10])", ValueType::make_type(CellType::BFLOAT16, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor<bfloat16>(x{},y[10],z[5])", ValueType::make_type(CellType::BFLOAT16, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
+ EXPECT_EQUAL("tensor<int8>(x{})", ValueType::make_type(CellType::INT8, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor<int8>(y[10])", ValueType::make_type(CellType::INT8, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor<int8>(x{},y[10],z[5])", ValueType::make_type(CellType::INT8, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
}
//-----------------------------------------------------------------------------
@@ -145,6 +185,8 @@ TEST("require that value type spec can be parsed") {
EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}), type("tensor(x{},y[10],z[5])"));
EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type("tensor<double>(y[10])"));
EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {{"y", 10}}), type("tensor<float>(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::BFLOAT16, {{"y", 10}}), type("tensor<bfloat16>(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::INT8, {{"y", 10}}), type("tensor<int8>(y[10])"));
}
TEST("require that value type spec can be parsed with extra whitespace") {
@@ -196,6 +238,8 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("error").is_error());
EXPECT_TRUE(ValueType::from_spec("any").is_error());
EXPECT_TRUE(ValueType::from_spec("float").is_error());
+ EXPECT_TRUE(ValueType::from_spec("bfloat16").is_error());
+ EXPECT_TRUE(ValueType::from_spec("int8").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<double>").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor() tensor()").is_error());
@@ -214,6 +258,8 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(z[])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<float>()").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<bfloat16>()").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<int8>()").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<int7>(x[10])").is_error());
}
@@ -257,12 +303,16 @@ TEST("require that value types preserve cell type") {
EXPECT_TRUE(type("tensor(x[10])").cell_type() == CellType::DOUBLE);
EXPECT_TRUE(type("tensor<double>(x[10])").cell_type() == CellType::DOUBLE);
EXPECT_TRUE(type("tensor<float>(x[10])").cell_type() == CellType::FLOAT);
+ EXPECT_TRUE(type("tensor<bfloat16>(x[10])").cell_type() == CellType::BFLOAT16);
+ EXPECT_TRUE(type("tensor<int8>(x[10])").cell_type() == CellType::INT8);
}
TEST("require that dimension names can be obtained") {
EXPECT_EQUAL(type("double").dimension_names(), str_list({}));
EXPECT_EQUAL(type("tensor(y[30],x[10])").dimension_names(), str_list({"x", "y"}));
EXPECT_EQUAL(type("tensor<float>(y[10],x[30],z{})").dimension_names(), str_list({"x", "y", "z"}));
+ EXPECT_EQUAL(type("tensor<bfloat16>(y[10],x[30],z{})").dimension_names(), str_list({"x", "y", "z"}));
+ EXPECT_EQUAL(type("tensor<int8>(y[10],x[30],z{})").dimension_names(), str_list({"x", "y", "z"}));
}
TEST("require that nontrivial indexed dimensions can be obtained") {
@@ -295,6 +345,8 @@ TEST("require that dimension index can be obtained") {
EXPECT_EQUAL(type("tensor()").dimension_index("x"), ValueType::Dimension::npos);
EXPECT_EQUAL(type("tensor(y[10],x{},z[5])").dimension_index("x"), 0u);
EXPECT_EQUAL(type("tensor<float>(y[10],x{},z[5])").dimension_index("y"), 1u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(y[10],x{},z[5])").dimension_index("y"), 1u);
+ EXPECT_EQUAL(type("tensor<int8>(y[10],x{},z[5])").dimension_index("y"), 1u);
EXPECT_EQUAL(type("tensor(y[10],x{},z[5])").dimension_index("z"), 2u);
EXPECT_EQUAL(type("tensor(y[10],x{},z[5])").dimension_index("w"), ValueType::Dimension::npos);
}
@@ -322,6 +374,12 @@ TEST("require that type-related predicate functions work as expected") {
TEST_DO(verify_predicates(type("tensor<float>(x{})"), false, false, true, true, false));
TEST_DO(verify_predicates(type("tensor<float>(x[5])"), false, false, true, false, true));
TEST_DO(verify_predicates(type("tensor<float>(x[5],y{})"), false, false, true, false, false));
+ TEST_DO(verify_predicates(type("tensor<bfloat16>(x{})"), false, false, true, true, false));
+ TEST_DO(verify_predicates(type("tensor<bfloat16>(x[5])"), false, false, true, false, true));
+ TEST_DO(verify_predicates(type("tensor<bfloat16>(x[5],y{})"), false, false, true, false, false));
+ TEST_DO(verify_predicates(type("tensor<int8>(x{})"), false, false, true, true, false));
+ TEST_DO(verify_predicates(type("tensor<int8>(x[5])"), false, false, true, false, true));
+ TEST_DO(verify_predicates(type("tensor<int8>(x[5],y{})"), false, false, true, false, false));
}
TEST("require that mapped and indexed dimensions can be counted") {
@@ -349,6 +407,12 @@ TEST("require that dense subspace size calculation works as expected") {
EXPECT_EQUAL(type("tensor<float>(x{})").dense_subspace_size(), 1u);
EXPECT_EQUAL(type("tensor<float>(x[5])").dense_subspace_size(), 5u);
EXPECT_EQUAL(type("tensor<float>(x[5],y{})").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(x{})").dense_subspace_size(), 1u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[5])").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[5],y{})").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<int8>(x{})").dense_subspace_size(), 1u);
+ EXPECT_EQUAL(type("tensor<int8>(x[5])").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<int8>(x[5],y{})").dense_subspace_size(), 5u);
}
TEST("require that dimension predicates work as expected") {
@@ -363,32 +427,51 @@ TEST("require that dimension predicates work as expected") {
EXPECT_TRUE(z.is_indexed());
}
-TEST("require that removing dimensions from non-tensor types gives error type") {
+TEST("require that value type map decays cell type") {
+ EXPECT_EQUAL(type("tensor(x[10])").map(), type("tensor(x[10])"));
+ EXPECT_EQUAL(type("tensor<float>(x[10])").map(), type("tensor<float>(x[10])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10])").map(), type("tensor<float>(x[10])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10])").map(), type("tensor<float>(x[10])"));
+}
+
+TEST("require that reducing dimensions from non-tensor types gives error type") {
EXPECT_TRUE(type("error").reduce({"x"}).is_error());
EXPECT_TRUE(type("double").reduce({"x"}).is_error());
}
-TEST("require that dimensions can be removed from tensor value types") {
+TEST("require that a scalar value can be fully reduced to a scalar value") {
+ EXPECT_EQUAL(type("double").reduce({}), type("double"));
+}
+
+TEST("require that tensor value types can be reduced") {
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"x"}), type("tensor(y[20],z[30])"));
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"y"}), type("tensor(x[10],z[30])"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({"z"}), type("tensor<float>(x[10],y[20])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({"z"}), type("tensor<float>(x[10],y[20])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({"z"}), type("tensor<float>(x[10],y[20])"));
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"x", "z"}), type("tensor(y[20])"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({"z", "x"}), type("tensor<float>(y[20])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({"z", "x"}), type("tensor<float>(y[20])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({"z", "x"}), type("tensor<float>(y[20])"));
}
-TEST("require that removing an empty set of dimensions means removing them all") {
+TEST("require that reducing an empty set of dimensions means reducing them all") {
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({}), type("double"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({}), type("double"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({}), type("double"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({}), type("double"));
}
-TEST("require that removing non-existing dimensions gives error type") {
+TEST("require that reducing non-existing dimensions gives error type") {
EXPECT_TRUE(type("tensor(y{})").reduce({"x"}).is_error());
EXPECT_TRUE(type("tensor<float>(y[10])").reduce({"x"}).is_error());
}
-TEST("require that removing all dimensions gives double type") {
+TEST("require that reducing all dimensions gives double type") {
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
}
void verify_join(const ValueType &a, const ValueType b, const ValueType &res) {
@@ -407,9 +490,20 @@ TEST("require that dimensions can be combined for value types") {
}
TEST("require that cell type is handled correctly for join") {
- TEST_DO(verify_join(type("tensor(x{})"), type("tensor<float>(y{})"), type("tensor(x{},y{})")));
- TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<float>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor<float>(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor<bfloat16>(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor<int8>(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<float>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<bfloat16>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<int8>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<bfloat16>(x{})"), type("tensor<bfloat16>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<bfloat16>(x{})"), type("tensor<int8>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<int8>(x{})"), type("tensor<int8>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("double"), type("tensor(x{})")));
TEST_DO(verify_join(type("tensor<float>(x{})"), type("double"), type("tensor<float>(x{})")));
+ TEST_DO(verify_join(type("tensor<bfloat16>(x{})"), type("double"), type("tensor<float>(x{})")));
+ TEST_DO(verify_join(type("tensor<int8>(x{})"), type("double"), type("tensor<float>(x{})")));
}
void verify_not_joinable(const ValueType &a, const ValueType &b) {
@@ -444,14 +538,32 @@ TEST("require that tensor dimensions can be renamed") {
EXPECT_EQUAL(type("error").rename({"a"}, {"b"}), type("error"));
}
+TEST("require that dimension rename preserves cell type") {
+ EXPECT_EQUAL(type("tensor(x{})").rename({"x"}, {"y"}), type("tensor(y{})"));
+ EXPECT_EQUAL(type("tensor<float>(x{})").rename({"x"}, {"y"}), type("tensor<float>(y{})"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x{})").rename({"x"}, {"y"}), type("tensor<bfloat16>(y{})"));
+ EXPECT_EQUAL(type("tensor<int8>(x{})").rename({"x"}, {"y"}), type("tensor<int8>(y{})"));
+}
+
+void verify_merge(const ValueType &a, const ValueType &b, const ValueType &res) {
+ EXPECT_EQUAL(ValueType::merge(a, b), res);
+ EXPECT_EQUAL(ValueType::merge(b, a), res);
+}
+
TEST("require that similar types can be merged") {
- EXPECT_EQUAL(ValueType::merge(type("error"), type("error")), type("error"));
- EXPECT_EQUAL(ValueType::merge(type("double"), type("double")), type("double"));
- EXPECT_EQUAL(ValueType::merge(type("tensor(x[5])"), type("tensor(x[5])")), type("tensor(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor<float>(x[5])"), type("tensor(x[5])")), type("tensor(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor(x[5])"), type("tensor<float>(x[5])")), type("tensor(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor<float>(x[5])"), type("tensor<float>(x[5])")), type("tensor<float>(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor(x{})"), type("tensor(x{})")), type("tensor(x{})"));
+ TEST_DO(verify_merge(type("error"), type("error"), type("error")));
+ TEST_DO(verify_merge(type("double"), type("double"), type("double")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor<float>(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor<bfloat16>(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor<int8>(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor<float>(x[5])"), type("tensor<float>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<float>(x[5])"), type("tensor<bfloat16>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<float>(x[5])"), type("tensor<int8>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<bfloat16>(x[5])"), type("tensor<bfloat16>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<bfloat16>(x[5])"), type("tensor<int8>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<int8>(x[5])"), type("tensor<int8>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x{})"), type("tensor(x{})"), type("tensor(x{})")));
}
TEST("require that diverging types can not be merged") {
@@ -463,7 +575,7 @@ TEST("require that diverging types can not be merged") {
EXPECT_EQUAL(ValueType::merge(type("tensor(x{})"), type("tensor(y{})")), type("error"));
}
-void verify_concat(const ValueType &a, const ValueType b, const vespalib::string &dim, const ValueType &res) {
+void verify_concat(const ValueType &a, const ValueType &b, const vespalib::string &dim, const ValueType &res) {
EXPECT_EQUAL(ValueType::concat(a, b, dim), res);
EXPECT_EQUAL(ValueType::concat(b, a, dim), res);
}
@@ -486,9 +598,23 @@ TEST("require that types can be concatenated") {
}
TEST("require that cell type is handled correctly for concat") {
- TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor(x[2])"), "x", type("tensor(x[5])")));
- TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<float>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor<float>(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor<bfloat16>(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<float>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<bfloat16>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<bfloat16>(x[3])"), type("tensor<bfloat16>(x[2])"), "x", type("tensor<bfloat16>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<bfloat16>(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<int8>(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor<int8>(x[5])")));
+}
+
+TEST("require that concat with number preserves cell type") {
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("double"), "x", type("tensor(x[4])")));
TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("double"), "x", type("tensor<float>(x[4])")));
+ TEST_DO(verify_concat(type("tensor<bfloat16>(x[3])"), type("double"), "x", type("tensor<bfloat16>(x[4])")));
+ TEST_DO(verify_concat(type("tensor<int8>(x[3])"), type("double"), "x", type("tensor<int8>(x[4])")));
}
void verify_cell_cast(const ValueType &type) {
@@ -514,12 +640,18 @@ void verify_cell_cast(const ValueType &type) {
TEST("require that value type cell cast works correctly") {
TEST_DO(verify_cell_cast(type("error")));
TEST_DO(verify_cell_cast(type("double")));
- TEST_DO(verify_cell_cast(type("tensor<float>(x[10])")));
TEST_DO(verify_cell_cast(type("tensor<double>(x[10])")));
- TEST_DO(verify_cell_cast(type("tensor<float>(x{})")));
+ TEST_DO(verify_cell_cast(type("tensor<float>(x[10])")));
+ TEST_DO(verify_cell_cast(type("tensor<bfloat16>(x[10])")));
+ TEST_DO(verify_cell_cast(type("tensor<int8>(x[10])")));
TEST_DO(verify_cell_cast(type("tensor<double>(x{})")));
- TEST_DO(verify_cell_cast(type("tensor<float>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<float>(x{})")));
+ TEST_DO(verify_cell_cast(type("tensor<bfloat16>(x{})")));
+ TEST_DO(verify_cell_cast(type("tensor<int8>(x{})")));
TEST_DO(verify_cell_cast(type("tensor<double>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<float>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<bfloat16>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<int8>(x{},y[5])")));
}
TEST("require that actual cell type can be converted to cell type name") {
@@ -561,14 +693,32 @@ TEST("require that peek type inference works as expected") {
EXPECT_EQUAL(input2.peek({"a", "b", "c", "d", "e"}), type("double"));
}
+TEST("require that non-scalar peek preserves cell type") {
+ EXPECT_EQUAL(type("tensor(x[3],y[5])").peek({"x"}), type("tensor(y[5])"));
+ EXPECT_EQUAL(type("tensor<float>(x[3],y[5])").peek({"x"}), type("tensor<float>(y[5])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[3],y[5])").peek({"x"}), type("tensor<bfloat16>(y[5])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[3],y[5])").peek({"x"}), type("tensor<int8>(y[5])"));
+}
+
+TEST("require that scalar peek is always double") {
+ EXPECT_EQUAL(type("tensor(x[3],y[5])").peek({"x", "y"}), type("double"));
+ EXPECT_EQUAL(type("tensor<float>(x[3],y[5])").peek({"x", "y"}), type("double"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[3],y[5])").peek({"x", "y"}), type("double"));
+ EXPECT_EQUAL(type("tensor<int8>(x[3],y[5])").peek({"x", "y"}), type("double"));
+}
+
TEST("require that cell alignment can be obtained") {
EXPECT_EQUAL(CellTypeUtils::alignment(CellType::DOUBLE), alignof(double));
EXPECT_EQUAL(CellTypeUtils::alignment(CellType::FLOAT), alignof(float));
+ EXPECT_EQUAL(CellTypeUtils::alignment(CellType::BFLOAT16), alignof(BFloat16));
+ EXPECT_EQUAL(CellTypeUtils::alignment(CellType::INT8), alignof(Int8Float));
}
TEST("require that cell array size can be calculated") {
EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::DOUBLE, 37), 37 * sizeof(double));
EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::FLOAT, 37), 37 * sizeof(float));
+ EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::BFLOAT16, 37), 37 * sizeof(BFloat16));
+ EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::INT8, 37), 37 * sizeof(Int8Float));
}
TEST("require that all cell types can be listed") {