diff options
author:    Håvard Pettersen <havardpe@oath.com>  2021-06-29 10:30:38 +0000
committer: Håvard Pettersen <havardpe@oath.com>  2021-06-29 10:55:10 +0000
commit:    77b57d25819751b082cfcf746537d254b08ccdfa (patch)
tree:      1a5de60e5dd354a1cc06319ae45618126f94261c /eval/src/tests/instruction
parent:    e928ee61e47fe9c1cd15585df6dc553eaffa4370 (diff)
optimize additional variants
Diffstat (limited to 'eval/src/tests/instruction')
-rw-r--r-- | eval/src/tests/instruction/unpack_bits_function/unpack_bits_function_test.cpp | 37 |
1 files changed, 23 insertions, 14 deletions
diff --git a/eval/src/tests/instruction/unpack_bits_function/unpack_bits_function_test.cpp b/eval/src/tests/instruction/unpack_bits_function/unpack_bits_function_test.cpp index 8250893225a..c0d7cdc43e7 100644 --- a/eval/src/tests/instruction/unpack_bits_function/unpack_bits_function_test.cpp +++ b/eval/src/tests/instruction/unpack_bits_function/unpack_bits_function_test.cpp @@ -46,40 +46,49 @@ void assert_not_optimized(const vespalib::string &expr) { //----------------------------------------------------------------------------- -TEST(UnpackBitsTest, expression_can_be_optimized) { +TEST(UnpackBitsTest, expression_can_be_optimized_with_big_bitorder) { assert_optimized("tensor<int8>(x[2048])(bit(full{x:(x/8)},7-x%8))"); assert_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},7-x%8))"); } +TEST(UnpackBitsTest, expression_can_be_optimized_with_small_bitorder) { + assert_optimized("tensor<int8>(x[2048])(bit(full{x:(x/8)},x%8))"); + assert_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},x%8))"); +} + TEST(UnpackBitsTest, unpack_bits_can_rename_dimension) { assert_optimized("tensor<int8>(x[64])(bit(vy8{y:(x/8)},7-x%8))"); + assert_optimized("tensor<int8>(x[64])(bit(vy8{y:(x/8)},x%8))"); } -//----------------------------------------------------------------------------- +TEST(UnpackBitsTest, result_may_have_other_cell_types_than_int8) { + assert_optimized("tensor<bfloat16>(x[64])(bit(vx8{x:(x/8)},7-x%8))"); + assert_optimized("tensor<float>(x[64])(bit(vx8{x:(x/8)},7-x%8))"); + assert_optimized("tensor<double>(x[64])(bit(vx8{x:(x/8)},7-x%8))"); -TEST(UnpackBitsTest, dimension_sizes_must_be_appropriate) { - assert_not_optimized("tensor<int8>(x[60])(bit(vx8{x:(x/8)},7-x%8))"); - assert_not_optimized("tensor<int8>(x[68])(bit(vx8{x:(x/8)},7-x%8))"); + assert_optimized("tensor<bfloat16>(x[64])(bit(vx8{x:(x/8)},x%8))"); + assert_optimized("tensor<float>(x[64])(bit(vx8{x:(x/8)},x%8))"); + assert_optimized("tensor<double>(x[64])(bit(vx8{x:(x/8)},x%8))"); } 
+//----------------------------------------------------------------------------- + TEST(UnpackBitsTest, source_must_be_int8) { assert_not_optimized("tensor<int8>(x[64])(bit(vxf{x:(x/8)},7-x%8))"); } -TEST(UnpackBitsTest, result_must_be_int8) { - assert_not_optimized("tensor<float>(x[64])(bit(vx8{x:(x/8)},7-x%8))"); +TEST(UnpackBitsTest, dimension_sizes_must_be_appropriate) { + assert_not_optimized("tensor<int8>(x[60])(bit(vx8{x:(x/8)},7-x%8))"); + assert_not_optimized("tensor<int8>(x[68])(bit(vx8{x:(x/8)},7-x%8))"); } TEST(UnpackBitsTest, similar_expressions_are_not_optimized) { - assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},7-x%7))"); - assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},7-x%9))"); - assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/7)},7-x%8))"); + assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x*8)},7-x%8))"); assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/9)},7-x%8))"); - assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},x%8-7))"); - assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(8/x)},7-x%8))"); + assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},8-x%8))"); assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},7+x%8))"); - assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x*8)},7-x%8))"); - assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},(7-x)%8))"); + assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},7-x/8))"); + assert_not_optimized("tensor<int8>(x[64])(bit(vx8{x:(x/8)},7-x%9))"); } //----------------------------------------------------------------------------- |