author | Arne Juul <arnej@verizonmedia.com> | 2021-03-02 17:54:44 +0000 |
---|---|---|
committer | Arne Juul <arnej@verizonmedia.com> | 2021-03-02 17:54:44 +0000 |
commit | 1db3b2927f745ed63c46dd3a60456078e0da7047 (patch) | |
tree | 10035246e1b489640d213932d714ffe6008859f2 | |
parent | 87d63e9c3e8004763d7c3eadb3145ea8262c9449 (diff) | |
add documentation comment
-rw-r--r-- | vespalib/src/vespa/vespalib/util/brain_float16.h | 10 |
1 files changed, 10 insertions, 0 deletions
diff --git a/vespalib/src/vespa/vespalib/util/brain_float16.h b/vespalib/src/vespa/vespalib/util/brain_float16.h
index 25f25801790..4e449457eeb 100644
--- a/vespalib/src/vespa/vespalib/util/brain_float16.h
+++ b/vespalib/src/vespa/vespalib/util/brain_float16.h
@@ -8,6 +8,16 @@ namespace vespalib {
+/**
+ * Class holding 16-bit floating-point numbers.
+ * Truncated version of normal 32-bit float; the sign and
+ * exponent are kept as-is but the mantissa has only 8-bit
+ * precision. Well suited for ML / AI, halving memory
+ * requirements for large vectors and similar data.
+ * Direct HW support possible (AVX-512 BF16 extension etc.)
+ * See also:
+ * https://en.wikipedia.org/wiki/Bfloat16_floating-point_format
+ **/
 class BrainFloat16 {
 private:
     uint16_t _bits;
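For readers unfamiliar with the format the new comment describes: a bfloat16 value is simply the upper half of an IEEE-754 float32, so conversion in either direction is a 16-bit shift of the raw bits. The sketch below illustrates that truncation; the helper names and the plain truncating (round-toward-zero) conversion are illustrative assumptions, not the actual code in brain_float16.h.

```cpp
// Minimal sketch (not the Vespa implementation): bfloat16 keeps the upper
// 16 bits of a float32, i.e. the sign bit, the full 8-bit exponent, and the
// top 7 stored mantissa bits.
#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical helper names, chosen for illustration only.
static uint16_t float_to_bf16_bits(float value) {
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));  // bit-exact view of the float
    return static_cast<uint16_t>(bits >> 16);  // keep sign + exponent + top mantissa bits
}

static float bf16_bits_to_float(uint16_t bf16) {
    uint32_t bits = static_cast<uint32_t>(bf16) << 16;  // low mantissa bits become zero
    float value;
    std::memcpy(&value, &bits, sizeof(value));
    return value;
}

int main() {
    float original = 3.14159265f;
    uint16_t stored = float_to_bf16_bits(original);
    float roundtrip = bf16_bits_to_float(stored);
    // The round-trip value is close to the original but carries only
    // about 2-3 significant decimal digits of mantissa precision.
    std::printf("original=%.8f roundtrip=%.8f\n", original, roundtrip);
    return 0;
}
```

Plain truncation discards the low 16 mantissa bits; hardware support such as the AVX-512 BF16 instructions mentioned in the comment instead converts with round-to-nearest-even, which gives slightly better accuracy for the same storage cost.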