core/num/f16.rs
1//! Constants for the `f16` half-precision floating point type.
2//!
3//! *[See also the `f16` primitive type][f16].*
4//!
5//! Mathematically significant numbers are provided in the `consts` sub-module.
6//!
7//! For the constants defined directly in this module
8//! (as distinct from those defined in the `consts` sub-module),
9//! new code should instead use the associated constants
10//! defined directly on the `f16` type.
11
12#![unstable(feature = "f16", issue = "116909")]
13
14use crate::convert::FloatToInt;
15use crate::num::FpCategory;
16#[cfg(not(test))]
17use crate::num::libm;
18use crate::panic::const_assert;
19use crate::{intrinsics, mem};
20
21/// Basic mathematical constants.
22#[unstable(feature = "f16", issue = "116909")]
23#[rustc_diagnostic_item = "f16_consts_mod"]
24pub mod consts {
25 // FIXME: replace with mathematical constants from cmath.
26
27 /// Archimedes' constant (π)
28 #[unstable(feature = "f16", issue = "116909")]
29 pub const PI: f16 = 3.14159265358979323846264338327950288_f16;
30
31 /// The full circle constant (τ)
32 ///
33 /// Equal to 2π.
34 #[unstable(feature = "f16", issue = "116909")]
35 pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;
36
37 /// The golden ratio (φ)
38 #[unstable(feature = "f16", issue = "116909")]
39 pub const GOLDEN_RATIO: f16 = 1.618033988749894848204586834365638118_f16;
40
41 /// The Euler-Mascheroni constant (γ)
42 #[unstable(feature = "f16", issue = "116909")]
43 pub const EULER_GAMMA: f16 = 0.577215664901532860606512090082402431_f16;
44
45 /// π/2
46 #[unstable(feature = "f16", issue = "116909")]
47 pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;
48
49 /// π/3
50 #[unstable(feature = "f16", issue = "116909")]
51 pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;
52
53 /// π/4
54 #[unstable(feature = "f16", issue = "116909")]
55 pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;
56
57 /// π/6
58 #[unstable(feature = "f16", issue = "116909")]
59 pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;
60
61 /// π/8
62 #[unstable(feature = "f16", issue = "116909")]
63 pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;
64
65 /// 1/π
66 #[unstable(feature = "f16", issue = "116909")]
67 pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;
68
69 /// 1/sqrt(π)
70 #[unstable(feature = "f16", issue = "116909")]
71 // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
72 pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;
73
74 /// 1/sqrt(2π)
75 #[doc(alias = "FRAC_1_SQRT_TAU")]
76 #[unstable(feature = "f16", issue = "116909")]
77 // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
78 pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;
79
80 /// 2/π
81 #[unstable(feature = "f16", issue = "116909")]
82 pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;
83
84 /// 2/sqrt(π)
85 #[unstable(feature = "f16", issue = "116909")]
86 pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;
87
88 /// sqrt(2)
89 #[unstable(feature = "f16", issue = "116909")]
90 pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;
91
92 /// 1/sqrt(2)
93 #[unstable(feature = "f16", issue = "116909")]
94 pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;
95
96 /// sqrt(3)
97 #[unstable(feature = "f16", issue = "116909")]
98 // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
99 pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;
100
101 /// 1/sqrt(3)
102 #[unstable(feature = "f16", issue = "116909")]
103 // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
104 pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;
105
106 /// Euler's number (e)
107 #[unstable(feature = "f16", issue = "116909")]
108 pub const E: f16 = 2.71828182845904523536028747135266250_f16;
109
110 /// log<sub>2</sub>(10)
111 #[unstable(feature = "f16", issue = "116909")]
112 pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;
113
114 /// log<sub>2</sub>(e)
115 #[unstable(feature = "f16", issue = "116909")]
116 pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;
117
118 /// log<sub>10</sub>(2)
119 #[unstable(feature = "f16", issue = "116909")]
120 pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;
121
122 /// log<sub>10</sub>(e)
123 #[unstable(feature = "f16", issue = "116909")]
124 pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;
125
126 /// ln(2)
127 #[unstable(feature = "f16", issue = "116909")]
128 pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;
129
130 /// ln(10)
131 #[unstable(feature = "f16", issue = "116909")]
132 pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
133}
134
135#[doc(test(attr(feature(cfg_target_has_reliable_f16_f128), allow(internal_features))))]
136impl f16 {
137 /// The radix or base of the internal representation of `f16`.
138 #[unstable(feature = "f16", issue = "116909")]
139 pub const RADIX: u32 = 2;
140
141 /// Number of significant digits in base 2.
142 ///
143 /// Note that the size of the mantissa in the bitwise representation is one
144 /// smaller than this since the leading 1 is not stored explicitly.
145 #[unstable(feature = "f16", issue = "116909")]
146 pub const MANTISSA_DIGITS: u32 = 11;
147
148 /// Approximate number of significant digits in base 10.
149 ///
150 /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
151 /// significant digits can be converted to `f16` and back without loss.
152 ///
153 /// Equal to floor(log<sub>10</sub> 2<sup>[`MANTISSA_DIGITS`] − 1</sup>).
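/// For `f16`, that is floor(log<sub>10</sub> 2<sup>10</sup>) = floor(3.01…) = 3.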
154 ///
155 /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
156 #[unstable(feature = "f16", issue = "116909")]
157 pub const DIGITS: u32 = 3;
158
159 /// [Machine epsilon] value for `f16`.
160 ///
161 /// This is the difference between `1.0` and the next larger representable number.
162 ///
163 /// Equal to 2<sup>1 − [`MANTISSA_DIGITS`]</sup>.
164 ///
165 /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
166 /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
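///
/// A small doctest sketch of the definition above, using [`next_up`](Self::next_up):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_has_reliable_f16)] {
///
/// assert_eq!(f16::EPSILON, 1.0f16.next_up() - 1.0);
/// # }
/// ```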
167 #[unstable(feature = "f16", issue = "116909")]
168 #[rustc_diagnostic_item = "f16_epsilon"]
169 pub const EPSILON: f16 = 9.7656e-4_f16;
170
171 /// Smallest finite `f16` value.
172 ///
173 /// Equal to −[`MAX`].
174 ///
175 /// [`MAX`]: f16::MAX
176 #[unstable(feature = "f16", issue = "116909")]
177 pub const MIN: f16 = -6.5504e+4_f16;
178 /// Smallest positive normal `f16` value.
179 ///
180 /// Equal to 2<sup>[`MIN_EXP`] − 1</sup>.
181 ///
182 /// [`MIN_EXP`]: f16::MIN_EXP
183 #[unstable(feature = "f16", issue = "116909")]
184 pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
185 /// Largest finite `f16` value.
186 ///
187 /// Equal to
188 /// (1 − 2<sup>−[`MANTISSA_DIGITS`]</sup>) 2<sup>[`MAX_EXP`]</sup>.
189 ///
190 /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
191 /// [`MAX_EXP`]: f16::MAX_EXP
192 #[unstable(feature = "f16", issue = "116909")]
193 pub const MAX: f16 = 6.5504e+4_f16;
194
195 /// One greater than the minimum possible *normal* power of 2 exponent
196 /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
197 ///
198 /// This corresponds to the exact minimum possible *normal* power of 2 exponent
199 /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
200 /// In other words, all normal numbers representable by this type are
201 /// greater than or equal to 0.5 × 2<sup><i>MIN_EXP</i></sup>.
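///
/// For `f16`, `MIN_EXP` is −13; the smallest positive normal value is therefore
/// 0.5 × 2<sup>−13</sup> = 2<sup>−14</sup>, i.e. `f16::MIN_POSITIVE`.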
202 #[unstable(feature = "f16", issue = "116909")]
203 pub const MIN_EXP: i32 = -13;
204 /// One greater than the maximum possible power of 2 exponent
205 /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
206 ///
207 /// This corresponds to the exact maximum possible power of 2 exponent
208 /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
209 /// In other words, all numbers representable by this type are
210 /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
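///
/// For `f16`, `MAX_EXP` is 16, so every finite `f16` is strictly less than
/// 2<sup>16</sup> = 65536; the largest finite value, `f16::MAX`, is 65504.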
211 #[unstable(feature = "f16", issue = "116909")]
212 pub const MAX_EXP: i32 = 16;
213
214 /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
215 ///
216 /// Equal to ceil(log<sub>10</sub> [`MIN_POSITIVE`]).
217 ///
218 /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
219 #[unstable(feature = "f16", issue = "116909")]
220 pub const MIN_10_EXP: i32 = -4;
221 /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
222 ///
223 /// Equal to floor(log<sub>10</sub> [`MAX`]).
224 ///
225 /// [`MAX`]: f16::MAX
226 #[unstable(feature = "f16", issue = "116909")]
227 pub const MAX_10_EXP: i32 = 4;
228
229 /// Not a Number (NaN).
230 ///
231 /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
232 /// considered to be NaN. Furthermore, the standard distinguishes between a "signaling" and
233 /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
234 /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
235 /// info.
236 ///
237 /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumptions
238 /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
239 /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
240 /// The concrete bit pattern may change across Rust versions and target platforms.
241 #[allow(clippy::eq_op)]
242 #[rustc_diagnostic_item = "f16_nan"]
243 #[unstable(feature = "f16", issue = "116909")]
244 pub const NAN: f16 = 0.0_f16 / 0.0_f16;
245
246 /// Infinity (∞).
247 #[unstable(feature = "f16", issue = "116909")]
248 pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;
249
250 /// Negative infinity (−∞).
251 #[unstable(feature = "f16", issue = "116909")]
252 pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;
253
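// IEEE 754 binary16 layout, which the masks below select:
// 1 sign bit (bit 15), 5 exponent bits (bits 14–10), 10 mantissa bits (bits 9–0).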
254 /// Sign bit
255 pub(crate) const SIGN_MASK: u16 = 0x8000;
256
257 /// Exponent mask
258 pub(crate) const EXP_MASK: u16 = 0x7c00;
259
260 /// Mantissa mask
261 pub(crate) const MAN_MASK: u16 = 0x03ff;
262
263 /// Minimum representable positive value (min subnormal)
264 const TINY_BITS: u16 = 0x1;
265
266 /// Minimum representable negative value (min negative subnormal)
267 const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
268
269 /// Returns `true` if this value is NaN.
270 ///
271 /// ```
272 /// #![feature(f16)]
273 /// # #[cfg(target_has_reliable_f16)] {
274 ///
275 /// let nan = f16::NAN;
276 /// let f = 7.0_f16;
277 ///
278 /// assert!(nan.is_nan());
279 /// assert!(!f.is_nan());
280 /// # }
281 /// ```
282 #[inline]
283 #[must_use]
284 #[unstable(feature = "f16", issue = "116909")]
285 #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
286 pub const fn is_nan(self) -> bool {
287 self != self
288 }
289
290 /// Returns `true` if this value is positive infinity or negative infinity, and
291 /// `false` otherwise.
292 ///
293 /// ```
294 /// #![feature(f16)]
295 /// # #[cfg(target_has_reliable_f16)] {
296 ///
297 /// let f = 7.0f16;
298 /// let inf = f16::INFINITY;
299 /// let neg_inf = f16::NEG_INFINITY;
300 /// let nan = f16::NAN;
301 ///
302 /// assert!(!f.is_infinite());
303 /// assert!(!nan.is_infinite());
304 ///
305 /// assert!(inf.is_infinite());
306 /// assert!(neg_inf.is_infinite());
307 /// # }
308 /// ```
309 #[inline]
310 #[must_use]
311 #[unstable(feature = "f16", issue = "116909")]
312 pub const fn is_infinite(self) -> bool {
313 (self == f16::INFINITY) | (self == f16::NEG_INFINITY)
314 }
315
316 /// Returns `true` if this number is neither infinite nor NaN.
317 ///
318 /// ```
319 /// #![feature(f16)]
320 /// # #[cfg(target_has_reliable_f16)] {
321 ///
322 /// let f = 7.0f16;
323 /// let inf: f16 = f16::INFINITY;
324 /// let neg_inf: f16 = f16::NEG_INFINITY;
325 /// let nan: f16 = f16::NAN;
326 ///
327 /// assert!(f.is_finite());
328 ///
329 /// assert!(!nan.is_finite());
330 /// assert!(!inf.is_finite());
331 /// assert!(!neg_inf.is_finite());
332 /// # }
333 /// ```
334 #[inline]
335 #[must_use]
336 #[unstable(feature = "f16", issue = "116909")]
337 #[rustc_const_unstable(feature = "f16", issue = "116909")]
338 pub const fn is_finite(self) -> bool {
339 // There's no need to handle NaN separately: if self is NaN,
340 // the comparison is not true, exactly as desired.
341 self.abs() < Self::INFINITY
342 }
343
344 /// Returns `true` if the number is [subnormal].
345 ///
346 /// ```
347 /// #![feature(f16)]
348 /// # #[cfg(target_has_reliable_f16)] {
349 ///
350 /// let min = f16::MIN_POSITIVE; // 6.1035e-5
351 /// let max = f16::MAX;
352 /// let lower_than_min = 1.0e-7_f16;
353 /// let zero = 0.0_f16;
354 ///
355 /// assert!(!min.is_subnormal());
356 /// assert!(!max.is_subnormal());
357 ///
358 /// assert!(!zero.is_subnormal());
359 /// assert!(!f16::NAN.is_subnormal());
360 /// assert!(!f16::INFINITY.is_subnormal());
361 /// // Values between `0` and `min` are Subnormal.
362 /// assert!(lower_than_min.is_subnormal());
363 /// # }
364 /// ```
365 /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
366 #[inline]
367 #[must_use]
368 #[unstable(feature = "f16", issue = "116909")]
369 pub const fn is_subnormal(self) -> bool {
370 matches!(self.classify(), FpCategory::Subnormal)
371 }
372
373 /// Returns `true` if the number is neither zero, infinite, [subnormal], nor NaN.
374 ///
375 /// ```
376 /// #![feature(f16)]
377 /// # #[cfg(target_has_reliable_f16)] {
378 ///
379 /// let min = f16::MIN_POSITIVE; // 6.1035e-5
380 /// let max = f16::MAX;
381 /// let lower_than_min = 1.0e-7_f16;
382 /// let zero = 0.0_f16;
383 ///
384 /// assert!(min.is_normal());
385 /// assert!(max.is_normal());
386 ///
387 /// assert!(!zero.is_normal());
388 /// assert!(!f16::NAN.is_normal());
389 /// assert!(!f16::INFINITY.is_normal());
390 /// // Values between `0` and `min` are Subnormal.
391 /// assert!(!lower_than_min.is_normal());
392 /// # }
393 /// ```
394 /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
395 #[inline]
396 #[must_use]
397 #[unstable(feature = "f16", issue = "116909")]
398 pub const fn is_normal(self) -> bool {
399 matches!(self.classify(), FpCategory::Normal)
400 }
401
402 /// Returns the floating point category of the number. If only one property
403 /// is going to be tested, it is generally faster to use the specific
404 /// predicate instead.
405 ///
406 /// ```
407 /// #![feature(f16)]
408 /// # #[cfg(target_has_reliable_f16)] {
409 ///
410 /// use std::num::FpCategory;
411 ///
412 /// let num = 12.4_f16;
413 /// let inf = f16::INFINITY;
414 ///
415 /// assert_eq!(num.classify(), FpCategory::Normal);
416 /// assert_eq!(inf.classify(), FpCategory::Infinite);
417 /// # }
418 /// ```
419 #[inline]
420 #[unstable(feature = "f16", issue = "116909")]
421 pub const fn classify(self) -> FpCategory {
422 let b = self.to_bits();
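// An all-ones exponent encodes an infinity (zero mantissa) or a NaN (non-zero
// mantissa); an all-zeros exponent encodes a zero (zero mantissa) or a
// subnormal (non-zero mantissa). Everything else is a normal number.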
423 match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
424 (0, Self::EXP_MASK) => FpCategory::Infinite,
425 (_, Self::EXP_MASK) => FpCategory::Nan,
426 (0, 0) => FpCategory::Zero,
427 (_, 0) => FpCategory::Subnormal,
428 _ => FpCategory::Normal,
429 }
430 }
431
432 /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
433 /// positive sign bit and positive infinity.
434 ///
435 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
436 /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
437 /// preserved over arithmetic operations, calling `is_sign_positive` on
438 /// a NaN might produce an unexpected or non-portable result. See the [specification
439 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
440 /// if you need fully portable behavior (will return `false` for all NaNs).
441 ///
442 /// ```
443 /// #![feature(f16)]
444 /// # #[cfg(target_has_reliable_f16)] {
445 ///
446 /// let f = 7.0_f16;
447 /// let g = -7.0_f16;
448 ///
449 /// assert!(f.is_sign_positive());
450 /// assert!(!g.is_sign_positive());
451 /// # }
452 /// ```
453 #[inline]
454 #[must_use]
455 #[unstable(feature = "f16", issue = "116909")]
456 pub const fn is_sign_positive(self) -> bool {
457 !self.is_sign_negative()
458 }
459
460 /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
461 /// negative sign bit and negative infinity.
462 ///
463 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
464 /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
465 /// preserved over arithmetic operations, calling `is_sign_negative` on
466 /// a NaN might produce an unexpected or non-portable result. See the [specification
467 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
468 /// if you need fully portable behavior (will return `false` for all NaNs).
469 ///
470 /// ```
471 /// #![feature(f16)]
472 /// # #[cfg(target_has_reliable_f16)] {
473 ///
474 /// let f = 7.0_f16;
475 /// let g = -7.0_f16;
476 ///
477 /// assert!(!f.is_sign_negative());
478 /// assert!(g.is_sign_negative());
479 /// # }
480 /// ```
481 #[inline]
482 #[must_use]
483 #[unstable(feature = "f16", issue = "116909")]
484 pub const fn is_sign_negative(self) -> bool {
485 // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
486 // applies to zeros and NaNs as well.
487 // SAFETY: This is just transmuting to get the sign bit, it's fine.
488 (self.to_bits() & (1 << 15)) != 0
489 }
490
491 /// Returns the least number greater than `self`.
492 ///
493 /// Let `TINY` be the smallest representable positive `f16`. Then,
494 /// - if `self.is_nan()`, this returns `self`;
495 /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
496 /// - if `self` is `-TINY`, this returns -0.0;
497 /// - if `self` is -0.0 or +0.0, this returns `TINY`;
498 /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
499 /// - otherwise the unique least value greater than `self` is returned.
500 ///
501 /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
502 /// is finite `x == x.next_up().next_down()` also holds.
503 ///
504 /// ```rust
505 /// #![feature(f16)]
506 /// # #[cfg(target_has_reliable_f16)] {
507 ///
508 /// // f16::EPSILON is the difference between 1.0 and the next number up.
509 /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
510 /// // But not for most numbers.
511 /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
512 /// assert_eq!(4356f16.next_up(), 4360.0);
513 /// # }
514 /// ```
515 ///
516 /// This operation corresponds to IEEE-754 `nextUp`.
517 ///
518 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
519 /// [`INFINITY`]: Self::INFINITY
520 /// [`MIN`]: Self::MIN
521 /// [`MAX`]: Self::MAX
522 #[inline]
523 #[doc(alias = "nextUp")]
524 #[unstable(feature = "f16", issue = "116909")]
525 pub const fn next_up(self) -> Self {
526 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
527 // denormals to zero. This is in general unsound and unsupported, but here
528 // we do our best to still produce the correct result on such targets.
529 let bits = self.to_bits();
530 if self.is_nan() || bits == Self::INFINITY.to_bits() {
531 return self;
532 }
533
534 let abs = bits & !Self::SIGN_MASK;
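// Zero (of either sign) steps up to the smallest positive subnormal; positive
// values move away from zero (bits + 1) and negative values move toward it (bits - 1).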
535 let next_bits = if abs == 0 {
536 Self::TINY_BITS
537 } else if bits == abs {
538 bits + 1
539 } else {
540 bits - 1
541 };
542 Self::from_bits(next_bits)
543 }
544
545 /// Returns the greatest number less than `self`.
546 ///
547 /// Let `TINY` be the smallest representable positive `f16`. Then,
548 /// - if `self.is_nan()`, this returns `self`;
549 /// - if `self` is [`INFINITY`], this returns [`MAX`];
550 /// - if `self` is `TINY`, this returns 0.0;
551 /// - if `self` is -0.0 or +0.0, this returns `-TINY`;
552 /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
553 /// - otherwise the unique greatest value less than `self` is returned.
554 ///
555 /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
556 /// is finite `x == x.next_down().next_up()` also holds.
557 ///
558 /// ```rust
559 /// #![feature(f16)]
560 /// # #[cfg(target_has_reliable_f16)] {
561 ///
562 /// let x = 1.0f16;
563 /// // Clamp value into range [0, 1).
564 /// let clamped = x.clamp(0.0, 1.0f16.next_down());
565 /// assert!(clamped < 1.0);
566 /// assert_eq!(clamped.next_up(), 1.0);
567 /// # }
568 /// ```
569 ///
570 /// This operation corresponds to IEEE-754 `nextDown`.
571 ///
572 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
573 /// [`INFINITY`]: Self::INFINITY
574 /// [`MIN`]: Self::MIN
575 /// [`MAX`]: Self::MAX
576 #[inline]
577 #[doc(alias = "nextDown")]
578 #[unstable(feature = "f16", issue = "116909")]
579 pub const fn next_down(self) -> Self {
580 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
581 // denormals to zero. This is in general unsound and unsupported, but here
582 // we do our best to still produce the correct result on such targets.
583 let bits = self.to_bits();
584 if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
585 return self;
586 }
587
588 let abs = bits & !Self::SIGN_MASK;
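// Zero (of either sign) steps down to the smallest negative subnormal; positive
// values move toward zero (bits - 1) and negative values move away from it (bits + 1).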
589 let next_bits = if abs == 0 {
590 Self::NEG_TINY_BITS
591 } else if bits == abs {
592 bits - 1
593 } else {
594 bits + 1
595 };
596 Self::from_bits(next_bits)
597 }
598
599 /// Takes the reciprocal (inverse) of a number, `1/x`.
600 ///
601 /// ```
602 /// #![feature(f16)]
603 /// # #[cfg(target_has_reliable_f16)] {
604 ///
605 /// let x = 2.0_f16;
606 /// let abs_difference = (x.recip() - (1.0 / x)).abs();
607 ///
608 /// assert!(abs_difference <= f16::EPSILON);
609 /// # }
610 /// ```
611 #[inline]
612 #[unstable(feature = "f16", issue = "116909")]
613 #[must_use = "this returns the result of the operation, without modifying the original"]
614 pub const fn recip(self) -> Self {
615 1.0 / self
616 }
617
618 /// Converts radians to degrees.
619 ///
620 /// # Unspecified precision
621 ///
622 /// The precision of this function is non-deterministic. This means it varies by platform,
623 /// Rust version, and can even differ within the same execution from one invocation to the next.
624 ///
625 /// # Examples
626 ///
627 /// ```
628 /// #![feature(f16)]
629 /// # #[cfg(target_has_reliable_f16)] {
630 ///
631 /// let angle = std::f16::consts::PI;
632 ///
633 /// let abs_difference = (angle.to_degrees() - 180.0).abs();
634 /// assert!(abs_difference <= 0.5);
635 /// # }
636 /// ```
637 #[inline]
638 #[unstable(feature = "f16", issue = "116909")]
639 #[must_use = "this returns the result of the operation, without modifying the original"]
640 pub const fn to_degrees(self) -> Self {
641 // Use a literal to avoid double rounding: consts::PI is already rounded,
642 // and dividing would round again.
643 const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16;
644 self * PIS_IN_180
645 }
646
647 /// Converts degrees to radians.
648 ///
649 /// # Unspecified precision
650 ///
651 /// The precision of this function is non-deterministic. This means it varies by platform,
652 /// Rust version, and can even differ within the same execution from one invocation to the next.
653 ///
654 /// # Examples
655 ///
656 /// ```
657 /// #![feature(f16)]
658 /// # #[cfg(target_has_reliable_f16)] {
659 ///
660 /// let angle = 180.0f16;
661 ///
662 /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
663 ///
664 /// assert!(abs_difference <= 0.01);
665 /// # }
666 /// ```
667 #[inline]
668 #[unstable(feature = "f16", issue = "116909")]
669 #[must_use = "this returns the result of the operation, without modifying the original"]
670 pub const fn to_radians(self) -> f16 {
671 // Use a literal to avoid double rounding: consts::PI is already rounded,
672 // and dividing would round again.
673 const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
674 self * RADS_PER_DEG
675 }
676
677 /// Returns the maximum of the two numbers, ignoring NaN.
678 ///
679 /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
680 /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
681 /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
682 /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
683 /// non-deterministically.
684 ///
685 /// The handling of NaNs follows the IEEE 754-2019 semantics for `maximumNumber`, treating all
686 /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
687 /// follows the IEEE 754-2008 semantics for `maxNum`.
688 ///
689 /// ```
690 /// #![feature(f16)]
691 /// # #[cfg(target_has_reliable_f16)] {
692 ///
693 /// let x = 1.0f16;
694 /// let y = 2.0f16;
695 ///
696 /// assert_eq!(x.max(y), y);
697 /// assert_eq!(x.max(f16::NAN), x);
698 /// # }
699 /// ```
700 #[inline]
701 #[unstable(feature = "f16", issue = "116909")]
702 #[rustc_const_unstable(feature = "f16", issue = "116909")]
703 #[must_use = "this returns the result of the comparison, without modifying either input"]
704 pub const fn max(self, other: f16) -> f16 {
705 intrinsics::maxnumf16(self, other)
706 }
707
708 /// Returns the minimum of the two numbers, ignoring NaN.
709 ///
710 /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
711 /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
712 /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
713 /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
714 /// non-deterministically.
715 ///
716 /// The handling of NaNs follows the IEEE 754-2019 semantics for `minimumNumber`, treating all
717 /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
718 /// follows the IEEE 754-2008 semantics for `minNum`.
719 ///
720 /// ```
721 /// #![feature(f16)]
722 /// # #[cfg(target_has_reliable_f16)] {
723 ///
724 /// let x = 1.0f16;
725 /// let y = 2.0f16;
726 ///
727 /// assert_eq!(x.min(y), x);
728 /// assert_eq!(x.min(f16::NAN), x);
729 /// # }
730 /// ```
731 #[inline]
732 #[unstable(feature = "f16", issue = "116909")]
733 #[rustc_const_unstable(feature = "f16", issue = "116909")]
734 #[must_use = "this returns the result of the comparison, without modifying either input"]
735 pub const fn min(self, other: f16) -> f16 {
736 intrinsics::minnumf16(self, other)
737 }
738
739 /// Returns the maximum of the two numbers, propagating NaN.
740 ///
741 /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
742 /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
743 /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
744 /// non-NaN inputs.
745 ///
746 /// This is in contrast to [`f16::max`] which only returns NaN when *both* arguments are NaN,
747 /// and which does not reliably order `-0.0` and `+0.0`.
748 ///
749 /// This follows the IEEE 754-2019 semantics for `maximum`.
750 ///
751 /// ```
752 /// #![feature(f16)]
753 /// #![feature(float_minimum_maximum)]
754 /// # #[cfg(target_has_reliable_f16)] {
755 ///
756 /// let x = 1.0f16;
757 /// let y = 2.0f16;
758 ///
759 /// assert_eq!(x.maximum(y), y);
760 /// assert!(x.maximum(f16::NAN).is_nan());
761 /// # }
762 /// ```
763 #[inline]
764 #[unstable(feature = "f16", issue = "116909")]
765 // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
766 #[must_use = "this returns the result of the comparison, without modifying either input"]
767 pub const fn maximum(self, other: f16) -> f16 {
768 intrinsics::maximumf16(self, other)
769 }
770
771 /// Returns the minimum of the two numbers, propagating NaN.
772 ///
773 /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
774 /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
775 /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
776 /// non-NaN inputs.
777 ///
778 /// This is in contrast to [`f16::min`] which only returns NaN when *both* arguments are NaN,
779 /// and which does not reliably order `-0.0` and `+0.0`.
780 ///
781 /// This follows the IEEE 754-2019 semantics for `minimum`.
782 ///
783 /// ```
784 /// #![feature(f16)]
785 /// #![feature(float_minimum_maximum)]
786 /// # #[cfg(target_has_reliable_f16)] {
787 ///
788 /// let x = 1.0f16;
789 /// let y = 2.0f16;
790 ///
791 /// assert_eq!(x.minimum(y), x);
792 /// assert!(x.minimum(f16::NAN).is_nan());
793 /// # }
794 /// ```
795 #[inline]
796 #[unstable(feature = "f16", issue = "116909")]
797 // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
798 #[must_use = "this returns the result of the comparison, without modifying either input"]
799 pub const fn minimum(self, other: f16) -> f16 {
800 intrinsics::minimumf16(self, other)
801 }
802
803 /// Calculates the midpoint (average) between `self` and `rhs`.
804 ///
805 /// This returns NaN when *either* argument is NaN or if a combination of
806 /// +inf and -inf is provided as arguments.
807 ///
808 /// # Examples
809 ///
810 /// ```
811 /// #![feature(f16)]
812 /// # #[cfg(target_has_reliable_f16)] {
813 ///
814 /// assert_eq!(1f16.midpoint(4.0), 2.5);
815 /// assert_eq!((-5.5f16).midpoint(8.0), 1.25);
816 /// # }
817 /// ```
818 #[inline]
819 #[doc(alias = "average")]
820 #[unstable(feature = "f16", issue = "116909")]
821 #[rustc_const_unstable(feature = "f16", issue = "116909")]
822 pub const fn midpoint(self, other: f16) -> f16 {
823 const HI: f16 = f16::MAX / 2.;
824
825 let (a, b) = (self, other);
826 let abs_a = a.abs();
827 let abs_b = b.abs();
828
829 if abs_a <= HI && abs_b <= HI {
830 // Overflow is impossible
831 (a + b) / 2.
832 } else {
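// At least one operand is large in magnitude, so `a + b` could overflow;
// halving each operand first keeps the intermediate values finite.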
833 (a / 2.) + (b / 2.)
834 }
835 }
836
837 /// Rounds toward zero and converts to any primitive integer type,
838 /// assuming that the value is finite and fits in that type.
839 ///
840 /// ```
841 /// #![feature(f16)]
842 /// # #[cfg(target_has_reliable_f16)] {
843 ///
844 /// let value = 4.6_f16;
845 /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
846 /// assert_eq!(rounded, 4);
847 ///
848 /// let value = -128.9_f16;
849 /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
850 /// assert_eq!(rounded, i8::MIN);
851 /// # }
852 /// ```
853 ///
854 /// # Safety
855 ///
856 /// The value must:
857 ///
858 /// * Not be `NaN`
859 /// * Not be infinite
860 /// * Be representable in the return type `Int`, after truncating off its fractional part
861 #[inline]
862 #[unstable(feature = "f16", issue = "116909")]
863 #[must_use = "this returns the result of the operation, without modifying the original"]
864 pub unsafe fn to_int_unchecked<Int>(self) -> Int
865 where
866 Self: FloatToInt<Int>,
867 {
868 // SAFETY: the caller must uphold the safety contract for
869 // `FloatToInt::to_int_unchecked`.
870 unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
871 }
872
873 /// Raw transmutation to `u16`.
874 ///
875 /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
876 ///
877 /// See [`from_bits`](Self::from_bits) for some discussion of the
878 /// portability of this operation (there are almost no issues).
879 ///
880 /// Note that this function is distinct from `as` casting, which attempts to
881 /// preserve the *numeric* value, and not the bitwise value.
882 ///
883 /// ```
884 /// #![feature(f16)]
885 /// # #[cfg(target_has_reliable_f16)] {
886 ///
887 /// assert_ne!((1f16).to_bits(), 1f16 as u16); // to_bits() is not casting!
888 /// assert_eq!((12.5f16).to_bits(), 0x4a40);
889 /// # }
890 /// ```
891 #[inline]
892 #[unstable(feature = "f16", issue = "116909")]
893 #[must_use = "this returns the result of the operation, without modifying the original"]
894 #[allow(unnecessary_transmutes)]
895 pub const fn to_bits(self) -> u16 {
896 // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
897 unsafe { mem::transmute(self) }
898 }
899
900 /// Raw transmutation from `u16`.
901 ///
902 /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms.
903 /// It turns out this is incredibly portable, for two reasons:
904 ///
905 /// * Floats and Ints have the same endianness on all supported platforms.
906 /// * IEEE 754 very precisely specifies the bit layout of floats.
907 ///
908 /// However there is one caveat: prior to the 2008 version of IEEE 754, how
909 /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
910 /// (notably x86 and ARM) picked the interpretation that was ultimately
911 /// standardized in 2008, but some didn't (notably MIPS). As a result, all
912 /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
913 ///
914 /// Rather than trying to preserve signaling-ness cross-platform, this
915 /// implementation favors preserving the exact bits. This means that
916 /// any payloads encoded in NaNs will be preserved even if the result of
917 /// this method is sent over the network from an x86 machine to a MIPS one.
918 ///
919 /// If the results of this method are only manipulated by the same
920 /// architecture that produced them, then there is no portability concern.
921 ///
922 /// If the input isn't NaN, then there is no portability concern.
923 ///
924 /// If you don't care about signalingness (very likely), then there is no
925 /// portability concern.
926 ///
927 /// Note that this function is distinct from `as` casting, which attempts to
928 /// preserve the *numeric* value, and not the bitwise value.
929 ///
930 /// ```
931 /// #![feature(f16)]
932 /// # #[cfg(target_has_reliable_f16)] {
933 ///
934 /// let v = f16::from_bits(0x4a40);
935 /// assert_eq!(v, 12.5);
936 /// # }
937 /// ```
938 #[inline]
939 #[must_use]
940 #[unstable(feature = "f16", issue = "116909")]
941 #[allow(unnecessary_transmutes)]
942 pub const fn from_bits(v: u16) -> Self {
943 // It turns out the safety issues with sNaN were overblown! Hooray!
944 // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
945 unsafe { mem::transmute(v) }
946 }
947
948 /// Returns the memory representation of this floating point number as a byte array in
949 /// big-endian (network) byte order.
950 ///
951 /// See [`from_bits`](Self::from_bits) for some discussion of the
952 /// portability of this operation (there are almost no issues).
953 ///
954 /// # Examples
955 ///
956 /// ```
957 /// #![feature(f16)]
958 /// # #[cfg(target_has_reliable_f16)] {
959 ///
960 /// let bytes = 12.5f16.to_be_bytes();
961 /// assert_eq!(bytes, [0x4a, 0x40]);
962 /// # }
963 /// ```
964 #[inline]
965 #[unstable(feature = "f16", issue = "116909")]
966 #[must_use = "this returns the result of the operation, without modifying the original"]
967 pub const fn to_be_bytes(self) -> [u8; 2] {
968 self.to_bits().to_be_bytes()
969 }
970
971 /// Returns the memory representation of this floating point number as a byte array in
972 /// little-endian byte order.
973 ///
974 /// See [`from_bits`](Self::from_bits) for some discussion of the
975 /// portability of this operation (there are almost no issues).
976 ///
977 /// # Examples
978 ///
979 /// ```
980 /// #![feature(f16)]
981 /// # #[cfg(target_has_reliable_f16)] {
982 ///
983 /// let bytes = 12.5f16.to_le_bytes();
984 /// assert_eq!(bytes, [0x40, 0x4a]);
985 /// # }
986 /// ```
987 #[inline]
988 #[unstable(feature = "f16", issue = "116909")]
989 #[must_use = "this returns the result of the operation, without modifying the original"]
990 pub const fn to_le_bytes(self) -> [u8; 2] {
991 self.to_bits().to_le_bytes()
992 }
993
994 /// Returns the memory representation of this floating point number as a byte array in
995 /// native byte order.
996 ///
997 /// As the target platform's native endianness is used, portable code
998 /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
999 ///
1000 /// [`to_be_bytes`]: f16::to_be_bytes
1001 /// [`to_le_bytes`]: f16::to_le_bytes
1002 ///
1003 /// See [`from_bits`](Self::from_bits) for some discussion of the
1004 /// portability of this operation (there are almost no issues).
1005 ///
1006 /// # Examples
1007 ///
1008 /// ```
1009 /// #![feature(f16)]
1010 /// # #[cfg(target_has_reliable_f16)] {
1011 ///
1012 /// let bytes = 12.5f16.to_ne_bytes();
1013 /// assert_eq!(
1014 /// bytes,
1015 /// if cfg!(target_endian = "big") {
1016 /// [0x4a, 0x40]
1017 /// } else {
1018 /// [0x40, 0x4a]
1019 /// }
1020 /// );
1021 /// # }
1022 /// ```
1023 #[inline]
1024 #[unstable(feature = "f16", issue = "116909")]
1025 #[must_use = "this returns the result of the operation, without modifying the original"]
1026 pub const fn to_ne_bytes(self) -> [u8; 2] {
1027 self.to_bits().to_ne_bytes()
1028 }
1029
1030 /// Creates a floating point value from its representation as a byte array in big endian.
1031 ///
1032 /// See [`from_bits`](Self::from_bits) for some discussion of the
1033 /// portability of this operation (there are almost no issues).
1034 ///
1035 /// # Examples
1036 ///
1037 /// ```
1038 /// #![feature(f16)]
1039 /// # #[cfg(target_has_reliable_f16)] {
1040 ///
1041 /// let value = f16::from_be_bytes([0x4a, 0x40]);
1042 /// assert_eq!(value, 12.5);
1043 /// # }
1044 /// ```
1045 #[inline]
1046 #[must_use]
1047 #[unstable(feature = "f16", issue = "116909")]
1048 pub const fn from_be_bytes(bytes: [u8; 2]) -> Self {
1049 Self::from_bits(u16::from_be_bytes(bytes))
1050 }
1051
1052 /// Creates a floating point value from its representation as a byte array in little endian.
1053 ///
1054 /// See [`from_bits`](Self::from_bits) for some discussion of the
1055 /// portability of this operation (there are almost no issues).
1056 ///
1057 /// # Examples
1058 ///
1059 /// ```
1060 /// #![feature(f16)]
1061 /// # #[cfg(target_has_reliable_f16)] {
1062 ///
1063 /// let value = f16::from_le_bytes([0x40, 0x4a]);
1064 /// assert_eq!(value, 12.5);
1065 /// # }
1066 /// ```
1067 #[inline]
1068 #[must_use]
1069 #[unstable(feature = "f16", issue = "116909")]
1070 pub const fn from_le_bytes(bytes: [u8; 2]) -> Self {
1071 Self::from_bits(u16::from_le_bytes(bytes))
1072 }
1073
1074 /// Creates a floating point value from its representation as a byte array in native endian.
1075 ///
1076 /// As the target platform's native endianness is used, portable code
1077 /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
1078 /// appropriate, instead.
1079 ///
1080 /// [`from_be_bytes`]: f16::from_be_bytes
1081 /// [`from_le_bytes`]: f16::from_le_bytes
1082 ///
1083 /// See [`from_bits`](Self::from_bits) for some discussion of the
1084 /// portability of this operation (there are almost no issues).
1085 ///
1086 /// # Examples
1087 ///
1088 /// ```
1089 /// #![feature(f16)]
1090 /// # #[cfg(target_has_reliable_f16)] {
1091 ///
1092 /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
1093 /// [0x4a, 0x40]
1094 /// } else {
1095 /// [0x40, 0x4a]
1096 /// });
1097 /// assert_eq!(value, 12.5);
1098 /// # }
1099 /// ```
1100 #[inline]
1101 #[must_use]
1102 #[unstable(feature = "f16", issue = "116909")]
1103 pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self {
1104 Self::from_bits(u16::from_ne_bytes(bytes))
1105 }
1106
1107 /// Returns the ordering between `self` and `other`.
1108 ///
1109 /// Unlike the standard partial comparison between floating point numbers,
1110 /// this comparison always produces an ordering in accordance with
1111 /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
1112 /// floating point standard. The values are ordered in the following sequence:
1113 ///
1114 /// - negative quiet NaN
1115 /// - negative signaling NaN
1116 /// - negative infinity
1117 /// - negative numbers
1118 /// - negative subnormal numbers
1119 /// - negative zero
1120 /// - positive zero
1121 /// - positive subnormal numbers
1122 /// - positive numbers
1123 /// - positive infinity
1124 /// - positive signaling NaN
1125 /// - positive quiet NaN.
1126 ///
1127 /// The ordering established by this function does not always agree with the
1128 /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
1129 /// they consider negative and positive zero equal, while `total_cmp`
1130 /// doesn't.
1131 ///
1132 /// The interpretation of the signaling NaN bit follows the definition in
1133 /// the IEEE 754 standard, which may not match the interpretation by some of
1134 /// the older, non-conformant (e.g. MIPS) hardware implementations.
1135 ///
1136 /// # Example
1137 ///
1138 /// ```
1139 /// #![feature(f16)]
1140 /// # #[cfg(target_has_reliable_f16)] {
1141 ///
1142 /// struct GoodBoy {
1143 /// name: &'static str,
1144 /// weight: f16,
1145 /// }
1146 ///
1147 /// let mut bois = vec![
1148 /// GoodBoy { name: "Pucci", weight: 0.1 },
1149 /// GoodBoy { name: "Woofer", weight: 99.0 },
1150 /// GoodBoy { name: "Yapper", weight: 10.0 },
1151 /// GoodBoy { name: "Chonk", weight: f16::INFINITY },
1152 /// GoodBoy { name: "Abs. Unit", weight: f16::NAN },
1153 /// GoodBoy { name: "Floaty", weight: -5.0 },
1154 /// ];
1155 ///
1156 /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
1157 ///
1158 /// // `f16::NAN` could be positive or negative, which will affect the sort order.
1159 /// if f16::NAN.is_sign_negative() {
1160 /// bois.into_iter().map(|b| b.weight)
1161 /// .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
1162 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1163 /// } else {
1164 /// bois.into_iter().map(|b| b.weight)
1165 /// .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
1166 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1167 /// }
1168 /// # }
1169 /// ```
1170 #[inline]
1171 #[must_use]
1172 #[unstable(feature = "f16", issue = "116909")]
1173 #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
1174 pub const fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
1175 let mut left = self.to_bits() as i16;
1176 let mut right = other.to_bits() as i16;
1177
1178 // In case of negatives, flip all the bits except the sign
1179 // to achieve a similar layout as two's complement integers
1180 //
1181 // Why does this work? IEEE 754 floats consist of three fields:
1182 // Sign bit, exponent and mantissa. The set of exponent and mantissa
1183 // fields as a whole have the property that their bitwise order is
1184 // equal to the numeric magnitude where the magnitude is defined.
1185 // The magnitude is not normally defined on NaN values, but
1186 // IEEE 754 totalOrder defines the NaN values also to follow the
1187 // bitwise order. This leads to order explained in the doc comment.
1188 // However, the representation of magnitude is the same for negative
1189 // and positive numbers – only the sign bit is different.
1190 // To easily compare the floats as signed integers, we need to
1191 // flip the exponent and mantissa bits in case of negative numbers.
1192 // We effectively convert the numbers to "two's complement" form.
1193 //
1194 // To do the flipping, we construct a mask and XOR against it.
1195 // We branchlessly calculate an "all-ones except for the sign bit"
1196 // mask from negative-signed values: right shifting sign-extends
1197 // the integer, so we "fill" the mask with sign bits, and then
1198 // convert to unsigned to push one more zero bit.
1199 // On positive values, the mask is all zeros, so it's a no-op.
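// A short worked example with the two zeros: -0.0 has bits 0x8000 (-32768 as
// i16), so the mask is 0x7fff and the XOR yields -1, while +0.0 stays 0; the
// signed comparison therefore places -0.0 strictly below +0.0, as totalOrder requires.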
1200 left ^= (((left >> 15) as u16) >> 1) as i16;
1201 right ^= (((right >> 15) as u16) >> 1) as i16;
1202
1203 left.cmp(&right)
1204 }
1205
1206 /// Restrict a value to a certain interval unless it is NaN.
1207 ///
1208 /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
1209 /// less than `min`. Otherwise this returns `self`.
1210 ///
1211 /// Note that this function returns NaN if the initial value was NaN as
1212 /// well. If the result is zero and among the three inputs `self`, `min`, and `max` there are
1213 /// zeros with different sign, either `0.0` or `-0.0` is returned non-deterministically.
1214 ///
1215 /// # Panics
1216 ///
1217 /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
1218 ///
1219 /// # Examples
1220 ///
1221 /// ```
1222 /// #![feature(f16)]
1223 /// # #[cfg(target_has_reliable_f16)] {
1224 ///
1225 /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
1226 /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
1227 /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
1228 /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
1229 ///
1230 /// // These always return zero, but the sign (which is ignored by `==`) is non-deterministic.
1231 /// assert!((0.0f16).clamp(-0.0, -0.0) == 0.0);
1232 /// assert!((1.0f16).clamp(-0.0, 0.0) == 0.0);
1233 /// // This is definitely a negative zero.
1234 /// assert!((-1.0f16).clamp(-0.0, 1.0).is_sign_negative());
1235 /// # }
1236 /// ```
1237 #[inline]
1238 #[unstable(feature = "f16", issue = "116909")]
1239 #[must_use = "method returns a new number and does not mutate the original value"]
1240 pub const fn clamp(mut self, min: f16, max: f16) -> f16 {
1241 const_assert!(
1242 min <= max,
1243 "min > max, or either was NaN",
1244 "min > max, or either was NaN. min = {min:?}, max = {max:?}",
1245 min: f16,
1246 max: f16,
1247 );
1248
1249 if self < min {
1250 self = min;
1251 }
1252 if self > max {
1253 self = max;
1254 }
1255 self
1256 }
1257
1258 /// Clamps this number to a symmetric range centered around zero.
1259 ///
1260 /// The method clamps the number's magnitude (absolute value) to be at most `limit`.
1261 ///
1262 /// This is functionally equivalent to `self.clamp(-limit, limit)`, but is more
1263 /// explicit about the intent.
1264 ///
1265 /// # Panics
1266 ///
1267 /// Panics if `limit` is negative or NaN, as this indicates a logic error.
1268 ///
1269 /// # Examples
1270 ///
1271 /// ```
1272 /// #![feature(f16)]
1273 /// #![feature(clamp_magnitude)]
1274 /// # #[cfg(target_has_reliable_f16)] {
1275 /// assert_eq!(5.0f16.clamp_magnitude(3.0), 3.0);
1276 /// assert_eq!((-5.0f16).clamp_magnitude(3.0), -3.0);
1277 /// assert_eq!(2.0f16.clamp_magnitude(3.0), 2.0);
1278 /// assert_eq!((-2.0f16).clamp_magnitude(3.0), -2.0);
1279 /// # }
1280 /// ```
1281 #[inline]
1282 #[unstable(feature = "clamp_magnitude", issue = "148519")]
1283 #[must_use = "this returns the clamped value and does not modify the original"]
1284 pub fn clamp_magnitude(self, limit: f16) -> f16 {
1285 assert!(limit >= 0.0, "limit must be non-negative");
1286 let limit = limit.abs(); // Canonicalises -0.0 to 0.0
1287 self.clamp(-limit, limit)
1288 }
1289
1290 /// Computes the absolute value of `self`.
1291 ///
1292 /// This function always returns the precise result.
1293 ///
1294 /// # Examples
1295 ///
1296 /// ```
1297 /// #![feature(f16)]
1298 /// # #[cfg(target_has_reliable_f16_math)] {
1299 ///
1300 /// let x = 3.5_f16;
1301 /// let y = -3.5_f16;
1302 ///
1303 /// assert_eq!(x.abs(), x);
1304 /// assert_eq!(y.abs(), -y);
1305 ///
1306 /// assert!(f16::NAN.abs().is_nan());
1307 /// # }
1308 /// ```
1309 #[inline]
1310 #[unstable(feature = "f16", issue = "116909")]
1311 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1312 #[must_use = "method returns a new number and does not mutate the original value"]
1313 pub const fn abs(self) -> Self {
1314 intrinsics::fabsf16(self)
1315 }
1316
1317 /// Returns a number that represents the sign of `self`.
1318 ///
1319 /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
1320 /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
1321 /// - NaN if the number is NaN
1322 ///
1323 /// # Examples
1324 ///
1325 /// ```
1326 /// #![feature(f16)]
1327 /// # #[cfg(target_has_reliable_f16)] {
1328 ///
1329 /// let f = 3.5_f16;
1330 ///
1331 /// assert_eq!(f.signum(), 1.0);
1332 /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0);
1333 ///
1334 /// assert!(f16::NAN.signum().is_nan());
1335 /// # }
1336 /// ```
1337 #[inline]
1338 #[unstable(feature = "f16", issue = "116909")]
1339 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1340 #[must_use = "method returns a new number and does not mutate the original value"]
1341 pub const fn signum(self) -> f16 {
1342 if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) }
1343 }
1344
1345 /// Returns a number composed of the magnitude of `self` and the sign of
1346 /// `sign`.
1347 ///
1348 /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
1349 /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
1350 /// returned.
1351 ///
1352 /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
1353 /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
1354 /// doesn't guarantee that the bit patterns of NaNs are preserved over arithmetic operations, the
1355 /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable
1356 /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
1357 /// info.
1358 ///
1359 /// # Examples
1360 ///
1361 /// ```
1362 /// #![feature(f16)]
1363 /// # #[cfg(target_has_reliable_f16_math)] {
1364 ///
1365 /// let f = 3.5_f16;
1366 ///
1367 /// assert_eq!(f.copysign(0.42), 3.5_f16);
1368 /// assert_eq!(f.copysign(-0.42), -3.5_f16);
1369 /// assert_eq!((-f).copysign(0.42), 3.5_f16);
1370 /// assert_eq!((-f).copysign(-0.42), -3.5_f16);
1371 ///
1372 /// assert!(f16::NAN.copysign(1.0).is_nan());
1373 /// # }
1374 /// ```
1375 #[inline]
1376 #[unstable(feature = "f16", issue = "116909")]
1377 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1378 #[must_use = "method returns a new number and does not mutate the original value"]
1379 pub const fn copysign(self, sign: f16) -> f16 {
1380 intrinsics::copysignf16(self, sign)
1381 }
1382
1383 /// Float addition that allows optimizations based on algebraic rules.
1384 ///
1385 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
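///
/// A brief usage sketch; because the precision of algebraic operations is
/// unspecified, the example checks the result against a tolerance rather than
/// an exact value:
///
/// ```
/// #![feature(f16)]
/// #![feature(float_algebraic)]
/// # #[cfg(target_has_reliable_f16)] {
///
/// let sum = 1.0f16.algebraic_add(2.0);
/// assert!((sum - 3.0).abs() <= f16::EPSILON);
/// # }
/// ```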
1386 #[must_use = "method returns a new number and does not mutate the original value"]
1387 #[unstable(feature = "float_algebraic", issue = "136469")]
1388 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1389 #[inline]
1390 pub const fn algebraic_add(self, rhs: f16) -> f16 {
1391 intrinsics::fadd_algebraic(self, rhs)
1392 }
1393
1394 /// Float subtraction that allows optimizations based on algebraic rules.
1395 ///
1396 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1397 #[must_use = "method returns a new number and does not mutate the original value"]
1398 #[unstable(feature = "float_algebraic", issue = "136469")]
1399 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1400 #[inline]
1401 pub const fn algebraic_sub(self, rhs: f16) -> f16 {
1402 intrinsics::fsub_algebraic(self, rhs)
1403 }
1404
1405 /// Float multiplication that allows optimizations based on algebraic rules.
1406 ///
1407 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1408 #[must_use = "method returns a new number and does not mutate the original value"]
1409 #[unstable(feature = "float_algebraic", issue = "136469")]
1410 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1411 #[inline]
1412 pub const fn algebraic_mul(self, rhs: f16) -> f16 {
1413 intrinsics::fmul_algebraic(self, rhs)
1414 }
1415
1416 /// Float division that allows optimizations based on algebraic rules.
1417 ///
1418 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1419 #[must_use = "method returns a new number and does not mutate the original value"]
1420 #[unstable(feature = "float_algebraic", issue = "136469")]
1421 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1422 #[inline]
1423 pub const fn algebraic_div(self, rhs: f16) -> f16 {
1424 intrinsics::fdiv_algebraic(self, rhs)
1425 }
1426
1427 /// Float remainder that allows optimizations based on algebraic rules.
1428 ///
1429 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1430 #[must_use = "method returns a new number and does not mutate the original value"]
1431 #[unstable(feature = "float_algebraic", issue = "136469")]
1432 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1433 #[inline]
1434 pub const fn algebraic_rem(self, rhs: f16) -> f16 {
1435 intrinsics::frem_algebraic(self, rhs)
1436 }
1437}
1438
1439// Functions in this module fall into `core_float_math`
1440// #[unstable(feature = "core_float_math", issue = "137578")]
1441#[cfg(not(test))]
1442#[doc(test(attr(feature(cfg_target_has_reliable_f16_f128), expect(internal_features))))]
1443impl f16 {
1444 /// Returns the largest integer less than or equal to `self`.
1445 ///
1446 /// This function always returns the precise result.
1447 ///
1448 /// # Examples
1449 ///
1450 /// ```
1451 /// #![feature(f16)]
1452 /// # #[cfg(not(miri))]
1453 /// # #[cfg(target_has_reliable_f16)] {
1454 ///
1455 /// let f = 3.7_f16;
1456 /// let g = 3.0_f16;
1457 /// let h = -3.7_f16;
1458 ///
1459 /// assert_eq!(f.floor(), 3.0);
1460 /// assert_eq!(g.floor(), 3.0);
1461 /// assert_eq!(h.floor(), -4.0);
1462 /// # }
1463 /// ```
1464 #[inline]
1465 #[rustc_allow_incoherent_impl]
1466 #[unstable(feature = "f16", issue = "116909")]
1467 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1468 #[must_use = "method returns a new number and does not mutate the original value"]
1469 pub const fn floor(self) -> f16 {
1470 intrinsics::floorf16(self)
1471 }
1472
1473 /// Returns the smallest integer greater than or equal to `self`.
1474 ///
1475 /// This function always returns the precise result.
1476 ///
1477 /// # Examples
1478 ///
1479 /// ```
1480 /// #![feature(f16)]
1481 /// # #[cfg(not(miri))]
1482 /// # #[cfg(target_has_reliable_f16)] {
1483 ///
1484 /// let f = 3.01_f16;
1485 /// let g = 4.0_f16;
1486 ///
1487 /// assert_eq!(f.ceil(), 4.0);
1488 /// assert_eq!(g.ceil(), 4.0);
1489 /// # }
1490 /// ```
1491 #[inline]
1492 #[doc(alias = "ceiling")]
1493 #[rustc_allow_incoherent_impl]
1494 #[unstable(feature = "f16", issue = "116909")]
1495 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1496 #[must_use = "method returns a new number and does not mutate the original value"]
1497 pub const fn ceil(self) -> f16 {
1498 intrinsics::ceilf16(self)
1499 }
1500
1501 /// Returns the nearest integer to `self`. If a value is half-way between two
1502 /// integers, round away from `0.0`.
1503 ///
1504 /// This function always returns the precise result.
1505 ///
1506 /// # Examples
1507 ///
1508 /// ```
1509 /// #![feature(f16)]
1510 /// # #[cfg(not(miri))]
1511 /// # #[cfg(target_has_reliable_f16)] {
1512 ///
1513 /// let f = 3.3_f16;
1514 /// let g = -3.3_f16;
1515 /// let h = -3.7_f16;
1516 /// let i = 3.5_f16;
1517 /// let j = 4.5_f16;
1518 ///
1519 /// assert_eq!(f.round(), 3.0);
1520 /// assert_eq!(g.round(), -3.0);
1521 /// assert_eq!(h.round(), -4.0);
1522 /// assert_eq!(i.round(), 4.0);
1523 /// assert_eq!(j.round(), 5.0);
1524 /// # }
1525 /// ```
1526 #[inline]
1527 #[rustc_allow_incoherent_impl]
1528 #[unstable(feature = "f16", issue = "116909")]
1529 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1530 #[must_use = "method returns a new number and does not mutate the original value"]
1531 pub const fn round(self) -> f16 {
1532 intrinsics::roundf16(self)
1533 }
1534
    /// Returns the nearest integer to a number. Rounds half-way cases to the number
    /// with an even least significant digit.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.3_f16;
    /// let g = -3.3_f16;
    /// let h = 3.5_f16;
    /// let i = 4.5_f16;
    ///
    /// assert_eq!(f.round_ties_even(), 3.0);
    /// assert_eq!(g.round_ties_even(), -3.0);
    /// assert_eq!(h.round_ties_even(), 4.0);
    /// assert_eq!(i.round_ties_even(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round_ties_even(self) -> f16 {
        intrinsics::round_ties_even_f16(self)
    }

    /// Returns the integer part of `self`.
    /// This means that non-integer numbers are always truncated towards zero.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.trunc(), 3.0);
    /// assert_eq!(g.trunc(), 3.0);
    /// assert_eq!(h.trunc(), -3.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "truncate")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn trunc(self) -> f16 {
        intrinsics::truncf16(self)
    }

    /// Returns the fractional part of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 3.6_f16;
    /// let y = -3.6_f16;
    /// let abs_difference_x = (x.fract() - 0.6).abs();
    /// let abs_difference_y = (y.fract() - (-0.6)).abs();
    ///
    /// assert!(abs_difference_x <= f16::EPSILON);
    /// assert!(abs_difference_y <= f16::EPSILON);
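    ///
    /// // The integer and fractional parts recombine exactly for these values.
    /// assert_eq!(x.trunc() + x.fract(), x);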
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn fract(self) -> f16 {
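        // Subtracting the truncated integer part is always exact (see the doc
        // comment above), so this is computed directly rather than via a
        // dedicated intrinsic.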
        self - self.trunc()
    }

    /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
    /// the target architecture has a dedicated `fma` CPU instruction. However,
    /// this is not always true, and will depend heavily on designing algorithms
    /// with specific target hardware in mind.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as
    /// `fusedMultiplyAdd` and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let m = 10.0_f16;
    /// let x = 4.0_f16;
    /// let b = 60.0_f16;
    ///
    /// assert_eq!(m.mul_add(x, b), 100.0);
    /// assert_eq!(m * x + b, 100.0);
    ///
    /// let one_plus_eps = 1.0_f16 + f16::EPSILON;
    /// let one_minus_eps = 1.0_f16 - f16::EPSILON;
    /// let minus_one = -1.0_f16;
    ///
    /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
    /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
    /// // Different rounding with the non-fused multiply and add.
    /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[doc(alias = "fmaf16", alias = "fusedMultiplyAdd")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn mul_add(self, a: f16, b: f16) -> f16 {
        intrinsics::fmaf16(self, a, b)
    }

    /// Calculates Euclidean division, the matching method for `rem_euclid`.
    ///
    /// This computes the integer `n` such that
    /// `self = n * rhs + self.rem_euclid(rhs)`.
    /// In other words, the result is `self / rhs` rounded to the integer `n`
    /// such that `self >= n * rhs`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let a: f16 = 7.0;
    /// let b = 4.0;
    /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
    /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
    /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
    /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
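    ///
    /// // The defining identity `self = n * rhs + self.rem_euclid(rhs)`,
    /// // checked here for values where every step is exact.
    /// let n = (-a).div_euclid(b);
    /// assert_eq!(n * b + (-a).rem_euclid(b), -a);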
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn div_euclid(self, rhs: f16) -> f16 {
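        // Truncating division rounds the quotient towards zero. When the
        // remainder is negative, the Euclidean quotient is one step further:
        // down if `rhs` is positive, up if `rhs` is negative.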
        let q = (self / rhs).trunc();
        if self % rhs < 0.0 {
            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
        }
        q
    }

    /// Calculates the least nonnegative remainder of `self` when
    /// divided by `rhs`.
    ///
    /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
    /// most cases. However, due to floating-point round-off error it can
    /// result in `r == rhs.abs()`, violating the mathematical definition, if
    /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
    /// This result is not an element of the function's codomain, but it is the
    /// closest representable floating-point value to the real-valued result and
    /// thus fulfills the property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
    /// approximately.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let a: f16 = 7.0;
    /// let b = 4.0;
    /// assert_eq!(a.rem_euclid(b), 3.0);
    /// assert_eq!((-a).rem_euclid(b), 1.0);
    /// assert_eq!(a.rem_euclid(-b), 3.0);
    /// assert_eq!((-a).rem_euclid(-b), 1.0);
    /// // limitation due to round-off error
    /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[doc(alias = "modulo", alias = "mod")]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn rem_euclid(self, rhs: f16) -> f16 {
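        // The `%` operator keeps the sign of `self`; a negative remainder is
        // shifted into the nonnegative range by adding `rhs.abs()`.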
        let r = self % rhs;
        if r < 0.0 { r + rhs.abs() } else { r }
    }

    /// Raises a number to an integer power.
    ///
    /// Using this function is generally faster than using `powf`.
    /// It might have a different sequence of rounding operations than `powf`,
    /// so the results are not guaranteed to agree.
    ///
    /// Note that this function is special in that it can return non-NaN results for NaN inputs. For
    /// example, `f16::powi(f16::NAN, 0)` returns `1.0`. However, if an input is a *signaling*
    /// NaN, then the result is non-deterministically either a NaN or the result that the
    /// corresponding quiet NaN would produce.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 2.0_f16;
    /// let abs_difference = (x.powi(2) - (x * x)).abs();
    /// assert!(abs_difference <= f16::EPSILON);
    ///
    /// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
    /// assert_eq!(f16::powi(0.0, 0), 1.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn powi(self, n: i32) -> f16 {
        intrinsics::powif16(self, n)
    }

    /// Returns the square root of a number.
    ///
    /// Returns NaN if `self` is a negative number other than `-0.0`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
    /// and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let positive = 4.0_f16;
    /// let negative = -4.0_f16;
    /// let negative_zero = -0.0_f16;
    ///
    /// assert_eq!(positive.sqrt(), 2.0);
    /// assert!(negative.sqrt().is_nan());
    /// assert!(negative_zero.sqrt() == negative_zero);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "squareRoot")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn sqrt(self) -> f16 {
        intrinsics::sqrtf16(self)
    }

    /// Returns the cube root of a number.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// This function currently corresponds to the `cbrtf` function from libc on Unix
    /// and Windows. Note that this might change in the future.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 8.0f16;
    ///
    /// // x^(1/3) - 2 == 0
    /// let abs_difference = (x.cbrt() - 2.0).abs();
    ///
    /// assert!(abs_difference <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn cbrt(self) -> f16 {
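        // Compute in `f32` using libm's `cbrtf`, then round the result back to
        // `f16`; the precision of this operation is unspecified (see above).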
        libm::cbrtf(self as f32) as f16
    }
}