// glam/f32/neon/vec4.rs

1// Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3use crate::{f32::math, neon::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
11#[cfg(feature = "zerocopy")]
12use zerocopy_derive::*;
13
// Const-context bit-cast between `[f32; 4]` and the NEON-backed `Vec4`.
// NOTE(review): presumably used because `transmute` to `float32x4_t` is not
// available in stable `const fn` — both fields share the same 16-byte layout.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
19
/// Creates a 4-dimensional vector.
///
/// Free-function shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
26
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
// Newtype over the NEON 128-bit register type; `repr(transparent)` keeps the
// layout identical to a raw `float32x4_t`.
pub struct Vec4(pub(crate) float32x4_t);
40
41impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    // Backend-selection flags; this file is the Arm NEON backend, so only
    // `USES_NEON` is `true`. Mirrored across all generated glam backends.
    /// Vec4 uses Rust Portable SIMD
    pub const USES_CORE_SIMD: bool = false;
    /// Vec4 uses Arm NEON
    pub const USES_NEON: bool = true;
    /// Vec4 uses scalar math
    pub const USES_SCALAR_MATH: bool = false;
    /// Vec4 uses Intel SSE2
    pub const USES_SSE2: bool = false;
    /// Vec4 uses WebAssembly 128-bit SIMD
    pub const USES_WASM32_SIMD: bool = false;
103
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        // SAFETY: `UnionCast` is `repr(C)` and both fields are 16 bytes, so
        // reading `v` after writing `a` is a well-defined bit-cast.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }
110
    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        // SAFETY: same `repr(C)` union bit-cast as `new`.
        unsafe { UnionCast { a: [v; 4] }.v }
    }
117
118    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
119    #[inline]
120    #[must_use]
121    pub fn map<F>(self, f: F) -> Self
122    where
123        F: Fn(f32) -> f32,
124    {
125        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
126    }
127
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        // `vbslq_f32` bit-selects: mask bits set -> `if_true`, clear -> `if_false`.
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }
138
    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        // Element order matches the array: [x, y, z, w].
        Self::new(a[0], a[1], a[2], a[3])
    }
145
    /// Converts `self` to `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // SAFETY: `Vec4` is `repr(transparent)` over a 16-byte `float32x4_t`,
        // which has the same size and layout as `[f32; 4]`.
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }
152
    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        // Explicit length check so indexing below cannot panic lane-by-lane.
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }
164
    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        // SAFETY: the assert above guarantees at least 4 writable f32s at the
        // destination; `vst1q_f32` stores exactly 4.
        unsafe {
            vst1q_f32(slice.as_mut_ptr(), self.0);
        }
    }
177
    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
    ///
    /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        // Delegates to the generated swizzle; imported locally to keep the
        // trait out of the file-wide namespace.
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }
189
190    /// Creates a 4D vector from `self` with the given value of `x`.
191    #[inline]
192    #[must_use]
193    pub fn with_x(mut self, x: f32) -> Self {
194        self.x = x;
195        self
196    }
197
198    /// Creates a 4D vector from `self` with the given value of `y`.
199    #[inline]
200    #[must_use]
201    pub fn with_y(mut self, y: f32) -> Self {
202        self.y = y;
203        self
204    }
205
206    /// Creates a 4D vector from `self` with the given value of `z`.
207    #[inline]
208    #[must_use]
209    pub fn with_z(mut self, z: f32) -> Self {
210        self.z = z;
211        self
212    }
213
214    /// Creates a 4D vector from `self` with the given value of `w`.
215    #[inline]
216    #[must_use]
217    pub fn with_w(mut self, w: f32) -> Self {
218        self.w = w;
219        self
220    }
221
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // `dot4` is the crate's shared NEON 4-lane dot-product helper.
        unsafe { dot4(self.0, rhs.0) }
    }
228
    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        // Like `dot`, but the helper broadcasts the result to all four lanes.
        Self(unsafe { dot4_into_f32x4(self.0, rhs.0) })
    }
235
    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[min(self.x, rhs.x), min(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for minNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }
247
    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for maxNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }
259
    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        // max-then-min pins each lane into [min, max].
        self.max(min).min(max)
    }
276
    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        // Across-lanes minimum reduction in a single intrinsic.
        unsafe { vminnmvq_f32(self.0) }
    }
288
    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        // Across-lanes maximum reduction in a single intrinsic.
        unsafe { vmaxnmvq_f32(self.0) }
    }
300
301    /// Returns the index of the first minimum element of `self`.
302    #[doc(alias = "argmin")]
303    #[inline]
304    #[must_use]
305    pub fn min_position(self) -> usize {
306        let mut min = self.x;
307        let mut index = 0;
308        if self.y < min {
309            min = self.y;
310            index = 1;
311        }
312        if self.z < min {
313            min = self.z;
314            index = 2;
315        }
316        if self.w < min {
317            index = 3;
318        }
319        index
320    }
321
322    /// Returns the index of the first maximum element of `self`.
323    #[doc(alias = "argmax")]
324    #[inline]
325    #[must_use]
326    pub fn max_position(self) -> usize {
327        let mut max = self.x;
328        let mut index = 0;
329        if self.y > max {
330            max = self.y;
331            index = 1;
332        }
333        if self.z > max {
334            max = self.z;
335            index = 2;
336        }
337        if self.w > max {
338            index = 3;
339        }
340        index
341    }
342
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        // Across-lanes add reduction.
        unsafe { vaddvq_f32(self.0) }
    }
351
    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        // No horizontal-multiply intrinsic exists, so chain scalar-by-lane
        // multiplies: ((x * y) * z) * w.
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            let s = vmuls_laneq_f32(s, self.0, 2);
            vmuls_laneq_f32(s, self.0, 3)
        }
    }
364
    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        // Lane-wise equality; true lanes are all-ones in the u32 mask.
        BVec4A(unsafe { vceqq_f32(self.0, rhs.0) })
    }
375
    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        // NEON has no "not equal" compare, so invert the equality mask.
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }
386
    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }
397
    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }
408
    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcleq_f32(self.0, rhs.0) })
    }
419
    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(self.0, rhs.0) })
    }
430
    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }
437
    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Bit trick: (self & bits(-1.0)) keeps each lane's sign bit (the other
        // kept bits overlap bits(1.0)); OR-ing with bits(1.0) then yields
        // exactly +/-1.0 per lane.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // NaN lanes would otherwise become +/-1.0; restore the original NaNs.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
458
    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // `mask` is the sign bit of each lane (bits of -0.0).
        let mask = Self::splat(-0.0);
        // result = (sign bits of rhs) | (magnitude bits of self).
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }
474
    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
    ///
    /// An element is negative if it has a negative sign, including -0.0, NaNs with negative sign
    /// bit and negative infinity.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            // Isolate each lane's sign bit, shift it down to bit 0, then pack
            // the four lane bits into the low nibble.
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            let w = vgetq_lane_u32(m, 3) >> 31;
            x | y << 1 | z << 2 | w << 3
        }
    }
496
    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }
504
    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        // |x| < INFINITY is false for both infinities and NaN, true otherwise.
        BVec4A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }
513
    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }
520
    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // NaN is the only value not equal to itself; invert the self-equality mask.
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }
529
    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }
537
    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }
547
    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }
556
    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }
563
    /// Compute the squared euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }
570
    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        // Scalar fallback per lane; no NEON equivalent for Euclidean division.
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }
582
    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        // Scalar fallback per lane; no NEON equivalent for Euclidean remainder.
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }
596
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }
614
615    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
616    ///
617    /// In particular, if the input is zero (or very close to zero), or non-finite,
618    /// the result of this operation will be `None`.
619    ///
620    /// See also [`Self::normalize_or_zero()`].
621    #[inline]
622    #[must_use]
623    pub fn try_normalize(self) -> Option<Self> {
624        let rcp = self.length_recip();
625        if rcp.is_finite() && rcp > 0.0 {
626            Some(self * rcp)
627        } else {
628            None
629        }
630    }
631
632    /// Returns `self` normalized to length 1.0 if possible, else returns a
633    /// fallback value.
634    ///
635    /// In particular, if the input is zero (or very close to zero), or non-finite,
636    /// the result of this operation will be the fallback value.
637    ///
638    /// See also [`Self::try_normalize()`].
639    #[inline]
640    #[must_use]
641    pub fn normalize_or(self, fallback: Self) -> Self {
642        let rcp = self.length_recip();
643        if rcp.is_finite() && rcp > 0.0 {
644            self * rcp
645        } else {
646            fallback
647        }
648    }
649
    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }
661
662    /// Returns `self` normalized to length 1.0 and the length of `self`.
663    ///
664    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
665    #[inline]
666    #[must_use]
667    pub fn normalize_and_length(self) -> (Self, f32) {
668        let length = self.length();
669        let rcp = 1.0 / length;
670        if rcp.is_finite() && rcp > 0.0 {
671            (self * rcp, length)
672        } else {
673            (Self::X, 0.0)
674        }
675    }
676
    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // 2e-4 on the squared length corresponds to roughly 1e-4 on the length.
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
685
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        // proj = rhs * (self . rhs) / (rhs . rhs)
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }
700
    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }
717
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        // With |rhs| == 1 the divide by rhs.dot(rhs) is unnecessary.
        rhs * self.dot(rhs)
    }
731
    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }
748
749    /// Returns a vector containing the nearest integer to a number for each element of `self`.
750    /// Round half-way cases away from 0.0.
751    #[inline]
752    #[must_use]
753    pub fn round(self) -> Self {
754        Self(unsafe { vrndnq_f32(self.0) })
755    }
756
    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        // FRINTM: round toward negative infinity.
        Self(unsafe { vrndmq_f32(self.0) })
    }
764
    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        // FRINTP: round toward positive infinity.
        Self(unsafe { vrndpq_f32(self.0) })
    }
772
    /// Returns a vector containing the integer part each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        // FRINTZ: round toward zero.
        Self(unsafe { vrndq_f32(self.0) })
    }
780
    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }
792
    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }
804
    /// Returns a vector containing `e^self` (the exponential function) for each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        // Scalar fallback per lane; no NEON transcendental intrinsics.
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }
817
    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        // Scalar fallback per lane; no NEON transcendental intrinsics.
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }
829
    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Exact divide, not the `vrecpeq_f32` estimate, for full precision.
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }
836
    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        // Two-product form reproduces the endpoints exactly at s = 0 and s = 1.
        self * (1.0 - s) + rhs * s
    }
848
849    /// Moves towards `rhs` based on the value `d`.
850    ///
851    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
852    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
853    #[inline]
854    #[must_use]
855    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
856        let a = rhs - *self;
857        let len = a.length();
858        if len <= d || len <= 1e-4 {
859            return rhs;
860        }
861        *self + a / len * d
862    }
863
864    /// Calculates the midpoint between `self` and `rhs`.
865    ///
866    /// The midpoint is the average of, or halfway point between, two vectors.
867    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
868    /// while being slightly cheaper to compute.
869    #[inline]
870    pub fn midpoint(self, rhs: Self) -> Self {
871        (self + rhs) * 0.5
872    }
873
    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }
888
    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        // Compare squared lengths to defer the sqrt to the rescaling branch.
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }
908
    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        // Compare squared lengths to defer the sqrt to the rescaling branch.
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }
925
    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        // Compare squared lengths to defer the sqrt to the rescaling branch.
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }
942
    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependent on designing algorithms with specific target hardware in
    /// mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // `vfmaq_f32(acc, x, y)` computes acc + x * y, i.e. b + self * a.
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }
955
956    /// Returns the reflection vector for a given incident vector `self` and surface normal
957    /// `normal`.
958    ///
959    /// `normal` must be normalized.
960    ///
961    /// # Panics
962    ///
963    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
964    #[inline]
965    #[must_use]
966    pub fn reflect(self, normal: Self) -> Self {
967        glam_assert!(normal.is_normalized());
968        self - 2.0 * self.dot(normal) * normal
969    }
970
971    /// Returns the refraction direction for a given incident vector `self`, surface normal
972    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
973    /// a zero vector will be returned.
974    ///
975    /// `self` and `normal` must be normalized.
976    ///
977    /// # Panics
978    ///
979    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
980    #[inline]
981    #[must_use]
982    pub fn refract(self, normal: Self, eta: f32) -> Self {
983        glam_assert!(self.is_normalized());
984        glam_assert!(normal.is_normalized());
985        let n_dot_i = normal.dot(self);
986        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
987        if k >= 0.0 {
988            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
989        } else {
990            Self::ZERO
991        }
992    }
993
    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        // `f32 as f64` widening is exact (lossless).
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }
1000
    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        // Float-to-int `as` casts saturate at the target bounds and map NaN to 0.
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }
1007
    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        // Saturating cast: negatives clamp to 0, out-of-range clamps to u8::MAX, NaN maps to 0.
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }
1014
    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        // Float-to-int `as` casts saturate at the target bounds and map NaN to 0.
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }
1021
    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        // Saturating cast: negatives clamp to 0, out-of-range clamps to u16::MAX, NaN maps to 0.
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }
1028
    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        // Float-to-int `as` casts saturate at the target bounds and map NaN to 0.
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }
1035
    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        // Saturating cast: negatives clamp to 0, out-of-range clamps to u32::MAX, NaN maps to 0.
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }
1042
    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        // Float-to-int `as` casts saturate at the target bounds and map NaN to 0.
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }
1049
    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        // Saturating cast: negatives clamp to 0, out-of-range clamps to u64::MAX, NaN maps to 0.
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
1056
    /// Casts all elements of `self` to `usize`.
    #[inline]
    #[must_use]
    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
        // Saturating cast; note the result range depends on the platform's
        // pointer width.
        crate::USizeVec4::new(
            self.x as usize,
            self.y as usize,
            self.z as usize,
            self.w as usize,
        )
    }
1068}
1069
impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        // The default vector is all zeroes.
        Self::ZERO
    }
}

impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        // Lane-wise compare; all four lanes must be equal. Per IEEE-754 a NaN
        // lane compares unequal, so a vector containing NaN is not equal to itself.
        self.cmpeq(*rhs).all()
    }
}
1083
// Component-wise division operators. The reference and scalar variants below
// all forward to the core by-value implementations so any mix of `Vec4`,
// `&Vec4`, `f32` and `&f32` operands works.
impl Div for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        // Lane-wise divide of the two NEON registers.
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

impl Div<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &Self) -> Self {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs);
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        // `vld1q_dup_f32` broadcasts the scalar into all four lanes.
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &f32) -> Self {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs);
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        // Scalar-over-vector: broadcast `self`, then divide lane-wise.
        Vec4(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}
1207
// Component-wise multiplication operators. The reference and scalar variants
// below all forward to the core by-value implementations.
impl Mul for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        // Lane-wise multiply of the two NEON registers.
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

impl Mul<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &Self) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs);
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        // `vmulq_n_f32` multiplies every lane by the scalar directly.
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &f32) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs);
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        // Multiplication commutes, so scalar * vector reuses the same intrinsic.
        Vec4(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}
1331
// Component-wise addition operators. The reference and scalar variants below
// all forward to the core by-value implementations.
impl Add for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        // Lane-wise add of the two NEON registers.
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

impl Add<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs);
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        // `vld1q_dup_f32` broadcasts the scalar into all four lanes.
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &f32) -> Self {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs);
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        // Scalar + vector: broadcast `self`, then add lane-wise.
        Vec4(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}
1455
// Component-wise subtraction operators. The reference and scalar variants
// below all forward to the core by-value implementations.
impl Sub for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        // Lane-wise subtract of the two NEON registers.
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

impl Sub<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &Self) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs);
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        // `vld1q_dup_f32` broadcasts the scalar into all four lanes.
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &f32) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs);
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        // Scalar - vector: broadcast `self` as the minuend, then subtract lane-wise.
        Vec4(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}
1579
// Component-wise remainder, computed as `self - floor(self / rhs) * rhs`.
// `vrndmq_f32` rounds toward negative infinity (floor), so the result takes
// the sign of `rhs`. NOTE(review): this floor-style modulus differs from
// Rust's truncating `%` on `f32` when the operands' signs differ — confirm it
// is intentionally consistent with glam's other SIMD backends.
impl Rem for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

impl Rem<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &Self) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs);
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        // Broadcast the scalar and reuse the vector-vector implementation.
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &f32) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs);
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        // Scalar % vector: broadcast the scalar into all four lanes first.
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}
1706
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `float32x4_t`, 16 bytes
        // of initialized `f32` data, so it can be viewed as a `[f32; 4]`.
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}

impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as `AsRef`; exclusive access is
        // guaranteed by `&mut self`.
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}
1720
impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        // Component-wise sum, starting from the zero vector.
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        // Borrowed variant: copy each element out of the iterator.
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        // Component-wise product, starting from the all-ones vector.
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        // Borrowed variant: copy each element out of the iterator.
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}
1760
impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // `vnegq_f32` flips the sign of every lane.
        Self(unsafe { vnegq_f32(self.0) })
    }
}

impl Neg for &Vec4 {
    #[inline]
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        (*self).neg()
    }
}
1776
// Indexed element access; only indices 0..=3 are valid. The `x`/`y`/`z`/`w`
// field references resolve through this type's `Deref` impl.
impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1803
impl fmt::Display for Vec4 {
    // Formats as `[x, y, z, w]`, honoring a `{:.N}` precision if one was given.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

impl fmt::Debug for Vec4 {
    // Formats as `Vec4(x, y, z, w)`.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}
1828
// Zero-cost conversions to and from the underlying NEON register type, plus
// array conversions via NEON load/store.
impl From<Vec4> for float32x4_t {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<float32x4_t> for Vec4 {
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        // Load four consecutive f32s into a NEON register.
        Self(unsafe { vld1q_f32(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Store the register into a 16-byte-aligned scratch buffer, then move
        // the array out.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `vst1q_f32` fully initializes all 16 bytes of `out`
            // before `assume_init` is called.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1862
// Conversions between `Vec4` and tuples of scalars / smaller vectors.
impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        (v.x, v.y, v.z, v.w)
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        // Appends `w` as the fourth component via `Vec3A::extend`.
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        // First tuple element supplies x/y, second supplies z/w.
        Self::new(v.x, v.y, u.x, u.y)
    }
}
1918
// Provides `.x`/`.y`/`.z`/`.w` field access by reinterpreting the SIMD storage
// as a plain struct of four `f32`s.
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: assumes `crate::deref::Vec4<f32>` has the same layout as this
        // type (four consecutive `f32`s, 16 bytes) — upheld in `deref.rs`; TODO confirm.
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `Deref`; exclusivity from `&mut self`.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1933
// Boolean-mask conversions: each `true` lane becomes `1.0`, each `false` lane `0.0`.
impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        // The SIMD mask is first lowered to plain bools, then widened to floats.
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}