glam/f32/neon/
vec4.rs

1// Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3use crate::{f32::math, neon::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
/// Layout-compatible cast helper used to build the NEON-backed [`Vec4`] from an
/// `[f32; 4]` inside `const fn`s, where vendor intrinsics are not usable.
/// Both fields are 16 bytes, so reading `v` after writing `a` reinterprets the
/// four `f32` lanes as a `float32x4_t`.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
16
/// Creates a 4-dimensional vector.
///
/// Free-function shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
23
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
// `repr(transparent)` keeps `Vec4` layout-identical to the underlying NEON
// register type, which the `UnionCast` and pointer casts below rely on.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) float32x4_t);
32
33impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    // Backend-identification flags: exactly one of these is `true` per build.
    /// Whether Vec4 uses Rust Portable SIMD.
    pub const USES_CORE_SIMD: bool = false;
    /// Whether Vec4 uses Arm NEON.
    pub const USES_NEON: bool = true;
    /// Whether Vec4 uses scalar math.
    pub const USES_SCALAR_MATH: bool = false;
    /// Whether Vec4 uses Intel SSE2.
    pub const USES_SSE2: bool = false;
    /// Whether Vec4 uses WebAssembly 128-bit SIMD.
    pub const USES_WASM32_SIMD: bool = false;
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        // SAFETY: `UnionCast` is #[repr(C)] and both fields are 16 bytes wide,
        // so reading `v` reinterprets the four floats as the SIMD register.
        // The union is needed because NEON intrinsics are not `const fn`.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        // SAFETY: same layout argument as in `new`.
        unsafe { UnionCast { a: [v; 4] }.v }
    }
109
110    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
111    #[inline]
112    #[must_use]
113    pub fn map<F>(self, f: F) -> Self
114    where
115        F: Fn(f32) -> f32,
116    {
117        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
118    }
119
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        // vbslq_f32 is a bitwise select: mask bits set -> `if_true`, clear -> `if_false`.
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }
130
    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // SAFETY: `Vec4` is #[repr(transparent)] over a 16-byte SIMD value, so
        // it has the same size and layout as `[f32; 4]`.
        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
    }

    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        unsafe {
            // SAFETY: the assert above guarantees at least 4 writable floats.
            vst1q_f32(slice.as_mut_ptr(), self.0);
        }
    }
169
    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
    ///
    /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        // Delegates to the xyz swizzle; `w` is simply dropped.
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }
181
182    /// Creates a 4D vector from `self` with the given value of `x`.
183    #[inline]
184    #[must_use]
185    pub fn with_x(mut self, x: f32) -> Self {
186        self.x = x;
187        self
188    }
189
190    /// Creates a 4D vector from `self` with the given value of `y`.
191    #[inline]
192    #[must_use]
193    pub fn with_y(mut self, y: f32) -> Self {
194        self.y = y;
195        self
196    }
197
198    /// Creates a 4D vector from `self` with the given value of `z`.
199    #[inline]
200    #[must_use]
201    pub fn with_z(mut self, z: f32) -> Self {
202        self.z = z;
203        self
204    }
205
206    /// Creates a 4D vector from `self` with the given value of `w`.
207    #[inline]
208    #[must_use]
209    pub fn with_w(mut self, w: f32) -> Self {
210        self.w = w;
211        self
212    }
213
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // `dot4` is the crate's NEON helper (from `crate::neon::*`).
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        // Broadcast variant of `dot`: the scalar result is splatted to all lanes.
        Self(unsafe { dot4_into_f32x4(self.0, rhs.0) })
    }
227
    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[min(x, rhs.x), min(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for minNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for maxNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }
251
    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        // max-then-min mirrors the scalar `f32::clamp` ordering.
        self.max(min).min(max)
    }
268
    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        // Horizontal reduce across all four lanes in a single instruction.
        unsafe { vminnmvq_f32(self.0) }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        // Horizontal reduce across all four lanes in a single instruction.
        unsafe { vmaxnmvq_f32(self.0) }
    }
292
293    /// Returns the index of the first minimum element of `self`.
294    #[doc(alias = "argmin")]
295    #[inline]
296    #[must_use]
297    pub fn min_position(self) -> usize {
298        let mut min = self.x;
299        let mut index = 0;
300        if self.y < min {
301            min = self.y;
302            index = 1;
303        }
304        if self.z < min {
305            min = self.z;
306            index = 2;
307        }
308        if self.w < min {
309            index = 3;
310        }
311        index
312    }
313
314    /// Returns the index of the first maximum element of `self`.
315    #[doc(alias = "argmax")]
316    #[inline]
317    #[must_use]
318    pub fn max_position(self) -> usize {
319        let mut max = self.x;
320        let mut index = 0;
321        if self.y > max {
322            max = self.y;
323            index = 1;
324        }
325        if self.z > max {
326            max = self.z;
327            index = 2;
328        }
329        if self.w > max {
330            index = 3;
331        }
332        index
333    }
334
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        // Horizontal add across all four lanes.
        unsafe { vaddvq_f32(self.0) }
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        // No horizontal-multiply instruction exists; fold the lanes one at a
        // time with scalar-by-lane multiplies: ((x * y) * z) * w.
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            let s = vmuls_laneq_f32(s, self.0, 2);
            vmuls_laneq_f32(s, self.0, 3)
        }
    }
356
    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        // NEON has no not-equal comparison; negate the equality mask instead.
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(self.0, rhs.0) })
    }
422
    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }
429
    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Bit trick: (self & bits(-1.0)) | bits(1.0) keeps `self`'s sign bit
        // (any exponent bits the AND also keeps are re-set by the OR anyway),
        // producing exactly +/-1.0 per lane.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // NaN lanes would otherwise be forced to +/-1.0; pass them through unchanged.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
450
    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // bits(-0.0) is the sign-bit mask: take the sign bits from `rhs` and
        // everything else (exponent + mantissa) from `self`.
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }
466
467    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
468    ///
469    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
470    /// into the first lowest bit, element `y` into the second, etc.
471    ///
472    /// An element is negative if it has a negative sign, including -0.0, NaNs with negative sign
473    /// bit and negative infinity.
474    #[inline]
475    #[must_use]
476    pub fn is_negative_bitmask(self) -> u32 {
477        unsafe {
478            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
479            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
480            let x = vgetq_lane_u32(m, 0) >> 31;
481            let y = vgetq_lane_u32(m, 1) >> 31;
482            let z = vgetq_lane_u32(m, 2) >> 31;
483
484            let w = vgetq_lane_u32(m, 3) >> 31;
485            x | y << 1 | z << 2 | w << 3
486        }
487    }
488
    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }
496
497    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
498    ///
499    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
500    pub fn is_finite_mask(self) -> BVec4A {
501        BVec4A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
502    }
503
    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // IEEE 754: only NaN compares unequal to itself, so NOT(self == self)
        // marks exactly the NaN lanes.
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }
519
    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }
546
547    /// Computes the Euclidean distance between two points in space.
548    #[inline]
549    #[must_use]
550    pub fn distance(self, rhs: Self) -> f32 {
551        (self - rhs).length()
552    }
553
554    /// Compute the squared euclidean distance between two points in space.
555    #[inline]
556    #[must_use]
557    pub fn distance_squared(self, rhs: Self) -> f32 {
558        (self - rhs).length_squared()
559    }
560
    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }
572
    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        // Scalar fallback: there is no NEON instruction for Euclidean remainder.
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }
586
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }
604
605    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
606    ///
607    /// In particular, if the input is zero (or very close to zero), or non-finite,
608    /// the result of this operation will be `None`.
609    ///
610    /// See also [`Self::normalize_or_zero()`].
611    #[inline]
612    #[must_use]
613    pub fn try_normalize(self) -> Option<Self> {
614        let rcp = self.length_recip();
615        if rcp.is_finite() && rcp > 0.0 {
616            Some(self * rcp)
617        } else {
618            None
619        }
620    }
621
622    /// Returns `self` normalized to length 1.0 if possible, else returns a
623    /// fallback value.
624    ///
625    /// In particular, if the input is zero (or very close to zero), or non-finite,
626    /// the result of this operation will be the fallback value.
627    ///
628    /// See also [`Self::try_normalize()`].
629    #[inline]
630    #[must_use]
631    pub fn normalize_or(self, fallback: Self) -> Self {
632        let rcp = self.length_recip();
633        if rcp.is_finite() && rcp > 0.0 {
634            self * rcp
635        } else {
636            fallback
637        }
638    }
639
640    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
641    ///
642    /// In particular, if the input is zero (or very close to zero), or non-finite,
643    /// the result of this operation will be zero.
644    ///
645    /// See also [`Self::try_normalize()`].
646    #[inline]
647    #[must_use]
648    pub fn normalize_or_zero(self) -> Self {
649        self.normalize_or(Self::ZERO)
650    }
651
652    /// Returns `self` normalized to length 1.0 and the length of `self`.
653    ///
654    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
655    #[inline]
656    #[must_use]
657    pub fn normalize_and_length(self) -> (Self, f32) {
658        let length = self.length();
659        let rcp = 1.0 / length;
660        if rcp.is_finite() && rcp > 0.0 {
661            (self * rcp, length)
662        } else {
663            (Self::X, 0.0)
664        }
665    }
666
    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // The 2e-4 tolerance is applied to the *squared* length, which
        // corresponds to roughly 1e-4 on the length itself near 1.0.
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
675
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        // proj_rhs(self) = rhs * (self . rhs) / (rhs . rhs)
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        // With |rhs| == 1 the division by rhs.dot(rhs) can be skipped.
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }
738
739    /// Returns a vector containing the nearest integer to a number for each element of `self`.
740    /// Round half-way cases away from 0.0.
741    #[inline]
742    #[must_use]
743    pub fn round(self) -> Self {
744        Self(unsafe { vrndnq_f32(self.0) })
745    }
746
    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        // FRINTM: round toward negative infinity.
        Self(unsafe { vrndmq_f32(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        // FRINTP: round toward positive infinity.
        Self(unsafe { vrndpq_f32(self.0) })
    }

    /// Returns a vector containing the integer part each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        // FRINTZ: round toward zero.
        Self(unsafe { vrndq_f32(self.0) })
    }
770
    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }
794
    /// Returns a vector containing `e^self` (the exponential function) for each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        // Scalar fallback: no NEON transcendental instructions are used here.
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        // Scalar fallback: no NEON transcendental instructions are used here.
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }
819
    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Exact division is used rather than the vrecpeq estimate instruction.
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }
826
827    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
828    ///
829    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
830    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
831    /// extrapolated.
832    #[doc(alias = "mix")]
833    #[inline]
834    #[must_use]
835    pub fn lerp(self, rhs: Self, s: f32) -> Self {
836        self * (1.0 - s) + rhs * s
837    }
838
839    /// Moves towards `rhs` based on the value `d`.
840    ///
841    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
842    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
843    #[inline]
844    #[must_use]
845    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
846        let a = rhs - *self;
847        let len = a.length();
848        if len <= d || len <= 1e-4 {
849            return rhs;
850        }
851        *self + a / len * d
852    }
853
854    /// Calculates the midpoint between `self` and `rhs`.
855    ///
856    /// The midpoint is the average of, or halfway point between, two vectors.
857    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
858    /// while being slightly cheaper to compute.
859    #[inline]
860    pub fn midpoint(self, rhs: Self) -> Self {
861        (self + rhs) * 0.5
862    }
863
    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }
878
    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            // Too short: rescale up to exactly `min` (normalize, then scale).
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            // Too long: rescale down to exactly `max`.
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            // Too long: rescale down to exactly `max`.
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            // Too short: rescale up to exactly `min`.
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }
932
    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependent on designing algorithms with specific target hardware in
    /// mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // Note the operand order: vfmaq_f32(acc, x, y) computes acc + x * y.
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }
945
    /// Returns the reflection vector for a given incident vector `self` and surface normal
    /// `normal`.
    ///
    /// `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        // Standard reflection formula r = i - 2(i·n)n, valid only for unit `normal`.
        self - 2.0 * self.dot(normal) * normal
    }
960
    /// Returns the refraction direction for a given incident vector `self`, surface normal
    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
    /// a zero vector will be returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        // `k` is the squared cosine of the transmitted angle; k < 0 means total
        // internal reflection and no transmitted ray exists.
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            // Snell's-law refraction direction (same formula as GLSL `refract`).
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }
983
    /// Casts all elements of `self` to `f64`.
    ///
    /// This widening conversion is lossless.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }
990
    /// Casts all elements of `self` to `i8`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `i8` range, and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }
997
    /// Casts all elements of `self` to `u8`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `u8` range (negatives become 0), and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }
1004
    /// Casts all elements of `self` to `i16`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `i16` range, and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }
1011
    /// Casts all elements of `self` to `u16`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `u16` range (negatives become 0), and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }
1018
    /// Casts all elements of `self` to `i32`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `i32` range, and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }
1025
    /// Casts all elements of `self` to `u32`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `u32` range (negatives become 0), and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }
1032
    /// Casts all elements of `self` to `i64`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `i64` range, and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }
1039
    /// Casts all elements of `self` to `u64`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `u64` range (negatives become 0), and maps NaN to 0.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
1046
    /// Casts all elements of `self` to `usize`.
    ///
    /// Uses Rust `as` cast semantics: truncates toward zero, saturates to the
    /// `usize` range (negatives become 0), and maps NaN to 0. The target width
    /// of `usize` is platform-dependent.
    #[inline]
    #[must_use]
    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
        crate::USizeVec4::new(
            self.x as usize,
            self.y as usize,
            self.z as usize,
            self.w as usize,
        )
    }
1058}
1059
impl Default for Vec4 {
    /// Returns the zero vector, matching `f32::default()` (0.0) per lane.
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
1066
impl PartialEq for Vec4 {
    /// Vectors are equal iff `cmpeq` reports every lane equal; a NaN lane
    /// therefore makes the vectors unequal, mirroring scalar `f32` `==`.
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1073
impl Div<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        // Lane-wise division of the two SIMD registers.
        // SAFETY: NEON intrinsics are available on the aarch64 targets this
        // module is compiled for.
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

// The reference-taking impls below forward to the by-value impls so the `/`
// operator works with any mix of `Vec4`, `&Vec4`, `f32` and `&f32` operands.
impl Div<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Vec4) {
        self.div_assign(*rhs)
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        // vld1q_dup_f32 broadcasts the scalar into all four lanes before dividing.
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs)
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        // scalar / vector: broadcast the scalar, then divide lane-wise.
        Vec4(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}
1197
impl Mul<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        // Lane-wise multiplication of the two SIMD registers.
        // SAFETY: NEON intrinsics are available on the aarch64 targets this
        // module is compiled for.
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

// The reference-taking impls below forward to the by-value impls so the `*`
// operator works with any mix of `Vec4`, `&Vec4`, `f32` and `&f32` operands.
impl Mul<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Vec4) {
        self.mul_assign(*rhs)
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        // vmulq_n_f32 multiplies every lane by the scalar directly; unlike the
        // Div/Add/Sub scalar impls, no explicit broadcast load is needed.
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs)
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        // scalar * vector: commutes to the same lane-by-scalar multiply.
        Vec4(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}
1321
impl Add<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        // Lane-wise addition of the two SIMD registers.
        // SAFETY: NEON intrinsics are available on the aarch64 targets this
        // module is compiled for.
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

// The reference-taking impls below forward to the by-value impls so the `+`
// operator works with any mix of `Vec4`, `&Vec4`, `f32` and `&f32` operands.
impl Add<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Vec4) {
        self.add_assign(*rhs)
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        // vld1q_dup_f32 broadcasts the scalar into all four lanes before adding.
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs)
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        // scalar + vector: broadcast the scalar, then add lane-wise.
        Vec4(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}
1445
impl Sub<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        // Lane-wise subtraction of the two SIMD registers.
        // SAFETY: NEON intrinsics are available on the aarch64 targets this
        // module is compiled for.
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

// The reference-taking impls below forward to the by-value impls so the `-`
// operator works with any mix of `Vec4`, `&Vec4`, `f32` and `&f32` operands.
impl Sub<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec4) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Vec4) {
        self.sub_assign(*rhs)
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        // vld1q_dup_f32 broadcasts the scalar into all four lanes before subtracting.
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs)
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        // scalar - vector: broadcast the scalar, then subtract lane-wise.
        Vec4(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}
1569
impl Rem<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        // Floored-division remainder: n = floor(self / rhs), result = self - n * rhs.
        // vrndmq_f32 rounds toward negative infinity, so the result takes the
        // sign of `rhs` — unlike Rust's scalar `%`, which truncates toward zero.
        // SAFETY: NEON intrinsics are available on the aarch64 targets this
        // module is compiled for.
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

// The reference-taking impls below forward to the by-value impls so the `%`
// operator works with any mix of `Vec4`, `&Vec4`, `f32` and `&f32` operands.
impl Rem<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Vec4) {
        self.rem_assign(*rhs)
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        // Splat the scalar and reuse the vector-vector remainder.
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs)
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        // scalar % vector: splat the scalar and reuse the vector-vector remainder.
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}
1696
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: Vec4 is #[repr(transparent)] over float32x4_t — 16 bytes of
        // four f32 lanes — so it has the same size and alignment as [f32; 4].
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as `as_ref`; exclusivity comes from `&mut self`.
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}
1712
impl Sum for Vec4 {
    /// Element-wise sum; an empty iterator yields the additive identity `ZERO`.
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    /// Same as the by-value `Sum`, copying each borrowed element before adding.
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}
1732
impl Product for Vec4 {
    /// Element-wise product; an empty iterator yields the multiplicative identity `ONE`.
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    /// Same as the by-value `Product`, copying each borrowed element before multiplying.
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}
1752
impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // Lane-wise negation.
        // SAFETY: NEON intrinsics are available on the aarch64 targets this
        // module is compiled for.
        Self(unsafe { vnegq_f32(self.0) })
    }
}

impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        (*self).neg()
    }
}
1768
impl Index<usize> for Vec4 {
    type Output = f32;
    /// Indexed component access (0 = x, 1 = y, 2 = z, 3 = w).
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 3.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        // `self.x` etc. resolve through the `Deref` impl to the named-field view.
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1782
impl IndexMut<usize> for Vec4 {
    /// Mutable indexed component access (0 = x, 1 = y, 2 = z, 3 = w).
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 3.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        // Mutable access goes through the `DerefMut` impl's named-field view.
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1795
impl fmt::Display for Vec4 {
    /// Formats as `[x, y, z, w]`, honoring an explicit precision (e.g. `{:.3}`)
    /// for every component when one is supplied.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}
1809
impl fmt::Debug for Vec4 {
    /// Formats as a tuple struct, e.g. `Vec4(1.0, 2.0, 3.0, 4.0)`.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}
1820
impl From<Vec4> for float32x4_t {
    /// Zero-cost unwrap of the underlying NEON register.
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<float32x4_t> for Vec4 {
    /// Zero-cost wrap of a NEON register.
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}
1834
impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        // SAFETY: `a` provides four consecutive, initialized f32s for
        // vld1q_f32 to load into the SIMD register.
        Self(unsafe { vld1q_f32(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Store the register into a 16-byte-aligned scratch buffer, then move
        // the now-initialized array out.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: vst1q_f32 writes all 16 bytes of `out` before assume_init.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1854
impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Same aligned store-and-read trick as the `[f32; 4]` conversion.
        // NOTE(review): this relies on the homogeneous (f32, f32, f32, f32)
        // tuple being laid out like [f32; 4] — true in practice, but tuple
        // layout is not a documented language guarantee.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: vst1q_f32 initializes all 16 bytes before assume_init.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

// Widening constructors: build a Vec4 from a smaller vector plus fill components.
impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}
1916
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    /// Provides named `x`/`y`/`z`/`w` field access over the SIMD storage.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: Vec4 is #[repr(transparent)] over float32x4_t; this relies on
        // the deref target sharing that 16-byte four-f32 layout.
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    /// Mutable counterpart of `deref`.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `deref`; exclusivity comes from `&mut self`.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1931
impl From<BVec4> for Vec4 {
    /// Converts a bool vector lane-wise: `true` becomes 1.0, `false` becomes 0.0.
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}
1943
#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    /// Converts a mask vector lane-wise: `true` becomes 1.0, `false` becomes 0.0.
    #[inline]
    fn from(v: BVec4A) -> Self {
        // BVec4A's lanes are not directly field-accessible here, so round-trip
        // through `[bool; 4]` first, then map each lane to 1.0/0.0.
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}