// glam/f32/neon/vec4.rs

1// Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3use crate::{f32::math, neon::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
// Union used to build a `Vec4` (backed by `float32x4_t`) from four scalars in
// a `const fn` context, where NEON load intrinsics cannot be called.
// `#[repr(C)]` guarantees both fields share the same 16 bytes.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
16
/// Creates a 4-dimensional vector.
///
/// Free-function shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
23
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
#[derive(Clone, Copy)]
#[cfg_attr(
    all(feature = "bytemuck", not(target_arch = "spirv")),
    derive(bytemuck::Pod, bytemuck::Zeroable)
)]
// `repr(transparent)` means this has exactly the layout and alignment of the
// underlying NEON register type `float32x4_t`.
#[repr(transparent)]
pub struct Vec4(pub(crate) float32x4_t);
36
37impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    // Compile-time flags identifying which math backend this build of Vec4 uses.
    /// `true` when Vec4 uses Rust Portable SIMD (it does not in this build).
    pub const USES_CORE_SIMD: bool = false;
    /// `true` when Vec4 uses Arm NEON (the backend of this build).
    pub const USES_NEON: bool = true;
    /// `true` when Vec4 uses scalar math (it does not in this build).
    pub const USES_SCALAR_MATH: bool = false;
    /// `true` when Vec4 uses Intel SSE2 (it does not in this build).
    pub const USES_SSE2: bool = false;
    /// `true` when Vec4 uses WebAssembly 128-bit SIMD (it does not in this build).
    pub const USES_WASM32_SIMD: bool = false;
99
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        // Union cast instead of a NEON load so this remains a `const fn`.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }
106
    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        // Union cast instead of `vdupq_n_f32` so this remains a `const fn`.
        unsafe { UnionCast { a: [v; 4] }.v }
    }
113
114    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
115    #[inline]
116    #[must_use]
117    pub fn map<F>(self, f: F) -> Self
118    where
119        F: Fn(f32) -> f32,
120    {
121        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
122    }
123
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        // `vbslq_f32` is a per-bit select: mask bits set -> `if_true`, clear -> `if_false`.
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }
134
    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }
141
    /// Converts `self` to `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // Sound because `repr(transparent)` over `float32x4_t` gives this type
        // the same size and alignment as `[f32; 4]`.
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }
148
    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }
160
    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        // The length assertion makes the unaligned 4-lane store below in-bounds.
        assert!(slice.len() >= 4);
        unsafe {
            vst1q_f32(slice.as_mut_ptr(), self.0);
        }
    }
173
    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
    ///
    /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }
185
    /// Creates a 4D vector from `self` with the given value of `x`.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }
193
    /// Creates a 4D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }
201
    /// Creates a 4D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }
209
    /// Creates a 4D vector from `self` with the given value of `w`.
    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: f32) -> Self {
        self.w = w;
        self
    }
217
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // `dot4` is the crate's NEON helper for a 4-lane dot product.
        unsafe { dot4(self.0, rhs.0) }
    }
224
    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_f32x4(self.0, rhs.0) })
    }
231
    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[min(x, rhs.x), min(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for minNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }
243
    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for maxNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }
255
    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        // max-then-min ordering matches f32::clamp's result for valid bounds.
        self.max(min).min(max)
    }
272
    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        // `vminnmvq_f32` is the minNum horizontal reduction (prefers numbers over NaN).
        unsafe { vminnmvq_f32(self.0) }
    }
284
    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        // `vmaxnmvq_f32` is the maxNum horizontal reduction (prefers numbers over NaN).
        unsafe { vmaxnmvq_f32(self.0) }
    }
296
297    /// Returns the index of the first minimum element of `self`.
298    #[doc(alias = "argmin")]
299    #[inline]
300    #[must_use]
301    pub fn min_position(self) -> usize {
302        let mut min = self.x;
303        let mut index = 0;
304        if self.y < min {
305            min = self.y;
306            index = 1;
307        }
308        if self.z < min {
309            min = self.z;
310            index = 2;
311        }
312        if self.w < min {
313            index = 3;
314        }
315        index
316    }
317
318    /// Returns the index of the first maximum element of `self`.
319    #[doc(alias = "argmax")]
320    #[inline]
321    #[must_use]
322    pub fn max_position(self) -> usize {
323        let mut max = self.x;
324        let mut index = 0;
325        if self.y > max {
326            max = self.y;
327            index = 1;
328        }
329        if self.z > max {
330            max = self.z;
331            index = 2;
332        }
333        if self.w > max {
334            index = 3;
335        }
336        index
337    }
338
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        // `vaddvq_f32` is a horizontal add across all four lanes.
        unsafe { vaddvq_f32(self.0) }
    }
347
    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        // No horizontal multiply exists on NEON, so fold the lanes one at a
        // time with scalar-by-lane multiplies.
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            let s = vmuls_laneq_f32(s, self.0, 2);
            vmuls_laneq_f32(s, self.0, 3)
        }
    }
360
    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        // Each mask lane is all-ones when the comparison holds, all-zeroes otherwise.
        BVec4A(unsafe { vceqq_f32(self.0, rhs.0) })
    }
371
    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        // NEON has no not-equal compare; invert the equality mask instead.
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }
382
    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }
393
    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }
404
    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcleq_f32(self.0, rhs.0) })
    }
415
    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(self.0, rhs.0) })
    }
426
    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }
433
    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Copy each lane's sign bit onto 1.0: (self & -1.0) | 1.0 in bitwise
        // terms yields +/-1.0 per lane.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // NaN lanes must stay NaN, so select the original value back in.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
454
    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // `mask` has only the sign bit set in every lane: combine the sign
        // bits of `rhs` with the non-sign bits (magnitude) of `self`.
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }
470
    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
    ///
    /// An element is negative if it has a negative sign, including -0.0, NaNs with negative sign
    /// bit and negative infinity.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            // Isolate each lane's sign bit, then shift it down to bit 0.
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            let w = vgetq_lane_u32(m, 3) >> 31;
            // Pack lane signs into bits 0..=3 (x lowest).
            x | y << 1 | z << 2 | w << 3
        }
    }
492
    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }
500
    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        // |x| < INF is true exactly for finite lanes; NaN compares false.
        BVec4A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }
509
    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }
516
    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // A lane is NaN exactly when it does not compare equal to itself.
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }
525
    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }
533
    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }
543
    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }
552
    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }
559
    /// Compute the squared euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }
566
    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }
578
    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }
592
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }
610
611    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
612    ///
613    /// In particular, if the input is zero (or very close to zero), or non-finite,
614    /// the result of this operation will be `None`.
615    ///
616    /// See also [`Self::normalize_or_zero()`].
617    #[inline]
618    #[must_use]
619    pub fn try_normalize(self) -> Option<Self> {
620        let rcp = self.length_recip();
621        if rcp.is_finite() && rcp > 0.0 {
622            Some(self * rcp)
623        } else {
624            None
625        }
626    }
627
628    /// Returns `self` normalized to length 1.0 if possible, else returns a
629    /// fallback value.
630    ///
631    /// In particular, if the input is zero (or very close to zero), or non-finite,
632    /// the result of this operation will be the fallback value.
633    ///
634    /// See also [`Self::try_normalize()`].
635    #[inline]
636    #[must_use]
637    pub fn normalize_or(self, fallback: Self) -> Self {
638        let rcp = self.length_recip();
639        if rcp.is_finite() && rcp > 0.0 {
640            self * rcp
641        } else {
642            fallback
643        }
644    }
645
    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }
657
658    /// Returns `self` normalized to length 1.0 and the length of `self`.
659    ///
660    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
661    #[inline]
662    #[must_use]
663    pub fn normalize_and_length(self) -> (Self, f32) {
664        let length = self.length();
665        let rcp = 1.0 / length;
666        if rcp.is_finite() && rcp > 0.0 {
667            (self * rcp, length)
668        } else {
669            (Self::X, 0.0)
670        }
671    }
672
    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // 2e-4 on the squared length corresponds to roughly 1e-4 on the length.
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
681
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }
696
    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }
713
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }
727
    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }
744
    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Round half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        // NOTE(review): `vrndnq_f32` rounds ties to nearest even, not away from
        // zero as the doc above states — confirm intended cross-backend behavior.
        Self(unsafe { vrndnq_f32(self.0) })
    }
752
    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        // `vrndmq_f32` rounds toward negative infinity.
        Self(unsafe { vrndmq_f32(self.0) })
    }
760
    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        // `vrndpq_f32` rounds toward positive infinity.
        Self(unsafe { vrndpq_f32(self.0) })
    }
768
    /// Returns a vector containing the integer part each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        // `vrndq_f32` rounds toward zero.
        Self(unsafe { vrndq_f32(self.0) })
    }
776
    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }
788
    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }
800
801    /// Returns a vector containing `e^self` (the exponential function) for each element of
802    /// `self`.
803    #[inline]
804    #[must_use]
805    pub fn exp(self) -> Self {
806        Self::new(
807            math::exp(self.x),
808            math::exp(self.y),
809            math::exp(self.z),
810            math::exp(self.w),
811        )
812    }
813
814    /// Returns a vector containing each element of `self` raised to the power of `n`.
815    #[inline]
816    #[must_use]
817    pub fn powf(self, n: f32) -> Self {
818        Self::new(
819            math::powf(self.x, n),
820            math::powf(self.y, n),
821            math::powf(self.z, n),
822            math::powf(self.w, n),
823        )
824    }
825
    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Full-precision division rather than the `vrecpeq_f32` estimate.
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }
832
    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }
844
845    /// Moves towards `rhs` based on the value `d`.
846    ///
847    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
848    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
849    #[inline]
850    #[must_use]
851    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
852        let a = rhs - *self;
853        let len = a.length();
854        if len <= d || len <= 1e-4 {
855            return rhs;
856        }
857        *self + a / len * d
858    }
859
    /// Calculates the midpoint between `self` and `rhs`.
    ///
    /// The midpoint is the average of, or halfway point between, two vectors.
    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
    /// while being slightly cheaper to compute.
    #[inline]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }
869
    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }
884
885    /// Returns a vector with a length no less than `min` and no more than `max`.
886    ///
887    /// # Panics
888    ///
889    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
890    #[inline]
891    #[must_use]
892    pub fn clamp_length(self, min: f32, max: f32) -> Self {
893        glam_assert!(0.0 <= min);
894        glam_assert!(min <= max);
895        let length_sq = self.length_squared();
896        if length_sq < min * min {
897            min * (self / math::sqrt(length_sq))
898        } else if length_sq > max * max {
899            max * (self / math::sqrt(length_sq))
900        } else {
901            self
902        }
903    }
904
905    /// Returns a vector with a length no more than `max`.
906    ///
907    /// # Panics
908    ///
909    /// Will panic if `max` is negative when `glam_assert` is enabled.
910    #[inline]
911    #[must_use]
912    pub fn clamp_length_max(self, max: f32) -> Self {
913        glam_assert!(0.0 <= max);
914        let length_sq = self.length_squared();
915        if length_sq > max * max {
916            max * (self / math::sqrt(length_sq))
917        } else {
918            self
919        }
920    }
921
922    /// Returns a vector with a length no less than `min`.
923    ///
924    /// # Panics
925    ///
926    /// Will panic if `min` is negative when `glam_assert` is enabled.
927    #[inline]
928    #[must_use]
929    pub fn clamp_length_min(self, min: f32) -> Self {
930        glam_assert!(0.0 <= min);
931        let length_sq = self.length_squared();
932        if length_sq < min * min {
933            min * (self / math::sqrt(length_sq))
934        } else {
935            self
936        }
937    }
938
    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependant on designing algorithms with specific target hardware in
    /// mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // `vfmaq_f32(acc, x, y)` computes `acc + x * y`.
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }
951
    /// Returns the reflection vector for a given incident vector `self` and surface normal
    /// `normal`.
    ///
    /// `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        // Standard mirror reflection: I - 2 * dot(I, N) * N.
        self - 2.0 * self.dot(normal) * normal
    }
966
    /// Returns the refraction direction for a given incident vector `self`, surface normal
    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
    /// a zero vector will be returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        // Snell's law in vector form: k is the squared cosine of the
        // transmitted angle; k < 0 means total internal reflection.
        let n_dot_i = normal.dot(self);
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            // Total internal reflection: no transmitted direction exists.
            Self::ZERO
        }
    }
989
    // Element-wise numeric casts. These all go through Rust's `as` operator,
    // whose float-to-int conversions saturate at the target type's bounds and
    // map NaN to 0 (guaranteed language semantics since Rust 1.45).

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }

    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }

    /// Casts all elements of `self` to `usize`.
    #[inline]
    #[must_use]
    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
        crate::USizeVec4::new(
            self.x as usize,
            self.y as usize,
            self.z as usize,
            self.w as usize,
        )
    }
1064}
1065
// The default vector is all zeroes.
impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

// Lane-wise equality: all four lanes must compare equal.
// NOTE(review): `cmpeq` is an IEEE float compare, so any NaN lane should make
// `eq` return false (NaN != NaN) — consistent with `f32`'s own `PartialEq`.
impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1079
// Lane-wise vector / vector division. All reference-permutation impls below
// forward to this by-value implementation.
impl Div for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

impl Div<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &Self) -> Self {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs);
    }
}

// Vector / scalar: the scalar is broadcast to all four lanes with
// `vld1q_dup_f32` before the lane-wise divide.
impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &f32) -> Self {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs);
    }
}

// Scalar / vector: the scalar numerator is broadcast, then divided lane-wise.
impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}
1203
// Lane-wise vector * vector multiplication. All reference-permutation impls
// below forward to this by-value implementation.
impl Mul for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

impl Mul<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &Self) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs);
    }
}

// Vector * scalar: `vmulq_n_f32` multiplies every lane by the scalar
// directly, so no explicit broadcast load is needed.
impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &f32) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs);
    }
}

// Scalar * vector: multiplication commutes, so the operands are simply
// swapped into the same lane-by-scalar intrinsic.
impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}
1327
// Lane-wise vector + vector addition. All reference-permutation impls below
// forward to this by-value implementation.
impl Add for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

impl Add<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs);
    }
}

// Vector + scalar: the scalar is broadcast to all four lanes with
// `vld1q_dup_f32` before the lane-wise add.
impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &f32) -> Self {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs);
    }
}

// Scalar + vector: addition commutes; the scalar is broadcast first.
impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}
1451
// Lane-wise vector - vector subtraction. All reference-permutation impls
// below forward to this by-value implementation.
impl Sub for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

impl Sub<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &Self) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs);
    }
}

// Vector - scalar: the scalar is broadcast to all four lanes with
// `vld1q_dup_f32` before the lane-wise subtract.
impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &f32) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs);
    }
}

// Scalar - vector: subtraction does not commute, so the broadcast scalar is
// the minuend here.
impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}
1575
// Lane-wise remainder, computed as `self - floor(self / rhs) * rhs`
// (`vrndmq_f32` rounds toward negative infinity).
// NOTE(review): this is a floor-based (Euclidean-style) remainder, whereas
// scalar `f32 % f32` in Rust truncates toward zero (fmod). The two disagree
// when the operands have differing signs — confirm against the scalar Vec4
// path / the vec.rs.tera template that this divergence is intentional.
impl Rem for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

impl Rem<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &Self) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs);
    }
}

// Vector % scalar: the scalar divisor is splatted to all lanes.
impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &f32) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs);
    }
}

// Scalar % vector: the scalar dividend is splatted to all lanes.
impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}
1702
// Reinterpret the vector as a plain `[f32; 4]` array.
// SAFETY (as written): `Vec4` is `#[repr(transparent)]` over `float32x4_t`,
// which is 16 bytes of four packed `f32` lanes, so the pointer cast to
// `[f32; 4]` is presumably layout-compatible — relies on the template's
// layout guarantee for the wrapped SIMD type.
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}
1718
// Iterator sum: folds lane-wise addition starting from the zero vector.
impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

// Sum over borrowed items; `Vec4` is `Copy`, so each element is dereferenced
// and folded by value.
impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

// Iterator product: folds lane-wise multiplication starting from all-ones.
impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

// Product over borrowed items.
impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}
1758
// Lane-wise negation via the NEON negate intrinsic.
impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { vnegq_f32(self.0) })
    }
}

// Negation of a borrowed vector forwards to the by-value impl (Vec4 is Copy).
impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        (*self).neg()
    }
}
1774
// Indexed component access (0 => x, 1 => y, 2 => z, 3 => w).
// The `self.x`/`self.y`/... field accesses go through the `Deref` impl to
// `crate::deref::Vec4<f32>`, which overlays named fields on the SIMD storage.
//
// # Panics
// Panics on any index greater than 3.
impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

// Mutable indexed component access; same mapping and panic behavior as
// `Index` above, routed through `DerefMut`.
impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1801
// User-facing formatting as `[x, y, z, w]`. Honors an explicit precision
// (e.g. `{:.3}`) by applying it to every component.
impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

// Debug formatting as a tuple struct, e.g. `Vec4(1.0, 2.0, 3.0, 4.0)`.
impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}
1826
// Zero-cost unwrap of the inner NEON register.
impl From<Vec4> for float32x4_t {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

// Zero-cost wrap of a NEON register.
impl From<float32x4_t> for Vec4 {
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}

// Load four lanes from an array with an unaligned NEON load.
impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self(unsafe { vld1q_f32(a.as_ptr()) })
    }
}

// Store the four lanes out to an array. The destination is wrapped in
// `Align16` so the store target matches the vector's 16-byte alignment;
// `MaybeUninit` avoids zero-initializing memory that `vst1q_f32` fully
// overwrites before `assume_init` is called.
impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

// Tuple / smaller-vector conversions: pack the given components in order.
impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        (v.x, v.y, v.z, v.w)
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}
1916
// Expose named `x`/`y`/`z`/`w` fields over the SIMD storage by reinterpreting
// `&Vec4` as `&crate::deref::Vec4<f32>`.
// SAFETY (as written): relies on the two types being layout-compatible —
// `Vec4` is `#[repr(transparent)]` over a 4-lane f32 register; the deref
// target is presumably a matching 4-field f32 struct (defined elsewhere).
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

// Mutable counterpart of the `Deref` impl above.
impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1931
// Boolean-mask to float conversion: each `true` lane becomes 1.0 and each
// `false` lane becomes 0.0 (`f32::from(bool)` semantics).
impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

// Same conversion for the SIMD-aligned boolean mask; the mask is first
// expanded to a `[bool; 4]` since `BVec4A` stores lanes as hardware masks
// rather than `bool` fields.
#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}