use crate::{f32::math, neon::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};

use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

use core::arch::aarch64::*;

#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}

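/// Creates a 4-dimensional vector.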
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}

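/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.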
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[repr(transparent)]
pub struct Vec4(pub(crate) float32x4_t);

impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    /// Vec4 uses Rust Portable SIMD
    pub const USES_CORE_SIMD: bool = false;
    /// Vec4 uses Arm NEON
    pub const USES_NEON: bool = true;
    /// Vec4 uses scalar math
    pub const USES_SCALAR_MATH: bool = false;
    /// Vec4 uses Intel SSE2
    pub const USES_SSE2: bool = false;
    /// Vec4 uses WebAssembly 128-bit SIMD
    pub const USES_WASM32_SIMD: bool = false;

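    /// Creates a new vector.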
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

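    /// Creates a vector with all elements set to `v`.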
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

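    /// Returns a vector containing each element of `self` modified by a mapping function `f`.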
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
    }

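    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.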
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }

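    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.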
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

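    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.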
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        unsafe {
            vst1q_f32(slice.as_mut_ptr(), self.0);
        }
    }

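    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].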
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

    /// Creates a 4D vector from `self` with the given value of `x`.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `w`.
    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: f32) -> Self {
        self.w = w;
        self
    }

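    /// Computes the dot product of `self` and `rhs`.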
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_f32x4(self.0, rhs.0) })
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
    ///
    /// NaN propogation does not follow IEEE 754-2008 semantics for minNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
    ///
    /// NaN propogation does not follow IEEE 754-2008 semantics for maxNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }

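    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.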
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe { vminnmvq_f32(self.0) }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe { vmaxnmvq_f32(self.0) }
    }

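    /// Returns the index of the first minimum element of `self`.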
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        let mut min = self.x;
        let mut index = 0;
        if self.y < min {
            min = self.y;
            index = 1;
        }
        if self.z < min {
            min = self.z;
            index = 2;
        }
        if self.w < min {
            index = 3;
        }
        index
    }

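    /// Returns the index of the first maximum element of `self`.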
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        let mut max = self.x;
        let mut index = 0;
        if self.y > max {
            max = self.y;
            index = 1;
        }
        if self.z > max {
            max = self.z;
            index = 2;
        }
        if self.w > max {
            index = 3;
        }
        index
    }

    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe { vaddvq_f32(self.0) }
    }

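    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.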
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            let s = vmuls_laneq_f32(s, self.0, 2);
            vmuls_laneq_f32(s, self.0, 3)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }

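    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`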
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }

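    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
    ///
    /// An element is negative if it has a negative sign, including `-0.0`, `NAN`s with negative sign
    /// bit and negative infinity.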
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;
            let w = vgetq_lane_u32(m, 3) >> 31;

            x | y << 1 | z << 2 | w << 3
        }
    }

    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }

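    /// Computes the length of `self`.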
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Compute the squared euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }

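    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.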
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }

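    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    ///
    /// See also [`Self::normalize_or_zero()`].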
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns a
    /// fallback value.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be the fallback value.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns `self` normalized to length 1.0 and the length of `self`.
    ///
    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }

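    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.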
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { vrndnq_f32(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { vrndmq_f32(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { vrndpq_f32(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`. This means
    /// numbers are always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { vrndq_f32(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` (the exponential function) for each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }

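    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.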
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

    /// Calculates the midpoint between `self` and `rhs`.
    ///
    /// The midpoint is the average of, or halfway point between, two vectors.
    #[inline]
    #[must_use]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

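    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependant on designing algorithms with specific target hardware in
    /// mind.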
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }

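    /// Returns the reflection vector for a given incident vector `self` and surface normal
    /// `normal`.
    ///
    /// `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.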
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

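    /// Returns the refraction direction for a given incident vector `self`, surface normal
    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
    /// a zero vector will be returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.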
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }

    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }

    /// Casts all elements of `self` to `usize`.
    #[inline]
    #[must_use]
    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
        crate::USizeVec4::new(
            self.x as usize,
            self.y as usize,
            self.z as usize,
            self.w as usize,
        )
    }
}

impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

impl Div<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &Self) -> Self {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs);
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &f32) -> Self {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs);
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl Mul for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

impl Mul<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &Self) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs);
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &f32) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs);
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl Add for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

impl Add<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs);
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &f32) -> Self {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs);
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl Sub for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

impl Sub<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &Self) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs);
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &f32) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs);
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl Rem for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

impl Rem<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &Self) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs);
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &f32) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs);
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}

impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}

impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { vnegq_f32(self.0) })
    }
}

impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        (*self).neg()
    }
}

impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}

impl From<Vec4> for float32x4_t {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<float32x4_t> for Vec4 {
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self(unsafe { vld1q_f32(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        (v.x, v.y, v.z, v.w)
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}

impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}

impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}