use crate::{f32::math, neon::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};

use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

use core::arch::aarch64::*;

#[cfg(feature = "zerocopy")]
use zerocopy_derive::*;

// Lets `Vec4` be built from an `[f32; 4]` in `const fn`s, since the NEON
// intrinsics themselves are not `const`.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}

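/// Creates a 4-dimensional vector.
///
/// A minimal usage sketch:
///
/// ```
/// use glam::{vec4, Vec4};
/// assert_eq!(vec4(1.0, 2.0, 3.0, 4.0), Vec4::new(1.0, 2.0, 3.0, 4.0));
/// ```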
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}

#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec4(pub(crate) float32x4_t);

impl Vec4 {
    pub const ZERO: Self = Self::splat(0.0);

    pub const ONE: Self = Self::splat(1.0);

    pub const NEG_ONE: Self = Self::splat(-1.0);

    pub const MIN: Self = Self::splat(f32::MIN);

    pub const MAX: Self = Self::splat(f32::MAX);

    pub const NAN: Self = Self::splat(f32::NAN);

    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    pub const USES_CORE_SIMD: bool = false;
    pub const USES_NEON: bool = true;
    pub const USES_SCALAR_MATH: bool = false;
    pub const USES_SSE2: bool = false;
    pub const USES_WASM32_SIMD: bool = false;

    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
    }

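    /// Performs a lane-wise bitwise blend: each element comes from `if_true`
    /// where `mask` is set and from `if_false` otherwise (NEON `vbslq_f32`).
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::{BVec4A, Vec4};
    /// let mask = BVec4A::new(true, false, true, false);
    /// assert_eq!(Vec4::select(mask, Vec4::ONE, Vec4::ZERO), Vec4::new(1.0, 0.0, 1.0, 0.0));
    /// ```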
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }

    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }

    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        unsafe {
            vst1q_f32(slice.as_mut_ptr(), self.0);
        }
    }

    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

    #[inline]
    #[must_use]
    pub fn project(self) -> Vec3 {
        Vec3::from_homogeneous(self)
    }

    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: f32) -> Self {
        self.w = w;
        self
    }

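    /// Computes the dot product of `self` and `rhs`.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let d = Vec4::new(1.0, 2.0, 3.0, 4.0).dot(Vec4::new(5.0, 6.0, 7.0, 8.0));
    /// assert_eq!(d, 70.0);
    /// ```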
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot4(self.0, rhs.0) }
    }

    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_f32x4(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe { vminnmvq_f32(self.0) }
    }

    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe { vmaxnmvq_f32(self.0) }
    }

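    /// Returns the index of the smallest element; because each comparison is a
    /// strict `<`, ties resolve to the lowest index.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::new(3.0, 1.0, 1.0, 4.0).min_position(), 1);
    /// ```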
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        let mut min = self.x;
        let mut index = 0;
        if self.y < min {
            min = self.y;
            index = 1;
        }
        if self.z < min {
            min = self.z;
            index = 2;
        }
        if self.w < min {
            index = 3;
        }
        index
    }

    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        let mut max = self.x;
        let mut index = 0;
        if self.y > max {
            max = self.y;
            index = 1;
        }
        if self.z > max {
            max = self.z;
            index = 2;
        }
        if self.w > max {
            index = 3;
        }
        index
    }

    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe { vaddvq_f32(self.0) }
    }

    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            // Multiply lane 0 by lanes 1..=3 using scalar-by-lane multiplies.
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            let s = vmuls_laneq_f32(s, self.0, 2);
            vmuls_laneq_f32(s, self.0, 3)
        }
    }

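    /// Compares each element of `self` with `rhs` for equality and returns the
    /// result as a lane mask; the rest of the `cmp*` family works the same way
    /// with different predicates.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::{BVec4A, Vec4};
    /// let mask = Vec4::new(1.0, 2.0, 3.0, 4.0).cmpeq(Vec4::splat(2.0));
    /// assert_eq!(mask, BVec4A::new(false, true, false, false));
    /// ```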
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // (bits(self) & bits(-1.0)) | bits(1.0) yields ±1.0 with the sign of `self`.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // NaN lanes pass through unchanged rather than becoming ±1.0.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

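    /// Returns `self` with the sign bit of each element replaced by the sign
    /// bit of the corresponding element in `rhs`.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::new(1.0, -2.0, 3.0, -4.0).copysign(Vec4::new(-1.0, 1.0, -1.0, 1.0));
    /// assert_eq!(v, Vec4::new(-1.0, 2.0, -3.0, 4.0));
    /// ```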
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // -0.0 is the sign bit alone: combine the sign bits of `rhs` with the
        // magnitude bits of `self`.
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }

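    /// Packs the sign bits of the four elements into the low four bits of a
    /// `u32`: bit 0 for `x`, bit 1 for `y`, bit 2 for `z`, bit 3 for `w`.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::new(-1.0, 2.0, -3.0, 4.0).is_negative_bitmask(), 0b0101);
    /// ```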
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;
            let w = vgetq_lane_u32(m, 3) >> 31;
            x | y << 1 | z << 2 | w << 3
        }
    }

    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        BVec4A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }

    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // NaN is the only value that is not equal to itself.
        BVec4A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }

    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }

    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }

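    /// Returns `self` normalized to length 1.0 if possible, else `None`; this
    /// covers zero-length and non-finite inputs, for which `normalize` has no
    /// meaningful result.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::new(2.0, 0.0, 0.0, 0.0).try_normalize(), Some(Vec4::X));
    /// assert_eq!(Vec4::ZERO.try_normalize(), None);
    /// ```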
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }

    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

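    /// Returns the rejection of `self` from `rhs`: the component of `self`
    /// perpendicular to `rhs`, i.e. `self - self.project_onto(rhs)`.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// assert_eq!(v.reject_from(Vec4::X), Vec4::new(0.0, 2.0, 3.0, 4.0));
    /// ```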
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        // vrndnq_f32 rounds to nearest, ties to even (frintn).
        Self(unsafe { vrndnq_f32(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        // vrndmq_f32 rounds toward negative infinity (frintm).
        Self(unsafe { vrndmq_f32(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        // vrndpq_f32 rounds toward positive infinity (frintp).
        Self(unsafe { vrndpq_f32(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        // vrndq_f32 rounds toward zero (frintz).
        Self(unsafe { vrndq_f32(self.0) })
    }

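    /// Returns 0.0 or 1.0 per element, with `self` acting as the edge: 0.0 for
    /// lanes where `rhs < self`, else 1.0.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let edge = Vec4::splat(0.5);
    /// assert_eq!(edge.step(Vec4::new(0.0, 0.4, 0.5, 1.0)), Vec4::new(0.0, 0.0, 1.0, 1.0));
    /// ```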
    #[inline]
    #[must_use]
    pub fn step(self, rhs: Self) -> Self {
        Self::select(rhs.cmplt(self), Self::ZERO, Self::ONE)
    }

    #[inline]
    #[must_use]
    pub fn saturate(self) -> Self {
        self.clamp(Self::ZERO, Self::ONE)
    }

    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

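    /// Returns `self - self.floor()`, the GLSL-style fractional part, which is
    /// in `[0.0, 1.0)` for finite inputs. Compare `fract`, which is
    /// `self - self.trunc()` and keeps the sign of `self`.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::splat(-0.5).fract_gl(), Vec4::splat(0.5));
    /// assert_eq!(Vec4::splat(-0.5).fract(), Vec4::splat(-0.5));
    /// ```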
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        // NEON has no vector transcendentals; this falls back to per-lane
        // scalar math, as do exp2, ln, log2, powf, sin, cos and sin_cos below.
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn exp2(self) -> Self {
        Self::new(
            math::exp2(self.x),
            math::exp2(self.y),
            math::exp2(self.z),
            math::exp2(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn ln(self) -> Self {
        Self::new(
            math::ln(self.x),
            math::ln(self.y),
            math::ln(self.z),
            math::ln(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn log2(self) -> Self {
        Self::new(
            math::log2(self.x),
            math::log2(self.y),
            math::log2(self.z),
            math::log2(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }

    #[inline]
    #[must_use]
    pub fn sqrt(self) -> Self {
        Self::new(
            math::sqrt(self.x),
            math::sqrt(self.y),
            math::sqrt(self.z),
            math::sqrt(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn cos(self) -> Self {
        Self::new(
            math::cos(self.x),
            math::cos(self.y),
            math::cos(self.z),
            math::cos(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn sin(self) -> Self {
        Self::new(
            math::sin(self.x),
            math::sin(self.y),
            math::sin(self.z),
            math::sin(self.w),
        )
    }

    #[inline]
    #[must_use]
    pub fn sin_cos(self) -> (Self, Self) {
        let (sin_x, cos_x) = math::sin_cos(self.x);
        let (sin_y, cos_y) = math::sin_cos(self.y);
        let (sin_z, cos_z) = math::sin_cos(self.z);
        let (sin_w, cos_w) = math::sin_cos(self.w);

        (
            Self::new(sin_x, sin_y, sin_z, sin_w),
            Self::new(cos_x, cos_y, cos_z, cos_w),
        )
    }

    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }

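    /// Linearly interpolates between `self` and `rhs` by `s`: `self` at
    /// `s == 0.0`, `rhs` at `s == 1.0`; `s` is not clamped.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::ZERO.lerp(Vec4::splat(2.0), 0.5), Vec4::splat(1.0));
    /// ```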
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    #[inline]
    #[must_use]
    pub fn move_towards(self, rhs: Self, d: f32) -> Self {
        let a = rhs - self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        self + a / len * d
    }

    #[inline]
    #[must_use]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

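    /// Returns `self` with its length clamped to `[min, max]`; the direction is
    /// kept and only the magnitude is rescaled.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::new(3.0, 0.0, 0.0, 0.0).clamp_length(1.0, 2.0);
    /// assert_eq!(v, Vec4::new(2.0, 0.0, 0.0, 0.0));
    /// ```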
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

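    /// Computes `self * a + b` per element as a single fused multiply-add,
    /// rounding once instead of twice.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let v = Vec4::splat(2.0).mul_add(Vec4::splat(3.0), Vec4::splat(1.0));
    /// assert_eq!(v, Vec4::splat(7.0));
    /// ```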
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // vfmaq_f32(add, x, y) computes `add + x * y` with a single rounding.
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }

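    /// Reflects `self` about `normal`, computed as
    /// `self - 2.0 * self.dot(normal) * normal`; `normal` must be normalized.
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use glam::Vec4;
    /// let r = Vec4::new(1.0, -1.0, 0.0, 0.0).reflect(Vec4::Y);
    /// assert_eq!(r, Vec4::new(1.0, 1.0, 0.0, 0.0));
    /// ```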
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

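    /// Refracts `self` about `normal` for the refraction index ratio `eta`;
    /// both `self` and `normal` must be normalized.
    ///
    /// A minimal usage sketch (with `eta == 1.0` the direction is unchanged):
    ///
    /// ```
    /// use glam::Vec4;
    /// assert_eq!(Vec4::NEG_Y.refract(Vec4::Y, 1.0), Vec4::NEG_Y);
    /// ```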
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        // `k < 0.0` means total internal reflection; like GLSL `refract`,
        // return zero in that case.
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }

    #[inline]
    #[must_use]
    pub fn as_dvec4(self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    #[inline]
    #[must_use]
    pub fn as_i8vec4(self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }

    #[inline]
    #[must_use]
    pub fn as_u8vec4(self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }

    #[inline]
    #[must_use]
    pub fn as_i16vec4(self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    #[inline]
    #[must_use]
    pub fn as_u16vec4(self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    #[inline]
    #[must_use]
    pub fn as_ivec4(self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    #[inline]
    #[must_use]
    pub fn as_uvec4(self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    #[inline]
    #[must_use]
    pub fn as_i64vec4(self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    #[inline]
    #[must_use]
    pub fn as_u64vec4(self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }

    #[inline]
    #[must_use]
    pub fn as_usizevec4(self) -> crate::USizeVec4 {
        crate::USizeVec4::new(
            self.x as usize,
            self.y as usize,
            self.z as usize,
            self.w as usize,
        )
    }
}

impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

impl Div<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &Self) -> Self {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs);
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &f32) -> Self {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs);
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl Mul for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

impl Mul<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &Self) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs);
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &f32) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs);
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl Add for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

impl Add<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs);
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &f32) -> Self {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs);
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl Sub for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

impl Sub<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &Self) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs);
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &f32) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs);
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

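/// The `%` operator, implemented per lane as `self - floor(self / rhs) * rhs`
/// (a floored-division remainder, like GLSL `mod`); for operands of mixed sign
/// this can differ from the truncating `%` of scalar `f32`.
///
/// A minimal usage sketch:
///
/// ```
/// use glam::Vec4;
/// assert_eq!(Vec4::splat(7.0) % Vec4::splat(4.0), Vec4::splat(3.0));
/// ```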
impl Rem for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

impl Rem<&Self> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &Self) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs);
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &f32) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs);
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}

impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}

impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { vnegq_f32(self.0) })
    }
}

impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        (*self).neg()
    }
}

impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}

impl From<Vec4> for float32x4_t {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<float32x4_t> for Vec4 {
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self(unsafe { vld1q_f32(a.as_ptr()) })
    }
}

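/// Converts to an array by storing through a 16-byte-aligned scratch buffer,
/// presumably so `vst1q_f32` writes to a known-aligned destination.
///
/// A minimal usage sketch:
///
/// ```
/// use glam::Vec4;
/// let a: [f32; 4] = Vec4::new(1.0, 2.0, 3.0, 4.0).into();
/// assert_eq!(a, [1.0, 2.0, 3.0, 4.0]);
/// ```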
impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        (v.x, v.y, v.z, v.w)
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}

// Deref to a plain x/y/z/w struct is what makes field access like `v.x` work
// on this SIMD-backed tuple struct.
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}

impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}

#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}