1use crate::{f32::math, neon::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
11#[repr(C)]
/// Helper union used to bitcast an `[f32; 4]` to the SIMD-backed [`Vec3A`] in
/// `const` contexts, where NEON load intrinsics cannot be called.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}
16
/// Creates a 3-dimensional SIMD-aligned vector.
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}
23
/// A 3-dimensional vector with SIMD support.
///
/// Storage is a single 16-byte `float32x4_t` NEON register; the contents of
/// the fourth lane are an implementation detail and not part of the public
/// 3-component value.
#[derive(Clone, Copy)]
#[cfg_attr(
    all(feature = "bytemuck", not(target_arch = "spirv")),
    derive(bytemuck::Pod, bytemuck::Zeroable)
)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) float32x4_t);
40
impl Vec3A {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];

    // Backend identification flags: this file is the NEON implementation.
    pub const USES_CORE_SIMD: bool = false;
    pub const USES_NEON: bool = true;
    pub const USES_SCALAR_MATH: bool = false;
    pub const USES_SSE2: bool = false;
    pub const USES_WASM32_SIMD: bool = false;

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        // `z` is duplicated into the 4th lane so that no lane is left
        // uninitialized; that lane is not part of the public value.
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    /// Returns a vector containing each element of `self` modified by a
    /// mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`,
    /// selecting which to use for each element based on `mask`.
    ///
    /// Implemented as a NEON bit-select: for each lane, a true mask bit
    /// picks the `if_true` lane, otherwise the `if_false` lane.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }

    /// Converts `self` to `[x, y, z]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 3] {
        // SAFETY: `Self` is `repr(transparent)` over four packed `f32` lanes,
        // so reading the first three lanes as an `[f32; 3]` is valid.
        unsafe { *(self as *const Self as *const [f32; 3]) }
    }

    /// Creates a vector from the first 3 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 3);
        Self::new(slice[0], slice[1], slice[2])
    }

    /// Writes the elements of `self` to the first 3 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[..3].copy_from_slice(&self.to_array());
    }

    /// Creates a [`Vec3A`] from the `x`, `y` and `z` elements of `self`,
    /// discarding `w`.
    ///
    /// On NEON this is free: the underlying register is reused as-is, so the
    /// hidden 4th lane keeps the original `w` value.
    #[inline]
    #[must_use]
    pub fn from_vec4(v: Vec4) -> Self {
        Self(v.0)
    }

    /// Creates a 4D vector from `self` and the given `w` value.
    #[inline]
    #[must_use]
    pub fn extend(self, w: f32) -> Vec4 {
        Vec4::new(self.x, self.y, self.z, w)
    }

    /// Creates a 2D vector from the `x` and `y` elements of `self`,
    /// discarding `z`.
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec2 {
        use crate::swizzles::Vec3Swizzles;
        self.xy()
    }

    /// Converts `self` to an (unaligned) [`Vec3`].
    #[inline]
    #[must_use]
    pub fn to_vec3(self) -> Vec3 {
        Vec3::from(self)
    }

    /// Creates a new vector with the `x` element replaced.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a new vector with the `y` element replaced.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a new vector with the `z` element replaced.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
    ///
    /// Computed with scalar lane accesses; only the first three lanes
    /// contribute, so the hidden 4th lane cannot pollute the result.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)
    }

    /// Returns a vector where every component is the dot product of `self`
    /// and `rhs`, presumably broadcast across all lanes by the
    /// `dot3_into_f32x4` helper — see its definition for details.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot3_into_f32x4(self.0, rhs.0) })
    }

    /// Computes the cross product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        unsafe {
            let lhs = self.0;
            let rhs = rhs.0;
            // Rotate the registers to build the (y, z, x) and (z, x, y)
            // permutations required by the cross product formula.
            let lhs_yzwx = vextq_f32(lhs, lhs, 1);
            let rhs_wxyz = vextq_f32(rhs, rhs, 3);

            let lhs_yzx = vsetq_lane_f32(vgetq_lane_f32(lhs, 0), lhs_yzwx, 2);
            let rhs_zxy = vsetq_lane_f32(vgetq_lane_f32(rhs, 2), rhs_wxyz, 0);

            // lhs.yzx * rhs.zxy
            let part_a = vmulq_f32(lhs_yzx, rhs_zxy);

            let lhs_wxyz = vextq_f32(lhs, lhs, 3);
            let rhs_yzwx = vextq_f32(rhs, rhs, 1);
            let lhs_zxy = vsetq_lane_f32(vgetq_lane_f32(lhs, 2), lhs_wxyz, 0);
            let rhs_yzx = vsetq_lane_f32(vgetq_lane_f32(rhs, 0), rhs_yzwx, 2);

            // part_a - lhs.zxy * rhs.yzx  (fused multiply-subtract)
            let result = vmlsq_f32(part_a, lhs_zxy, rhs_yzx);
            Self(result)
        }
    }

    /// Returns a vector containing the minimum values for each element of
    /// `self` and `rhs`.
    ///
    /// NaN propagation follows the NEON `vminq_f32` rules — NOTE(review):
    /// confirm against the scalar backend if NaN handling matters to you.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of
    /// `self` and `rhs`.
    ///
    /// NaN propagation follows the NEON `vmaxq_f32` rules — NOTE(review):
    /// confirm against the scalar backend if NaN handling matters to you.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }

    /// Component-wise clamping of values.
    ///
    /// # Panics
    ///
    /// Panics if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        self.x.min(self.y.min(self.z))
    }

    /// Returns the horizontal maximum of `self`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        self.x.max(self.y.max(self.z))
    }

    /// Returns the index of the first minimum element of `self`.
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        let mut min = self.x;
        let mut index = 0;
        if self.y < min {
            min = self.y;
            index = 1;
        }
        // `min` need not be updated here since `z` is the last candidate.
        if self.z < min {
            index = 2;
        }
        index
    }

    /// Returns the index of the first maximum element of `self`.
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        let mut max = self.x;
        let mut index = 0;
        if self.y > max {
            max = self.y;
            index = 1;
        }
        // `max` need not be updated here since `z` is the last candidate.
        if self.z > max {
            index = 2;
        }
        index
    }

    /// Returns the sum of all elements of `self`.
    ///
    /// The hidden 4th lane is zeroed before the horizontal add so it cannot
    /// contribute to the result.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe { vaddvq_f32(vsetq_lane_f32(0.0, self.0, 3)) }
    }

    /// Returns the product of all elements of `self` (x * y * z), using
    /// scalar-by-lane multiplies so the 4th lane is ignored.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            vmuls_laneq_f32(s, self.0, 2)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for
    /// each element of `self` and `rhs` (bitwise NOT of the `==` mask).
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcltq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`:
    /// `1.0` for positive (including `+0.0`), `-1.0` for negative
    /// (including `-0.0`), and `NAN` lanes preserved as-is.
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Copy each lane's sign bit onto 1.0 via bitwise ops.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // NaN lanes would otherwise become +/-1.0; restore the originals.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

    /// Returns a vector with the magnitude of `self` and the sign of `rhs`,
    /// per element, via sign-bit masking.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }

    /// Returns a bitmask with the lowest 3 bits set to the sign bits from the
    /// elements of `self`: bit 0 for `x`, bit 1 for `y`, bit 2 for `z`.
    ///
    /// A negative element (including `-0.0` and negative `NAN`) sets its bit.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            x | y << 1 | z << 2
        }
    }

    /// Returns `true` if, and only if, all elements are finite (neither
    /// `NaN`, positive or negative infinity).
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs [`Self::is_finite`] on each element, returning a mask of the
    /// results: finite iff `|lane| < INFINITY` (false for NaN lanes too).
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec3A {
        BVec3A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any element of `self` is `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element, returning a mask of the results,
    /// using the `x != x` property of NaN.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec3A {
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }

    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    /// Computes the squared length of `self`.
    ///
    /// Faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`. For valid results, `self` must not be of
    /// length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Computes the squared Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Element-wise quotient of Euclidean division of `self` by `rhs`.
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
        )
    }

    /// Element-wise remainder of Euclidean division of `self` by `rhs`
    /// (always non-negative for finite inputs, unlike `%`).
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
        )
    }

    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and not of length zero;
    /// otherwise the result is non-finite.
    ///
    /// # Panics
    ///
    /// Panics if the resulting vector is non-finite when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns
    /// `None` (when the length is zero, non-finite, or overflows).
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns the
    /// `fallback` value.
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns
    /// zero.
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns `self` normalized to length 1.0 together with its original
    /// length, or `(Self::X, 0.0)` when normalization is impossible.
    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }

    /// Returns whether `self` is length 1.0 or not, within a squared-length
    /// tolerance of `2e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Panics if `rhs` has zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs` — the component of
    /// `self` perpendicular to `rhs` (i.e. in the plane whose normal is
    /// `rhs`).
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`, where `rhs` must
    /// already be normalized.
    ///
    /// # Panics
    ///
    /// Panics if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`, where `rhs` must
    /// already be normalized.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector with each element rounded to the nearest integer
    /// (`vrndnq_f32` rounds ties to even).
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { vrndnq_f32(self.0) })
    }

    /// Returns a vector with each element rounded toward negative infinity.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { vrndmq_f32(self.0) })
    }

    /// Returns a vector with each element rounded toward positive infinity.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { vrndpq_f32(self.0) })
    }

    /// Returns a vector with each element rounded toward zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { vrndq_f32(self.0) })
    }

    /// Returns a vector containing the fractional part of each element as
    /// `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which
    /// returns `self - self.floor()`; see [`Self::fract_gl`].
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of each element as
    /// `self - self.floor()`, matching GLSL `fract`.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` (exponential) for each element.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
    }

    /// Returns a vector containing each element raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0 / n` of each element.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }

    /// Performs a linear interpolation between `self` and `rhs` based on the
    /// value `s`: `s == 0.0` yields `self`, `s == 1.0` yields `rhs`; `s` is
    /// not clamped.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    /// Moves towards `rhs` by at most distance `d`, arriving exactly when
    /// the remaining distance (or `d`) is within a small epsilon.
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

    /// Calculates the midpoint between `self` and `rhs`.
    #[inline]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    /// Returns true if the absolute difference of all elements between `self`
    /// and `rhs` is less than or equal to `max_abs_diff`.
    ///
    /// This is an absolute-epsilon comparison; it works for vectors of
    /// similar magnitude but is unreliable across very different magnitudes.
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than
    /// `max`.
    ///
    /// # Panics
    ///
    /// Panics if `min` is greater than `max`, or either is negative, when
    /// `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Panics if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Panics if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Fused multiply-add: computes `(self * a) + b` element-wise with a
    /// single NEON FMA, i.e. only one rounding error for the whole
    /// operation.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }

    /// Returns the reflection vector for a given incident vector `self` and
    /// surface normal `normal`.
    ///
    /// `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Panics if `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

    /// Returns the refraction direction for a given incident vector `self`,
    /// surface normal `normal` and ratio of indices of refraction `eta`.
    /// When total internal reflection occurs (discriminant `k < 0`), a zero
    /// vector is returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Panics if `self` or `normal` is not normalized when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }

    /// Returns the angle (in radians) between two vectors in the range
    /// `[0, +π]`.
    ///
    /// The inputs do not need to be unit vectors however they must be
    /// non-zero.
    #[inline]
    #[must_use]
    pub fn angle_between(self, rhs: Self) -> f32 {
        math::acos_approx(
            self.dot(rhs)
                .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
        )
    }

    /// Rotates towards `rhs` up to `max_angle` (in radians).
    ///
    /// When `max_angle` is `0.0`, the result will be equal to `self`. When
    /// `max_angle` is equal to `self.angle_between(rhs)`, the result will be
    /// parallel to `rhs`. If `max_angle` is negative, rotates towards the
    /// exact opposite of `rhs`. Will not go past the target.
    #[inline]
    #[must_use]
    pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {
        let angle_between = self.angle_between(rhs);
        // Clamp to avoid overshooting the target in either direction.
        let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);
        // If the vectors are (anti-)parallel the cross product vanishes;
        // fall back to an arbitrary orthogonal rotation axis.
        let axis = self
            .cross(rhs)
            .try_normalize()
            .unwrap_or_else(|| self.any_orthogonal_vector().normalize());
        Quat::from_axis_angle(axis.into(), angle) * self
    }

    /// Returns some vector that is orthogonal to the given one.
    ///
    /// The input vector must be finite and non-zero.
    ///
    /// The output vector is not necessarily unit length. For that use
    /// [`Self::any_orthonormal_vector()`] instead.
    #[inline]
    #[must_use]
    pub fn any_orthogonal_vector(&self) -> Self {
        // This can probably be optimized
        if math::abs(self.x) > math::abs(self.y) {
            Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)
        } else {
            Self::new(0.0, self.z, -self.y) // self.cross(Self::X)
        }
    }

    /// Returns any unit vector that is orthogonal to the given one.
    ///
    /// The input vector must be unit length.
    ///
    /// # Panics
    ///
    /// Panics if `self` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(&self) -> Self {
        glam_assert!(self.is_normalized());
        // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }

    /// Given a unit vector return two other vectors that together form an
    /// orthonormal basis. That is, all three vectors are orthogonal to each
    /// other and are normalized.
    ///
    /// # Panics
    ///
    /// Panics if `self` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_pair(&self) -> (Self, Self) {
        glam_assert!(self.is_normalized());
        // From https://graphics.pixar.com/library/OrthonormalB/paper.pdf
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        (
            Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
            Self::new(b, sign + self.y * self.y * a, -self.y),
        )
    }

    /// Performs a spherical linear interpolation between `self` and `rhs`
    /// based on the value `s`: `s == 0.0` yields `self`, `s == 1.0` yields
    /// `rhs`. Lengths are interpolated linearly alongside the direction.
    #[inline]
    #[must_use]
    pub fn slerp(self, rhs: Self, s: f32) -> Self {
        let self_length = self.length();
        let rhs_length = rhs.length();
        // Cosine of the angle between the two directions.
        let dot = self.dot(rhs) / (self_length * rhs_length);
        // General case: directions neither parallel nor anti-parallel.
        if math::abs(dot) < 1.0 - 3e-7 {
            let theta = math::acos_approx(dot);
            let sin_theta = math::sin(theta);
            let t1 = math::sin(theta * (1. - s));
            let t2 = math::sin(theta * s);

            // Scale both endpoints to the interpolated length before blending.
            let result_length = self_length.lerp(rhs_length, s);
            return (self * (result_length / self_length) * t1
                + rhs * (result_length / rhs_length) * t2)
                * sin_theta.recip();
        }
        if dot < 0.0 {
            // Anti-parallel: rotate about an arbitrary orthogonal axis.
            let axis = self.any_orthogonal_vector().normalize().into();
            let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
            let result_length = self_length.lerp(rhs_length, s);
            rotation * self * (result_length / self_length)
        } else {
            // Near-parallel: plain lerp is accurate enough.
            self.lerp(rhs, s)
        }
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec3(&self) -> crate::DVec3 {
        crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
    }

    /// Casts all elements of `self` to `i8` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_i8vec3(&self) -> crate::I8Vec3 {
        crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
    }

    /// Casts all elements of `self` to `u8` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_u8vec3(&self) -> crate::U8Vec3 {
        crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
    }

    /// Casts all elements of `self` to `i16` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_i16vec3(&self) -> crate::I16Vec3 {
        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
    }

    /// Casts all elements of `self` to `u16` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_u16vec3(&self) -> crate::U16Vec3 {
        crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
    }

    /// Casts all elements of `self` to `i32` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_ivec3(&self) -> crate::IVec3 {
        crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
    }

    /// Casts all elements of `self` to `u32` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_uvec3(&self) -> crate::UVec3 {
        crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
    }

    /// Casts all elements of `self` to `i64` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_i64vec3(&self) -> crate::I64Vec3 {
        crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
    }

    /// Casts all elements of `self` to `u64` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_u64vec3(&self) -> crate::U64Vec3 {
        crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
    }

    /// Casts all elements of `self` to `usize` (Rust `as` casts saturate).
    #[inline]
    #[must_use]
    pub fn as_usizevec3(&self) -> crate::USizeVec3 {
        crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)
    }
}
1208
impl Default for Vec3A {
    /// Returns the zero vector.
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
1215
impl PartialEq for Vec3A {
    /// Lane-wise equality on x/y/z only; the hidden 4th lane is ignored.
    /// Follows IEEE semantics: any NaN lane makes the comparison false.
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1222
impl Div for Vec3A {
    type Output = Self;
    /// Lane-wise division (all four lanes, including the hidden one).
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}
1230
1231impl Div<&Self> for Vec3A {
1232 type Output = Self;
1233 #[inline]
1234 fn div(self, rhs: &Self) -> Self {
1235 self.div(*rhs)
1236 }
1237}
1238
1239impl Div<&Vec3A> for &Vec3A {
1240 type Output = Vec3A;
1241 #[inline]
1242 fn div(self, rhs: &Vec3A) -> Vec3A {
1243 (*self).div(*rhs)
1244 }
1245}
1246
1247impl Div<Vec3A> for &Vec3A {
1248 type Output = Vec3A;
1249 #[inline]
1250 fn div(self, rhs: Vec3A) -> Vec3A {
1251 (*self).div(rhs)
1252 }
1253}
1254
impl DivAssign for Vec3A {
    /// In-place lane-wise division.
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}
1261
1262impl DivAssign<&Self> for Vec3A {
1263 #[inline]
1264 fn div_assign(&mut self, rhs: &Self) {
1265 self.div_assign(*rhs);
1266 }
1267}
1268
impl Div<f32> for Vec3A {
    type Output = Self;
    /// Divides every lane by the scalar (scalar is broadcast via a dup load).
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}
1276
1277impl Div<&f32> for Vec3A {
1278 type Output = Self;
1279 #[inline]
1280 fn div(self, rhs: &f32) -> Self {
1281 self.div(*rhs)
1282 }
1283}
1284
1285impl Div<&f32> for &Vec3A {
1286 type Output = Vec3A;
1287 #[inline]
1288 fn div(self, rhs: &f32) -> Vec3A {
1289 (*self).div(*rhs)
1290 }
1291}
1292
1293impl Div<f32> for &Vec3A {
1294 type Output = Vec3A;
1295 #[inline]
1296 fn div(self, rhs: f32) -> Vec3A {
1297 (*self).div(rhs)
1298 }
1299}
1300
impl DivAssign<f32> for Vec3A {
    /// In-place division of every lane by the scalar.
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}
1307
1308impl DivAssign<&f32> for Vec3A {
1309 #[inline]
1310 fn div_assign(&mut self, rhs: &f32) {
1311 self.div_assign(*rhs);
1312 }
1313}
1314
impl Div<Vec3A> for f32 {
    type Output = Vec3A;
    /// Divides the broadcast scalar by each lane of the vector.
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}
1322
1323impl Div<&Vec3A> for f32 {
1324 type Output = Vec3A;
1325 #[inline]
1326 fn div(self, rhs: &Vec3A) -> Vec3A {
1327 self.div(*rhs)
1328 }
1329}
1330
1331impl Div<&Vec3A> for &f32 {
1332 type Output = Vec3A;
1333 #[inline]
1334 fn div(self, rhs: &Vec3A) -> Vec3A {
1335 (*self).div(*rhs)
1336 }
1337}
1338
1339impl Div<Vec3A> for &f32 {
1340 type Output = Vec3A;
1341 #[inline]
1342 fn div(self, rhs: Vec3A) -> Vec3A {
1343 (*self).div(rhs)
1344 }
1345}
1346
impl Mul for Vec3A {
    type Output = Self;
    /// Lane-wise multiplication.
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}
1354
1355impl Mul<&Self> for Vec3A {
1356 type Output = Self;
1357 #[inline]
1358 fn mul(self, rhs: &Self) -> Self {
1359 self.mul(*rhs)
1360 }
1361}
1362
1363impl Mul<&Vec3A> for &Vec3A {
1364 type Output = Vec3A;
1365 #[inline]
1366 fn mul(self, rhs: &Vec3A) -> Vec3A {
1367 (*self).mul(*rhs)
1368 }
1369}
1370
1371impl Mul<Vec3A> for &Vec3A {
1372 type Output = Vec3A;
1373 #[inline]
1374 fn mul(self, rhs: Vec3A) -> Vec3A {
1375 (*self).mul(rhs)
1376 }
1377}
1378
impl MulAssign for Vec3A {
    /// In-place lane-wise multiplication.
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}
1385
1386impl MulAssign<&Self> for Vec3A {
1387 #[inline]
1388 fn mul_assign(&mut self, rhs: &Self) {
1389 self.mul_assign(*rhs);
1390 }
1391}
1392
impl Mul<f32> for Vec3A {
    type Output = Self;
    /// Multiplies every lane by the scalar (`vmulq_n_f32` broadcast).
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}
1400
1401impl Mul<&f32> for Vec3A {
1402 type Output = Self;
1403 #[inline]
1404 fn mul(self, rhs: &f32) -> Self {
1405 self.mul(*rhs)
1406 }
1407}
1408
1409impl Mul<&f32> for &Vec3A {
1410 type Output = Vec3A;
1411 #[inline]
1412 fn mul(self, rhs: &f32) -> Vec3A {
1413 (*self).mul(*rhs)
1414 }
1415}
1416
1417impl Mul<f32> for &Vec3A {
1418 type Output = Vec3A;
1419 #[inline]
1420 fn mul(self, rhs: f32) -> Vec3A {
1421 (*self).mul(rhs)
1422 }
1423}
1424
impl MulAssign<f32> for Vec3A {
    /// In-place multiplication of every lane by the scalar.
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}
1431
1432impl MulAssign<&f32> for Vec3A {
1433 #[inline]
1434 fn mul_assign(&mut self, rhs: &f32) {
1435 self.mul_assign(*rhs);
1436 }
1437}
1438
impl Mul<Vec3A> for f32 {
    type Output = Vec3A;
    /// Multiplies each lane of the vector by the scalar (commutative form).
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}
1446
1447impl Mul<&Vec3A> for f32 {
1448 type Output = Vec3A;
1449 #[inline]
1450 fn mul(self, rhs: &Vec3A) -> Vec3A {
1451 self.mul(*rhs)
1452 }
1453}
1454
1455impl Mul<&Vec3A> for &f32 {
1456 type Output = Vec3A;
1457 #[inline]
1458 fn mul(self, rhs: &Vec3A) -> Vec3A {
1459 (*self).mul(*rhs)
1460 }
1461}
1462
1463impl Mul<Vec3A> for &f32 {
1464 type Output = Vec3A;
1465 #[inline]
1466 fn mul(self, rhs: Vec3A) -> Vec3A {
1467 (*self).mul(rhs)
1468 }
1469}
1470
impl Add for Vec3A {
    type Output = Self;
    /// Lane-wise addition.
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}
1478
1479impl Add<&Self> for Vec3A {
1480 type Output = Self;
1481 #[inline]
1482 fn add(self, rhs: &Self) -> Self {
1483 self.add(*rhs)
1484 }
1485}
1486
1487impl Add<&Vec3A> for &Vec3A {
1488 type Output = Vec3A;
1489 #[inline]
1490 fn add(self, rhs: &Vec3A) -> Vec3A {
1491 (*self).add(*rhs)
1492 }
1493}
1494
1495impl Add<Vec3A> for &Vec3A {
1496 type Output = Vec3A;
1497 #[inline]
1498 fn add(self, rhs: Vec3A) -> Vec3A {
1499 (*self).add(rhs)
1500 }
1501}
1502
impl AddAssign for Vec3A {
    /// In-place lane-wise addition.
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}
1509
1510impl AddAssign<&Self> for Vec3A {
1511 #[inline]
1512 fn add_assign(&mut self, rhs: &Self) {
1513 self.add_assign(*rhs);
1514 }
1515}
1516
impl Add<f32> for Vec3A {
    type Output = Self;
    /// Adds the scalar to every lane (scalar broadcast via dup load).
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}
1524
1525impl Add<&f32> for Vec3A {
1526 type Output = Self;
1527 #[inline]
1528 fn add(self, rhs: &f32) -> Self {
1529 self.add(*rhs)
1530 }
1531}
1532
1533impl Add<&f32> for &Vec3A {
1534 type Output = Vec3A;
1535 #[inline]
1536 fn add(self, rhs: &f32) -> Vec3A {
1537 (*self).add(*rhs)
1538 }
1539}
1540
1541impl Add<f32> for &Vec3A {
1542 type Output = Vec3A;
1543 #[inline]
1544 fn add(self, rhs: f32) -> Vec3A {
1545 (*self).add(rhs)
1546 }
1547}
1548
impl AddAssign<f32> for Vec3A {
    /// In-place addition of the scalar to every lane.
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}
1555
1556impl AddAssign<&f32> for Vec3A {
1557 #[inline]
1558 fn add_assign(&mut self, rhs: &f32) {
1559 self.add_assign(*rhs);
1560 }
1561}
1562
impl Add<Vec3A> for f32 {
    type Output = Vec3A;
    /// Adds the broadcast scalar to each lane of the vector.
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}
1570
1571impl Add<&Vec3A> for f32 {
1572 type Output = Vec3A;
1573 #[inline]
1574 fn add(self, rhs: &Vec3A) -> Vec3A {
1575 self.add(*rhs)
1576 }
1577}
1578
1579impl Add<&Vec3A> for &f32 {
1580 type Output = Vec3A;
1581 #[inline]
1582 fn add(self, rhs: &Vec3A) -> Vec3A {
1583 (*self).add(*rhs)
1584 }
1585}
1586
1587impl Add<Vec3A> for &f32 {
1588 type Output = Vec3A;
1589 #[inline]
1590 fn add(self, rhs: Vec3A) -> Vec3A {
1591 (*self).add(rhs)
1592 }
1593}
1594
impl Sub for Vec3A {
    type Output = Self;
    /// Lane-wise subtraction.
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}
1602
1603impl Sub<&Self> for Vec3A {
1604 type Output = Self;
1605 #[inline]
1606 fn sub(self, rhs: &Self) -> Self {
1607 self.sub(*rhs)
1608 }
1609}
1610
1611impl Sub<&Vec3A> for &Vec3A {
1612 type Output = Vec3A;
1613 #[inline]
1614 fn sub(self, rhs: &Vec3A) -> Vec3A {
1615 (*self).sub(*rhs)
1616 }
1617}
1618
1619impl Sub<Vec3A> for &Vec3A {
1620 type Output = Vec3A;
1621 #[inline]
1622 fn sub(self, rhs: Vec3A) -> Vec3A {
1623 (*self).sub(rhs)
1624 }
1625}
1626
impl SubAssign for Vec3A {
    /// In-place lane-wise subtraction.
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}
1633
1634impl SubAssign<&Self> for Vec3A {
1635 #[inline]
1636 fn sub_assign(&mut self, rhs: &Self) {
1637 self.sub_assign(*rhs);
1638 }
1639}
1640
impl Sub<f32> for Vec3A {
    type Output = Self;
    /// Subtracts the scalar from every lane (scalar broadcast via dup load).
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}
1648
1649impl Sub<&f32> for Vec3A {
1650 type Output = Self;
1651 #[inline]
1652 fn sub(self, rhs: &f32) -> Self {
1653 self.sub(*rhs)
1654 }
1655}
1656
1657impl Sub<&f32> for &Vec3A {
1658 type Output = Vec3A;
1659 #[inline]
1660 fn sub(self, rhs: &f32) -> Vec3A {
1661 (*self).sub(*rhs)
1662 }
1663}
1664
1665impl Sub<f32> for &Vec3A {
1666 type Output = Vec3A;
1667 #[inline]
1668 fn sub(self, rhs: f32) -> Vec3A {
1669 (*self).sub(rhs)
1670 }
1671}
1672
impl SubAssign<f32> for Vec3A {
    /// In-place subtraction of the scalar from every lane.
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}
1679
1680impl SubAssign<&f32> for Vec3A {
1681 #[inline]
1682 fn sub_assign(&mut self, rhs: &f32) {
1683 self.sub_assign(*rhs);
1684 }
1685}
1686
impl Sub<Vec3A> for f32 {
    type Output = Vec3A;
    /// Subtracts each lane of the vector from the broadcast scalar.
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}
1694
1695impl Sub<&Vec3A> for f32 {
1696 type Output = Vec3A;
1697 #[inline]
1698 fn sub(self, rhs: &Vec3A) -> Vec3A {
1699 self.sub(*rhs)
1700 }
1701}
1702
1703impl Sub<&Vec3A> for &f32 {
1704 type Output = Vec3A;
1705 #[inline]
1706 fn sub(self, rhs: &Vec3A) -> Vec3A {
1707 (*self).sub(*rhs)
1708 }
1709}
1710
1711impl Sub<Vec3A> for &f32 {
1712 type Output = Vec3A;
1713 #[inline]
1714 fn sub(self, rhs: Vec3A) -> Vec3A {
1715 (*self).sub(rhs)
1716 }
1717}
1718
impl Rem for Vec3A {
    type Output = Self;
    /// Lane-wise floored remainder: `self - floor(self / rhs) * rhs`.
    ///
    /// NOTE(review): this uses `floor` (vrndmq), so the result takes the
    /// sign of `rhs` (GLSL `mod` semantics) rather than the sign of `self`
    /// as scalar Rust `%` does — confirm against the scalar backend.
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}
1729
1730impl Rem<&Self> for Vec3A {
1731 type Output = Self;
1732 #[inline]
1733 fn rem(self, rhs: &Self) -> Self {
1734 self.rem(*rhs)
1735 }
1736}
1737
1738impl Rem<&Vec3A> for &Vec3A {
1739 type Output = Vec3A;
1740 #[inline]
1741 fn rem(self, rhs: &Vec3A) -> Vec3A {
1742 (*self).rem(*rhs)
1743 }
1744}
1745
1746impl Rem<Vec3A> for &Vec3A {
1747 type Output = Vec3A;
1748 #[inline]
1749 fn rem(self, rhs: Vec3A) -> Vec3A {
1750 (*self).rem(rhs)
1751 }
1752}
1753
1754impl RemAssign for Vec3A {
1755 #[inline]
1756 fn rem_assign(&mut self, rhs: Self) {
1757 *self = self.rem(rhs);
1758 }
1759}
1760
1761impl RemAssign<&Self> for Vec3A {
1762 #[inline]
1763 fn rem_assign(&mut self, rhs: &Self) {
1764 self.rem_assign(*rhs);
1765 }
1766}
1767
1768impl Rem<f32> for Vec3A {
1769 type Output = Self;
1770 #[inline]
1771 fn rem(self, rhs: f32) -> Self {
1772 self.rem(Self::splat(rhs))
1773 }
1774}
1775
1776impl Rem<&f32> for Vec3A {
1777 type Output = Self;
1778 #[inline]
1779 fn rem(self, rhs: &f32) -> Self {
1780 self.rem(*rhs)
1781 }
1782}
1783
1784impl Rem<&f32> for &Vec3A {
1785 type Output = Vec3A;
1786 #[inline]
1787 fn rem(self, rhs: &f32) -> Vec3A {
1788 (*self).rem(*rhs)
1789 }
1790}
1791
1792impl Rem<f32> for &Vec3A {
1793 type Output = Vec3A;
1794 #[inline]
1795 fn rem(self, rhs: f32) -> Vec3A {
1796 (*self).rem(rhs)
1797 }
1798}
1799
1800impl RemAssign<f32> for Vec3A {
1801 #[inline]
1802 fn rem_assign(&mut self, rhs: f32) {
1803 *self = self.rem(Self::splat(rhs));
1804 }
1805}
1806
1807impl RemAssign<&f32> for Vec3A {
1808 #[inline]
1809 fn rem_assign(&mut self, rhs: &f32) {
1810 self.rem_assign(*rhs);
1811 }
1812}
1813
1814impl Rem<Vec3A> for f32 {
1815 type Output = Vec3A;
1816 #[inline]
1817 fn rem(self, rhs: Vec3A) -> Vec3A {
1818 Vec3A::splat(self).rem(rhs)
1819 }
1820}
1821
1822impl Rem<&Vec3A> for f32 {
1823 type Output = Vec3A;
1824 #[inline]
1825 fn rem(self, rhs: &Vec3A) -> Vec3A {
1826 self.rem(*rhs)
1827 }
1828}
1829
1830impl Rem<&Vec3A> for &f32 {
1831 type Output = Vec3A;
1832 #[inline]
1833 fn rem(self, rhs: &Vec3A) -> Vec3A {
1834 (*self).rem(*rhs)
1835 }
1836}
1837
1838impl Rem<Vec3A> for &f32 {
1839 type Output = Vec3A;
1840 #[inline]
1841 fn rem(self, rhs: Vec3A) -> Vec3A {
1842 (*self).rem(rhs)
1843 }
1844}
1845
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 3]> for Vec3A {
    // Borrows the first three lanes as an array, without copying.
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        // SAFETY: `Vec3A` is `#[repr(transparent)]` over `float32x4_t`, which
        // the `UnionCast` union at the top of this file treats as layout-
        // compatible with `[f32; 4]`; a prefix view of the first three lanes
        // as `[f32; 3]` is therefore valid and properly aligned.
        unsafe { &*(self as *const Self as *const [f32; 3]) }
    }
}
1853
#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 3]> for Vec3A {
    // Mutably borrows the first three lanes as an array, without copying.
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        // SAFETY: same layout argument as `AsRef<[f32; 3]>` — `Vec3A` is
        // `#[repr(transparent)]` over `float32x4_t` (layout-compatible with
        // `[f32; 4]` per the `UnionCast` union), so a `[f32; 3]` prefix view
        // is valid; the fourth lane stays untouched through this reference.
        unsafe { &mut *(self as *mut Self as *mut [f32; 3]) }
    }
}
1861
1862impl Sum for Vec3A {
1863 #[inline]
1864 fn sum<I>(iter: I) -> Self
1865 where
1866 I: Iterator<Item = Self>,
1867 {
1868 iter.fold(Self::ZERO, Self::add)
1869 }
1870}
1871
1872impl<'a> Sum<&'a Self> for Vec3A {
1873 #[inline]
1874 fn sum<I>(iter: I) -> Self
1875 where
1876 I: Iterator<Item = &'a Self>,
1877 {
1878 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1879 }
1880}
1881
1882impl Product for Vec3A {
1883 #[inline]
1884 fn product<I>(iter: I) -> Self
1885 where
1886 I: Iterator<Item = Self>,
1887 {
1888 iter.fold(Self::ONE, Self::mul)
1889 }
1890}
1891
1892impl<'a> Product<&'a Self> for Vec3A {
1893 #[inline]
1894 fn product<I>(iter: I) -> Self
1895 where
1896 I: Iterator<Item = &'a Self>,
1897 {
1898 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1899 }
1900}
1901
impl Neg for Vec3A {
    type Output = Self;
    // Negates all four SIMD lanes (the unused fourth lane included, which is
    // harmless since only x/y/z are ever observed).
    #[inline]
    fn neg(self) -> Self {
        // SAFETY: `vnegq_f32` is a NEON intrinsic; this backend is only
        // compiled for aarch64 (see the `core::arch::aarch64` import), where
        // NEON is baseline.
        Self(unsafe { vnegq_f32(self.0) })
    }
}
1909
1910impl Neg for &Vec3A {
1911 type Output = Vec3A;
1912 #[inline]
1913 fn neg(self) -> Vec3A {
1914 (*self).neg()
1915 }
1916}
1917
impl Index<usize> for Vec3A {
    type Output = f32;
    // Indexed component access: 0 => x, 1 => y, 2 => z.
    // Panics for any other index; the fourth SIMD lane is not exposed.
    // Field access here goes through the `Deref` impl further down, which
    // views the register as an x/y/z struct.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            _ => panic!("index out of bounds"),
        }
    }
}
1930
impl IndexMut<usize> for Vec3A {
    // Mutable indexed component access: 0 => x, 1 => y, 2 => z.
    // Panics for any other index; the fourth SIMD lane is not exposed.
    // Field access here goes through the `DerefMut` impl further down.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            _ => panic!("index out of bounds"),
        }
    }
}
1942
1943impl fmt::Display for Vec3A {
1944 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1945 if let Some(p) = f.precision() {
1946 write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
1947 } else {
1948 write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
1949 }
1950 }
1951}
1952
1953impl fmt::Debug for Vec3A {
1954 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1955 fmt.debug_tuple(stringify!(Vec3A))
1956 .field(&self.x)
1957 .field(&self.y)
1958 .field(&self.z)
1959 .finish()
1960 }
1961}
1962
1963impl From<Vec3A> for float32x4_t {
1964 #[inline(always)]
1965 fn from(t: Vec3A) -> Self {
1966 t.0
1967 }
1968}
1969
1970impl From<float32x4_t> for Vec3A {
1971 #[inline(always)]
1972 fn from(t: float32x4_t) -> Self {
1973 Self(t)
1974 }
1975}
1976
1977impl From<[f32; 3]> for Vec3A {
1978 #[inline]
1979 fn from(a: [f32; 3]) -> Self {
1980 Self::new(a[0], a[1], a[2])
1981 }
1982}
1983
impl From<Vec3A> for [f32; 3] {
    // Copies the x/y/z lanes out into a plain array.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // `vst1q_f32` stores all four lanes (16 bytes) and wants a 16-byte
        // aligned destination, so we store into an `Align16<[f32; 3]>`
        // scratch (whose alignment rounds its size up to 16) and then read
        // back only the three-element array.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: the scratch is 16-byte aligned and at least 16 bytes
            // large, so the full-register store stays in bounds; after the
            // store all bytes of the `[f32; 3]` are initialized.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1996
1997impl From<(f32, f32, f32)> for Vec3A {
1998 #[inline]
1999 fn from(t: (f32, f32, f32)) -> Self {
2000 Self::new(t.0, t.1, t.2)
2001 }
2002}
2003
2004impl From<Vec3A> for (f32, f32, f32) {
2005 #[inline]
2006 fn from(v: Vec3A) -> Self {
2007 (v.x, v.y, v.z)
2008 }
2009}
2010
2011impl From<Vec3> for Vec3A {
2012 #[inline]
2013 fn from(v: Vec3) -> Self {
2014 Self::new(v.x, v.y, v.z)
2015 }
2016}
2017
impl From<Vec3A> for Vec3 {
    // Narrows the SIMD-backed `Vec3A` to a plain `Vec3` by storing the
    // register and reading back the first three lanes.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // `vst1q_f32` stores all four lanes (16 bytes) and wants a 16-byte
        // aligned destination, so we store into `Align16<Vec3>` scratch
        // (whose alignment rounds its size up to 16) and read back the
        // `Vec3` prefix.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: the scratch is 16-byte aligned and at least 16 bytes
            // large, so the full-register store stays in bounds; after the
            // store every byte of the inner `Vec3` is initialized.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2030
2031impl From<(Vec2, f32)> for Vec3A {
2032 #[inline]
2033 fn from((v, z): (Vec2, f32)) -> Self {
2034 Self::new(v.x, v.y, z)
2035 }
2036}
2037
impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    // Provides `.x`/`.y`/`.z` field access on the SIMD-backed type.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY(review): relies on `crate::deref::Vec3<f32>` being layout-
        // compatible with the leading three `f32` lanes of the inner
        // `float32x4_t` (as the `UnionCast` union models) — presumably a
        // `repr(C)` x/y/z struct; verify against `crate::deref`.
        unsafe { &*(self as *const Self).cast() }
    }
}
2045
impl DerefMut for Vec3A {
    // Provides mutable `.x`/`.y`/`.z` field access on the SIMD-backed type.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY(review): same layout-compatibility assumption as `Deref`
        // above; the fourth SIMD lane is not reachable through the target
        // struct and is left untouched.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2052
2053impl From<BVec3> for Vec3A {
2054 #[inline]
2055 fn from(v: BVec3) -> Self {
2056 Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
2057 }
2058}
2059
2060impl From<BVec3A> for Vec3A {
2061 #[inline]
2062 fn from(v: BVec3A) -> Self {
2063 let bool_array: [bool; 3] = v.into();
2064 Self::new(
2065 f32::from(bool_array[0]),
2066 f32::from(bool_array[1]),
2067 f32::from(bool_array[2]),
2068 )
2069 }
2070}