1use crate::{f32::math, neon::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
/// Helper for building a [`Vec3A`] from four `f32` lanes in a `const` context,
/// where NEON load intrinsics are not callable.
///
/// `#[repr(C)]` guarantees both union fields share the same 16-byte layout, so
/// writing `a` and reading `v` reinterprets the four floats as a `float32x4_t`.
#[repr(C)]
union UnionCast {
    // Source lanes `[x, y, z, w]` in memory order.
    a: [f32; 4],
    // The same bytes viewed as the SIMD-backed vector type.
    v: Vec3A,
}
16
/// Creates a 3-dimensional SIMD-aligned vector.
///
/// Free-function shorthand for [`Vec3A::new`].
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}
23
/// A 3-dimensional vector backed by a 128-bit NEON register.
///
/// SIMD vector types are used for storage on supported platforms, so this type
/// is 16-byte aligned and 16 bytes in size: the fourth lane is present but not
/// part of the public interface.
///
/// `#[repr(transparent)]` makes the layout identical to the wrapped
/// `float32x4_t`, which the `UnionCast` const constructor and the
/// `AsRef<[f32; 3]>`/`AsMut` pointer casts rely on.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[repr(transparent)]
pub struct Vec3A(pub(crate) float32x4_t);
37
impl Vec3A {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];

    // Backend-identification flags: this compilation unit is the NEON backend.
    pub const USES_CORE_SIMD: bool = false;
    pub const USES_NEON: bool = true;
    pub const USES_SCALAR_MATH: bool = false;
    pub const USES_SSE2: bool = false;
    pub const USES_WASM32_SIMD: bool = false;

    /// Creates a new vector.
    ///
    /// The unused fourth lane is filled with a copy of `z` (rather than left
    /// uninitialized or zeroed) so lane-wise operations never see garbage.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }

    /// Creates a vector with all elements set to `v` (including the hidden
    /// fourth lane).
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    /// Returns a vector containing each element of `self` modified by a
    /// mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`,
    /// selecting which to use for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from
    /// `if_true`, and false uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        // vbslq: bitwise select — mask bits choose between the two operands.
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }

    /// Returns `[x, y, z]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 3] {
        // SAFETY: `repr(transparent)` over a 16-byte register means the first
        // three f32 lanes are laid out exactly like `[f32; 3]`.
        unsafe { *(self as *const Self as *const [f32; 3]) }
    }

    /// Creates a vector from the first 3 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 3);
        Self::new(slice[0], slice[1], slice[2])
    }

    /// Writes the elements of `self` to the first 3 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[..3].copy_from_slice(&self.to_array());
    }

    /// Creates a [`Vec3A`] from the `x`, `y` and `z` elements of `self`,
    /// discarding `w`.
    ///
    /// On SIMD backends this is a free conversion: the register is reused and
    /// the `w` lane simply becomes the (ignored) fourth lane.
    #[inline]
    #[must_use]
    pub fn from_vec4(v: Vec4) -> Self {
        Self(v.0)
    }

    /// Creates a 4D vector from `self` and the given `w` value.
    #[inline]
    #[must_use]
    pub fn extend(self, w: f32) -> Vec4 {
        Vec4::new(self.x, self.y, self.z, w)
    }

    /// Creates a 2D vector from the `x` and `y` elements of `self`,
    /// discarding `z`.
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec2 {
        use crate::swizzles::Vec3Swizzles;
        self.xy()
    }

    /// Converts `self` to an unaligned [`Vec3`].
    #[inline]
    #[must_use]
    pub fn to_vec3(self) -> Vec3 {
        Vec3::from(self)
    }

    /// Creates a 3D vector from `self` with the given value of `x`.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a 3D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a 3D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // Scalar form: only 3 lanes participate, so the hidden w lane can
        // never contaminate the result.
        (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)
    }

    /// Returns a vector where every component is the dot product of `self`
    /// and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot3_into_f32x4(self.0, rhs.0) })
    }

    /// Computes the cross product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        // cross = lhs.yzx * rhs.zxy - lhs.zxy * rhs.yzx
        // The yzx / zxy shuffles are built from a vextq rotation plus one
        // lane patch each, since NEON has no general 4-lane shuffle.
        unsafe {
            let lhs = self.0;
            let rhs = rhs.0;
            // Rotate left by one lane: (y, z, w, x).
            let lhs_yzwx = vextq_f32(lhs, lhs, 1);
            // Rotate left by three lanes: (w, x, y, z).
            let rhs_wxyz = vextq_f32(rhs, rhs, 3);

            // Patch lane 2 with x to get (y, z, x, _).
            let lhs_yzx = vsetq_lane_f32(vgetq_lane_f32(lhs, 0), lhs_yzwx, 2);
            // Patch lane 0 with z to get (z, x, y, _).
            let rhs_zxy = vsetq_lane_f32(vgetq_lane_f32(rhs, 2), rhs_wxyz, 0);

            // lhs.yzx * rhs.zxy
            let part_a = vmulq_f32(lhs_yzx, rhs_zxy);

            // Same shuffles with the operands' roles swapped.
            let lhs_wxyz = vextq_f32(lhs, lhs, 3);
            let rhs_yzwx = vextq_f32(rhs, rhs, 1);
            let lhs_zxy = vsetq_lane_f32(vgetq_lane_f32(lhs, 2), lhs_wxyz, 0);
            let rhs_yzx = vsetq_lane_f32(vgetq_lane_f32(rhs, 0), rhs_yzwx, 2);

            // part_a - lhs.zxy * rhs.yzx (fused multiply-subtract).
            let result = vmlsq_f32(part_a, lhs_zxy, rhs_yzx);
            Self(result)
        }
    }

    /// Returns a vector containing the minimum values for each element of
    /// `self` and `rhs`.
    ///
    /// NaN propagation follows the NEON `fmin` semantics for each lane.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of
    /// `self` and `rhs`.
    ///
    /// NaN propagation follows the NEON `fmax` semantics for each lane.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }

    /// Component-wise clamping of values.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        self.x.min(self.y.min(self.z))
    }

    /// Returns the horizontal maximum of `self`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        self.x.max(self.y.max(self.z))
    }

    /// Returns the index of the first minimum element of `self`.
    ///
    /// Strict `<` comparisons mean ties resolve to the lowest index.
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        let mut min = self.x;
        let mut index = 0;
        if self.y < min {
            min = self.y;
            index = 1;
        }
        if self.z < min {
            // No need to update `min`: z is the last candidate.
            index = 2;
        }
        index
    }

    /// Returns the index of the first maximum element of `self`.
    ///
    /// Strict `>` comparisons mean ties resolve to the lowest index.
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        let mut max = self.x;
        let mut index = 0;
        if self.y > max {
            max = self.y;
            index = 1;
        }
        if self.z > max {
            // No need to update `max`: z is the last candidate.
            index = 2;
        }
        index
    }

    /// Returns the sum of all elements of `self`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        // Zero lane 3 (which holds a copy of z) before the horizontal add so
        // it does not contribute to the sum.
        unsafe { vaddvq_f32(vsetq_lane_f32(0.0, self.0, 3)) }
    }

    /// Returns the product of all elements of `self`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        // Multiply lanes 0, 1 and 2 only; the hidden w lane is never read.
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            vmuls_laneq_f32(s, self.0, 2)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec3A {
        // NEON has no "not equal" compare, so negate the equality mask.
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcltq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Extract each lane's sign bit (via AND with -1.0's bit pattern) and
        // OR it onto 1.0, yielding +/-1.0 per lane.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // Pass NaN lanes through unchanged to match `f32::signum`.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // Combine rhs's sign bits with self's magnitude bits: -0.0 is the
        // sign-bit-only mask.
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }

    /// Returns a bitmask with the lowest 3 bits set to the sign bits from the
    /// elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a
    /// `0` bit.  Element `x` goes into the first lowest bit, element `y` into
    /// the second, etc.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            // Isolate each lane's sign bit, then shift it down to bit 0.
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            x | y << 1 | z << 2
        }
    }

    /// Returns `true` if, and only if, all elements are finite.  If any
    /// element is either `NaN`, positive or negative infinity, this will
    /// return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of self, returning a vector mask
    /// of the results.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec3A {
        // |v| < INFINITY is false for both infinities and NaN.
        BVec3A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of
    /// the results.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec3A {
        // NaN is the only value not equal to itself.
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }

    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Compute the squared euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of [Euclidean division] of `self`
    /// by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
        )
    }

    /// Returns the element-wise remainder of [Euclidean division] of `self`
    /// by `rhs`, i.e. the result is always non-negative for finite inputs.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
        )
    }

    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero,
    /// nor very close to zero.
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when
    /// `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns
    /// `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or
    /// non-finite, the result of this operation will be `None`.
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        // `rcp` is infinite for a zero-length input and NaN for a NaN input,
        // so one finiteness check covers all degenerate cases.
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns the
    /// `fallback` value.
    ///
    /// In particular, if the input is zero (or very close to zero), or
    /// non-finite, the result of this operation will be the fallback value.
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns
    /// zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or
    /// non-finite, the result of this operation will be zero.
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns `self` normalized to length 1.0 and the length of `self`.
    ///
    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }

    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4` on the squared
    /// length.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of
    /// `self` onto `rhs`, in rhs words the result of
    /// `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is
    /// enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of
    /// `self` onto `rhs`, in rhs words the result of
    /// `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector containing the nearest integer to a number for each
    /// element of `self`.  Round half-way cases to the number with an even
    /// least significant digit (round-to-nearest-even, per `vrndn`).
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { vrndnq_f32(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to
    /// a number for each element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { vrndmq_f32(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or
    /// equal to a number for each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { vrndpq_f32(self.0) })
    }

    /// Returns a vector containing the integer part each element of `self`.
    /// This means numbers are always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { vrndq_f32(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector as
    /// `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which
    /// returns `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as
    /// `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which
    /// returns `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` (the exponential function) for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
    }

    /// Returns a vector containing each element of `self` raised to the
    /// power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0/n` of each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Full-precision divide rather than the vrecpe estimate.
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }

    /// Performs a linear interpolation between `self` and `rhs` based on the
    /// value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is
    /// `1.0`, the result will be equal to `rhs`.  When `s` is outside of
    /// range `[0, 1]`, the result is linearly extrapolated.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`.  When `d` is
    /// equal to `self.distance(rhs)`, the result will be equal to `rhs`.
    /// Will not go past `rhs`.
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        // Snap to the target when close enough (or when d would overshoot).
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

    /// Calculates the midpoint between `self` and `rhs`.
    ///
    /// The midpoint is the average of, or halfway point between, two
    /// vectors.
    #[inline]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    /// Returns true if the absolute difference of all elements between
    /// `self` and `rhs` is less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements.
    /// It works best when comparing with a known value.  The `max_abs_diff`
    /// that should be used used depends on the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Clamps the length of `self` between `min` and `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either `min` or
    /// `max` is negative, when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        // Compare squared lengths to defer the sqrt to the rescale path.
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Clamps the length of `self` to be no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Clamps the length of `self` to be no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Fused multiply-add.  Computes `(self * a) + b` element-wise with only
    /// one rounding error, yielding a more accurate result than an unfused
    /// multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add
    /// if the target architecture has a dedicated fma CPU instruction.
    /// However, this is not always true, and will be heavily dependant on
    /// designing algorithms with specific target hardware in mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // vfmaq computes b + self * a (addend is the first operand).
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }

    /// Returns the reflection vector for a given incident vector `self` and
    /// surface normal `normal`.
    ///
    /// `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `normal` is not normalized when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

    /// Returns the refraction direction for a given incident vector `self`,
    /// surface normal `normal` and ratio of indices of refraction, `eta`.
    /// When total internal reflection occurs, a zero vector will be returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when `glam_assert`
    /// is enabled.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        // k < 0 signals total internal reflection (Snell's law discriminant).
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }

    /// Returns the angle (in radians) between two vectors in the range
    /// `[0, +π]`.
    ///
    /// The inputs do not need to be unit vectors however they must be
    /// non-zero.
    #[inline]
    #[must_use]
    pub fn angle_between(self, rhs: Self) -> f32 {
        math::acos_approx(
            self.dot(rhs)
                .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
        )
    }

    /// Rotates towards `rhs` up to `max_angle` (in radians).
    ///
    /// When `max_angle` is `0.0`, the result will be equal to `self`.  When
    /// `max_angle` is equal to `self.angle_between(rhs)`, the result will be
    /// parallel to `rhs`.  If `max_angle` is negative, rotates towards the
    /// exact opposite of `rhs`.  Will not go past the target.
    #[inline]
    #[must_use]
    pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {
        let angle_between = self.angle_between(rhs);
        // The clamp lower bound allows a negative rotation up to the point
        // exactly opposite `rhs`; the upper bound stops at `rhs` itself.
        let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);
        // For (anti-)parallel inputs the cross product is zero, so fall back
        // to an arbitrary orthogonal rotation axis.
        let axis = self
            .cross(rhs)
            .try_normalize()
            .unwrap_or_else(|| self.any_orthogonal_vector().normalize());
        Quat::from_axis_angle(axis.into(), angle) * self
    }

    /// Returns some vector that is orthogonal to the given one.
    ///
    /// The input vector must be finite and non-zero.
    ///
    /// The output vector is not necessarily unit length.  For that use
    /// [`Self::any_orthonormal_vector()`] instead.
    #[inline]
    #[must_use]
    pub fn any_orthogonal_vector(&self) -> Self {
        // Swap the two largest-candidate components and zero the third; the
        // branch avoids returning zero for axis-aligned inputs.
        if math::abs(self.x) > math::abs(self.y) {
            Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)
        } else {
            Self::new(0.0, self.z, -self.y) // self.cross(Self::X)
        }
    }

    /// Returns any unit vector that is orthogonal to the given one.
    ///
    /// The input vector must be unit length.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(&self) -> Self {
        glam_assert!(self.is_normalized());
        // Branchless construction; based on the sign-stable orthonormal basis
        // of Duff et al. ("Building an Orthonormal Basis, Revisited", JCGT).
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }

    /// Given a unit vector return two other vectors that together form an
    /// orthonormal basis.  That is, all three vectors are orthogonal to each
    /// other and are normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_pair(&self) -> (Self, Self) {
        glam_assert!(self.is_normalized());
        // Same construction as `any_orthonormal_vector`, returning both
        // tangent vectors of the basis.
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        (
            Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
            Self::new(b, sign + self.y * self.y * a, -self.y),
        )
    }

    /// Performs a spherical linear interpolation between `self` and `rhs`
    /// based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is
    /// `1.0`, the result will be equal to `rhs`.  The lengths of the inputs
    /// are also interpolated linearly.
    #[inline]
    #[must_use]
    pub fn slerp(self, rhs: Self, s: f32) -> Self {
        let self_length = self.length();
        let rhs_length = rhs.length();
        // Cosine of the angle between the two (direction) vectors.
        let dot = self.dot(rhs) / (self_length * rhs_length);
        // General case: directions are neither parallel nor anti-parallel.
        if math::abs(dot) < 1.0 - 3e-7 {
            let theta = math::acos_approx(dot);
            // Standard slerp weights: sin((1-s)θ)/sinθ and sin(sθ)/sinθ.
            let sin_theta = math::sin(theta);
            let t1 = math::sin(theta * (1. - s));
            let t2 = math::sin(theta * s);

            // Interpolate lengths linearly; rescale each operand to the
            // interpolated length before blending directions.
            let result_length = self_length.lerp(rhs_length, s);
            return (self * (result_length / self_length) * t1
                + rhs * (result_length / rhs_length) * t2)
                * sin_theta.recip();
        }
        if dot < 0.0 {
            // Anti-parallel: the rotation plane is ambiguous, so pick an
            // arbitrary orthogonal axis and rotate by sπ.
            let axis = self.any_orthogonal_vector().normalize().into();
            let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
            let result_length = self_length.lerp(rhs_length, s);
            rotation * self * (result_length / self_length)
        } else {
            // (Nearly) parallel: lerp is numerically stable and equivalent.
            self.lerp(rhs, s)
        }
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec3(&self) -> crate::DVec3 {
        crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
    }

    /// Casts all elements of `self` to `i8` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_i8vec3(&self) -> crate::I8Vec3 {
        crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
    }

    /// Casts all elements of `self` to `u8` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_u8vec3(&self) -> crate::U8Vec3 {
        crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
    }

    /// Casts all elements of `self` to `i16` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_i16vec3(&self) -> crate::I16Vec3 {
        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
    }

    /// Casts all elements of `self` to `u16` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_u16vec3(&self) -> crate::U16Vec3 {
        crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
    }

    /// Casts all elements of `self` to `i32` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_ivec3(&self) -> crate::IVec3 {
        crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
    }

    /// Casts all elements of `self` to `u32` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_uvec3(&self) -> crate::UVec3 {
        crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
    }

    /// Casts all elements of `self` to `i64` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_i64vec3(&self) -> crate::I64Vec3 {
        crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
    }

    /// Casts all elements of `self` to `u64` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_u64vec3(&self) -> crate::U64Vec3 {
        crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
    }

    /// Casts all elements of `self` to `usize` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_usizevec3(&self) -> crate::USizeVec3 {
        crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)
    }
}
1205
impl Default for Vec3A {
    /// Returns the zero vector.
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
1212
impl PartialEq for Vec3A {
    /// Lane-wise float equality over x, y and z; the hidden w lane is
    /// excluded.  NaN lanes compare unequal, matching `f32` semantics.
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1219
// Element-wise division.  The canonical impls (`Vec3A / Vec3A`, `Vec3A / f32`,
// `f32 / Vec3A`) contain the NEON `vdivq_f32` work; all by-reference variants
// simply deref and forward so every `&`/value combination compiles.
impl Div for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

impl Div<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &Self) -> Self {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs);
    }
}

impl Div<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        // vld1q_dup broadcasts the scalar into all four lanes.
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &f32) -> Self {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &f32) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: f32) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs);
    }
}

impl Div<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        // scalar / vector: broadcast the scalar as the dividend.
        Vec3A(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}
1343
// Element-wise multiplication.  Vector * vector uses `vmulq_f32`; the scalar
// forms use `vmulq_n_f32`, which multiplies every lane by one scalar without
// an explicit broadcast.  By-reference variants deref and forward.
impl Mul for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

impl Mul<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &Self) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs);
    }
}

impl Mul<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &f32) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: f32) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs);
    }
}

impl Mul<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        // Multiplication commutes, so scalar * vector reuses vmulq_n.
        Vec3A(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}
1467
// Element-wise addition via `vaddq_f32`; scalar operands are broadcast with
// `vld1q_dup_f32`.  By-reference variants deref and forward.
impl Add for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

impl Add<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs);
    }
}

impl Add<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &f32) -> Self {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &f32) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: f32) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs);
    }
}

impl Add<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}
1591
// Element-wise subtraction via `vsubq_f32`; scalar operands are broadcast
// with `vld1q_dup_f32`.  By-reference variants deref and forward.
impl Sub for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

impl Sub<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &Self) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs);
    }
}

impl Sub<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &f32) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: f32) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs);
    }
}

impl Sub<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        // scalar - vector: broadcast the scalar as the minuend.
        Vec3A(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}
1715
// Element-wise remainder.  NEON has no float modulo instruction, so the
// canonical impl computes `self - floor(self / rhs) * rhs` (floored
// remainder).  Scalar forms splat the scalar and reuse the vector impl;
// by-reference variants deref and forward.
impl Rem for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            // n = floor(self / rhs), then subtract n * rhs.
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

impl Rem<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &Self) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs);
    }
}

impl Rem<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &f32) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: f32) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs);
    }
}

impl Rem<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        Vec3A::splat(self).rem(rhs)
    }
}

impl Rem<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}
1842
impl AsRef<[f32; 3]> for Vec3A {
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        // SAFETY: `Vec3A` is `#[repr(transparent)]` over a 16-byte `float32x4_t`
        // whose first three lanes are `x`, `y`, `z` (see `Vec3A::new`), so a
        // reference to it can be reinterpreted as `&[f32; 3]` — the size and
        // alignment of the source exceed those of the target.
        unsafe { &*(self as *const Self as *const [f32; 3]) }
    }
}
1849
impl AsMut<[f32; 3]> for Vec3A {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        // SAFETY: same layout argument as `AsRef<[f32; 3]>` — the first three
        // lanes of the transparent SIMD wrapper are `x`, `y`, `z`; writes
        // through the returned reference only touch those lanes.
        unsafe { &mut *(self as *mut Self as *mut [f32; 3]) }
    }
}
1856
1857impl Sum for Vec3A {
1858 #[inline]
1859 fn sum<I>(iter: I) -> Self
1860 where
1861 I: Iterator<Item = Self>,
1862 {
1863 iter.fold(Self::ZERO, Self::add)
1864 }
1865}
1866
1867impl<'a> Sum<&'a Self> for Vec3A {
1868 #[inline]
1869 fn sum<I>(iter: I) -> Self
1870 where
1871 I: Iterator<Item = &'a Self>,
1872 {
1873 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1874 }
1875}
1876
1877impl Product for Vec3A {
1878 #[inline]
1879 fn product<I>(iter: I) -> Self
1880 where
1881 I: Iterator<Item = Self>,
1882 {
1883 iter.fold(Self::ONE, Self::mul)
1884 }
1885}
1886
1887impl<'a> Product<&'a Self> for Vec3A {
1888 #[inline]
1889 fn product<I>(iter: I) -> Self
1890 where
1891 I: Iterator<Item = &'a Self>,
1892 {
1893 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1894 }
1895}
1896
impl Neg for Vec3A {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // Negate all four lanes with a single NEON instruction (the unused
        // fourth lane is negated too, which is harmless).
        // SAFETY: `vnegq_f32` requires the `neon` target feature, which is
        // mandatory on aarch64 — the only target this module compiles for
        // (see the `core::arch::aarch64` import).
        Self(unsafe { vnegq_f32(self.0) })
    }
}
1904
1905impl Neg for &Vec3A {
1906 type Output = Vec3A;
1907 #[inline]
1908 fn neg(self) -> Vec3A {
1909 (*self).neg()
1910 }
1911}
1912
1913impl Index<usize> for Vec3A {
1914 type Output = f32;
1915 #[inline]
1916 fn index(&self, index: usize) -> &Self::Output {
1917 match index {
1918 0 => &self.x,
1919 1 => &self.y,
1920 2 => &self.z,
1921 _ => panic!("index out of bounds"),
1922 }
1923 }
1924}
1925
1926impl IndexMut<usize> for Vec3A {
1927 #[inline]
1928 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1929 match index {
1930 0 => &mut self.x,
1931 1 => &mut self.y,
1932 2 => &mut self.z,
1933 _ => panic!("index out of bounds"),
1934 }
1935 }
1936}
1937
1938impl fmt::Display for Vec3A {
1939 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1940 if let Some(p) = f.precision() {
1941 write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
1942 } else {
1943 write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
1944 }
1945 }
1946}
1947
1948impl fmt::Debug for Vec3A {
1949 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1950 fmt.debug_tuple(stringify!(Vec3A))
1951 .field(&self.x)
1952 .field(&self.y)
1953 .field(&self.z)
1954 .finish()
1955 }
1956}
1957
impl From<Vec3A> for float32x4_t {
    #[inline(always)]
    fn from(t: Vec3A) -> Self {
        // Zero-cost unwrap of the transparent newtype into its NEON register.
        t.0
    }
}
1964
impl From<float32x4_t> for Vec3A {
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        // Zero-cost wrap of a raw NEON register; the fourth lane carries no
        // meaning for the 3-component API.
        Self(t)
    }
}
1971
1972impl From<[f32; 3]> for Vec3A {
1973 #[inline]
1974 fn from(a: [f32; 3]) -> Self {
1975 Self::new(a[0], a[1], a[2])
1976 }
1977}
1978
impl From<Vec3A> for [f32; 3] {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Store all four lanes into a 16-byte-aligned scratch buffer and keep
        // only the `[f32; 3]` payload (`Align16<[f32; 3]>` is padded up to 16
        // bytes by its alignment, so the full 128-bit store fits).
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `out` is 16-byte aligned and 16 bytes large, so the
            // `vst1q_f32` store is in-bounds; it initializes every byte that
            // `assume_init` subsequently reads.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1991
1992impl From<(f32, f32, f32)> for Vec3A {
1993 #[inline]
1994 fn from(t: (f32, f32, f32)) -> Self {
1995 Self::new(t.0, t.1, t.2)
1996 }
1997}
1998
impl From<Vec3A> for (f32, f32, f32) {
    #[inline]
    fn from(v: Vec3A) -> Self {
        // Reads the three lanes via the `Deref` field accessors.
        (v.x, v.y, v.z)
    }
}
2005
impl From<Vec3> for Vec3A {
    #[inline]
    fn from(v: Vec3) -> Self {
        // Widen the plain `Vec3` into the 16-byte SIMD representation.
        Self::new(v.x, v.y, v.z)
    }
}
2012
impl From<Vec3A> for Vec3 {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Store all four lanes into a 16-byte-aligned scratch buffer and keep
        // only the `Vec3` payload (`Align16<Vec3>` is padded up to 16 bytes by
        // its alignment, so the full 128-bit store fits).
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `out` is 16-byte aligned and 16 bytes large, so the
            // `vst1q_f32` store is in-bounds; it initializes every byte that
            // `assume_init` subsequently reads.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2025
impl From<(Vec2, f32)> for Vec3A {
    #[inline]
    fn from((v, z): (Vec2, f32)) -> Self {
        // Extend a 2D vector with an explicit z component.
        Self::new(v.x, v.y, z)
    }
}
2032
impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // Provides read access to `x`/`y`/`z` as plain fields.
        // SAFETY: relies on `crate::deref::Vec3<f32>` being a plain
        // `x, y, z: f32` field struct whose layout matches the first three
        // lanes of the SIMD register; `Vec3A`'s 16-byte size and alignment
        // satisfy the target's requirements.
        unsafe { &*(self as *const Self).cast() }
    }
}
2040
impl DerefMut for Vec3A {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // Provides write access to `x`/`y`/`z` as plain fields.
        // SAFETY: same layout argument as `Deref` — the target struct mirrors
        // the first three lanes; writes only touch those lanes.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2047
2048impl From<BVec3> for Vec3A {
2049 #[inline]
2050 fn from(v: BVec3) -> Self {
2051 Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
2052 }
2053}
2054
2055impl From<BVec3A> for Vec3A {
2056 #[inline]
2057 fn from(v: BVec3A) -> Self {
2058 let bool_array: [bool; 3] = v.into();
2059 Self::new(
2060 f32::from(bool_array[0]),
2061 f32::from(bool_array[1]),
2062 f32::from(bool_array[2]),
2063 )
2064 }
2065}