1use crate::{f32::math, neon::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
11#[cfg(feature = "zerocopy")]
12use zerocopy_derive::*;
13
/// Union used to construct a [`Vec3A`] from four `f32` lanes in a `const fn`,
/// since NEON intrinsics such as `vld1q_f32` cannot be called in const context.
/// `#[repr(C)]` guarantees both fields share the same 16-byte storage.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}
19
/// Creates a 3-dimensional vector.
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}
26
/// A 3-dimensional vector backed by a NEON `float32x4_t`.
///
/// Lanes 0, 1 and 2 hold `x`, `y` and `z`; lane 3 is unused padding, so this
/// type is 16 bytes and 16-byte aligned.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) float32x4_t);
44
45impl Vec3A {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];

    // Backend discriminators: this file is the NEON implementation.
    pub const USES_CORE_SIMD: bool = false;
    pub const USES_NEON: bool = true;
    pub const USES_SCALAR_MATH: bool = false;
    pub const USES_SSE2: bool = false;
    pub const USES_WASM32_SIMD: bool = false;
101
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        // The unused 4th lane is set to `z` (not 0.0) deliberately: duplicating a
        // real lane avoids introducing a value that could differ from the pattern
        // produced by lane-shuffling ops elsewhere. The padding lane is never
        // observable through the public 3-component API.
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }
108
    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        // UnionCast instead of vdupq_n_f32 so this can be a `const fn`.
        unsafe { UnionCast { a: [v; 4] }.v }
    }
115
    /// Returns a vector containing each element of `self` modified by a mapping
    /// function `f`. The function is applied per scalar lane, so it cannot be
    /// vectorized.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z))
    }
125
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting
    /// which to use for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`,
    /// and false uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        // vbslq_f32 does a bitwise select: mask bits pick if_true, else if_false.
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }
136
    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }
143
    /// Returns `[x, y, z]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 3] {
        // SAFETY: Vec3A is #[repr(transparent)] over a 16-byte float32x4_t whose
        // first three lanes are x, y, z, so reading the leading 12 bytes as
        // [f32; 3] is in-bounds and correctly aligned.
        unsafe { *(self as *const Self as *const [f32; 3]) }
    }
150
    /// Creates a vector from the first 3 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 3);
        Self::new(slice[0], slice[1], slice[2])
    }
162
    /// Writes the elements of `self` to the first 3 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[..3].copy_from_slice(&self.to_array());
    }
172
    /// Creates a [`Vec3A`] from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// This is a zero-cost reinterpretation: the underlying SIMD register is
    /// reused as-is, so the (unobservable) padding lane keeps `v.w`'s value.
    #[inline]
    #[must_use]
    pub fn from_vec4(v: Vec4) -> Self {
        Self(v.0)
    }
181
    /// Creates a 4D vector from `self` and the given `w` value.
    #[inline]
    #[must_use]
    pub fn extend(self, w: f32) -> Vec4 {
        // Rebuilt lane-by-lane rather than patching lane 3, so the padding lane
        // of `self` never leaks into the result.
        Vec4::new(self.x, self.y, self.z, w)
    }
188
    /// Creates a 2D vector from the `x` and `y` elements of `self`, discarding `z`.
    ///
    /// Truncation may also be performed by using [`self.xy()`][crate::swizzles::Vec3Swizzles::xy()].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec2 {
        use crate::swizzles::Vec3Swizzles;
        self.xy()
    }
198
    /// Converts `self` to a non-SIMD [`Vec3`].
    #[inline]
    #[must_use]
    pub fn to_vec3(self) -> Vec3 {
        Vec3::from(self)
    }
205
    /// Creates a 3D vector from `self` with the given value of `x`.
    // Field writes like `self.x = ...` go through the crate's DerefMut impl for
    // Vec3A (defined elsewhere) — presumably dereferencing to a Vec3 view.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a 3D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a 3D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }
229
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // Scalar form: three lanes only, so the padding lane never contributes.
        (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)
    }
237
    /// Returns a vector where every element is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        // dot3_into_f32x4 is a crate-local NEON helper (from crate::neon) that
        // computes a 3-lane dot product and broadcasts it to all four lanes.
        Self(unsafe { dot3_into_f32x4(self.0, rhs.0) })
    }
244
    /// Computes the cross product of `self` and `rhs`.
    ///
    /// Implements `a × b = (a.yzx * b.zxy) - (a.zxy * b.yzx)` with NEON lane
    /// shuffles, since AArch64 has no single-instruction arbitrary permute for
    /// this pattern.
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        unsafe {
            let lhs = self.0;
            let rhs = rhs.0;
            // vextq_f32(v, v, n) rotates lanes left by n: [y z w x] and [w x y z].
            let lhs_yzwx = vextq_f32(lhs, lhs, 1);
            let rhs_wxyz = vextq_f32(rhs, rhs, 3);

            // Patch one lane of each rotation to get the yzx / zxy orderings.
            let lhs_yzx = vsetq_lane_f32(vgetq_lane_f32(lhs, 0), lhs_yzwx, 2);
            let rhs_zxy = vsetq_lane_f32(vgetq_lane_f32(rhs, 2), rhs_wxyz, 0);

            // First product term: a.yzx * b.zxy.
            let part_a = vmulq_f32(lhs_yzx, rhs_zxy);

            // Same shuffles with the operands' roles swapped for the second term.
            let lhs_wxyz = vextq_f32(lhs, lhs, 3);
            let rhs_yzwx = vextq_f32(rhs, rhs, 1);
            let lhs_zxy = vsetq_lane_f32(vgetq_lane_f32(lhs, 2), lhs_wxyz, 0);
            let rhs_yzx = vsetq_lane_f32(vgetq_lane_f32(rhs, 0), rhs_yzwx, 2);

            // Fused multiply-subtract: part_a - (a.zxy * b.yzx).
            let result = vmlsq_f32(part_a, lhs_zxy, rhs_yzx);
            Self(result)
        }
    }
273
    /// Returns a vector containing the minimum values for each element of `self`
    /// and `rhs`.
    ///
    /// NOTE(review): NEON `vminq_f32`/`vmaxq_f32` propagate NaN operands, which
    /// can differ from `f32::min`/`f32::max` semantics — confirm this matches the
    /// scalar backend's documented NaN behavior.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self`
    /// and `rhs`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }

    /// Component-wise clamping of values.
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }
314
315 #[inline]
322 #[must_use]
323 pub fn min_element(self) -> f32 {
324 self.x.min(self.y.min(self.z))
325 }
326
327 #[inline]
334 #[must_use]
335 pub fn max_element(self) -> f32 {
336 self.x.max(self.y.max(self.z))
337 }
338
339 #[doc(alias = "argmin")]
341 #[inline]
342 #[must_use]
343 pub fn min_position(self) -> usize {
344 let mut min = self.x;
345 let mut index = 0;
346 if self.y < min {
347 min = self.y;
348 index = 1;
349 }
350 if self.z < min {
351 index = 2;
352 }
353 index
354 }
355
356 #[doc(alias = "argmax")]
358 #[inline]
359 #[must_use]
360 pub fn max_position(self) -> usize {
361 let mut max = self.x;
362 let mut index = 0;
363 if self.y > max {
364 max = self.y;
365 index = 1;
366 }
367 if self.z > max {
368 index = 2;
369 }
370 index
371 }
372
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + self.z`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        // Zero the padding lane first so the horizontal add (vaddvq) over all
        // four lanes yields only x + y + z.
        unsafe { vaddvq_f32(vsetq_lane_f32(0.0, self.0, 3)) }
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * self.z`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            // Multiply lanes 0, 1 and 2 pairwise; the padding lane is never read.
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            vmuls_laneq_f32(s, self.0, 2)
        }
    }
393
    /// Returns a vector mask containing the result of a `==` comparison for each
    /// element of `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec3A {
        // NEON has no "not equal" compare; invert the equality mask instead.
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcltq_f32(self.0, rhs.0) })
    }
459
    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        let result = Self(unsafe {
            // (self & bits(-1.0)) | bits(1.0): keeps each lane's sign bit and
            // ORs in the exponent pattern of 1.0, producing exactly ±1.0.
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // The bit trick maps NaN to ±1.0, so restore NaN lanes from `self`.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
487
    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // -0.0 has only the sign bit set, so it doubles as a sign-bit mask.
        let mask = Self::splat(-0.0);
        Self(unsafe {
            // (rhs & sign_mask) | (self & !sign_mask): sign from rhs, rest from self.
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }

    /// Returns a bitmask with the lowest 3 bits set to the sign bits from the
    /// elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0`
    /// bit.  Element `x` goes into the first lowest bit, element `y` into the
    /// second, etc.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            // Isolate each lane's sign bit, then shift it down to bit 0.
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            x | y << 1 | z << 2
        }
    }
524
    /// Returns `true` if, and only if, all elements are finite.  If any element is
    /// either `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec3A {
        // |v| < INF is false for both infinities and NaN (NaN compares false).
        BVec3A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec3A {
        // NaN is the only value for which v == v is false; invert the mask.
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }
557
    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Compute the squared euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }
598
    /// Element-wise quotient of Euclidean division of `self` by `rhs`.
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
        )
    }

    /// Element-wise remainder of Euclidean division of `self` by `rhs`.
    ///
    /// The result is always in the half-open range `[0, |rhs|)` per element
    /// (matching `f32::rem_euclid` — presumably what `math::rem_euclid` wraps;
    /// confirm in `crate::f32::math`).
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
        )
    }
622
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor
    /// very close to zero.
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when
    /// `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        // 1/length is non-finite (inf/NaN) for zero-length or non-finite input.
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns a
    /// fallback value.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be the fallback value.
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns `self` normalized to length 1.0 and the length of `self`.
    ///
    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }

    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4` on the squared length.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
711
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs` — the component of
    /// `self` perpendicular to `rhs` (i.e. lying in the plane normal to `rhs`).
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`, assuming `rhs` is
    /// already normalized (skips the length division).
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`, assuming `rhs` is
    /// already normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }
774
    /// Returns a vector containing the nearest integer to a number for each element
    /// of `self`.  `vrndnq_f32` rounds ties to even.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { vrndnq_f32(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a
    /// number for each element of `self` (round toward negative infinity).
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { vrndmq_f32(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a
    /// number for each element of `self` (round toward positive infinity).
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { vrndpq_f32(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`
    /// (round toward zero).
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { vrndq_f32(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector as
    /// `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which is
    /// `self - self.floor()`; the results are negative for negative inputs.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as
    /// `self - self.floor()`, matching GLSL `fract`; results are always in `[0, 1)`.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }
830
    /// Returns a vector containing `e^self` (the exponential function) for each
    /// element of `self`.  Computed lane-by-lane in scalar code.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Full-precision divide; note the padding lane is divided too (1.0 / pad).
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }
856
    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`.  The value is not clamped, so extrapolation occurs outside
    /// of the range `[0, 1]`.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
    /// `self.distance(rhs)` or greater, the result will be equal to `rhs`.
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        // Snap to the target when it is within `d`, or when the remaining
        // distance is tiny (1e-4) so the division below stays well-conditioned.
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }
883
884 #[inline]
890 pub fn midpoint(self, rhs: Self) -> Self {
891 (self + rhs) * 0.5
892 }
893
    /// Returns true if the absolute difference of all elements between `self` and
    /// `rhs` is less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements.  It works
    /// best when comparing with a known value.  The `max_abs_diff` that should be used
    /// depends on the values being compared against.
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }
908
    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is
    /// negative, when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        // Compare squared lengths to avoid a sqrt unless rescaling is needed.
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }
962
    /// Fused multiply-add.  Computes `(self * a) + b` element-wise with only one
    /// rounding error, via the NEON `vfmaq_f32` instruction (note its operand
    /// order: accumulator first).
    ///
    /// This yields a more accurate result than an unfused multiply-add.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }
975
    /// Returns the reflection vector for a given incident vector `self` and surface
    /// normal `normal`.
    ///
    /// `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

    /// Returns the refraction direction for a given incident vector `self`, surface
    /// normal `normal` and ratio of indices of refraction, `eta`.  When total internal
    /// reflection occurs, a zero vector will be returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        // k < 0 indicates total internal reflection (no transmitted ray exists).
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }
1013
    /// Returns the angle (in radians) between two vectors in the range `[0, +π]`.
    ///
    /// The inputs do not need to be unit vectors however they must be non-zero.
    #[inline]
    #[must_use]
    pub fn angle_between(self, rhs: Self) -> f32 {
        // acos(dot / (|a||b|)); acos_approx clamps its input, guarding against
        // |cos| slightly exceeding 1 due to rounding.
        math::acos_approx(
            self.dot(rhs)
                .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
        )
    }
1025
    /// Rotates towards `rhs` up to `angle` (in radians) around the X axis.
    /// Positive angles rotate Y towards Z (right-handed).
    #[inline]
    #[must_use]
    pub fn rotate_x(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x,
            self.y * cosa - self.z * sina,
            self.y * sina + self.z * cosa,
        )
    }

    /// Returns `self` rotated by `angle` (in radians) around the Y axis.
    /// Positive angles rotate Z towards X (right-handed).
    #[inline]
    #[must_use]
    pub fn rotate_y(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x * cosa + self.z * sina,
            self.y,
            self.x * -sina + self.z * cosa,
        )
    }

    /// Returns `self` rotated by `angle` (in radians) around the Z axis.
    /// Positive angles rotate X towards Y (right-handed).
    #[inline]
    #[must_use]
    pub fn rotate_z(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x * cosa - self.y * sina,
            self.x * sina + self.y * cosa,
            self.z,
        )
    }

    /// Returns `self` rotated by `angle` (in radians) around the given `axis`.
    ///
    /// `axis` must be normalized ([`Quat::from_axis_angle`] requires a unit axis).
    #[inline]
    #[must_use]
    pub fn rotate_axis(self, axis: Self, angle: f32) -> Self {
        Quat::from_axis_angle(axis.into(), angle) * self
    }
1074
    /// Rotates `self` towards `rhs` by at most `max_angle` radians.
    ///
    /// A negative `max_angle` rotates away from `rhs`; the clamp below caps the
    /// step to the actual angle towards `rhs` and to at most π away from it.
    #[inline]
    #[must_use]
    pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {
        let angle_between = self.angle_between(rhs);
        let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);
        // Parallel or anti-parallel vectors have a zero cross product; fall back
        // to an arbitrary orthogonal axis so the rotation is still well-defined.
        let axis = self
            .cross(rhs)
            .try_normalize()
            .unwrap_or_else(|| self.any_orthogonal_vector().normalize());
        Quat::from_axis_angle(axis.into(), angle) * self
    }
1092
1093 #[inline]
1100 #[must_use]
1101 pub fn any_orthogonal_vector(&self) -> Self {
1102 if math::abs(self.x) > math::abs(self.y) {
1104 Self::new(-self.z, 0.0, self.x) } else {
1106 Self::new(0.0, self.z, -self.y) }
1108 }
1109
    /// Returns any unit vector that is orthogonal to the given one.
    ///
    /// The input vector must be unit length.  Uses a branchless orthonormal-basis
    /// construction (appears to be the Duff et al. / Pixar method — the
    /// sign-copy trick avoids cancellation near z = -1; confirm against the
    /// referenced paper).
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(&self) -> Self {
        glam_assert!(self.is_normalized());
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }

    /// Given a unit vector return two other vectors that together form an
    /// orthonormal basis.  That is, all three vectors are orthogonal to each other
    /// and are normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_pair(&self) -> (Self, Self) {
        glam_assert!(self.is_normalized());
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        (
            Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
            Self::new(b, sign + self.y * self.y * a, -self.y),
        )
    }
1147
    /// Performs a spherical linear interpolation between `self` and `rhs`
    /// based on the value `s`, interpolating length linearly as well.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`,
    /// the result will be equal to `rhs`.
    #[inline]
    #[must_use]
    pub fn slerp(self, rhs: Self, s: f32) -> Self {
        let self_length = self.length();
        let rhs_length = rhs.length();
        // Cosine of the angle between the two directions.
        let dot = self.dot(rhs) / (self_length * rhs_length);
        // General case: directions are neither (anti)parallel nor nearly so.
        if math::abs(dot) < 1.0 - 3e-7 {
            let theta = math::acos_approx(dot);
            let sin_theta = math::sin(theta);
            // Standard slerp weights sin((1-s)θ)/sin θ and sin(sθ)/sin θ.
            let t1 = math::sin(theta * (1. - s));
            let t2 = math::sin(theta * s);

            // Scale each endpoint so the interpolated length is lerped linearly.
            let result_length = self_length.lerp(rhs_length, s);
            return (self * (result_length / self_length) * t1
                + rhs * (result_length / rhs_length) * t2)
                * sin_theta.recip();
        }
        if dot < 0.0 {
            // Anti-parallel: the rotation plane is ambiguous, so pick an
            // arbitrary orthogonal axis and rotate by sπ around it.
            let axis = self.any_orthogonal_vector().normalize().into();
            let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
            let result_length = self_length.lerp(rhs_length, s);
            rotation * self * (result_length / self_length)
        } else {
            // Nearly parallel: plain lerp is numerically safer than dividing by
            // a vanishing sin θ.
            self.lerp(rhs, s)
        }
    }
1190
    // The `as_*` conversions below use Rust `as` casts. For float→int targets,
    // `as` saturates at the target type's bounds and maps NaN to 0.

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec3(&self) -> crate::DVec3 {
        crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
    }

    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec3(&self) -> crate::I8Vec3 {
        crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
    }

    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec3(&self) -> crate::U8Vec3 {
        crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec3(&self) -> crate::I16Vec3 {
        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec3(&self) -> crate::U16Vec3 {
        crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec3(&self) -> crate::IVec3 {
        crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec3(&self) -> crate::UVec3 {
        crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec3(&self) -> crate::I64Vec3 {
        crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec3(&self) -> crate::U64Vec3 {
        crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
    }

    /// Casts all elements of `self` to `usize`.
    #[inline]
    #[must_use]
    pub fn as_usizevec3(&self) -> crate::USizeVec3 {
        crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)
    }
1260}
1261
/// The default is the zero vector.
impl Default for Vec3A {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
1268
impl PartialEq for Vec3A {
    /// Lane-wise float equality over x, y and z only; the padding lane is not
    /// compared. Follows IEEE semantics: any NaN lane makes vectors unequal.
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1275
impl Div for Vec3A {
    type Output = Self;
    /// Element-wise division. All four lanes are divided, so the unused padding
    /// lane may become NaN/inf — harmless, as it is never observed.
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}
1283
1284impl Div<&Self> for Vec3A {
1285 type Output = Self;
1286 #[inline]
1287 fn div(self, rhs: &Self) -> Self {
1288 self.div(*rhs)
1289 }
1290}
1291
1292impl Div<&Vec3A> for &Vec3A {
1293 type Output = Vec3A;
1294 #[inline]
1295 fn div(self, rhs: &Vec3A) -> Vec3A {
1296 (*self).div(*rhs)
1297 }
1298}
1299
1300impl Div<Vec3A> for &Vec3A {
1301 type Output = Vec3A;
1302 #[inline]
1303 fn div(self, rhs: Vec3A) -> Vec3A {
1304 (*self).div(rhs)
1305 }
1306}
1307
1308impl DivAssign for Vec3A {
1309 #[inline]
1310 fn div_assign(&mut self, rhs: Self) {
1311 self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
1312 }
1313}
1314
1315impl DivAssign<&Self> for Vec3A {
1316 #[inline]
1317 fn div_assign(&mut self, rhs: &Self) {
1318 self.div_assign(*rhs);
1319 }
1320}
1321
1322impl Div<f32> for Vec3A {
1323 type Output = Self;
1324 #[inline]
1325 fn div(self, rhs: f32) -> Self {
1326 Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
1327 }
1328}
1329
1330impl Div<&f32> for Vec3A {
1331 type Output = Self;
1332 #[inline]
1333 fn div(self, rhs: &f32) -> Self {
1334 self.div(*rhs)
1335 }
1336}
1337
1338impl Div<&f32> for &Vec3A {
1339 type Output = Vec3A;
1340 #[inline]
1341 fn div(self, rhs: &f32) -> Vec3A {
1342 (*self).div(*rhs)
1343 }
1344}
1345
1346impl Div<f32> for &Vec3A {
1347 type Output = Vec3A;
1348 #[inline]
1349 fn div(self, rhs: f32) -> Vec3A {
1350 (*self).div(rhs)
1351 }
1352}
1353
1354impl DivAssign<f32> for Vec3A {
1355 #[inline]
1356 fn div_assign(&mut self, rhs: f32) {
1357 self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
1358 }
1359}
1360
1361impl DivAssign<&f32> for Vec3A {
1362 #[inline]
1363 fn div_assign(&mut self, rhs: &f32) {
1364 self.div_assign(*rhs);
1365 }
1366}
1367
1368impl Div<Vec3A> for f32 {
1369 type Output = Vec3A;
1370 #[inline]
1371 fn div(self, rhs: Vec3A) -> Vec3A {
1372 Vec3A(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
1373 }
1374}
1375
1376impl Div<&Vec3A> for f32 {
1377 type Output = Vec3A;
1378 #[inline]
1379 fn div(self, rhs: &Vec3A) -> Vec3A {
1380 self.div(*rhs)
1381 }
1382}
1383
1384impl Div<&Vec3A> for &f32 {
1385 type Output = Vec3A;
1386 #[inline]
1387 fn div(self, rhs: &Vec3A) -> Vec3A {
1388 (*self).div(*rhs)
1389 }
1390}
1391
1392impl Div<Vec3A> for &f32 {
1393 type Output = Vec3A;
1394 #[inline]
1395 fn div(self, rhs: Vec3A) -> Vec3A {
1396 (*self).div(rhs)
1397 }
1398}
1399
impl Mul for Vec3A {
    type Output = Self;
    /// Element-wise multiplication across all four lanes.
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}
1407
1408impl Mul<&Self> for Vec3A {
1409 type Output = Self;
1410 #[inline]
1411 fn mul(self, rhs: &Self) -> Self {
1412 self.mul(*rhs)
1413 }
1414}
1415
1416impl Mul<&Vec3A> for &Vec3A {
1417 type Output = Vec3A;
1418 #[inline]
1419 fn mul(self, rhs: &Vec3A) -> Vec3A {
1420 (*self).mul(*rhs)
1421 }
1422}
1423
1424impl Mul<Vec3A> for &Vec3A {
1425 type Output = Vec3A;
1426 #[inline]
1427 fn mul(self, rhs: Vec3A) -> Vec3A {
1428 (*self).mul(rhs)
1429 }
1430}
1431
1432impl MulAssign for Vec3A {
1433 #[inline]
1434 fn mul_assign(&mut self, rhs: Self) {
1435 self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
1436 }
1437}
1438
1439impl MulAssign<&Self> for Vec3A {
1440 #[inline]
1441 fn mul_assign(&mut self, rhs: &Self) {
1442 self.mul_assign(*rhs);
1443 }
1444}
1445
1446impl Mul<f32> for Vec3A {
1447 type Output = Self;
1448 #[inline]
1449 fn mul(self, rhs: f32) -> Self {
1450 Self(unsafe { vmulq_n_f32(self.0, rhs) })
1451 }
1452}
1453
1454impl Mul<&f32> for Vec3A {
1455 type Output = Self;
1456 #[inline]
1457 fn mul(self, rhs: &f32) -> Self {
1458 self.mul(*rhs)
1459 }
1460}
1461
1462impl Mul<&f32> for &Vec3A {
1463 type Output = Vec3A;
1464 #[inline]
1465 fn mul(self, rhs: &f32) -> Vec3A {
1466 (*self).mul(*rhs)
1467 }
1468}
1469
1470impl Mul<f32> for &Vec3A {
1471 type Output = Vec3A;
1472 #[inline]
1473 fn mul(self, rhs: f32) -> Vec3A {
1474 (*self).mul(rhs)
1475 }
1476}
1477
1478impl MulAssign<f32> for Vec3A {
1479 #[inline]
1480 fn mul_assign(&mut self, rhs: f32) {
1481 self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
1482 }
1483}
1484
1485impl MulAssign<&f32> for Vec3A {
1486 #[inline]
1487 fn mul_assign(&mut self, rhs: &f32) {
1488 self.mul_assign(*rhs);
1489 }
1490}
1491
1492impl Mul<Vec3A> for f32 {
1493 type Output = Vec3A;
1494 #[inline]
1495 fn mul(self, rhs: Vec3A) -> Vec3A {
1496 Vec3A(unsafe { vmulq_n_f32(rhs.0, self) })
1497 }
1498}
1499
1500impl Mul<&Vec3A> for f32 {
1501 type Output = Vec3A;
1502 #[inline]
1503 fn mul(self, rhs: &Vec3A) -> Vec3A {
1504 self.mul(*rhs)
1505 }
1506}
1507
1508impl Mul<&Vec3A> for &f32 {
1509 type Output = Vec3A;
1510 #[inline]
1511 fn mul(self, rhs: &Vec3A) -> Vec3A {
1512 (*self).mul(*rhs)
1513 }
1514}
1515
1516impl Mul<Vec3A> for &f32 {
1517 type Output = Vec3A;
1518 #[inline]
1519 fn mul(self, rhs: Vec3A) -> Vec3A {
1520 (*self).mul(rhs)
1521 }
1522}
1523
impl Add for Vec3A {
    type Output = Self;
    /// Element-wise addition across all four lanes.
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}
1531
1532impl Add<&Self> for Vec3A {
1533 type Output = Self;
1534 #[inline]
1535 fn add(self, rhs: &Self) -> Self {
1536 self.add(*rhs)
1537 }
1538}
1539
1540impl Add<&Vec3A> for &Vec3A {
1541 type Output = Vec3A;
1542 #[inline]
1543 fn add(self, rhs: &Vec3A) -> Vec3A {
1544 (*self).add(*rhs)
1545 }
1546}
1547
1548impl Add<Vec3A> for &Vec3A {
1549 type Output = Vec3A;
1550 #[inline]
1551 fn add(self, rhs: Vec3A) -> Vec3A {
1552 (*self).add(rhs)
1553 }
1554}
1555
1556impl AddAssign for Vec3A {
1557 #[inline]
1558 fn add_assign(&mut self, rhs: Self) {
1559 self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
1560 }
1561}
1562
1563impl AddAssign<&Self> for Vec3A {
1564 #[inline]
1565 fn add_assign(&mut self, rhs: &Self) {
1566 self.add_assign(*rhs);
1567 }
1568}
1569
1570impl Add<f32> for Vec3A {
1571 type Output = Self;
1572 #[inline]
1573 fn add(self, rhs: f32) -> Self {
1574 Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
1575 }
1576}
1577
1578impl Add<&f32> for Vec3A {
1579 type Output = Self;
1580 #[inline]
1581 fn add(self, rhs: &f32) -> Self {
1582 self.add(*rhs)
1583 }
1584}
1585
1586impl Add<&f32> for &Vec3A {
1587 type Output = Vec3A;
1588 #[inline]
1589 fn add(self, rhs: &f32) -> Vec3A {
1590 (*self).add(*rhs)
1591 }
1592}
1593
1594impl Add<f32> for &Vec3A {
1595 type Output = Vec3A;
1596 #[inline]
1597 fn add(self, rhs: f32) -> Vec3A {
1598 (*self).add(rhs)
1599 }
1600}
1601
1602impl AddAssign<f32> for Vec3A {
1603 #[inline]
1604 fn add_assign(&mut self, rhs: f32) {
1605 self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
1606 }
1607}
1608
1609impl AddAssign<&f32> for Vec3A {
1610 #[inline]
1611 fn add_assign(&mut self, rhs: &f32) {
1612 self.add_assign(*rhs);
1613 }
1614}
1615
1616impl Add<Vec3A> for f32 {
1617 type Output = Vec3A;
1618 #[inline]
1619 fn add(self, rhs: Vec3A) -> Vec3A {
1620 Vec3A(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
1621 }
1622}
1623
1624impl Add<&Vec3A> for f32 {
1625 type Output = Vec3A;
1626 #[inline]
1627 fn add(self, rhs: &Vec3A) -> Vec3A {
1628 self.add(*rhs)
1629 }
1630}
1631
1632impl Add<&Vec3A> for &f32 {
1633 type Output = Vec3A;
1634 #[inline]
1635 fn add(self, rhs: &Vec3A) -> Vec3A {
1636 (*self).add(*rhs)
1637 }
1638}
1639
1640impl Add<Vec3A> for &f32 {
1641 type Output = Vec3A;
1642 #[inline]
1643 fn add(self, rhs: Vec3A) -> Vec3A {
1644 (*self).add(rhs)
1645 }
1646}
1647
impl Sub for Vec3A {
    type Output = Self;
    /// Element-wise subtraction across all four lanes.
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}
1655
1656impl Sub<&Self> for Vec3A {
1657 type Output = Self;
1658 #[inline]
1659 fn sub(self, rhs: &Self) -> Self {
1660 self.sub(*rhs)
1661 }
1662}
1663
1664impl Sub<&Vec3A> for &Vec3A {
1665 type Output = Vec3A;
1666 #[inline]
1667 fn sub(self, rhs: &Vec3A) -> Vec3A {
1668 (*self).sub(*rhs)
1669 }
1670}
1671
1672impl Sub<Vec3A> for &Vec3A {
1673 type Output = Vec3A;
1674 #[inline]
1675 fn sub(self, rhs: Vec3A) -> Vec3A {
1676 (*self).sub(rhs)
1677 }
1678}
1679
1680impl SubAssign for Vec3A {
1681 #[inline]
1682 fn sub_assign(&mut self, rhs: Self) {
1683 self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
1684 }
1685}
1686
1687impl SubAssign<&Self> for Vec3A {
1688 #[inline]
1689 fn sub_assign(&mut self, rhs: &Self) {
1690 self.sub_assign(*rhs);
1691 }
1692}
1693
1694impl Sub<f32> for Vec3A {
1695 type Output = Self;
1696 #[inline]
1697 fn sub(self, rhs: f32) -> Self {
1698 Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
1699 }
1700}
1701
1702impl Sub<&f32> for Vec3A {
1703 type Output = Self;
1704 #[inline]
1705 fn sub(self, rhs: &f32) -> Self {
1706 self.sub(*rhs)
1707 }
1708}
1709
1710impl Sub<&f32> for &Vec3A {
1711 type Output = Vec3A;
1712 #[inline]
1713 fn sub(self, rhs: &f32) -> Vec3A {
1714 (*self).sub(*rhs)
1715 }
1716}
1717
1718impl Sub<f32> for &Vec3A {
1719 type Output = Vec3A;
1720 #[inline]
1721 fn sub(self, rhs: f32) -> Vec3A {
1722 (*self).sub(rhs)
1723 }
1724}
1725
1726impl SubAssign<f32> for Vec3A {
1727 #[inline]
1728 fn sub_assign(&mut self, rhs: f32) {
1729 self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
1730 }
1731}
1732
1733impl SubAssign<&f32> for Vec3A {
1734 #[inline]
1735 fn sub_assign(&mut self, rhs: &f32) {
1736 self.sub_assign(*rhs);
1737 }
1738}
1739
1740impl Sub<Vec3A> for f32 {
1741 type Output = Vec3A;
1742 #[inline]
1743 fn sub(self, rhs: Vec3A) -> Vec3A {
1744 Vec3A(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
1745 }
1746}
1747
1748impl Sub<&Vec3A> for f32 {
1749 type Output = Vec3A;
1750 #[inline]
1751 fn sub(self, rhs: &Vec3A) -> Vec3A {
1752 self.sub(*rhs)
1753 }
1754}
1755
1756impl Sub<&Vec3A> for &f32 {
1757 type Output = Vec3A;
1758 #[inline]
1759 fn sub(self, rhs: &Vec3A) -> Vec3A {
1760 (*self).sub(*rhs)
1761 }
1762}
1763
1764impl Sub<Vec3A> for &f32 {
1765 type Output = Vec3A;
1766 #[inline]
1767 fn sub(self, rhs: Vec3A) -> Vec3A {
1768 (*self).sub(rhs)
1769 }
1770}
1771
impl Rem for Vec3A {
    type Output = Self;
    /// Element-wise remainder computed as `self - floor(self / rhs) * rhs`.
    ///
    /// NOTE(review): `vrndmq_f32` rounds toward negative infinity, so this is a
    /// *floored* modulo whose sign follows `rhs` for mixed-sign operands —
    /// unlike scalar `f32 % f32`, which truncates and follows `self`'s sign.
    /// Confirm this matches the scalar `Vec3` backend's `Rem`.
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}
1782
1783impl Rem<&Self> for Vec3A {
1784 type Output = Self;
1785 #[inline]
1786 fn rem(self, rhs: &Self) -> Self {
1787 self.rem(*rhs)
1788 }
1789}
1790
1791impl Rem<&Vec3A> for &Vec3A {
1792 type Output = Vec3A;
1793 #[inline]
1794 fn rem(self, rhs: &Vec3A) -> Vec3A {
1795 (*self).rem(*rhs)
1796 }
1797}
1798
1799impl Rem<Vec3A> for &Vec3A {
1800 type Output = Vec3A;
1801 #[inline]
1802 fn rem(self, rhs: Vec3A) -> Vec3A {
1803 (*self).rem(rhs)
1804 }
1805}
1806
1807impl RemAssign for Vec3A {
1808 #[inline]
1809 fn rem_assign(&mut self, rhs: Self) {
1810 *self = self.rem(rhs);
1811 }
1812}
1813
1814impl RemAssign<&Self> for Vec3A {
1815 #[inline]
1816 fn rem_assign(&mut self, rhs: &Self) {
1817 self.rem_assign(*rhs);
1818 }
1819}
1820
1821impl Rem<f32> for Vec3A {
1822 type Output = Self;
1823 #[inline]
1824 fn rem(self, rhs: f32) -> Self {
1825 self.rem(Self::splat(rhs))
1826 }
1827}
1828
1829impl Rem<&f32> for Vec3A {
1830 type Output = Self;
1831 #[inline]
1832 fn rem(self, rhs: &f32) -> Self {
1833 self.rem(*rhs)
1834 }
1835}
1836
1837impl Rem<&f32> for &Vec3A {
1838 type Output = Vec3A;
1839 #[inline]
1840 fn rem(self, rhs: &f32) -> Vec3A {
1841 (*self).rem(*rhs)
1842 }
1843}
1844
1845impl Rem<f32> for &Vec3A {
1846 type Output = Vec3A;
1847 #[inline]
1848 fn rem(self, rhs: f32) -> Vec3A {
1849 (*self).rem(rhs)
1850 }
1851}
1852
1853impl RemAssign<f32> for Vec3A {
1854 #[inline]
1855 fn rem_assign(&mut self, rhs: f32) {
1856 *self = self.rem(Self::splat(rhs));
1857 }
1858}
1859
1860impl RemAssign<&f32> for Vec3A {
1861 #[inline]
1862 fn rem_assign(&mut self, rhs: &f32) {
1863 self.rem_assign(*rhs);
1864 }
1865}
1866
1867impl Rem<Vec3A> for f32 {
1868 type Output = Vec3A;
1869 #[inline]
1870 fn rem(self, rhs: Vec3A) -> Vec3A {
1871 Vec3A::splat(self).rem(rhs)
1872 }
1873}
1874
1875impl Rem<&Vec3A> for f32 {
1876 type Output = Vec3A;
1877 #[inline]
1878 fn rem(self, rhs: &Vec3A) -> Vec3A {
1879 self.rem(*rhs)
1880 }
1881}
1882
1883impl Rem<&Vec3A> for &f32 {
1884 type Output = Vec3A;
1885 #[inline]
1886 fn rem(self, rhs: &Vec3A) -> Vec3A {
1887 (*self).rem(*rhs)
1888 }
1889}
1890
1891impl Rem<Vec3A> for &f32 {
1892 type Output = Vec3A;
1893 #[inline]
1894 fn rem(self, rhs: Vec3A) -> Vec3A {
1895 (*self).rem(rhs)
1896 }
1897}
1898
impl AsRef<[f32; 3]> for Vec3A {
    /// Borrows the `x`, `y`, `z` lanes as a `[f32; 3]` without copying.
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        // SAFETY: `Vec3A` is `#[repr(transparent)]` over a `float32x4_t`
        // whose first three lanes hold x, y, z (see `Vec3A::new`), so the
        // first 12 bytes alias a valid `[f32; 3]`. The 16-byte alignment of
        // `Vec3A` satisfies the 4-byte alignment of `[f32; 3]`.
        unsafe { &*(self as *const Self as *const [f32; 3]) }
    }
}
1905
impl AsMut<[f32; 3]> for Vec3A {
    /// Mutably borrows the `x`, `y`, `z` lanes as a `[f32; 3]` without copying.
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        // SAFETY: `Vec3A` is `#[repr(transparent)]` over a `float32x4_t`
        // whose first three lanes hold x, y, z, so the first 12 bytes alias a
        // valid `[f32; 3]`; writes through the array only touch those lanes.
        // Alignment of `Vec3A` (16) satisfies that of `[f32; 3]` (4).
        unsafe { &mut *(self as *mut Self as *mut [f32; 3]) }
    }
}
1912
1913impl Sum for Vec3A {
1914 #[inline]
1915 fn sum<I>(iter: I) -> Self
1916 where
1917 I: Iterator<Item = Self>,
1918 {
1919 iter.fold(Self::ZERO, Self::add)
1920 }
1921}
1922
1923impl<'a> Sum<&'a Self> for Vec3A {
1924 #[inline]
1925 fn sum<I>(iter: I) -> Self
1926 where
1927 I: Iterator<Item = &'a Self>,
1928 {
1929 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1930 }
1931}
1932
1933impl Product for Vec3A {
1934 #[inline]
1935 fn product<I>(iter: I) -> Self
1936 where
1937 I: Iterator<Item = Self>,
1938 {
1939 iter.fold(Self::ONE, Self::mul)
1940 }
1941}
1942
1943impl<'a> Product<&'a Self> for Vec3A {
1944 #[inline]
1945 fn product<I>(iter: I) -> Self
1946 where
1947 I: Iterator<Item = &'a Self>,
1948 {
1949 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1950 }
1951}
1952
1953impl Neg for Vec3A {
1954 type Output = Self;
1955 #[inline]
1956 fn neg(self) -> Self {
1957 Self(unsafe { vnegq_f32(self.0) })
1958 }
1959}
1960
1961impl Neg for &Vec3A {
1962 type Output = Vec3A;
1963 #[inline]
1964 fn neg(self) -> Vec3A {
1965 (*self).neg()
1966 }
1967}
1968
1969impl Index<usize> for Vec3A {
1970 type Output = f32;
1971 #[inline]
1972 fn index(&self, index: usize) -> &Self::Output {
1973 match index {
1974 0 => &self.x,
1975 1 => &self.y,
1976 2 => &self.z,
1977 _ => panic!("index out of bounds"),
1978 }
1979 }
1980}
1981
1982impl IndexMut<usize> for Vec3A {
1983 #[inline]
1984 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1985 match index {
1986 0 => &mut self.x,
1987 1 => &mut self.y,
1988 2 => &mut self.z,
1989 _ => panic!("index out of bounds"),
1990 }
1991 }
1992}
1993
1994impl fmt::Display for Vec3A {
1995 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1996 if let Some(p) = f.precision() {
1997 write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
1998 } else {
1999 write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
2000 }
2001 }
2002}
2003
2004impl fmt::Debug for Vec3A {
2005 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2006 fmt.debug_tuple(stringify!(Vec3A))
2007 .field(&self.x)
2008 .field(&self.y)
2009 .field(&self.z)
2010 .finish()
2011 }
2012}
2013
2014impl From<Vec3A> for float32x4_t {
2015 #[inline(always)]
2016 fn from(t: Vec3A) -> Self {
2017 t.0
2018 }
2019}
2020
2021impl From<float32x4_t> for Vec3A {
2022 #[inline(always)]
2023 fn from(t: float32x4_t) -> Self {
2024 Self(t)
2025 }
2026}
2027
2028impl From<[f32; 3]> for Vec3A {
2029 #[inline]
2030 fn from(a: [f32; 3]) -> Self {
2031 Self::new(a[0], a[1], a[2])
2032 }
2033}
2034
impl From<Vec3A> for [f32; 3] {
    /// Copies the `x`, `y`, `z` lanes into a plain array, discarding the
    /// padding lane.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // `Align16` pads the 12-byte array out to a 16-byte-aligned slot so a
        // full-register store is in-bounds.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `out` is 16-byte aligned and 16 bytes large, so the
            // 16-byte `vst1q_f32` store fits; it initializes every byte of
            // the inner `[f32; 3]` before `assume_init` reads it.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2047
2048impl From<(f32, f32, f32)> for Vec3A {
2049 #[inline]
2050 fn from(t: (f32, f32, f32)) -> Self {
2051 Self::new(t.0, t.1, t.2)
2052 }
2053}
2054
2055impl From<Vec3A> for (f32, f32, f32) {
2056 #[inline]
2057 fn from(v: Vec3A) -> Self {
2058 (v.x, v.y, v.z)
2059 }
2060}
2061
2062impl From<Vec3> for Vec3A {
2063 #[inline]
2064 fn from(v: Vec3) -> Self {
2065 Self::new(v.x, v.y, v.z)
2066 }
2067}
2068
impl From<Vec3A> for Vec3 {
    /// Narrows the SIMD-backed `Vec3A` into a plain `Vec3`, discarding the
    /// padding lane.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // `Align16` pads the 12-byte `Vec3` out to a 16-byte-aligned slot so
        // a full-register store is in-bounds.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `out` is 16-byte aligned and 16 bytes large, so the
            // 16-byte `vst1q_f32` store fits; it initializes every byte of
            // the inner `Vec3` before `assume_init` reads it.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2081
2082impl From<(Vec2, f32)> for Vec3A {
2083 #[inline]
2084 fn from((v, z): (Vec2, f32)) -> Self {
2085 Self::new(v.x, v.y, z)
2086 }
2087}
2088
impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    /// Provides the `.x`/`.y`/`.z` field accessors used throughout this file.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: the first three f32 lanes of the register are x, y, z (see
        // `Vec3A::new`), and `Vec3A`'s 16-byte alignment satisfies the
        // target's. NOTE(review): assumes `deref::Vec3<f32>` is a plain
        // x/y/z field struct with no extra layout requirements — confirm in
        // `crate::deref`.
        unsafe { &*(self as *const Self).cast() }
    }
}
2096
impl DerefMut for Vec3A {
    /// Provides mutable `.x`/`.y`/`.z` field access.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: the first three f32 lanes of the register are x, y, z, and
        // writes through the target struct only touch those lanes.
        // NOTE(review): assumes `deref::Vec3<f32>` is a plain x/y/z field
        // struct — confirm in `crate::deref`.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2103
2104impl From<BVec3> for Vec3A {
2105 #[inline]
2106 fn from(v: BVec3) -> Self {
2107 Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
2108 }
2109}
2110
2111impl From<BVec3A> for Vec3A {
2112 #[inline]
2113 fn from(v: BVec3A) -> Self {
2114 let bool_array: [bool; 3] = v.into();
2115 Self::new(
2116 f32::from(bool_array[0]),
2117 f32::from(bool_array[1]),
2118 f32::from(bool_array[2]),
2119 )
2120 }
2121}