use crate::{f32::math, neon::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};

use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

use core::arch::aarch64::*;

#[cfg(feature = "zerocopy")]
use zerocopy_derive::*;

#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}

/// Creates a 3-dimensional vector.
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}

/// A 3-dimensional vector.
///
/// This type is 16 byte aligned and uses a SIMD vector type for storage.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) float32x4_t);
impl Vec3A {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];

    /// `true` if this type is backed by `core::simd`.
    pub const USES_CORE_SIMD: bool = false;
    /// `true` if this type is backed by AArch64 NEON intrinsics.
    pub const USES_NEON: bool = true;
    /// `true` if this type is backed by scalar math.
    pub const USES_SCALAR_MATH: bool = false;
    /// `true` if this type is backed by SSE2 intrinsics.
    pub const USES_SSE2: bool = false;
    /// `true` if this type is backed by WASM32 SIMD intrinsics.
    pub const USES_WASM32_SIMD: bool = false;

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element based on `mask`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }

    /// Converts `self` to `[x, y, z]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 3] {
        unsafe { *(self as *const Self as *const [f32; 3]) }
    }

    /// Creates a vector from the first 3 values in `slice`.
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 3);
        Self::new(slice[0], slice[1], slice[2])
    }

    /// Writes the elements of `self` to the first 3 elements in `slice`.
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[..3].copy_from_slice(&self.to_array());
    }

    /// Creates a `Vec3A` from the `x`, `y` and `z` elements of `v`, discarding `w`.
    #[inline]
    #[must_use]
    pub fn from_vec4(v: Vec4) -> Self {
        Self(v.0)
    }

    /// Creates a 4D vector from `self` and the given `w` value.
    #[inline]
    #[must_use]
    pub fn extend(self, w: f32) -> Vec4 {
        Vec4::new(self.x, self.y, self.z, w)
    }

    /// Creates a 2D vector from the `x` and `y` elements of `self`, discarding `z`.
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec2 {
        use crate::swizzles::Vec3Swizzles;
        self.xy()
    }

    /// Creates a 3D vector from a homogeneous 4D vector by dividing by `w`.
    #[inline]
    #[must_use]
    pub fn from_homogeneous(v: Vec4) -> Self {
        glam_assert!(v.w != 0.0);
        Self::from_vec4(v) / v.w
    }

    /// Converts `self` to a homogeneous 4D vector with `w` set to `1.0`.
    #[inline]
    #[must_use]
    pub fn to_homogeneous(self) -> Vec4 {
        self.extend(1.0)
    }

    /// Converts `self` to a `Vec3`.
    #[inline]
    #[must_use]
    pub fn to_vec3(self) -> Vec3 {
        Vec3::from(self)
    }

    /// Returns `self` with its `x` element set to the given value.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Returns `self` with its `y` element set to the given value.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Returns `self` with its `z` element set to the given value.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

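    /// Computes the dot product of `self` and `rhs`.
    ///
    /// A minimal usage sketch, assuming this type is exposed as `glam::Vec3A`:
    ///
    /// ```
    /// # use glam::Vec3A;
    /// let a = Vec3A::new(1.0, 2.0, 3.0);
    /// let b = Vec3A::new(4.0, 5.0, 6.0);
    /// // 1*4 + 2*5 + 3*6
    /// assert_eq!(a.dot(b), 32.0);
    /// ```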
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)
    }

    /// Returns a vector where every element is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot3_into_f32x4(self.0, rhs.0) })
    }

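    /// Computes the cross product of `self` and `rhs`.
    ///
    /// A minimal usage sketch, assuming this type is exposed as `glam::Vec3A`:
    ///
    /// ```
    /// # use glam::Vec3A;
    /// assert_eq!(Vec3A::X.cross(Vec3A::Y), Vec3A::Z);
    /// assert_eq!(Vec3A::Y.cross(Vec3A::X), Vec3A::NEG_Z);
    /// ```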
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        unsafe {
            let lhs = self.0;
            let rhs = rhs.0;
            // Build the (y, z, x) and (z, x, y) lane shuffles needed for the cross product.
            let lhs_yzwx = vextq_f32(lhs, lhs, 1);
            let rhs_wxyz = vextq_f32(rhs, rhs, 3);

            let lhs_yzx = vsetq_lane_f32(vgetq_lane_f32(lhs, 0), lhs_yzwx, 2);
            let rhs_zxy = vsetq_lane_f32(vgetq_lane_f32(rhs, 2), rhs_wxyz, 0);

            // lhs.yzx * rhs.zxy
            let part_a = vmulq_f32(lhs_yzx, rhs_zxy);

            let lhs_wxyz = vextq_f32(lhs, lhs, 3);
            let rhs_yzwx = vextq_f32(rhs, rhs, 1);
            let lhs_zxy = vsetq_lane_f32(vgetq_lane_f32(lhs, 2), lhs_wxyz, 0);
            let rhs_yzx = vsetq_lane_f32(vgetq_lane_f32(rhs, 0), rhs_yzwx, 2);

            // lhs.yzx * rhs.zxy - lhs.zxy * rhs.yzx
            let result = vmlsq_f32(part_a, lhs_zxy, rhs_yzx);
            Self(result)
        }
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { vminq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { vmaxq_f32(self.0, rhs.0) })
    }

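    /// Component-wise clamping of values, similar to `f32::clamp`.
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// A minimal usage sketch, assuming this type is exposed as `glam::Vec3A`:
    ///
    /// ```
    /// # use glam::Vec3A;
    /// let v = Vec3A::new(-1.5, 0.25, 7.0);
    /// assert_eq!(v.clamp(Vec3A::ZERO, Vec3A::ONE), Vec3A::new(0.0, 0.25, 1.0));
    /// ```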
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`, i.e. `min(x, y, z)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        self.x.min(self.y.min(self.z))
    }

    /// Returns the horizontal maximum of `self`, i.e. `max(x, y, z)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        self.x.max(self.y.max(self.z))
    }

    /// Returns the index of the first minimum element of `self`.
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        let mut min = self.x;
        let mut index = 0;
        if self.y < min {
            min = self.y;
            index = 1;
        }
        if self.z < min {
            index = 2;
        }
        index
    }

    /// Returns the index of the first maximum element of `self`.
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        let mut max = self.x;
        let mut index = 0;
        if self.y > max {
            max = self.y;
            index = 1;
        }
        if self.z > max {
            index = 2;
        }
        index
    }

    /// Returns the sum of all elements of `self`, i.e. `x + y + z`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        // Zero the unused w lane before the horizontal add.
        unsafe { vaddvq_f32(vsetq_lane_f32(0.0, self.0, 3)) }
    }

    /// Returns the product of all elements of `self`, i.e. `x * y * z`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            vmuls_laneq_f32(s, self.0, 2)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for each element.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vceqq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgeq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcgtq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcleq_f32(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { vcltq_f32(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { vabsq_f32(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self` (`±1.0`, or `NAN` for
    /// `NAN` elements).
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Copy the sign bit of each element onto 1.0, then restore NaN lanes from `self`.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

    /// Returns a vector with the signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }

    /// Returns a bitmask with the lowest 3 bits set to the sign bits of the elements of `self`,
    /// with `x` in the lowest bit.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            x | y << 1 | z << 2
        }
    }

    /// Returns `true` if, and only if, all elements are finite.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of `self`, returning a vector mask of the results.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec3A {
        BVec3A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of `self`, returning a vector mask of the results.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec3A {
        BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
    }

    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        math::sqrt(self.dot(self))
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must not be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        self.length().recip()
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Computes the squared Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of Euclidean division of `self` by `rhs`.
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
        )
    }

    /// Returns the element-wise remainder of Euclidean division of `self` by `rhs`.
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
        )
    }

    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and not of length zero.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        #[allow(clippy::let_and_return)]
        let normalized = self.mul(self.length_recip());
        glam_assert!(normalized.is_finite());
        normalized
    }

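    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// `None` is returned if the length of `self` is zero, infinite, or `NaN`.
    ///
    /// A minimal usage sketch, assuming this type is exposed as `glam::Vec3A`:
    ///
    /// ```
    /// # use glam::Vec3A;
    /// assert_eq!(Vec3A::new(0.0, 2.0, 0.0).try_normalize(), Some(Vec3A::Y));
    /// assert_eq!(Vec3A::ZERO.try_normalize(), None);
    /// ```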
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns `fallback`.
    ///
    /// `fallback` is returned if the length of `self` is zero, infinite, or `NaN`.
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns `self` normalized to length 1.0 together with the length of `self`.
    ///
    /// If `self` cannot be normalized, `(Self::X, 0.0)` is returned.
    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }

    /// Returns whether `self` is of length `1.0` or not, using a threshold of `2e-4` on the
    /// squared length.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

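    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// A minimal usage sketch, assuming this type is exposed as `glam::Vec3A`:
    ///
    /// ```
    /// # use glam::Vec3A;
    /// let v = Vec3A::new(1.0, 2.0, 3.0);
    /// // Projecting onto the X axis keeps only the x component.
    /// assert_eq!(v.project_onto(Vec3A::new(2.0, 0.0, 0.0)), Vec3A::new(1.0, 0.0, 0.0));
    /// ```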
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`, i.e. the component of `self`
    /// perpendicular to `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// `rhs` must be normalized.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector containing the nearest integer to each element of `self`.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { vrndnq_f32(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { vrndmq_f32(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { vrndpq_f32(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`, i.e. values
    /// truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { vrndq_f32(self.0) })
    }

    /// Returns `0.0` for each element where `rhs < self`, and `1.0` otherwise.
    #[inline]
    #[must_use]
    pub fn step(self, rhs: Self) -> Self {
        Self::select(rhs.cmplt(self), Self::ZERO, Self::ONE)
    }

    /// Returns `self` clamped to the range `[0.0, 1.0]` element-wise.
    #[inline]
    #[must_use]
    pub fn saturate(self) -> Self {
        self.clamp(Self::ZERO, Self::ONE)
    }

    /// Returns a vector containing the fractional part of each element of `self` as
    /// `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL `fract`, which is `self - self.floor()`.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of each element of `self` as
    /// `self - self.floor()`, matching the GLSL `fract` function.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` for each element.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
    }

    /// Returns a vector containing `2^self` for each element.
    #[inline]
    #[must_use]
    pub fn exp2(self) -> Self {
        Self::new(math::exp2(self.x), math::exp2(self.y), math::exp2(self.z))
    }

    /// Returns a vector containing the natural logarithm of each element of `self`.
    #[inline]
    #[must_use]
    pub fn ln(self) -> Self {
        Self::new(math::ln(self.x), math::ln(self.y), math::ln(self.z))
    }

    /// Returns a vector containing the base 2 logarithm of each element of `self`.
    #[inline]
    #[must_use]
    pub fn log2(self) -> Self {
        Self::new(math::log2(self.x), math::log2(self.y), math::log2(self.z))
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
        )
    }

    /// Returns a vector containing the square root of each element of `self`.
    #[inline]
    #[must_use]
    pub fn sqrt(self) -> Self {
        Self::new(math::sqrt(self.x), math::sqrt(self.y), math::sqrt(self.z))
    }

    /// Returns a vector containing the cosine of each element of `self`.
    #[inline]
    #[must_use]
    pub fn cos(self) -> Self {
        Self::new(math::cos(self.x), math::cos(self.y), math::cos(self.z))
    }

    /// Returns a vector containing the sine of each element of `self`.
    #[inline]
    #[must_use]
    pub fn sin(self) -> Self {
        Self::new(math::sin(self.x), math::sin(self.y), math::sin(self.z))
    }

    /// Returns a vector containing the sine and a vector containing the cosine of each element
    /// of `self`.
    #[inline]
    #[must_use]
    pub fn sin_cos(self) -> (Self, Self) {
        let (sin_x, cos_x) = math::sin_cos(self.x);
        let (sin_y, cos_y) = math::sin_cos(self.y);
        let (sin_z, cos_z) = math::sin_cos(self.z);

        (
            Self::new(sin_x, sin_y, sin_z),
            Self::new(cos_x, cos_y, cos_z),
        )
    }

    /// Returns a vector containing the reciprocal `1.0 / n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
    }

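    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s == 0.0` the result is `self`, and when `s == 1.0` the result is `rhs`.
    ///
    /// A minimal usage sketch, assuming this type is exposed as `glam::Vec3A`:
    ///
    /// ```
    /// # use glam::Vec3A;
    /// let a = Vec3A::ZERO;
    /// let b = Vec3A::new(2.0, 4.0, 8.0);
    /// assert_eq!(a.lerp(b, 0.5), Vec3A::new(1.0, 2.0, 4.0));
    /// ```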
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
    /// `self.distance(rhs)` or greater, the result will be equal to `rhs`.
    #[inline]
    #[must_use]
    pub fn move_towards(self, rhs: Self, d: f32) -> Self {
        let a = rhs - self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        self + a / len * d
    }

    /// Calculates the midpoint between `self` and `rhs`.
    #[inline]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    /// Returns `true` if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than `max`.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding error,
    /// yielding a more accurate result than a separate multiply and add.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
    }

    /// Returns the reflection vector for a given incident vector `self` and surface normal
    /// `normal`.
    ///
    /// `normal` must be normalized.
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

    /// Returns the refraction direction for a given incident vector `self`, surface normal
    /// `normal` and ratio of indices of refraction `eta`. When total internal reflection occurs,
    /// a zero vector is returned.
    ///
    /// `self` and `normal` must be normalized.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }

    /// Returns the angle (in radians) between two vectors in the range `[0, +π]`.
    ///
    /// The inputs do not need to be unit vectors, but they must be non-zero.
    #[inline]
    #[must_use]
    pub fn angle_between(self, rhs: Self) -> f32 {
        math::acos_approx(
            self.dot(rhs)
                .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
        )
    }

    /// Rotates `self` around the X axis by `angle` (in radians).
    #[inline]
    #[must_use]
    pub fn rotate_x(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x,
            self.y * cosa - self.z * sina,
            self.y * sina + self.z * cosa,
        )
    }

    /// Rotates `self` around the Y axis by `angle` (in radians).
    #[inline]
    #[must_use]
    pub fn rotate_y(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x * cosa + self.z * sina,
            self.y,
            self.x * -sina + self.z * cosa,
        )
    }

    /// Rotates `self` around the Z axis by `angle` (in radians).
    #[inline]
    #[must_use]
    pub fn rotate_z(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x * cosa - self.y * sina,
            self.x * sina + self.y * cosa,
            self.z,
        )
    }

    /// Rotates `self` around the given `axis` by `angle` (in radians).
    ///
    /// `axis` must be normalized.
    #[inline]
    #[must_use]
    pub fn rotate_axis(self, axis: Self, angle: f32) -> Self {
        Quat::from_axis_angle(axis.into(), angle) * self
    }

    /// Rotates `self` towards `rhs` by at most `max_angle` radians.
    #[inline]
    #[must_use]
    pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {
        let angle_between = self.angle_between(rhs);
        // Clamping prevents overshooting `rhs` and rotating more than π radians away from it.
        let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);
        let axis = self
            .cross(rhs)
            .try_normalize()
            .unwrap_or_else(|| self.any_orthogonal_vector().normalize());
        Quat::from_axis_angle(axis.into(), angle) * self
    }

    /// Returns some vector that is orthogonal to the given one.
    ///
    /// The input vector must be finite and non-zero. The output vector is not necessarily of
    /// unit length.
    #[inline]
    #[must_use]
    pub fn any_orthogonal_vector(self) -> Self {
        if math::abs(self.x) > math::abs(self.y) {
            Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)
        } else {
            Self::new(0.0, self.z, -self.y) // self.cross(Self::X)
        }
    }

    /// Returns any unit vector that is orthogonal to the given one.
    ///
    /// The input vector must be unit length.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(self) -> Self {
        glam_assert!(self.is_normalized());
        // From "Building an Orthonormal Basis, Revisited" (Duff et al., JCGT 2017).
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }

    /// Given a unit vector, returns two other vectors that together form an orthonormal basis
    /// with it.
    ///
    /// The input vector must be unit length.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_pair(self) -> (Self, Self) {
        glam_assert!(self.is_normalized());
        // From "Building an Orthonormal Basis, Revisited" (Duff et al., JCGT 2017).
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        (
            Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
            Self::new(b, sign + self.y * self.y * a, -self.y),
        )
    }

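    /// Performs a spherical linear interpolation between `self` and `rhs` based on the value
    /// `s`, also interpolating the lengths of the two inputs.
    ///
    /// When `s == 0.0` the result is `self`, and when `s == 1.0` the result is `rhs`.
    ///
    /// A hedged sketch of the expected behaviour, assuming this type is exposed as
    /// `glam::Vec3A`; the result is approximate, so an epsilon comparison is used:
    ///
    /// ```
    /// # use glam::Vec3A;
    /// let halfway = Vec3A::X.slerp(Vec3A::Y, 0.5);
    /// let expected = Vec3A::new(0.5_f32.sqrt(), 0.5_f32.sqrt(), 0.0);
    /// assert!(halfway.abs_diff_eq(expected, 1e-3));
    /// ```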
    #[inline]
    #[must_use]
    pub fn slerp(self, rhs: Self, s: f32) -> Self {
        let self_length = self.length();
        let rhs_length = rhs.length();
        // Cosine of the angle between the two vectors.
        let dot = self.dot(rhs) / (self_length * rhs_length);
        if math::abs(dot) < 1.0 - 3e-7 {
            // General case: interpolate the direction and the length separately.
            let theta = math::acos_approx(dot);
            let sin_theta = math::sin(theta);
            let t1 = math::sin(theta * (1. - s));
            let t2 = math::sin(theta * s);

            let result_length = self_length.lerp(rhs_length, s);
            return (self * (result_length / self_length) * t1
                + rhs * (result_length / rhs_length) * t2)
                * sin_theta.recip();
        }
        if dot < 0.0 {
            // The vectors are nearly opposite, so the rotation axis is ambiguous; pick any
            // orthogonal axis and rotate by `π * s` around it.
            let axis = self.any_orthogonal_vector().normalize().into();
            let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
            let result_length = self_length.lerp(rhs_length, s);
            rotation * self * (result_length / self_length)
        } else {
            // The vectors are nearly parallel, so fall back to linear interpolation.
            self.lerp(rhs, s)
        }
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec3(self) -> crate::DVec3 {
        crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
    }

    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec3(self) -> crate::I8Vec3 {
        crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
    }

    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec3(self) -> crate::U8Vec3 {
        crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec3(self) -> crate::I16Vec3 {
        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec3(self) -> crate::U16Vec3 {
        crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec3(self) -> crate::IVec3 {
        crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec3(self) -> crate::UVec3 {
        crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec3(self) -> crate::I64Vec3 {
        crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec3(self) -> crate::U64Vec3 {
        crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
    }

    /// Casts all elements of `self` to `usize`.
    #[inline]
    #[must_use]
    pub fn as_usizevec3(self) -> crate::USizeVec3 {
        crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)
    }
}

impl Default for Vec3A {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec3A {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { vdivq_f32(self.0, rhs.0) })
    }
}

impl Div<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &Self) -> Self {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs);
    }
}

impl Div<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Div<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &f32) -> Self {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &f32) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: f32) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl DivAssign<&f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs);
    }
}

impl Div<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Div<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}

impl Mul for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { vmulq_f32(self.0, rhs.0) })
    }
}

impl Mul<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &Self) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs);
    }
}

impl Mul<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { vmulq_n_f32(self.0, rhs) })
    }
}

impl Mul<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &f32) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: f32) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
    }
}

impl MulAssign<&f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs);
    }
}

impl Mul<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vmulq_n_f32(rhs.0, self) })
    }
}

impl Mul<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl Add for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { vaddq_f32(self.0, rhs.0) })
    }
}

impl Add<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs);
    }
}

impl Add<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Add<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &f32) -> Self {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &f32) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: f32) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl AddAssign<&f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs);
    }
}

impl Add<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Add<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}

impl Sub for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { vsubq_f32(self.0, rhs.0) })
    }
}

impl Sub<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &Self) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs);
    }
}

impl Sub<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
    }
}

impl Sub<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &f32) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: f32) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
    }
}

impl SubAssign<&f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs);
    }
}

impl Sub<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
    }
}

impl Sub<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl Rem for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}

impl Rem<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &Self) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs);
    }
}

impl Rem<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &f32) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: f32) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs);
    }
}

impl Rem<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        Vec3A::splat(self).rem(rhs)
    }
}

impl Rem<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl AsRef<[f32; 3]> for Vec3A {
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        unsafe { &*(self as *const Self as *const [f32; 3]) }
    }
}

impl AsMut<[f32; 3]> for Vec3A {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        unsafe { &mut *(self as *mut Self as *mut [f32; 3]) }
    }
}

impl Sum for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec3A {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { vnegq_f32(self.0) })
    }
}

impl Neg for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn neg(self) -> Vec3A {
        (*self).neg()
    }
}

impl Index<usize> for Vec3A {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec3A {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

impl fmt::Display for Vec3A {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
        } else {
            write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
        }
    }
}

impl fmt::Debug for Vec3A {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec3A))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .finish()
    }
}

impl From<Vec3A> for float32x4_t {
    #[inline(always)]
    fn from(t: Vec3A) -> Self {
        t.0
    }
}

impl From<float32x4_t> for Vec3A {
    #[inline(always)]
    fn from(t: float32x4_t) -> Self {
        Self(t)
    }
}

impl From<[f32; 3]> for Vec3A {
    #[inline]
    fn from(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }
}

impl From<Vec3A> for [f32; 3] {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32)> for Vec3A {
    #[inline]
    fn from(t: (f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2)
    }
}

impl From<Vec3A> for (f32, f32, f32) {
    #[inline]
    fn from(v: Vec3A) -> Self {
        (v.x, v.y, v.z)
    }
}

impl From<Vec3> for Vec3A {
    #[inline]
    fn from(v: Vec3) -> Self {
        Self::new(v.x, v.y, v.z)
    }
}

impl From<Vec3A> for Vec3 {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(Vec2, f32)> for Vec3A {
    #[inline]
    fn from((v, z): (Vec2, f32)) -> Self {
        Self::new(v.x, v.y, z)
    }
}

impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec3A {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}

impl From<BVec3> for Vec3A {
    #[inline]
    fn from(v: BVec3) -> Self {
        Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
    }
}

impl From<BVec3A> for Vec3A {
    #[inline]
    fn from(v: BVec3A) -> Self {
        let bool_array: [bool; 3] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
        )
    }
}