1use crate::{f32::math, neon::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9use core::arch::aarch64::*;
10
/// Helper union for bit-casting a `[f32; 4]` into the NEON-backed [`Vec3A`]
/// in a `const` context, where SIMD load intrinsics cannot be called.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}
16
/// Creates a 3-dimensional vector with 16-byte (SIMD) alignment.
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}
23
/// A 3-dimensional vector backed by a 128-bit NEON register (`float32x4_t`).
///
/// This is 16-byte aligned and 16 bytes in size; the fourth lane is padding
/// whose contents callers must not rely on.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) float32x4_t);
36
37impl Vec3A {
38 pub const ZERO: Self = Self::splat(0.0);
40
41 pub const ONE: Self = Self::splat(1.0);
43
44 pub const NEG_ONE: Self = Self::splat(-1.0);
46
47 pub const MIN: Self = Self::splat(f32::MIN);
49
50 pub const MAX: Self = Self::splat(f32::MAX);
52
53 pub const NAN: Self = Self::splat(f32::NAN);
55
56 pub const INFINITY: Self = Self::splat(f32::INFINITY);
58
59 pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
61
62 pub const X: Self = Self::new(1.0, 0.0, 0.0);
64
65 pub const Y: Self = Self::new(0.0, 1.0, 0.0);
67
68 pub const Z: Self = Self::new(0.0, 0.0, 1.0);
70
71 pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);
73
74 pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);
76
77 pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);
79
80 pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];
82
83 pub const USES_CORE_SIMD: bool = false;
85 pub const USES_NEON: bool = true;
87 pub const USES_SCALAR_MATH: bool = false;
89 pub const USES_SSE2: bool = false;
91 pub const USES_WASM32_SIMD: bool = false;
93
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        // The unused 4th lane is filled with a copy of `z`; its value is
        // unspecified to callers and must not be relied upon.
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }
100
101 #[inline]
103 #[must_use]
104 pub const fn splat(v: f32) -> Self {
105 unsafe { UnionCast { a: [v; 4] }.v }
106 }
107
108 #[inline]
110 #[must_use]
111 pub fn map<F>(self, f: F) -> Self
112 where
113 F: Fn(f32) -> f32,
114 {
115 Self::new(f(self.x), f(self.y), f(self.z))
116 }
117
118 #[inline]
124 #[must_use]
125 pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
126 Self(unsafe { vbslq_f32(mask.0, if_true.0, if_false.0) })
127 }
128
129 #[inline]
131 #[must_use]
132 pub const fn from_array(a: [f32; 3]) -> Self {
133 Self::new(a[0], a[1], a[2])
134 }
135
136 #[inline]
138 #[must_use]
139 pub const fn to_array(&self) -> [f32; 3] {
140 unsafe { *(self as *const Vec3A as *const [f32; 3]) }
141 }
142
143 #[inline]
149 #[must_use]
150 pub const fn from_slice(slice: &[f32]) -> Self {
151 assert!(slice.len() >= 3);
152 Self::new(slice[0], slice[1], slice[2])
153 }
154
155 #[inline]
161 pub fn write_to_slice(self, slice: &mut [f32]) {
162 slice[..3].copy_from_slice(&self.to_array());
163 }
164
165 #[inline]
169 #[must_use]
170 pub fn from_vec4(v: Vec4) -> Self {
171 Self(v.0)
172 }
173
174 #[inline]
176 #[must_use]
177 pub fn extend(self, w: f32) -> Vec4 {
178 Vec4::new(self.x, self.y, self.z, w)
179 }
180
181 #[inline]
185 #[must_use]
186 pub fn truncate(self) -> Vec2 {
187 use crate::swizzles::Vec3Swizzles;
188 self.xy()
189 }
190
191 #[inline]
193 #[must_use]
194 pub fn with_x(mut self, x: f32) -> Self {
195 self.x = x;
196 self
197 }
198
199 #[inline]
201 #[must_use]
202 pub fn with_y(mut self, y: f32) -> Self {
203 self.y = y;
204 self
205 }
206
207 #[inline]
209 #[must_use]
210 pub fn with_z(mut self, z: f32) -> Self {
211 self.z = z;
212 self
213 }
214
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // Scalar lane-wise form; only x, y, z participate so the unspecified
        // 4th lane can never contaminate the result.
        (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z)
    }
222
223 #[inline]
225 #[must_use]
226 pub fn dot_into_vec(self, rhs: Self) -> Self {
227 Self(unsafe { dot3_into_f32x4(self.0, rhs.0) })
228 }
229
    /// Computes the cross product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        // Implements cross(a, b) = a.yzx * b.zxy - a.zxy * b.yzx using NEON
        // rotate-and-patch shuffles, since aarch64 has no general 1-register
        // shuffle. With lanes written [l0, l1, l2, l3]:
        unsafe {
            let lhs = self.0;
            let rhs = rhs.0;
            // vextq_f32(v, v, 1) rotates to [y, z, w, x].
            let lhs_yzwx = vextq_f32(lhs, lhs, 1);
            // vextq_f32(v, v, 3) rotates to [w, x, y, z].
            let rhs_wxyz = vextq_f32(rhs, rhs, 3);

            // Patch single lanes to obtain the needed swizzles:
            // [y, z, w, x] with x written at lane 2 -> [y, z, x, x].
            let lhs_yzx = vsetq_lane_f32(vgetq_lane_f32(lhs, 0), lhs_yzwx, 2);
            // [w, x, y, z] with z written at lane 0 -> [z, x, y, z].
            let rhs_zxy = vsetq_lane_f32(vgetq_lane_f32(rhs, 2), rhs_wxyz, 0);

            // part_a = a.yzx * b.zxy
            let part_a = vmulq_f32(lhs_yzx, rhs_zxy);

            // Same construction for the subtracted term a.zxy * b.yzx.
            let lhs_wxyz = vextq_f32(lhs, lhs, 3);
            let rhs_yzwx = vextq_f32(rhs, rhs, 1);
            let lhs_zxy = vsetq_lane_f32(vgetq_lane_f32(lhs, 2), lhs_wxyz, 0);
            let rhs_yzx = vsetq_lane_f32(vgetq_lane_f32(rhs, 0), rhs_yzwx, 2);

            // Fused multiply-subtract: part_a - lhs_zxy * rhs_yzx.
            // The 4th lane of the result is unspecified, as per the type's
            // contract.
            let result = vmlsq_f32(part_a, lhs_zxy, rhs_yzx);
            Self(result)
        }
    }
258
259 #[inline]
266 #[must_use]
267 pub fn min(self, rhs: Self) -> Self {
268 Self(unsafe { vminq_f32(self.0, rhs.0) })
269 }
270
271 #[inline]
278 #[must_use]
279 pub fn max(self, rhs: Self) -> Self {
280 Self(unsafe { vmaxq_f32(self.0, rhs.0) })
281 }
282
283 #[inline]
294 #[must_use]
295 pub fn clamp(self, min: Self, max: Self) -> Self {
296 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
297 self.max(min).min(max)
298 }
299
300 #[inline]
307 #[must_use]
308 pub fn min_element(self) -> f32 {
309 self.x.min(self.y.min(self.z))
310 }
311
312 #[inline]
319 #[must_use]
320 pub fn max_element(self) -> f32 {
321 self.x.max(self.y.max(self.z))
322 }
323
324 #[doc(alias = "argmin")]
326 #[inline]
327 #[must_use]
328 pub fn min_position(self) -> usize {
329 let mut min = self.x;
330 let mut index = 0;
331 if self.y < min {
332 min = self.y;
333 index = 1;
334 }
335 if self.z < min {
336 index = 2;
337 }
338 index
339 }
340
341 #[doc(alias = "argmax")]
343 #[inline]
344 #[must_use]
345 pub fn max_position(self) -> usize {
346 let mut max = self.x;
347 let mut index = 0;
348 if self.y > max {
349 max = self.y;
350 index = 1;
351 }
352 if self.z > max {
353 index = 2;
354 }
355 index
356 }
357
    /// Returns the sum of all elements of `self`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        // Zero the unspecified 4th lane before the horizontal add so its
        // garbage contents cannot leak into the sum.
        unsafe { vaddvq_f32(vsetq_lane_f32(0.0, self.0, 3)) }
    }
366
    /// Returns the product of all elements of `self`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        // x * y * z via scalar-by-lane multiplies; the 4th lane is never
        // read so its unspecified contents are harmless.
        unsafe {
            let s = vmuls_laneq_f32(vgetq_lane_f32(self.0, 0), self.0, 1);
            vmuls_laneq_f32(s, self.0, 2)
        }
    }
378
379 #[inline]
385 #[must_use]
386 pub fn cmpeq(self, rhs: Self) -> BVec3A {
387 BVec3A(unsafe { vceqq_f32(self.0, rhs.0) })
388 }
389
390 #[inline]
396 #[must_use]
397 pub fn cmpne(self, rhs: Self) -> BVec3A {
398 BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })
399 }
400
401 #[inline]
407 #[must_use]
408 pub fn cmpge(self, rhs: Self) -> BVec3A {
409 BVec3A(unsafe { vcgeq_f32(self.0, rhs.0) })
410 }
411
412 #[inline]
418 #[must_use]
419 pub fn cmpgt(self, rhs: Self) -> BVec3A {
420 BVec3A(unsafe { vcgtq_f32(self.0, rhs.0) })
421 }
422
423 #[inline]
429 #[must_use]
430 pub fn cmple(self, rhs: Self) -> BVec3A {
431 BVec3A(unsafe { vcleq_f32(self.0, rhs.0) })
432 }
433
434 #[inline]
440 #[must_use]
441 pub fn cmplt(self, rhs: Self) -> BVec3A {
442 BVec3A(unsafe { vcltq_f32(self.0, rhs.0) })
443 }
444
445 #[inline]
447 #[must_use]
448 pub fn abs(self) -> Self {
449 Self(unsafe { vabsq_f32(self.0) })
450 }
451
    /// Returns a vector with elements representing the sign of `self`:
    /// `1.0` if the element is positive (including `+0.0`), `-1.0` if it is
    /// negative (including `-0.0`), and `NAN` if the element is `NAN`.
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Bit trick: (bits(self) & bits(-1.0)) | bits(1.0).
        // bits(-1.0) keeps only self's sign bit plus exponent bits already
        // set in 1.0, so ORing bits(1.0) always yields exactly +/-1.0 with
        // self's sign bit.
        let result = Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vreinterpretq_u32_f32(Self::NEG_ONE.0),
                ),
                vreinterpretq_u32_f32(Self::ONE.0),
            ))
        });
        // NaN lanes would otherwise map to +/-1.0; pass them through instead
        // to match `f32::signum`.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
472
    /// Returns a vector with the magnitude of `self` and the sign of `rhs`
    /// for each element.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // -0.0 has only the sign bit set, so it serves as the sign-bit mask:
        // take rhs's sign bits OR self's magnitude bits.
        let mask = Self::splat(-0.0);
        Self(unsafe {
            vreinterpretq_f32_u32(vorrq_u32(
                vandq_u32(vreinterpretq_u32_f32(rhs.0), vreinterpretq_u32_f32(mask.0)),
                vandq_u32(
                    vreinterpretq_u32_f32(self.0),
                    vmvnq_u32(vreinterpretq_u32_f32(mask.0)),
                ),
            ))
        })
    }
488
    /// Returns a bitmask with the lowest 3 bits set to the sign bits from the
    /// elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a
    /// `0` bit. Element `x` goes into the first lowest bit, element `y` into
    /// the second, etc.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe {
            // Isolate each lane's sign bit (bit 31) via the -0.0 mask, then
            // shift it down and pack x/y/z into bits 0..=2.
            let nmask = vreinterpretq_u32_f32(vdupq_n_f32(-0.0));
            let m = vandq_u32(vreinterpretq_u32_f32(self.0), nmask);
            let x = vgetq_lane_u32(m, 0) >> 31;
            let y = vgetq_lane_u32(m, 1) >> 31;
            let z = vgetq_lane_u32(m, 2) >> 31;

            x | y << 1 | z << 2
        }
    }
509
    /// Returns `true` if, and only if, all elements are finite. If any
    /// element is either `NaN`, positive or negative infinity, this will
    /// return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }
517
518 pub fn is_finite_mask(self) -> BVec3A {
522 BVec3A(unsafe { vcltq_f32(vabsq_f32(self.0), Self::INFINITY.0) })
523 }
524
525 #[inline]
527 #[must_use]
528 pub fn is_nan(self) -> bool {
529 self.is_nan_mask().any()
530 }
531
532 #[inline]
536 #[must_use]
537 pub fn is_nan_mask(self) -> BVec3A {
538 BVec3A(unsafe { vmvnq_u32(vceqq_f32(self.0, self.0)) })
539 }
540
541 #[doc(alias = "magnitude")]
543 #[inline]
544 #[must_use]
545 pub fn length(self) -> f32 {
546 math::sqrt(self.dot(self))
547 }
548
549 #[doc(alias = "magnitude2")]
553 #[inline]
554 #[must_use]
555 pub fn length_squared(self) -> f32 {
556 self.dot(self)
557 }
558
559 #[inline]
563 #[must_use]
564 pub fn length_recip(self) -> f32 {
565 self.length().recip()
566 }
567
568 #[inline]
570 #[must_use]
571 pub fn distance(self, rhs: Self) -> f32 {
572 (self - rhs).length()
573 }
574
575 #[inline]
577 #[must_use]
578 pub fn distance_squared(self, rhs: Self) -> f32 {
579 (self - rhs).length_squared()
580 }
581
582 #[inline]
584 #[must_use]
585 pub fn div_euclid(self, rhs: Self) -> Self {
586 Self::new(
587 math::div_euclid(self.x, rhs.x),
588 math::div_euclid(self.y, rhs.y),
589 math::div_euclid(self.z, rhs.z),
590 )
591 }
592
593 #[inline]
597 #[must_use]
598 pub fn rem_euclid(self, rhs: Self) -> Self {
599 Self::new(
600 math::rem_euclid(self.x, rhs.x),
601 math::rem_euclid(self.y, rhs.y),
602 math::rem_euclid(self.z, rhs.z),
603 )
604 }
605
606 #[inline]
616 #[must_use]
617 pub fn normalize(self) -> Self {
618 #[allow(clippy::let_and_return)]
619 let normalized = self.mul(self.length_recip());
620 glam_assert!(normalized.is_finite());
621 normalized
622 }
623
624 #[inline]
631 #[must_use]
632 pub fn try_normalize(self) -> Option<Self> {
633 let rcp = self.length_recip();
634 if rcp.is_finite() && rcp > 0.0 {
635 Some(self * rcp)
636 } else {
637 None
638 }
639 }
640
641 #[inline]
649 #[must_use]
650 pub fn normalize_or(self, fallback: Self) -> Self {
651 let rcp = self.length_recip();
652 if rcp.is_finite() && rcp > 0.0 {
653 self * rcp
654 } else {
655 fallback
656 }
657 }
658
659 #[inline]
666 #[must_use]
667 pub fn normalize_or_zero(self) -> Self {
668 self.normalize_or(Self::ZERO)
669 }
670
671 #[inline]
675 #[must_use]
676 pub fn normalize_and_length(self) -> (Self, f32) {
677 let length = self.length();
678 let rcp = 1.0 / length;
679 if rcp.is_finite() && rcp > 0.0 {
680 (self * rcp, length)
681 } else {
682 (Self::X, 0.0)
683 }
684 }
685
686 #[inline]
690 #[must_use]
691 pub fn is_normalized(self) -> bool {
692 math::abs(self.length_squared() - 1.0) <= 2e-4
693 }
694
695 #[inline]
703 #[must_use]
704 pub fn project_onto(self, rhs: Self) -> Self {
705 let other_len_sq_rcp = rhs.dot(rhs).recip();
706 glam_assert!(other_len_sq_rcp.is_finite());
707 rhs * self.dot(rhs) * other_len_sq_rcp
708 }
709
710 #[doc(alias("plane"))]
721 #[inline]
722 #[must_use]
723 pub fn reject_from(self, rhs: Self) -> Self {
724 self - self.project_onto(rhs)
725 }
726
727 #[inline]
735 #[must_use]
736 pub fn project_onto_normalized(self, rhs: Self) -> Self {
737 glam_assert!(rhs.is_normalized());
738 rhs * self.dot(rhs)
739 }
740
741 #[doc(alias("plane"))]
752 #[inline]
753 #[must_use]
754 pub fn reject_from_normalized(self, rhs: Self) -> Self {
755 self - self.project_onto_normalized(rhs)
756 }
757
758 #[inline]
761 #[must_use]
762 pub fn round(self) -> Self {
763 Self(unsafe { vrndnq_f32(self.0) })
764 }
765
766 #[inline]
769 #[must_use]
770 pub fn floor(self) -> Self {
771 Self(unsafe { vrndmq_f32(self.0) })
772 }
773
774 #[inline]
777 #[must_use]
778 pub fn ceil(self) -> Self {
779 Self(unsafe { vrndpq_f32(self.0) })
780 }
781
782 #[inline]
785 #[must_use]
786 pub fn trunc(self) -> Self {
787 Self(unsafe { vrndq_f32(self.0) })
788 }
789
790 #[inline]
797 #[must_use]
798 pub fn fract(self) -> Self {
799 self - self.trunc()
800 }
801
802 #[inline]
809 #[must_use]
810 pub fn fract_gl(self) -> Self {
811 self - self.floor()
812 }
813
814 #[inline]
817 #[must_use]
818 pub fn exp(self) -> Self {
819 Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
820 }
821
822 #[inline]
824 #[must_use]
825 pub fn powf(self, n: f32) -> Self {
826 Self::new(
827 math::powf(self.x, n),
828 math::powf(self.y, n),
829 math::powf(self.z, n),
830 )
831 }
832
833 #[inline]
835 #[must_use]
836 pub fn recip(self) -> Self {
837 Self(unsafe { vdivq_f32(Self::ONE.0, self.0) })
838 }
839
840 #[doc(alias = "mix")]
846 #[inline]
847 #[must_use]
848 pub fn lerp(self, rhs: Self, s: f32) -> Self {
849 self * (1.0 - s) + rhs * s
850 }
851
852 #[inline]
857 #[must_use]
858 pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
859 let a = rhs - *self;
860 let len = a.length();
861 if len <= d || len <= 1e-4 {
862 return rhs;
863 }
864 *self + a / len * d
865 }
866
867 #[inline]
873 pub fn midpoint(self, rhs: Self) -> Self {
874 (self + rhs) * 0.5
875 }
876
877 #[inline]
887 #[must_use]
888 pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
889 self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
890 }
891
    /// Returns a vector with a length no less than `min` and no more than
    /// `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either is negative,
    /// when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        // Compare against squared bounds to defer the sqrt until a rescale
        // is actually required.
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }
911
912 #[inline]
918 #[must_use]
919 pub fn clamp_length_max(self, max: f32) -> Self {
920 glam_assert!(0.0 <= max);
921 let length_sq = self.length_squared();
922 if length_sq > max * max {
923 max * (self / math::sqrt(length_sq))
924 } else {
925 self
926 }
927 }
928
929 #[inline]
935 #[must_use]
936 pub fn clamp_length_min(self, min: f32) -> Self {
937 glam_assert!(0.0 <= min);
938 let length_sq = self.length_squared();
939 if length_sq < min * min {
940 min * (self / math::sqrt(length_sq))
941 } else {
942 self
943 }
944 }
945
946 #[inline]
954 #[must_use]
955 pub fn mul_add(self, a: Self, b: Self) -> Self {
956 Self(unsafe { vfmaq_f32(b.0, self.0, a.0) })
957 }
958
959 #[inline]
968 #[must_use]
969 pub fn reflect(self, normal: Self) -> Self {
970 glam_assert!(normal.is_normalized());
971 self - 2.0 * self.dot(normal) * normal
972 }
973
974 #[inline]
984 #[must_use]
985 pub fn refract(self, normal: Self, eta: f32) -> Self {
986 glam_assert!(self.is_normalized());
987 glam_assert!(normal.is_normalized());
988 let n_dot_i = normal.dot(self);
989 let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
990 if k >= 0.0 {
991 eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
992 } else {
993 Self::ZERO
994 }
995 }
996
997 #[inline]
1001 #[must_use]
1002 pub fn angle_between(self, rhs: Self) -> f32 {
1003 math::acos_approx(
1004 self.dot(rhs)
1005 .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
1006 )
1007 }
1008
1009 #[inline]
1015 #[must_use]
1016 pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {
1017 let angle_between = self.angle_between(rhs);
1018 let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);
1020 let axis = self
1021 .cross(rhs)
1022 .try_normalize()
1023 .unwrap_or_else(|| self.any_orthogonal_vector().normalize());
1024 Quat::from_axis_angle(axis.into(), angle) * self
1025 }
1026
1027 #[inline]
1034 #[must_use]
1035 pub fn any_orthogonal_vector(&self) -> Self {
1036 if math::abs(self.x) > math::abs(self.y) {
1038 Self::new(-self.z, 0.0, self.x) } else {
1040 Self::new(0.0, self.z, -self.y) }
1042 }
1043
    /// Returns any unit vector that is orthogonal to the given one.
    ///
    /// The input vector must be unit length.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(&self) -> Self {
        glam_assert!(self.is_normalized());
        // Branchless orthonormal-basis construction from Duff et al. 2017,
        // "Building an Orthonormal Basis, Revisited" (JCGT); sign copies
        // z's sign to stay numerically stable near z = -1.
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }
1061
1062 #[inline]
1069 #[must_use]
1070 pub fn any_orthonormal_pair(&self) -> (Self, Self) {
1071 glam_assert!(self.is_normalized());
1072 let sign = math::signum(self.z);
1074 let a = -1.0 / (sign + self.z);
1075 let b = self.x * self.y * a;
1076 (
1077 Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
1078 Self::new(b, sign + self.y * self.y * a, -self.y),
1079 )
1080 }
1081
    /// Performs a spherical linear interpolation between `self` and `rhs`
    /// based on the value `s`, additionally interpolating the lengths of the
    /// two vectors.
    ///
    /// When `s` is `0.0` the result will be equal to `self`.  When `s` is
    /// `1.0` the result will be equal to `rhs`.
    #[inline]
    #[must_use]
    pub fn slerp(self, rhs: Self, s: f32) -> Self {
        let self_length = self.length();
        let rhs_length = rhs.length();
        // Cosine of the angle between the two (normalized) directions.
        let dot = self.dot(rhs) / (self_length * rhs_length);
        // General case: directions neither (nearly) parallel nor
        // anti-parallel.
        if math::abs(dot) < 1.0 - 3e-7 {
            let theta = math::acos_approx(dot);
            let sin_theta = math::sin(theta);
            // Standard slerp weights sin((1-s)θ)/sin(θ) and sin(sθ)/sin(θ).
            let t1 = math::sin(theta * (1. - s));
            let t2 = math::sin(theta * s);

            // Interpolate the magnitudes separately, so non-unit inputs blend
            // their lengths linearly.
            let result_length = self_length.lerp(rhs_length, s);
            return (self * (result_length / self_length) * t1
                + rhs * (result_length / rhs_length) * t2)
                * sin_theta.recip();
        }
        if dot < 0.0 {
            // Anti-parallel: the rotation axis is ill-defined, so pick any
            // orthogonal axis and rotate by πs about it.
            let axis = self.any_orthogonal_vector().normalize().into();
            let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
            let result_length = self_length.lerp(rhs_length, s);
            rotation * self * (result_length / self_length)
        } else {
            // (Nearly) parallel: linear interpolation is accurate and avoids
            // dividing by a vanishing sin(θ).
            self.lerp(rhs, s)
        }
    }
1124
1125 #[inline]
1127 #[must_use]
1128 pub fn as_dvec3(&self) -> crate::DVec3 {
1129 crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
1130 }
1131
1132 #[inline]
1134 #[must_use]
1135 pub fn as_i8vec3(&self) -> crate::I8Vec3 {
1136 crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
1137 }
1138
1139 #[inline]
1141 #[must_use]
1142 pub fn as_u8vec3(&self) -> crate::U8Vec3 {
1143 crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
1144 }
1145
1146 #[inline]
1148 #[must_use]
1149 pub fn as_i16vec3(&self) -> crate::I16Vec3 {
1150 crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
1151 }
1152
1153 #[inline]
1155 #[must_use]
1156 pub fn as_u16vec3(&self) -> crate::U16Vec3 {
1157 crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
1158 }
1159
1160 #[inline]
1162 #[must_use]
1163 pub fn as_ivec3(&self) -> crate::IVec3 {
1164 crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
1165 }
1166
1167 #[inline]
1169 #[must_use]
1170 pub fn as_uvec3(&self) -> crate::UVec3 {
1171 crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
1172 }
1173
1174 #[inline]
1176 #[must_use]
1177 pub fn as_i64vec3(&self) -> crate::I64Vec3 {
1178 crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
1179 }
1180
1181 #[inline]
1183 #[must_use]
1184 pub fn as_u64vec3(&self) -> crate::U64Vec3 {
1185 crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
1186 }
1187
1188 #[inline]
1190 #[must_use]
1191 pub fn as_usizevec3(&self) -> crate::USizeVec3 {
1192 crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)
1193 }
1194}
1195
1196impl Default for Vec3A {
1197 #[inline(always)]
1198 fn default() -> Self {
1199 Self::ZERO
1200 }
1201}
1202
1203impl PartialEq for Vec3A {
1204 #[inline]
1205 fn eq(&self, rhs: &Self) -> bool {
1206 self.cmpeq(*rhs).all()
1207 }
1208}
1209
1210impl Div<Vec3A> for Vec3A {
1211 type Output = Self;
1212 #[inline]
1213 fn div(self, rhs: Self) -> Self {
1214 Self(unsafe { vdivq_f32(self.0, rhs.0) })
1215 }
1216}
1217
1218impl Div<&Vec3A> for Vec3A {
1219 type Output = Vec3A;
1220 #[inline]
1221 fn div(self, rhs: &Vec3A) -> Vec3A {
1222 self.div(*rhs)
1223 }
1224}
1225
1226impl Div<&Vec3A> for &Vec3A {
1227 type Output = Vec3A;
1228 #[inline]
1229 fn div(self, rhs: &Vec3A) -> Vec3A {
1230 (*self).div(*rhs)
1231 }
1232}
1233
1234impl Div<Vec3A> for &Vec3A {
1235 type Output = Vec3A;
1236 #[inline]
1237 fn div(self, rhs: Vec3A) -> Vec3A {
1238 (*self).div(rhs)
1239 }
1240}
1241
1242impl DivAssign<Vec3A> for Vec3A {
1243 #[inline]
1244 fn div_assign(&mut self, rhs: Self) {
1245 self.0 = unsafe { vdivq_f32(self.0, rhs.0) };
1246 }
1247}
1248
1249impl DivAssign<&Vec3A> for Vec3A {
1250 #[inline]
1251 fn div_assign(&mut self, rhs: &Vec3A) {
1252 self.div_assign(*rhs)
1253 }
1254}
1255
1256impl Div<f32> for Vec3A {
1257 type Output = Self;
1258 #[inline]
1259 fn div(self, rhs: f32) -> Self {
1260 Self(unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) })
1261 }
1262}
1263
1264impl Div<&f32> for Vec3A {
1265 type Output = Vec3A;
1266 #[inline]
1267 fn div(self, rhs: &f32) -> Vec3A {
1268 self.div(*rhs)
1269 }
1270}
1271
1272impl Div<&f32> for &Vec3A {
1273 type Output = Vec3A;
1274 #[inline]
1275 fn div(self, rhs: &f32) -> Vec3A {
1276 (*self).div(*rhs)
1277 }
1278}
1279
1280impl Div<f32> for &Vec3A {
1281 type Output = Vec3A;
1282 #[inline]
1283 fn div(self, rhs: f32) -> Vec3A {
1284 (*self).div(rhs)
1285 }
1286}
1287
1288impl DivAssign<f32> for Vec3A {
1289 #[inline]
1290 fn div_assign(&mut self, rhs: f32) {
1291 self.0 = unsafe { vdivq_f32(self.0, vld1q_dup_f32(&rhs)) };
1292 }
1293}
1294
1295impl DivAssign<&f32> for Vec3A {
1296 #[inline]
1297 fn div_assign(&mut self, rhs: &f32) {
1298 self.div_assign(*rhs)
1299 }
1300}
1301
1302impl Div<Vec3A> for f32 {
1303 type Output = Vec3A;
1304 #[inline]
1305 fn div(self, rhs: Vec3A) -> Vec3A {
1306 Vec3A(unsafe { vdivq_f32(vld1q_dup_f32(&self), rhs.0) })
1307 }
1308}
1309
1310impl Div<&Vec3A> for f32 {
1311 type Output = Vec3A;
1312 #[inline]
1313 fn div(self, rhs: &Vec3A) -> Vec3A {
1314 self.div(*rhs)
1315 }
1316}
1317
1318impl Div<&Vec3A> for &f32 {
1319 type Output = Vec3A;
1320 #[inline]
1321 fn div(self, rhs: &Vec3A) -> Vec3A {
1322 (*self).div(*rhs)
1323 }
1324}
1325
1326impl Div<Vec3A> for &f32 {
1327 type Output = Vec3A;
1328 #[inline]
1329 fn div(self, rhs: Vec3A) -> Vec3A {
1330 (*self).div(rhs)
1331 }
1332}
1333
1334impl Mul<Vec3A> for Vec3A {
1335 type Output = Self;
1336 #[inline]
1337 fn mul(self, rhs: Self) -> Self {
1338 Self(unsafe { vmulq_f32(self.0, rhs.0) })
1339 }
1340}
1341
1342impl Mul<&Vec3A> for Vec3A {
1343 type Output = Vec3A;
1344 #[inline]
1345 fn mul(self, rhs: &Vec3A) -> Vec3A {
1346 self.mul(*rhs)
1347 }
1348}
1349
1350impl Mul<&Vec3A> for &Vec3A {
1351 type Output = Vec3A;
1352 #[inline]
1353 fn mul(self, rhs: &Vec3A) -> Vec3A {
1354 (*self).mul(*rhs)
1355 }
1356}
1357
1358impl Mul<Vec3A> for &Vec3A {
1359 type Output = Vec3A;
1360 #[inline]
1361 fn mul(self, rhs: Vec3A) -> Vec3A {
1362 (*self).mul(rhs)
1363 }
1364}
1365
1366impl MulAssign<Vec3A> for Vec3A {
1367 #[inline]
1368 fn mul_assign(&mut self, rhs: Self) {
1369 self.0 = unsafe { vmulq_f32(self.0, rhs.0) };
1370 }
1371}
1372
1373impl MulAssign<&Vec3A> for Vec3A {
1374 #[inline]
1375 fn mul_assign(&mut self, rhs: &Vec3A) {
1376 self.mul_assign(*rhs)
1377 }
1378}
1379
1380impl Mul<f32> for Vec3A {
1381 type Output = Self;
1382 #[inline]
1383 fn mul(self, rhs: f32) -> Self {
1384 Self(unsafe { vmulq_n_f32(self.0, rhs) })
1385 }
1386}
1387
1388impl Mul<&f32> for Vec3A {
1389 type Output = Vec3A;
1390 #[inline]
1391 fn mul(self, rhs: &f32) -> Vec3A {
1392 self.mul(*rhs)
1393 }
1394}
1395
1396impl Mul<&f32> for &Vec3A {
1397 type Output = Vec3A;
1398 #[inline]
1399 fn mul(self, rhs: &f32) -> Vec3A {
1400 (*self).mul(*rhs)
1401 }
1402}
1403
1404impl Mul<f32> for &Vec3A {
1405 type Output = Vec3A;
1406 #[inline]
1407 fn mul(self, rhs: f32) -> Vec3A {
1408 (*self).mul(rhs)
1409 }
1410}
1411
1412impl MulAssign<f32> for Vec3A {
1413 #[inline]
1414 fn mul_assign(&mut self, rhs: f32) {
1415 self.0 = unsafe { vmulq_n_f32(self.0, rhs) };
1416 }
1417}
1418
1419impl MulAssign<&f32> for Vec3A {
1420 #[inline]
1421 fn mul_assign(&mut self, rhs: &f32) {
1422 self.mul_assign(*rhs)
1423 }
1424}
1425
1426impl Mul<Vec3A> for f32 {
1427 type Output = Vec3A;
1428 #[inline]
1429 fn mul(self, rhs: Vec3A) -> Vec3A {
1430 Vec3A(unsafe { vmulq_n_f32(rhs.0, self) })
1431 }
1432}
1433
1434impl Mul<&Vec3A> for f32 {
1435 type Output = Vec3A;
1436 #[inline]
1437 fn mul(self, rhs: &Vec3A) -> Vec3A {
1438 self.mul(*rhs)
1439 }
1440}
1441
1442impl Mul<&Vec3A> for &f32 {
1443 type Output = Vec3A;
1444 #[inline]
1445 fn mul(self, rhs: &Vec3A) -> Vec3A {
1446 (*self).mul(*rhs)
1447 }
1448}
1449
1450impl Mul<Vec3A> for &f32 {
1451 type Output = Vec3A;
1452 #[inline]
1453 fn mul(self, rhs: Vec3A) -> Vec3A {
1454 (*self).mul(rhs)
1455 }
1456}
1457
1458impl Add<Vec3A> for Vec3A {
1459 type Output = Self;
1460 #[inline]
1461 fn add(self, rhs: Self) -> Self {
1462 Self(unsafe { vaddq_f32(self.0, rhs.0) })
1463 }
1464}
1465
1466impl Add<&Vec3A> for Vec3A {
1467 type Output = Vec3A;
1468 #[inline]
1469 fn add(self, rhs: &Vec3A) -> Vec3A {
1470 self.add(*rhs)
1471 }
1472}
1473
1474impl Add<&Vec3A> for &Vec3A {
1475 type Output = Vec3A;
1476 #[inline]
1477 fn add(self, rhs: &Vec3A) -> Vec3A {
1478 (*self).add(*rhs)
1479 }
1480}
1481
1482impl Add<Vec3A> for &Vec3A {
1483 type Output = Vec3A;
1484 #[inline]
1485 fn add(self, rhs: Vec3A) -> Vec3A {
1486 (*self).add(rhs)
1487 }
1488}
1489
1490impl AddAssign<Vec3A> for Vec3A {
1491 #[inline]
1492 fn add_assign(&mut self, rhs: Self) {
1493 self.0 = unsafe { vaddq_f32(self.0, rhs.0) };
1494 }
1495}
1496
1497impl AddAssign<&Vec3A> for Vec3A {
1498 #[inline]
1499 fn add_assign(&mut self, rhs: &Vec3A) {
1500 self.add_assign(*rhs)
1501 }
1502}
1503
1504impl Add<f32> for Vec3A {
1505 type Output = Self;
1506 #[inline]
1507 fn add(self, rhs: f32) -> Self {
1508 Self(unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) })
1509 }
1510}
1511
1512impl Add<&f32> for Vec3A {
1513 type Output = Vec3A;
1514 #[inline]
1515 fn add(self, rhs: &f32) -> Vec3A {
1516 self.add(*rhs)
1517 }
1518}
1519
1520impl Add<&f32> for &Vec3A {
1521 type Output = Vec3A;
1522 #[inline]
1523 fn add(self, rhs: &f32) -> Vec3A {
1524 (*self).add(*rhs)
1525 }
1526}
1527
1528impl Add<f32> for &Vec3A {
1529 type Output = Vec3A;
1530 #[inline]
1531 fn add(self, rhs: f32) -> Vec3A {
1532 (*self).add(rhs)
1533 }
1534}
1535
1536impl AddAssign<f32> for Vec3A {
1537 #[inline]
1538 fn add_assign(&mut self, rhs: f32) {
1539 self.0 = unsafe { vaddq_f32(self.0, vld1q_dup_f32(&rhs)) };
1540 }
1541}
1542
1543impl AddAssign<&f32> for Vec3A {
1544 #[inline]
1545 fn add_assign(&mut self, rhs: &f32) {
1546 self.add_assign(*rhs)
1547 }
1548}
1549
1550impl Add<Vec3A> for f32 {
1551 type Output = Vec3A;
1552 #[inline]
1553 fn add(self, rhs: Vec3A) -> Vec3A {
1554 Vec3A(unsafe { vaddq_f32(vld1q_dup_f32(&self), rhs.0) })
1555 }
1556}
1557
1558impl Add<&Vec3A> for f32 {
1559 type Output = Vec3A;
1560 #[inline]
1561 fn add(self, rhs: &Vec3A) -> Vec3A {
1562 self.add(*rhs)
1563 }
1564}
1565
1566impl Add<&Vec3A> for &f32 {
1567 type Output = Vec3A;
1568 #[inline]
1569 fn add(self, rhs: &Vec3A) -> Vec3A {
1570 (*self).add(*rhs)
1571 }
1572}
1573
1574impl Add<Vec3A> for &f32 {
1575 type Output = Vec3A;
1576 #[inline]
1577 fn add(self, rhs: Vec3A) -> Vec3A {
1578 (*self).add(rhs)
1579 }
1580}
1581
1582impl Sub<Vec3A> for Vec3A {
1583 type Output = Self;
1584 #[inline]
1585 fn sub(self, rhs: Self) -> Self {
1586 Self(unsafe { vsubq_f32(self.0, rhs.0) })
1587 }
1588}
1589
1590impl Sub<&Vec3A> for Vec3A {
1591 type Output = Vec3A;
1592 #[inline]
1593 fn sub(self, rhs: &Vec3A) -> Vec3A {
1594 self.sub(*rhs)
1595 }
1596}
1597
1598impl Sub<&Vec3A> for &Vec3A {
1599 type Output = Vec3A;
1600 #[inline]
1601 fn sub(self, rhs: &Vec3A) -> Vec3A {
1602 (*self).sub(*rhs)
1603 }
1604}
1605
1606impl Sub<Vec3A> for &Vec3A {
1607 type Output = Vec3A;
1608 #[inline]
1609 fn sub(self, rhs: Vec3A) -> Vec3A {
1610 (*self).sub(rhs)
1611 }
1612}
1613
1614impl SubAssign<Vec3A> for Vec3A {
1615 #[inline]
1616 fn sub_assign(&mut self, rhs: Vec3A) {
1617 self.0 = unsafe { vsubq_f32(self.0, rhs.0) };
1618 }
1619}
1620
1621impl SubAssign<&Vec3A> for Vec3A {
1622 #[inline]
1623 fn sub_assign(&mut self, rhs: &Vec3A) {
1624 self.sub_assign(*rhs)
1625 }
1626}
1627
1628impl Sub<f32> for Vec3A {
1629 type Output = Self;
1630 #[inline]
1631 fn sub(self, rhs: f32) -> Self {
1632 Self(unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) })
1633 }
1634}
1635
1636impl Sub<&f32> for Vec3A {
1637 type Output = Vec3A;
1638 #[inline]
1639 fn sub(self, rhs: &f32) -> Vec3A {
1640 self.sub(*rhs)
1641 }
1642}
1643
1644impl Sub<&f32> for &Vec3A {
1645 type Output = Vec3A;
1646 #[inline]
1647 fn sub(self, rhs: &f32) -> Vec3A {
1648 (*self).sub(*rhs)
1649 }
1650}
1651
1652impl Sub<f32> for &Vec3A {
1653 type Output = Vec3A;
1654 #[inline]
1655 fn sub(self, rhs: f32) -> Vec3A {
1656 (*self).sub(rhs)
1657 }
1658}
1659
1660impl SubAssign<f32> for Vec3A {
1661 #[inline]
1662 fn sub_assign(&mut self, rhs: f32) {
1663 self.0 = unsafe { vsubq_f32(self.0, vld1q_dup_f32(&rhs)) };
1664 }
1665}
1666
1667impl SubAssign<&f32> for Vec3A {
1668 #[inline]
1669 fn sub_assign(&mut self, rhs: &f32) {
1670 self.sub_assign(*rhs)
1671 }
1672}
1673
1674impl Sub<Vec3A> for f32 {
1675 type Output = Vec3A;
1676 #[inline]
1677 fn sub(self, rhs: Vec3A) -> Vec3A {
1678 Vec3A(unsafe { vsubq_f32(vld1q_dup_f32(&self), rhs.0) })
1679 }
1680}
1681
1682impl Sub<&Vec3A> for f32 {
1683 type Output = Vec3A;
1684 #[inline]
1685 fn sub(self, rhs: &Vec3A) -> Vec3A {
1686 self.sub(*rhs)
1687 }
1688}
1689
1690impl Sub<&Vec3A> for &f32 {
1691 type Output = Vec3A;
1692 #[inline]
1693 fn sub(self, rhs: &Vec3A) -> Vec3A {
1694 (*self).sub(*rhs)
1695 }
1696}
1697
1698impl Sub<Vec3A> for &f32 {
1699 type Output = Vec3A;
1700 #[inline]
1701 fn sub(self, rhs: Vec3A) -> Vec3A {
1702 (*self).sub(rhs)
1703 }
1704}
1705
impl Rem<Vec3A> for Vec3A {
    type Output = Self;
    /// Componentwise remainder: `self - floor(self / rhs) * rhs`.
    // NOTE(review): vrndmq_f32 rounds toward -infinity, making this a
    // *floored* modulo (the result takes the sign of `rhs`), which differs
    // from Rust's truncated `%` operator for mixed-sign operands — confirm
    // this matches the scalar Vec3 implementation.
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = vrndmq_f32(vdivq_f32(self.0, rhs.0));
            Self(vsubq_f32(self.0, vmulq_f32(n, rhs.0)))
        }
    }
}
1716
1717impl Rem<&Vec3A> for Vec3A {
1718 type Output = Vec3A;
1719 #[inline]
1720 fn rem(self, rhs: &Vec3A) -> Vec3A {
1721 self.rem(*rhs)
1722 }
1723}
1724
1725impl Rem<&Vec3A> for &Vec3A {
1726 type Output = Vec3A;
1727 #[inline]
1728 fn rem(self, rhs: &Vec3A) -> Vec3A {
1729 (*self).rem(*rhs)
1730 }
1731}
1732
1733impl Rem<Vec3A> for &Vec3A {
1734 type Output = Vec3A;
1735 #[inline]
1736 fn rem(self, rhs: Vec3A) -> Vec3A {
1737 (*self).rem(rhs)
1738 }
1739}
1740
1741impl RemAssign<Vec3A> for Vec3A {
1742 #[inline]
1743 fn rem_assign(&mut self, rhs: Self) {
1744 *self = self.rem(rhs);
1745 }
1746}
1747
1748impl RemAssign<&Vec3A> for Vec3A {
1749 #[inline]
1750 fn rem_assign(&mut self, rhs: &Vec3A) {
1751 self.rem_assign(*rhs)
1752 }
1753}
1754
1755impl Rem<f32> for Vec3A {
1756 type Output = Self;
1757 #[inline]
1758 fn rem(self, rhs: f32) -> Self {
1759 self.rem(Self::splat(rhs))
1760 }
1761}
1762
1763impl Rem<&f32> for Vec3A {
1764 type Output = Vec3A;
1765 #[inline]
1766 fn rem(self, rhs: &f32) -> Vec3A {
1767 self.rem(*rhs)
1768 }
1769}
1770
1771impl Rem<&f32> for &Vec3A {
1772 type Output = Vec3A;
1773 #[inline]
1774 fn rem(self, rhs: &f32) -> Vec3A {
1775 (*self).rem(*rhs)
1776 }
1777}
1778
1779impl Rem<f32> for &Vec3A {
1780 type Output = Vec3A;
1781 #[inline]
1782 fn rem(self, rhs: f32) -> Vec3A {
1783 (*self).rem(rhs)
1784 }
1785}
1786
1787impl RemAssign<f32> for Vec3A {
1788 #[inline]
1789 fn rem_assign(&mut self, rhs: f32) {
1790 *self = self.rem(Self::splat(rhs));
1791 }
1792}
1793
1794impl RemAssign<&f32> for Vec3A {
1795 #[inline]
1796 fn rem_assign(&mut self, rhs: &f32) {
1797 self.rem_assign(*rhs)
1798 }
1799}
1800
1801impl Rem<Vec3A> for f32 {
1802 type Output = Vec3A;
1803 #[inline]
1804 fn rem(self, rhs: Vec3A) -> Vec3A {
1805 Vec3A::splat(self).rem(rhs)
1806 }
1807}
1808
1809impl Rem<&Vec3A> for f32 {
1810 type Output = Vec3A;
1811 #[inline]
1812 fn rem(self, rhs: &Vec3A) -> Vec3A {
1813 self.rem(*rhs)
1814 }
1815}
1816
1817impl Rem<&Vec3A> for &f32 {
1818 type Output = Vec3A;
1819 #[inline]
1820 fn rem(self, rhs: &Vec3A) -> Vec3A {
1821 (*self).rem(*rhs)
1822 }
1823}
1824
1825impl Rem<Vec3A> for &f32 {
1826 type Output = Vec3A;
1827 #[inline]
1828 fn rem(self, rhs: Vec3A) -> Vec3A {
1829 (*self).rem(rhs)
1830 }
1831}
1832
1833#[cfg(not(target_arch = "spirv"))]
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 3]> for Vec3A {
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        // SAFETY: Vec3A is #[repr(transparent)] over a 16-byte float32x4_t
        // whose first three f32 lanes are x, y, z laid out contiguously;
        // only those 12 bytes are exposed, never the padding lane.
        unsafe { &*(self as *const Vec3A as *const [f32; 3]) }
    }
}
1840
1841#[cfg(not(target_arch = "spirv"))]
1842impl AsMut<[f32; 3]> for Vec3A {
1843 #[inline]
1844 fn as_mut(&mut self) -> &mut [f32; 3] {
1845 unsafe { &mut *(self as *mut Vec3A as *mut [f32; 3]) }
1846 }
1847}
1848
1849impl Sum for Vec3A {
1850 #[inline]
1851 fn sum<I>(iter: I) -> Self
1852 where
1853 I: Iterator<Item = Self>,
1854 {
1855 iter.fold(Self::ZERO, Self::add)
1856 }
1857}
1858
1859impl<'a> Sum<&'a Self> for Vec3A {
1860 #[inline]
1861 fn sum<I>(iter: I) -> Self
1862 where
1863 I: Iterator<Item = &'a Self>,
1864 {
1865 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1866 }
1867}
1868
1869impl Product for Vec3A {
1870 #[inline]
1871 fn product<I>(iter: I) -> Self
1872 where
1873 I: Iterator<Item = Self>,
1874 {
1875 iter.fold(Self::ONE, Self::mul)
1876 }
1877}
1878
1879impl<'a> Product<&'a Self> for Vec3A {
1880 #[inline]
1881 fn product<I>(iter: I) -> Self
1882 where
1883 I: Iterator<Item = &'a Self>,
1884 {
1885 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1886 }
1887}
1888
1889impl Neg for Vec3A {
1890 type Output = Self;
1891 #[inline]
1892 fn neg(self) -> Self {
1893 Self(unsafe { vnegq_f32(self.0) })
1894 }
1895}
1896
1897impl Neg for &Vec3A {
1898 type Output = Vec3A;
1899 #[inline]
1900 fn neg(self) -> Vec3A {
1901 (*self).neg()
1902 }
1903}
1904
impl Index<usize> for Vec3A {
    type Output = f32;
    /// Returns a reference to the component at `index`: 0 = x, 1 = y, 2 = z.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 2.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec3A {
    /// Returns a mutable reference to the component at `index`: 0 = x, 1 = y, 2 = z.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 2.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            _ => panic!("index out of bounds"),
        }
    }
}
1929
1930impl fmt::Display for Vec3A {
1931 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1932 if let Some(p) = f.precision() {
1933 write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
1934 } else {
1935 write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
1936 }
1937 }
1938}
1939
1940impl fmt::Debug for Vec3A {
1941 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1942 fmt.debug_tuple(stringify!(Vec3A))
1943 .field(&self.x)
1944 .field(&self.y)
1945 .field(&self.z)
1946 .finish()
1947 }
1948}
1949
1950impl From<Vec3A> for float32x4_t {
1951 #[inline(always)]
1952 fn from(t: Vec3A) -> Self {
1953 t.0
1954 }
1955}
1956
1957impl From<float32x4_t> for Vec3A {
1958 #[inline(always)]
1959 fn from(t: float32x4_t) -> Self {
1960 Self(t)
1961 }
1962}
1963
1964impl From<[f32; 3]> for Vec3A {
1965 #[inline]
1966 fn from(a: [f32; 3]) -> Self {
1967 Self::new(a[0], a[1], a[2])
1968 }
1969}
1970
impl From<Vec3A> for [f32; 3] {
    /// Converts to an `[x, y, z]` array, dropping the padding lane.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Store all four SIMD lanes into an aligned scratch, then keep the
        // first three floats.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `Align16<[f32; 3]>` is 16-byte aligned and — due to that
            // alignment — presumably padded to 16 bytes, so the 4-lane
            // `vst1q_f32` store stays in-bounds and fully initializes the
            // array portion before `assume_init`.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1983
1984impl From<(f32, f32, f32)> for Vec3A {
1985 #[inline]
1986 fn from(t: (f32, f32, f32)) -> Self {
1987 Self::new(t.0, t.1, t.2)
1988 }
1989}
1990
impl From<Vec3A> for (f32, f32, f32) {
    /// Converts to an `(x, y, z)` tuple, dropping the padding lane.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Store all four SIMD lanes into an aligned scratch, then keep the
        // tuple portion.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `Align16<(f32, f32, f32)>` is 16-byte aligned, so the
            // 4-lane store stays in-bounds. NOTE(review): this assumes the
            // tuple's fields are laid out as three consecutive f32s in x, y, z
            // order — Rust does not formally guarantee tuple layout; confirm
            // this matches the crate's other backends.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2003
2004impl From<Vec3> for Vec3A {
2005 #[inline]
2006 fn from(v: Vec3) -> Self {
2007 Self::new(v.x, v.y, v.z)
2008 }
2009}
2010
impl From<Vec3A> for Vec3 {
    /// Narrows the SIMD-backed `Vec3A` to an unaligned `Vec3`, dropping the
    /// padding lane.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::align16::Align16;
        use core::mem::MaybeUninit;
        // Store all four SIMD lanes into an aligned scratch, then keep the
        // `Vec3` portion.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `Align16<Vec3>` is 16-byte aligned and — due to that
            // alignment — presumably padded to 16 bytes, so the 4-lane
            // `vst1q_f32` store stays in-bounds and initializes all of
            // `Vec3`'s fields before `assume_init`.
            vst1q_f32(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2023
2024impl From<(Vec2, f32)> for Vec3A {
2025 #[inline]
2026 fn from((v, z): (Vec2, f32)) -> Self {
2027 Self::new(v.x, v.y, z)
2028 }
2029}
2030
impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    /// Exposes `.x`, `.y`, `.z` field access on `Vec3A`.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: assumes `deref::Vec3<f32>` is three consecutive f32 fields
        // matching the first three lanes of the NEON register — layout must be
        // upheld by the crate's deref helper type; confirm against
        // `crate::deref`.
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec3A {
    /// Exposes mutable `.x`, `.y`, `.z` field access on `Vec3A`.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout assumption as `Deref`; writes touch only the
        // x, y, z lanes.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2045
2046impl From<BVec3> for Vec3A {
2047 #[inline]
2048 fn from(v: BVec3) -> Self {
2049 Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
2050 }
2051}
2052
2053impl From<BVec3A> for Vec3A {
2054 #[inline]
2055 fn from(v: BVec3A) -> Self {
2056 let bool_array: [bool; 3] = v.into();
2057 Self::new(
2058 f32::from(bool_array[0]),
2059 f32::from(bool_array[1]),
2060 f32::from(bool_array[2]),
2061 )
2062 }
2063}