1#[cfg(not(feature = "scalar-math"))]
4use crate::BVec4A;
5use crate::{
6 BVec4, I16Vec4, I64Vec2, I64Vec3, I8Vec4, IVec4, U16Vec4, U64Vec4, U8Vec4, USizeVec4, UVec4,
7};
8
9use core::fmt;
10use core::iter::{Product, Sum};
11use core::{f32, ops::*};
12
/// Creates a 4-dimensional `i64` vector.
#[inline(always)]
#[must_use]
pub const fn i64vec4(x: i64, y: i64, z: i64, w: i64) -> I64Vec4 {
    I64Vec4::new(x, y, z, w)
}
19
/// A 4-dimensional vector of `i64` components.
#[cfg_attr(not(target_arch = "spirv"), derive(Hash))]
#[derive(Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
    all(feature = "bytemuck", not(target_arch = "spirv")),
    derive(bytemuck::Pod, bytemuck::Zeroable)
)]
// 16-byte alignment for the "cuda" feature; plain repr(C) layout elsewhere,
// and a SIMD register representation when compiled for SPIR-V.
#[cfg_attr(feature = "cuda", repr(align(16)))]
#[cfg_attr(not(target_arch = "spirv"), repr(C))]
#[cfg_attr(target_arch = "spirv", repr(simd))]
pub struct I64Vec4 {
    pub x: i64,
    pub y: i64,
    pub z: i64,
    pub w: i64,
}
36
37impl I64Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0);

    /// All ones.
    pub const ONE: Self = Self::splat(1);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1);

    /// All `i64::MIN`.
    pub const MIN: Self = Self::splat(i64::MIN);

    /// All `i64::MAX`.
    pub const MAX: Self = Self::splat(i64::MAX);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1, 0, 0, 0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0, 1, 0, 0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0, 0, 1, 0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0, 0, 0, 1);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1, 0, 0, 0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0, -1, 0, 0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0, 0, -1, 0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0, 0, 0, -1);

    /// The unit axes in `[X, Y, Z, W]` order.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
79
    /// Creates a new vector from the given components.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: i64, y: i64, z: i64, w: i64) -> Self {
        Self { x, y, z, w }
    }
86
87 #[inline]
89 #[must_use]
90 pub const fn splat(v: i64) -> Self {
91 Self {
92 x: v,
93
94 y: v,
95
96 z: v,
97
98 w: v,
99 }
100 }
101
    /// Returns a vector containing each element of `self` modified by the
    /// mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(i64) -> i64,
    {
        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
    }
111
112 #[inline]
118 #[must_use]
119 pub fn select(mask: BVec4, if_true: Self, if_false: Self) -> Self {
120 Self {
121 x: if mask.test(0) { if_true.x } else { if_false.x },
122 y: if mask.test(1) { if_true.y } else { if_false.y },
123 z: if mask.test(2) { if_true.z } else { if_false.z },
124 w: if mask.test(3) { if_true.w } else { if_false.w },
125 }
126 }
127
    /// Creates a new vector from an array in `[x, y, z, w]` order.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [i64; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// Returns the components as an array in `[x, y, z, w]` order.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [i64; 4] {
        [self.x, self.y, self.z, self.w]
    }
141
    /// Creates a vector from the first 4 elements of `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` has fewer than 4 elements.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[i64]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the components to the first 4 elements of `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` has fewer than 4 elements (via the slice index).
    #[inline]
    pub fn write_to_slice(self, slice: &mut [i64]) {
        slice[..4].copy_from_slice(&self.to_array());
    }
163
    /// Creates a 3D vector from `self`, dropping the `w` component.
    #[inline]
    #[must_use]
    pub fn truncate(self) -> I64Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }
173
    /// Returns a copy of `self` with `x` replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: i64) -> Self {
        self.x = x;
        self
    }

    /// Returns a copy of `self` with `y` replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: i64) -> Self {
        self.y = y;
        self
    }

    /// Returns a copy of `self` with `z` replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: i64) -> Self {
        self.z = z;
        self
    }

    /// Returns a copy of `self` with `w` replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: i64) -> Self {
        self.w = w;
        self
    }
205
    /// Computes the dot product of `self` and `rhs`.
    ///
    /// Overflow follows standard Rust integer arithmetic (panics in debug
    /// builds, wraps in release builds).
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> i64 {
        (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z) + (self.w * rhs.w)
    }

    /// Returns a vector with every lane set to the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self::splat(self.dot(rhs))
    }
219
220 #[inline]
224 #[must_use]
225 pub fn min(self, rhs: Self) -> Self {
226 Self {
227 x: if self.x < rhs.x { self.x } else { rhs.x },
228 y: if self.y < rhs.y { self.y } else { rhs.y },
229 z: if self.z < rhs.z { self.z } else { rhs.z },
230 w: if self.w < rhs.w { self.w } else { rhs.w },
231 }
232 }
233
234 #[inline]
238 #[must_use]
239 pub fn max(self, rhs: Self) -> Self {
240 Self {
241 x: if self.x > rhs.x { self.x } else { rhs.x },
242 y: if self.y > rhs.y { self.y } else { rhs.y },
243 z: if self.z > rhs.z { self.z } else { rhs.z },
244 w: if self.w > rhs.w { self.w } else { rhs.w },
245 }
246 }
247
    /// Component-wise clamp of `self` between `min` and `max`.
    ///
    /// Asserts (when glam's debug assertions are enabled) that `min <= max`
    /// on every lane.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }
261
262 #[inline]
266 #[must_use]
267 pub fn min_element(self) -> i64 {
268 let min = |a, b| if a < b { a } else { b };
269 min(self.x, min(self.y, min(self.z, self.w)))
270 }
271
272 #[inline]
276 #[must_use]
277 pub fn max_element(self) -> i64 {
278 let max = |a, b| if a > b { a } else { b };
279 max(self.x, max(self.y, max(self.z, self.w)))
280 }
281
    /// Returns the index of the smallest component; on ties the lowest index wins.
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        let mut min = self.x;
        let mut index = 0;
        if self.y < min {
            min = self.y;
            index = 1;
        }
        if self.z < min {
            min = self.z;
            index = 2;
        }
        if self.w < min {
            // `min` is intentionally not updated here: it is never read again.
            index = 3;
        }
        index
    }

    /// Returns the index of the largest component; on ties the lowest index wins.
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        let mut max = self.x;
        let mut index = 0;
        if self.y > max {
            max = self.y;
            index = 1;
        }
        if self.z > max {
            max = self.z;
            index = 2;
        }
        if self.w > max {
            // `max` is intentionally not updated here: it is never read again.
            index = 3;
        }
        index
    }
323
    /// Returns the sum of all four components.
    ///
    /// Overflow follows standard Rust integer arithmetic.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> i64 {
        self.x + self.y + self.z + self.w
    }

    /// Returns the product of all four components.
    ///
    /// Overflow follows standard Rust integer arithmetic.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> i64 {
        self.x * self.y * self.z * self.w
    }
341
    /// Lane-wise `==` comparison, returned as a `BVec4` mask.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.eq(&rhs.x),
            self.y.eq(&rhs.y),
            self.z.eq(&rhs.z),
            self.w.eq(&rhs.w),
        )
    }

    /// Lane-wise `!=` comparison, returned as a `BVec4` mask.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.ne(&rhs.x),
            self.y.ne(&rhs.y),
            self.z.ne(&rhs.z),
            self.w.ne(&rhs.w),
        )
    }

    /// Lane-wise `>=` comparison, returned as a `BVec4` mask.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.ge(&rhs.x),
            self.y.ge(&rhs.y),
            self.z.ge(&rhs.z),
            self.w.ge(&rhs.w),
        )
    }

    /// Lane-wise `>` comparison, returned as a `BVec4` mask.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.gt(&rhs.x),
            self.y.gt(&rhs.y),
            self.z.gt(&rhs.z),
            self.w.gt(&rhs.w),
        )
    }

    /// Lane-wise `<=` comparison, returned as a `BVec4` mask.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.le(&rhs.x),
            self.y.le(&rhs.y),
            self.z.le(&rhs.z),
            self.w.le(&rhs.w),
        )
    }

    /// Lane-wise `<` comparison, returned as a `BVec4` mask.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.lt(&rhs.x),
            self.y.lt(&rhs.y),
            self.z.lt(&rhs.z),
            self.w.lt(&rhs.w),
        )
    }
437
    /// Returns the component-wise absolute value.
    ///
    /// Note: `i64::MIN.abs()` overflows (panics in debug builds).
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self {
            x: self.x.abs(),
            y: self.y.abs(),
            z: self.z.abs(),
            w: self.w.abs(),
        }
    }

    /// Returns a vector with lanes set to `-1`, `0` or `1` according to the
    /// sign of the corresponding lane of `self`.
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        Self {
            x: self.x.signum(),
            y: self.y.signum(),
            z: self.z.signum(),
            w: self.w.signum(),
        }
    }
465
    /// Returns a bitmask with the lowest 4 bits set from the sign bits of the
    /// lanes: bit 0 for `x`, bit 1 for `y`, bit 2 for `z`, bit 3 for `w`.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        (self.x.is_negative() as u32)
            | ((self.y.is_negative() as u32) << 1)
            | ((self.z.is_negative() as u32) << 2)
            | ((self.w.is_negative() as u32) << 3)
    }
481
    /// Computes the squared length of `self` (`self.dot(self)`).
    ///
    /// Overflow follows standard Rust integer arithmetic.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> i64 {
        self.dot(self)
    }

    /// Computes the squared Euclidean distance between `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> i64 {
        (self - rhs).length_squared()
    }
496
    /// Component-wise Euclidean division of `self` by `rhs`.
    ///
    /// # Panics
    ///
    /// Panics if any lane of `rhs` is zero.
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            self.x.div_euclid(rhs.x),
            self.y.div_euclid(rhs.y),
            self.z.div_euclid(rhs.z),
            self.w.div_euclid(rhs.w),
        )
    }

    /// Component-wise least nonnegative remainder of `self` modulo `rhs`.
    ///
    /// # Panics
    ///
    /// Panics if any lane of `rhs` is zero.
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            self.x.rem_euclid(rhs.x),
            self.y.rem_euclid(rhs.y),
            self.z.rem_euclid(rhs.z),
            self.w.rem_euclid(rhs.w),
        )
    }
528
    /// Computes the Manhattan (taxicab) distance between `self` and `rhs`:
    /// the sum of the per-lane absolute differences.
    ///
    /// The sum itself may overflow `u64`; use
    /// [`Self::checked_manhattan_distance`] to detect that.
    #[inline]
    #[must_use]
    pub fn manhattan_distance(self, rhs: Self) -> u64 {
        self.x.abs_diff(rhs.x)
            + self.y.abs_diff(rhs.y)
            + self.z.abs_diff(rhs.z)
            + self.w.abs_diff(rhs.w)
    }

    /// Like [`Self::manhattan_distance`], but returns `None` if the sum of the
    /// absolute differences overflows `u64`.
    #[inline]
    #[must_use]
    pub fn checked_manhattan_distance(self, rhs: Self) -> Option<u64> {
        let d = self.x.abs_diff(rhs.x);
        let d = d.checked_add(self.y.abs_diff(rhs.y))?;
        let d = d.checked_add(self.z.abs_diff(rhs.z))?;
        d.checked_add(self.w.abs_diff(rhs.w))
    }
559
560 #[inline]
564 #[must_use]
565 pub fn chebyshev_distance(self, rhs: Self) -> u64 {
566 [
568 self.x.abs_diff(rhs.x),
569 self.y.abs_diff(rhs.y),
570 self.z.abs_diff(rhs.z),
571 self.w.abs_diff(rhs.w),
572 ]
573 .into_iter()
574 .max()
575 .unwrap()
576 }
577
    /// Casts all elements to `f32`. Uses `as`, which rounds values that are
    /// not exactly representable.
    #[inline]
    #[must_use]
    pub fn as_vec4(&self) -> crate::Vec4 {
        crate::Vec4::new(self.x as f32, self.y as f32, self.z as f32, self.w as f32)
    }

    /// Casts all elements to `f64`. Uses `as`, which rounds values that are
    /// not exactly representable.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements to `i8`. Uses `as`, which truncates out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }

    /// Casts all elements to `u8`. Uses `as`, which truncates out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }

    /// Casts all elements to `i16`. Uses `as`, which truncates out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements to `u16`. Uses `as`, which truncates out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements to `i32`. Uses `as`, which truncates out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements to `u32`. Uses `as`, which truncates out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements to `u64`. Uses `as`; negative values reinterpret as
    /// their two's-complement bit pattern.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }

    /// Casts all elements to `usize`. Uses `as`, which truncates out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
        crate::USizeVec4::new(
            self.x as usize,
            self.y as usize,
            self.z as usize,
            self.w as usize,
        )
    }
652
    /// Component-wise `checked_add`; `None` if any lane overflows.
    // The explicit matches (rather than `?`) are required: `?` is not usable
    // in a `const fn`. Same pattern throughout the checked_* methods below.
    #[inline]
    #[must_use]
    pub const fn checked_add(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_add(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_add(rhs.y) {
            Some(v) => v,
            None => return None,
        };
        let z = match self.z.checked_add(rhs.z) {
            Some(v) => v,
            None => return None,
        };
        let w = match self.w.checked_add(rhs.w) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y, z, w })
    }

    /// Component-wise `checked_sub`; `None` if any lane overflows.
    #[inline]
    #[must_use]
    pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_sub(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_sub(rhs.y) {
            Some(v) => v,
            None => return None,
        };
        let z = match self.z.checked_sub(rhs.z) {
            Some(v) => v,
            None => return None,
        };
        let w = match self.w.checked_sub(rhs.w) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y, z, w })
    }

    /// Component-wise `checked_mul`; `None` if any lane overflows.
    #[inline]
    #[must_use]
    pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_mul(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_mul(rhs.y) {
            Some(v) => v,
            None => return None,
        };
        let z = match self.z.checked_mul(rhs.z) {
            Some(v) => v,
            None => return None,
        };
        let w = match self.w.checked_mul(rhs.w) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y, z, w })
    }

    /// Component-wise `checked_div`; `None` if any lane divides by zero or
    /// overflows (`i64::MIN / -1`).
    #[inline]
    #[must_use]
    pub const fn checked_div(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_div(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_div(rhs.y) {
            Some(v) => v,
            None => return None,
        };
        let z = match self.z.checked_div(rhs.z) {
            Some(v) => v,
            None => return None,
        };
        let w = match self.w.checked_div(rhs.w) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y, z, w })
    }
756
757 #[inline]
761 #[must_use]
762 pub const fn wrapping_add(self, rhs: Self) -> Self {
763 Self {
764 x: self.x.wrapping_add(rhs.x),
765 y: self.y.wrapping_add(rhs.y),
766 z: self.z.wrapping_add(rhs.z),
767 w: self.w.wrapping_add(rhs.w),
768 }
769 }
770
771 #[inline]
775 #[must_use]
776 pub const fn wrapping_sub(self, rhs: Self) -> Self {
777 Self {
778 x: self.x.wrapping_sub(rhs.x),
779 y: self.y.wrapping_sub(rhs.y),
780 z: self.z.wrapping_sub(rhs.z),
781 w: self.w.wrapping_sub(rhs.w),
782 }
783 }
784
785 #[inline]
789 #[must_use]
790 pub const fn wrapping_mul(self, rhs: Self) -> Self {
791 Self {
792 x: self.x.wrapping_mul(rhs.x),
793 y: self.y.wrapping_mul(rhs.y),
794 z: self.z.wrapping_mul(rhs.z),
795 w: self.w.wrapping_mul(rhs.w),
796 }
797 }
798
799 #[inline]
803 #[must_use]
804 pub const fn wrapping_div(self, rhs: Self) -> Self {
805 Self {
806 x: self.x.wrapping_div(rhs.x),
807 y: self.y.wrapping_div(rhs.y),
808 z: self.z.wrapping_div(rhs.z),
809 w: self.w.wrapping_div(rhs.w),
810 }
811 }
812
813 #[inline]
817 #[must_use]
818 pub const fn saturating_add(self, rhs: Self) -> Self {
819 Self {
820 x: self.x.saturating_add(rhs.x),
821 y: self.y.saturating_add(rhs.y),
822 z: self.z.saturating_add(rhs.z),
823 w: self.w.saturating_add(rhs.w),
824 }
825 }
826
827 #[inline]
831 #[must_use]
832 pub const fn saturating_sub(self, rhs: Self) -> Self {
833 Self {
834 x: self.x.saturating_sub(rhs.x),
835 y: self.y.saturating_sub(rhs.y),
836 z: self.z.saturating_sub(rhs.z),
837 w: self.w.saturating_sub(rhs.w),
838 }
839 }
840
841 #[inline]
845 #[must_use]
846 pub const fn saturating_mul(self, rhs: Self) -> Self {
847 Self {
848 x: self.x.saturating_mul(rhs.x),
849 y: self.y.saturating_mul(rhs.y),
850 z: self.z.saturating_mul(rhs.z),
851 w: self.w.saturating_mul(rhs.w),
852 }
853 }
854
855 #[inline]
859 #[must_use]
860 pub const fn saturating_div(self, rhs: Self) -> Self {
861 Self {
862 x: self.x.saturating_div(rhs.x),
863 y: self.y.saturating_div(rhs.y),
864 z: self.z.saturating_div(rhs.z),
865 w: self.w.saturating_div(rhs.w),
866 }
867 }
868
    /// Component-wise `checked_add_unsigned` with a `U64Vec4`; `None` on any
    /// lane overflow.
    // Explicit matches because `?` is not usable in a `const fn`.
    #[inline]
    #[must_use]
    pub const fn checked_add_unsigned(self, rhs: U64Vec4) -> Option<Self> {
        let x = match self.x.checked_add_unsigned(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_add_unsigned(rhs.y) {
            Some(v) => v,
            None => return None,
        };
        let z = match self.z.checked_add_unsigned(rhs.z) {
            Some(v) => v,
            None => return None,
        };
        let w = match self.w.checked_add_unsigned(rhs.w) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y, z, w })
    }

    /// Component-wise `checked_sub_unsigned` with a `U64Vec4`; `None` on any
    /// lane overflow.
    #[inline]
    #[must_use]
    pub const fn checked_sub_unsigned(self, rhs: U64Vec4) -> Option<Self> {
        let x = match self.x.checked_sub_unsigned(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_sub_unsigned(rhs.y) {
            Some(v) => v,
            None => return None,
        };
        let z = match self.z.checked_sub_unsigned(rhs.z) {
            Some(v) => v,
            None => return None,
        };
        let w = match self.w.checked_sub_unsigned(rhs.w) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y, z, w })
    }

    /// Component-wise wrapping addition of an unsigned `U64Vec4`.
    #[inline]
    #[must_use]
    pub const fn wrapping_add_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.wrapping_add_unsigned(rhs.x),
            y: self.y.wrapping_add_unsigned(rhs.y),
            z: self.z.wrapping_add_unsigned(rhs.z),
            w: self.w.wrapping_add_unsigned(rhs.w),
        }
    }

    /// Component-wise wrapping subtraction of an unsigned `U64Vec4`.
    #[inline]
    #[must_use]
    pub const fn wrapping_sub_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.wrapping_sub_unsigned(rhs.x),
            y: self.y.wrapping_sub_unsigned(rhs.y),
            z: self.z.wrapping_sub_unsigned(rhs.z),
            w: self.w.wrapping_sub_unsigned(rhs.w),
        }
    }

    /// Component-wise saturating addition of an unsigned `U64Vec4`.
    #[inline]
    #[must_use]
    pub const fn saturating_add_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.saturating_add_unsigned(rhs.x),
            y: self.y.saturating_add_unsigned(rhs.y),
            z: self.z.saturating_add_unsigned(rhs.z),
            w: self.w.saturating_add_unsigned(rhs.w),
        }
    }

    /// Component-wise saturating subtraction of an unsigned `U64Vec4`.
    #[inline]
    #[must_use]
    pub const fn saturating_sub_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.saturating_sub_unsigned(rhs.x),
            y: self.y.saturating_sub_unsigned(rhs.y),
            z: self.z.saturating_sub_unsigned(rhs.z),
            w: self.w.saturating_sub_unsigned(rhs.w),
        }
    }
976}
977
impl Default for I64Vec4 {
    /// Returns [`I64Vec4::ZERO`].
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
984
985impl Div for I64Vec4 {
986 type Output = Self;
987 #[inline]
988 fn div(self, rhs: Self) -> Self {
989 Self {
990 x: self.x.div(rhs.x),
991 y: self.y.div(rhs.y),
992 z: self.z.div(rhs.z),
993 w: self.w.div(rhs.w),
994 }
995 }
996}
997
998impl Div<&Self> for I64Vec4 {
999 type Output = Self;
1000 #[inline]
1001 fn div(self, rhs: &Self) -> Self {
1002 self.div(*rhs)
1003 }
1004}
1005
1006impl Div<&I64Vec4> for &I64Vec4 {
1007 type Output = I64Vec4;
1008 #[inline]
1009 fn div(self, rhs: &I64Vec4) -> I64Vec4 {
1010 (*self).div(*rhs)
1011 }
1012}
1013
1014impl Div<I64Vec4> for &I64Vec4 {
1015 type Output = I64Vec4;
1016 #[inline]
1017 fn div(self, rhs: I64Vec4) -> I64Vec4 {
1018 (*self).div(rhs)
1019 }
1020}
1021
1022impl DivAssign for I64Vec4 {
1023 #[inline]
1024 fn div_assign(&mut self, rhs: Self) {
1025 self.x.div_assign(rhs.x);
1026 self.y.div_assign(rhs.y);
1027 self.z.div_assign(rhs.z);
1028 self.w.div_assign(rhs.w);
1029 }
1030}
1031
1032impl DivAssign<&Self> for I64Vec4 {
1033 #[inline]
1034 fn div_assign(&mut self, rhs: &Self) {
1035 self.div_assign(*rhs);
1036 }
1037}
1038
1039impl Div<i64> for I64Vec4 {
1040 type Output = Self;
1041 #[inline]
1042 fn div(self, rhs: i64) -> Self {
1043 Self {
1044 x: self.x.div(rhs),
1045 y: self.y.div(rhs),
1046 z: self.z.div(rhs),
1047 w: self.w.div(rhs),
1048 }
1049 }
1050}
1051
1052impl Div<&i64> for I64Vec4 {
1053 type Output = Self;
1054 #[inline]
1055 fn div(self, rhs: &i64) -> Self {
1056 self.div(*rhs)
1057 }
1058}
1059
1060impl Div<&i64> for &I64Vec4 {
1061 type Output = I64Vec4;
1062 #[inline]
1063 fn div(self, rhs: &i64) -> I64Vec4 {
1064 (*self).div(*rhs)
1065 }
1066}
1067
1068impl Div<i64> for &I64Vec4 {
1069 type Output = I64Vec4;
1070 #[inline]
1071 fn div(self, rhs: i64) -> I64Vec4 {
1072 (*self).div(rhs)
1073 }
1074}
1075
1076impl DivAssign<i64> for I64Vec4 {
1077 #[inline]
1078 fn div_assign(&mut self, rhs: i64) {
1079 self.x.div_assign(rhs);
1080 self.y.div_assign(rhs);
1081 self.z.div_assign(rhs);
1082 self.w.div_assign(rhs);
1083 }
1084}
1085
1086impl DivAssign<&i64> for I64Vec4 {
1087 #[inline]
1088 fn div_assign(&mut self, rhs: &i64) {
1089 self.div_assign(*rhs);
1090 }
1091}
1092
1093impl Div<I64Vec4> for i64 {
1094 type Output = I64Vec4;
1095 #[inline]
1096 fn div(self, rhs: I64Vec4) -> I64Vec4 {
1097 I64Vec4 {
1098 x: self.div(rhs.x),
1099 y: self.div(rhs.y),
1100 z: self.div(rhs.z),
1101 w: self.div(rhs.w),
1102 }
1103 }
1104}
1105
1106impl Div<&I64Vec4> for i64 {
1107 type Output = I64Vec4;
1108 #[inline]
1109 fn div(self, rhs: &I64Vec4) -> I64Vec4 {
1110 self.div(*rhs)
1111 }
1112}
1113
1114impl Div<&I64Vec4> for &i64 {
1115 type Output = I64Vec4;
1116 #[inline]
1117 fn div(self, rhs: &I64Vec4) -> I64Vec4 {
1118 (*self).div(*rhs)
1119 }
1120}
1121
1122impl Div<I64Vec4> for &i64 {
1123 type Output = I64Vec4;
1124 #[inline]
1125 fn div(self, rhs: I64Vec4) -> I64Vec4 {
1126 (*self).div(rhs)
1127 }
1128}
1129
1130impl Mul for I64Vec4 {
1131 type Output = Self;
1132 #[inline]
1133 fn mul(self, rhs: Self) -> Self {
1134 Self {
1135 x: self.x.mul(rhs.x),
1136 y: self.y.mul(rhs.y),
1137 z: self.z.mul(rhs.z),
1138 w: self.w.mul(rhs.w),
1139 }
1140 }
1141}
1142
1143impl Mul<&Self> for I64Vec4 {
1144 type Output = Self;
1145 #[inline]
1146 fn mul(self, rhs: &Self) -> Self {
1147 self.mul(*rhs)
1148 }
1149}
1150
1151impl Mul<&I64Vec4> for &I64Vec4 {
1152 type Output = I64Vec4;
1153 #[inline]
1154 fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
1155 (*self).mul(*rhs)
1156 }
1157}
1158
1159impl Mul<I64Vec4> for &I64Vec4 {
1160 type Output = I64Vec4;
1161 #[inline]
1162 fn mul(self, rhs: I64Vec4) -> I64Vec4 {
1163 (*self).mul(rhs)
1164 }
1165}
1166
1167impl MulAssign for I64Vec4 {
1168 #[inline]
1169 fn mul_assign(&mut self, rhs: Self) {
1170 self.x.mul_assign(rhs.x);
1171 self.y.mul_assign(rhs.y);
1172 self.z.mul_assign(rhs.z);
1173 self.w.mul_assign(rhs.w);
1174 }
1175}
1176
1177impl MulAssign<&Self> for I64Vec4 {
1178 #[inline]
1179 fn mul_assign(&mut self, rhs: &Self) {
1180 self.mul_assign(*rhs);
1181 }
1182}
1183
1184impl Mul<i64> for I64Vec4 {
1185 type Output = Self;
1186 #[inline]
1187 fn mul(self, rhs: i64) -> Self {
1188 Self {
1189 x: self.x.mul(rhs),
1190 y: self.y.mul(rhs),
1191 z: self.z.mul(rhs),
1192 w: self.w.mul(rhs),
1193 }
1194 }
1195}
1196
1197impl Mul<&i64> for I64Vec4 {
1198 type Output = Self;
1199 #[inline]
1200 fn mul(self, rhs: &i64) -> Self {
1201 self.mul(*rhs)
1202 }
1203}
1204
1205impl Mul<&i64> for &I64Vec4 {
1206 type Output = I64Vec4;
1207 #[inline]
1208 fn mul(self, rhs: &i64) -> I64Vec4 {
1209 (*self).mul(*rhs)
1210 }
1211}
1212
1213impl Mul<i64> for &I64Vec4 {
1214 type Output = I64Vec4;
1215 #[inline]
1216 fn mul(self, rhs: i64) -> I64Vec4 {
1217 (*self).mul(rhs)
1218 }
1219}
1220
1221impl MulAssign<i64> for I64Vec4 {
1222 #[inline]
1223 fn mul_assign(&mut self, rhs: i64) {
1224 self.x.mul_assign(rhs);
1225 self.y.mul_assign(rhs);
1226 self.z.mul_assign(rhs);
1227 self.w.mul_assign(rhs);
1228 }
1229}
1230
1231impl MulAssign<&i64> for I64Vec4 {
1232 #[inline]
1233 fn mul_assign(&mut self, rhs: &i64) {
1234 self.mul_assign(*rhs);
1235 }
1236}
1237
1238impl Mul<I64Vec4> for i64 {
1239 type Output = I64Vec4;
1240 #[inline]
1241 fn mul(self, rhs: I64Vec4) -> I64Vec4 {
1242 I64Vec4 {
1243 x: self.mul(rhs.x),
1244 y: self.mul(rhs.y),
1245 z: self.mul(rhs.z),
1246 w: self.mul(rhs.w),
1247 }
1248 }
1249}
1250
1251impl Mul<&I64Vec4> for i64 {
1252 type Output = I64Vec4;
1253 #[inline]
1254 fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
1255 self.mul(*rhs)
1256 }
1257}
1258
1259impl Mul<&I64Vec4> for &i64 {
1260 type Output = I64Vec4;
1261 #[inline]
1262 fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
1263 (*self).mul(*rhs)
1264 }
1265}
1266
1267impl Mul<I64Vec4> for &i64 {
1268 type Output = I64Vec4;
1269 #[inline]
1270 fn mul(self, rhs: I64Vec4) -> I64Vec4 {
1271 (*self).mul(rhs)
1272 }
1273}
1274
1275impl Add for I64Vec4 {
1276 type Output = Self;
1277 #[inline]
1278 fn add(self, rhs: Self) -> Self {
1279 Self {
1280 x: self.x.add(rhs.x),
1281 y: self.y.add(rhs.y),
1282 z: self.z.add(rhs.z),
1283 w: self.w.add(rhs.w),
1284 }
1285 }
1286}
1287
1288impl Add<&Self> for I64Vec4 {
1289 type Output = Self;
1290 #[inline]
1291 fn add(self, rhs: &Self) -> Self {
1292 self.add(*rhs)
1293 }
1294}
1295
1296impl Add<&I64Vec4> for &I64Vec4 {
1297 type Output = I64Vec4;
1298 #[inline]
1299 fn add(self, rhs: &I64Vec4) -> I64Vec4 {
1300 (*self).add(*rhs)
1301 }
1302}
1303
1304impl Add<I64Vec4> for &I64Vec4 {
1305 type Output = I64Vec4;
1306 #[inline]
1307 fn add(self, rhs: I64Vec4) -> I64Vec4 {
1308 (*self).add(rhs)
1309 }
1310}
1311
1312impl AddAssign for I64Vec4 {
1313 #[inline]
1314 fn add_assign(&mut self, rhs: Self) {
1315 self.x.add_assign(rhs.x);
1316 self.y.add_assign(rhs.y);
1317 self.z.add_assign(rhs.z);
1318 self.w.add_assign(rhs.w);
1319 }
1320}
1321
1322impl AddAssign<&Self> for I64Vec4 {
1323 #[inline]
1324 fn add_assign(&mut self, rhs: &Self) {
1325 self.add_assign(*rhs);
1326 }
1327}
1328
1329impl Add<i64> for I64Vec4 {
1330 type Output = Self;
1331 #[inline]
1332 fn add(self, rhs: i64) -> Self {
1333 Self {
1334 x: self.x.add(rhs),
1335 y: self.y.add(rhs),
1336 z: self.z.add(rhs),
1337 w: self.w.add(rhs),
1338 }
1339 }
1340}
1341
1342impl Add<&i64> for I64Vec4 {
1343 type Output = Self;
1344 #[inline]
1345 fn add(self, rhs: &i64) -> Self {
1346 self.add(*rhs)
1347 }
1348}
1349
1350impl Add<&i64> for &I64Vec4 {
1351 type Output = I64Vec4;
1352 #[inline]
1353 fn add(self, rhs: &i64) -> I64Vec4 {
1354 (*self).add(*rhs)
1355 }
1356}
1357
1358impl Add<i64> for &I64Vec4 {
1359 type Output = I64Vec4;
1360 #[inline]
1361 fn add(self, rhs: i64) -> I64Vec4 {
1362 (*self).add(rhs)
1363 }
1364}
1365
1366impl AddAssign<i64> for I64Vec4 {
1367 #[inline]
1368 fn add_assign(&mut self, rhs: i64) {
1369 self.x.add_assign(rhs);
1370 self.y.add_assign(rhs);
1371 self.z.add_assign(rhs);
1372 self.w.add_assign(rhs);
1373 }
1374}
1375
1376impl AddAssign<&i64> for I64Vec4 {
1377 #[inline]
1378 fn add_assign(&mut self, rhs: &i64) {
1379 self.add_assign(*rhs);
1380 }
1381}
1382
1383impl Add<I64Vec4> for i64 {
1384 type Output = I64Vec4;
1385 #[inline]
1386 fn add(self, rhs: I64Vec4) -> I64Vec4 {
1387 I64Vec4 {
1388 x: self.add(rhs.x),
1389 y: self.add(rhs.y),
1390 z: self.add(rhs.z),
1391 w: self.add(rhs.w),
1392 }
1393 }
1394}
1395
1396impl Add<&I64Vec4> for i64 {
1397 type Output = I64Vec4;
1398 #[inline]
1399 fn add(self, rhs: &I64Vec4) -> I64Vec4 {
1400 self.add(*rhs)
1401 }
1402}
1403
1404impl Add<&I64Vec4> for &i64 {
1405 type Output = I64Vec4;
1406 #[inline]
1407 fn add(self, rhs: &I64Vec4) -> I64Vec4 {
1408 (*self).add(*rhs)
1409 }
1410}
1411
1412impl Add<I64Vec4> for &i64 {
1413 type Output = I64Vec4;
1414 #[inline]
1415 fn add(self, rhs: I64Vec4) -> I64Vec4 {
1416 (*self).add(rhs)
1417 }
1418}
1419
1420impl Sub for I64Vec4 {
1421 type Output = Self;
1422 #[inline]
1423 fn sub(self, rhs: Self) -> Self {
1424 Self {
1425 x: self.x.sub(rhs.x),
1426 y: self.y.sub(rhs.y),
1427 z: self.z.sub(rhs.z),
1428 w: self.w.sub(rhs.w),
1429 }
1430 }
1431}
1432
1433impl Sub<&Self> for I64Vec4 {
1434 type Output = Self;
1435 #[inline]
1436 fn sub(self, rhs: &Self) -> Self {
1437 self.sub(*rhs)
1438 }
1439}
1440
1441impl Sub<&I64Vec4> for &I64Vec4 {
1442 type Output = I64Vec4;
1443 #[inline]
1444 fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
1445 (*self).sub(*rhs)
1446 }
1447}
1448
1449impl Sub<I64Vec4> for &I64Vec4 {
1450 type Output = I64Vec4;
1451 #[inline]
1452 fn sub(self, rhs: I64Vec4) -> I64Vec4 {
1453 (*self).sub(rhs)
1454 }
1455}
1456
1457impl SubAssign for I64Vec4 {
1458 #[inline]
1459 fn sub_assign(&mut self, rhs: Self) {
1460 self.x.sub_assign(rhs.x);
1461 self.y.sub_assign(rhs.y);
1462 self.z.sub_assign(rhs.z);
1463 self.w.sub_assign(rhs.w);
1464 }
1465}
1466
1467impl SubAssign<&Self> for I64Vec4 {
1468 #[inline]
1469 fn sub_assign(&mut self, rhs: &Self) {
1470 self.sub_assign(*rhs);
1471 }
1472}
1473
1474impl Sub<i64> for I64Vec4 {
1475 type Output = Self;
1476 #[inline]
1477 fn sub(self, rhs: i64) -> Self {
1478 Self {
1479 x: self.x.sub(rhs),
1480 y: self.y.sub(rhs),
1481 z: self.z.sub(rhs),
1482 w: self.w.sub(rhs),
1483 }
1484 }
1485}
1486
1487impl Sub<&i64> for I64Vec4 {
1488 type Output = Self;
1489 #[inline]
1490 fn sub(self, rhs: &i64) -> Self {
1491 self.sub(*rhs)
1492 }
1493}
1494
1495impl Sub<&i64> for &I64Vec4 {
1496 type Output = I64Vec4;
1497 #[inline]
1498 fn sub(self, rhs: &i64) -> I64Vec4 {
1499 (*self).sub(*rhs)
1500 }
1501}
1502
1503impl Sub<i64> for &I64Vec4 {
1504 type Output = I64Vec4;
1505 #[inline]
1506 fn sub(self, rhs: i64) -> I64Vec4 {
1507 (*self).sub(rhs)
1508 }
1509}
1510
1511impl SubAssign<i64> for I64Vec4 {
1512 #[inline]
1513 fn sub_assign(&mut self, rhs: i64) {
1514 self.x.sub_assign(rhs);
1515 self.y.sub_assign(rhs);
1516 self.z.sub_assign(rhs);
1517 self.w.sub_assign(rhs);
1518 }
1519}
1520
1521impl SubAssign<&i64> for I64Vec4 {
1522 #[inline]
1523 fn sub_assign(&mut self, rhs: &i64) {
1524 self.sub_assign(*rhs);
1525 }
1526}
1527
1528impl Sub<I64Vec4> for i64 {
1529 type Output = I64Vec4;
1530 #[inline]
1531 fn sub(self, rhs: I64Vec4) -> I64Vec4 {
1532 I64Vec4 {
1533 x: self.sub(rhs.x),
1534 y: self.sub(rhs.y),
1535 z: self.sub(rhs.z),
1536 w: self.sub(rhs.w),
1537 }
1538 }
1539}
1540
1541impl Sub<&I64Vec4> for i64 {
1542 type Output = I64Vec4;
1543 #[inline]
1544 fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
1545 self.sub(*rhs)
1546 }
1547}
1548
1549impl Sub<&I64Vec4> for &i64 {
1550 type Output = I64Vec4;
1551 #[inline]
1552 fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
1553 (*self).sub(*rhs)
1554 }
1555}
1556
1557impl Sub<I64Vec4> for &i64 {
1558 type Output = I64Vec4;
1559 #[inline]
1560 fn sub(self, rhs: I64Vec4) -> I64Vec4 {
1561 (*self).sub(rhs)
1562 }
1563}
1564
1565impl Rem for I64Vec4 {
1566 type Output = Self;
1567 #[inline]
1568 fn rem(self, rhs: Self) -> Self {
1569 Self {
1570 x: self.x.rem(rhs.x),
1571 y: self.y.rem(rhs.y),
1572 z: self.z.rem(rhs.z),
1573 w: self.w.rem(rhs.w),
1574 }
1575 }
1576}
1577
1578impl Rem<&Self> for I64Vec4 {
1579 type Output = Self;
1580 #[inline]
1581 fn rem(self, rhs: &Self) -> Self {
1582 self.rem(*rhs)
1583 }
1584}
1585
1586impl Rem<&I64Vec4> for &I64Vec4 {
1587 type Output = I64Vec4;
1588 #[inline]
1589 fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
1590 (*self).rem(*rhs)
1591 }
1592}
1593
1594impl Rem<I64Vec4> for &I64Vec4 {
1595 type Output = I64Vec4;
1596 #[inline]
1597 fn rem(self, rhs: I64Vec4) -> I64Vec4 {
1598 (*self).rem(rhs)
1599 }
1600}
1601
1602impl RemAssign for I64Vec4 {
1603 #[inline]
1604 fn rem_assign(&mut self, rhs: Self) {
1605 self.x.rem_assign(rhs.x);
1606 self.y.rem_assign(rhs.y);
1607 self.z.rem_assign(rhs.z);
1608 self.w.rem_assign(rhs.w);
1609 }
1610}
1611
1612impl RemAssign<&Self> for I64Vec4 {
1613 #[inline]
1614 fn rem_assign(&mut self, rhs: &Self) {
1615 self.rem_assign(*rhs);
1616 }
1617}
1618
1619impl Rem<i64> for I64Vec4 {
1620 type Output = Self;
1621 #[inline]
1622 fn rem(self, rhs: i64) -> Self {
1623 Self {
1624 x: self.x.rem(rhs),
1625 y: self.y.rem(rhs),
1626 z: self.z.rem(rhs),
1627 w: self.w.rem(rhs),
1628 }
1629 }
1630}
1631
1632impl Rem<&i64> for I64Vec4 {
1633 type Output = Self;
1634 #[inline]
1635 fn rem(self, rhs: &i64) -> Self {
1636 self.rem(*rhs)
1637 }
1638}
1639
1640impl Rem<&i64> for &I64Vec4 {
1641 type Output = I64Vec4;
1642 #[inline]
1643 fn rem(self, rhs: &i64) -> I64Vec4 {
1644 (*self).rem(*rhs)
1645 }
1646}
1647
1648impl Rem<i64> for &I64Vec4 {
1649 type Output = I64Vec4;
1650 #[inline]
1651 fn rem(self, rhs: i64) -> I64Vec4 {
1652 (*self).rem(rhs)
1653 }
1654}
1655
1656impl RemAssign<i64> for I64Vec4 {
1657 #[inline]
1658 fn rem_assign(&mut self, rhs: i64) {
1659 self.x.rem_assign(rhs);
1660 self.y.rem_assign(rhs);
1661 self.z.rem_assign(rhs);
1662 self.w.rem_assign(rhs);
1663 }
1664}
1665
1666impl RemAssign<&i64> for I64Vec4 {
1667 #[inline]
1668 fn rem_assign(&mut self, rhs: &i64) {
1669 self.rem_assign(*rhs);
1670 }
1671}
1672
1673impl Rem<I64Vec4> for i64 {
1674 type Output = I64Vec4;
1675 #[inline]
1676 fn rem(self, rhs: I64Vec4) -> I64Vec4 {
1677 I64Vec4 {
1678 x: self.rem(rhs.x),
1679 y: self.rem(rhs.y),
1680 z: self.rem(rhs.z),
1681 w: self.rem(rhs.w),
1682 }
1683 }
1684}
1685
1686impl Rem<&I64Vec4> for i64 {
1687 type Output = I64Vec4;
1688 #[inline]
1689 fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
1690 self.rem(*rhs)
1691 }
1692}
1693
1694impl Rem<&I64Vec4> for &i64 {
1695 type Output = I64Vec4;
1696 #[inline]
1697 fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
1698 (*self).rem(*rhs)
1699 }
1700}
1701
1702impl Rem<I64Vec4> for &i64 {
1703 type Output = I64Vec4;
1704 #[inline]
1705 fn rem(self, rhs: I64Vec4) -> I64Vec4 {
1706 (*self).rem(rhs)
1707 }
1708}
1709
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[i64; 4]> for I64Vec4 {
    #[inline]
    fn as_ref(&self) -> &[i64; 4] {
        // SAFETY: when not targeting SPIR-V, `I64Vec4` is `#[repr(C)]` with four
        // consecutive `i64` fields and no padding, so it is layout-compatible
        // with `[i64; 4]` and the pointer cast is sound.
        unsafe { &*(self as *const Self as *const [i64; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[i64; 4]> for I64Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [i64; 4] {
        // SAFETY: same `repr(C)` layout argument as `as_ref` above; the exclusive
        // borrow of `self` guarantees the returned mutable reference is unique.
        unsafe { &mut *(self as *mut Self as *mut [i64; 4]) }
    }
}
1725
1726impl Sum for I64Vec4 {
1727 #[inline]
1728 fn sum<I>(iter: I) -> Self
1729 where
1730 I: Iterator<Item = Self>,
1731 {
1732 iter.fold(Self::ZERO, Self::add)
1733 }
1734}
1735
1736impl<'a> Sum<&'a Self> for I64Vec4 {
1737 #[inline]
1738 fn sum<I>(iter: I) -> Self
1739 where
1740 I: Iterator<Item = &'a Self>,
1741 {
1742 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1743 }
1744}
1745
1746impl Product for I64Vec4 {
1747 #[inline]
1748 fn product<I>(iter: I) -> Self
1749 where
1750 I: Iterator<Item = Self>,
1751 {
1752 iter.fold(Self::ONE, Self::mul)
1753 }
1754}
1755
1756impl<'a> Product<&'a Self> for I64Vec4 {
1757 #[inline]
1758 fn product<I>(iter: I) -> Self
1759 where
1760 I: Iterator<Item = &'a Self>,
1761 {
1762 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1763 }
1764}
1765
1766impl Neg for I64Vec4 {
1767 type Output = Self;
1768 #[inline]
1769 fn neg(self) -> Self {
1770 Self {
1771 x: self.x.neg(),
1772 y: self.y.neg(),
1773 z: self.z.neg(),
1774 w: self.w.neg(),
1775 }
1776 }
1777}
1778
1779impl Neg for &I64Vec4 {
1780 type Output = I64Vec4;
1781 #[inline]
1782 fn neg(self) -> I64Vec4 {
1783 (*self).neg()
1784 }
1785}
1786
1787impl Not for I64Vec4 {
1788 type Output = Self;
1789 #[inline]
1790 fn not(self) -> Self {
1791 Self {
1792 x: self.x.not(),
1793 y: self.y.not(),
1794 z: self.z.not(),
1795 w: self.w.not(),
1796 }
1797 }
1798}
1799
1800impl Not for &I64Vec4 {
1801 type Output = I64Vec4;
1802 #[inline]
1803 fn not(self) -> I64Vec4 {
1804 (*self).not()
1805 }
1806}
1807
1808impl BitAnd for I64Vec4 {
1809 type Output = Self;
1810 #[inline]
1811 fn bitand(self, rhs: Self) -> Self::Output {
1812 Self {
1813 x: self.x.bitand(rhs.x),
1814 y: self.y.bitand(rhs.y),
1815 z: self.z.bitand(rhs.z),
1816 w: self.w.bitand(rhs.w),
1817 }
1818 }
1819}
1820
1821impl BitAnd<&Self> for I64Vec4 {
1822 type Output = Self;
1823 #[inline]
1824 fn bitand(self, rhs: &Self) -> Self {
1825 self.bitand(*rhs)
1826 }
1827}
1828
1829impl BitAnd<&I64Vec4> for &I64Vec4 {
1830 type Output = I64Vec4;
1831 #[inline]
1832 fn bitand(self, rhs: &I64Vec4) -> I64Vec4 {
1833 (*self).bitand(*rhs)
1834 }
1835}
1836
1837impl BitAnd<I64Vec4> for &I64Vec4 {
1838 type Output = I64Vec4;
1839 #[inline]
1840 fn bitand(self, rhs: I64Vec4) -> I64Vec4 {
1841 (*self).bitand(rhs)
1842 }
1843}
1844
1845impl BitAndAssign for I64Vec4 {
1846 #[inline]
1847 fn bitand_assign(&mut self, rhs: Self) {
1848 *self = self.bitand(rhs);
1849 }
1850}
1851
1852impl BitAndAssign<&Self> for I64Vec4 {
1853 #[inline]
1854 fn bitand_assign(&mut self, rhs: &Self) {
1855 self.bitand_assign(*rhs);
1856 }
1857}
1858
1859impl BitOr for I64Vec4 {
1860 type Output = Self;
1861 #[inline]
1862 fn bitor(self, rhs: Self) -> Self::Output {
1863 Self {
1864 x: self.x.bitor(rhs.x),
1865 y: self.y.bitor(rhs.y),
1866 z: self.z.bitor(rhs.z),
1867 w: self.w.bitor(rhs.w),
1868 }
1869 }
1870}
1871
1872impl BitOr<&Self> for I64Vec4 {
1873 type Output = Self;
1874 #[inline]
1875 fn bitor(self, rhs: &Self) -> Self {
1876 self.bitor(*rhs)
1877 }
1878}
1879
1880impl BitOr<&I64Vec4> for &I64Vec4 {
1881 type Output = I64Vec4;
1882 #[inline]
1883 fn bitor(self, rhs: &I64Vec4) -> I64Vec4 {
1884 (*self).bitor(*rhs)
1885 }
1886}
1887
1888impl BitOr<I64Vec4> for &I64Vec4 {
1889 type Output = I64Vec4;
1890 #[inline]
1891 fn bitor(self, rhs: I64Vec4) -> I64Vec4 {
1892 (*self).bitor(rhs)
1893 }
1894}
1895
1896impl BitOrAssign for I64Vec4 {
1897 #[inline]
1898 fn bitor_assign(&mut self, rhs: Self) {
1899 *self = self.bitor(rhs);
1900 }
1901}
1902
1903impl BitOrAssign<&Self> for I64Vec4 {
1904 #[inline]
1905 fn bitor_assign(&mut self, rhs: &Self) {
1906 self.bitor_assign(*rhs);
1907 }
1908}
1909
1910impl BitXor for I64Vec4 {
1911 type Output = Self;
1912 #[inline]
1913 fn bitxor(self, rhs: Self) -> Self::Output {
1914 Self {
1915 x: self.x.bitxor(rhs.x),
1916 y: self.y.bitxor(rhs.y),
1917 z: self.z.bitxor(rhs.z),
1918 w: self.w.bitxor(rhs.w),
1919 }
1920 }
1921}
1922
1923impl BitXor<&Self> for I64Vec4 {
1924 type Output = Self;
1925 #[inline]
1926 fn bitxor(self, rhs: &Self) -> Self {
1927 self.bitxor(*rhs)
1928 }
1929}
1930
1931impl BitXor<&I64Vec4> for &I64Vec4 {
1932 type Output = I64Vec4;
1933 #[inline]
1934 fn bitxor(self, rhs: &I64Vec4) -> I64Vec4 {
1935 (*self).bitxor(*rhs)
1936 }
1937}
1938
1939impl BitXor<I64Vec4> for &I64Vec4 {
1940 type Output = I64Vec4;
1941 #[inline]
1942 fn bitxor(self, rhs: I64Vec4) -> I64Vec4 {
1943 (*self).bitxor(rhs)
1944 }
1945}
1946
1947impl BitXorAssign for I64Vec4 {
1948 #[inline]
1949 fn bitxor_assign(&mut self, rhs: Self) {
1950 *self = self.bitxor(rhs);
1951 }
1952}
1953
1954impl BitXorAssign<&Self> for I64Vec4 {
1955 #[inline]
1956 fn bitxor_assign(&mut self, rhs: &Self) {
1957 self.bitxor_assign(*rhs);
1958 }
1959}
1960
1961impl BitAnd<i64> for I64Vec4 {
1962 type Output = Self;
1963 #[inline]
1964 fn bitand(self, rhs: i64) -> Self::Output {
1965 Self {
1966 x: self.x.bitand(rhs),
1967 y: self.y.bitand(rhs),
1968 z: self.z.bitand(rhs),
1969 w: self.w.bitand(rhs),
1970 }
1971 }
1972}
1973
1974impl BitAnd<&i64> for I64Vec4 {
1975 type Output = Self;
1976 #[inline]
1977 fn bitand(self, rhs: &i64) -> Self {
1978 self.bitand(*rhs)
1979 }
1980}
1981
1982impl BitAnd<&i64> for &I64Vec4 {
1983 type Output = I64Vec4;
1984 #[inline]
1985 fn bitand(self, rhs: &i64) -> I64Vec4 {
1986 (*self).bitand(*rhs)
1987 }
1988}
1989
1990impl BitAnd<i64> for &I64Vec4 {
1991 type Output = I64Vec4;
1992 #[inline]
1993 fn bitand(self, rhs: i64) -> I64Vec4 {
1994 (*self).bitand(rhs)
1995 }
1996}
1997
1998impl BitAndAssign<i64> for I64Vec4 {
1999 #[inline]
2000 fn bitand_assign(&mut self, rhs: i64) {
2001 *self = self.bitand(rhs);
2002 }
2003}
2004
2005impl BitAndAssign<&i64> for I64Vec4 {
2006 #[inline]
2007 fn bitand_assign(&mut self, rhs: &i64) {
2008 self.bitand_assign(*rhs);
2009 }
2010}
2011
2012impl BitOr<i64> for I64Vec4 {
2013 type Output = Self;
2014 #[inline]
2015 fn bitor(self, rhs: i64) -> Self::Output {
2016 Self {
2017 x: self.x.bitor(rhs),
2018 y: self.y.bitor(rhs),
2019 z: self.z.bitor(rhs),
2020 w: self.w.bitor(rhs),
2021 }
2022 }
2023}
2024
2025impl BitOr<&i64> for I64Vec4 {
2026 type Output = Self;
2027 #[inline]
2028 fn bitor(self, rhs: &i64) -> Self {
2029 self.bitor(*rhs)
2030 }
2031}
2032
2033impl BitOr<&i64> for &I64Vec4 {
2034 type Output = I64Vec4;
2035 #[inline]
2036 fn bitor(self, rhs: &i64) -> I64Vec4 {
2037 (*self).bitor(*rhs)
2038 }
2039}
2040
2041impl BitOr<i64> for &I64Vec4 {
2042 type Output = I64Vec4;
2043 #[inline]
2044 fn bitor(self, rhs: i64) -> I64Vec4 {
2045 (*self).bitor(rhs)
2046 }
2047}
2048
2049impl BitOrAssign<i64> for I64Vec4 {
2050 #[inline]
2051 fn bitor_assign(&mut self, rhs: i64) {
2052 *self = self.bitor(rhs);
2053 }
2054}
2055
2056impl BitOrAssign<&i64> for I64Vec4 {
2057 #[inline]
2058 fn bitor_assign(&mut self, rhs: &i64) {
2059 self.bitor_assign(*rhs);
2060 }
2061}
2062
2063impl BitXor<i64> for I64Vec4 {
2064 type Output = Self;
2065 #[inline]
2066 fn bitxor(self, rhs: i64) -> Self::Output {
2067 Self {
2068 x: self.x.bitxor(rhs),
2069 y: self.y.bitxor(rhs),
2070 z: self.z.bitxor(rhs),
2071 w: self.w.bitxor(rhs),
2072 }
2073 }
2074}
2075
2076impl BitXor<&i64> for I64Vec4 {
2077 type Output = Self;
2078 #[inline]
2079 fn bitxor(self, rhs: &i64) -> Self {
2080 self.bitxor(*rhs)
2081 }
2082}
2083
2084impl BitXor<&i64> for &I64Vec4 {
2085 type Output = I64Vec4;
2086 #[inline]
2087 fn bitxor(self, rhs: &i64) -> I64Vec4 {
2088 (*self).bitxor(*rhs)
2089 }
2090}
2091
2092impl BitXor<i64> for &I64Vec4 {
2093 type Output = I64Vec4;
2094 #[inline]
2095 fn bitxor(self, rhs: i64) -> I64Vec4 {
2096 (*self).bitxor(rhs)
2097 }
2098}
2099
2100impl BitXorAssign<i64> for I64Vec4 {
2101 #[inline]
2102 fn bitxor_assign(&mut self, rhs: i64) {
2103 *self = self.bitxor(rhs);
2104 }
2105}
2106
2107impl BitXorAssign<&i64> for I64Vec4 {
2108 #[inline]
2109 fn bitxor_assign(&mut self, rhs: &i64) {
2110 self.bitxor_assign(*rhs);
2111 }
2112}
2113
2114impl Shl<i8> for I64Vec4 {
2115 type Output = Self;
2116 #[inline]
2117 fn shl(self, rhs: i8) -> Self::Output {
2118 Self {
2119 x: self.x.shl(rhs),
2120 y: self.y.shl(rhs),
2121 z: self.z.shl(rhs),
2122 w: self.w.shl(rhs),
2123 }
2124 }
2125}
2126
2127impl Shl<&i8> for I64Vec4 {
2128 type Output = Self;
2129 #[inline]
2130 fn shl(self, rhs: &i8) -> Self {
2131 self.shl(*rhs)
2132 }
2133}
2134
2135impl Shl<&i8> for &I64Vec4 {
2136 type Output = I64Vec4;
2137 #[inline]
2138 fn shl(self, rhs: &i8) -> I64Vec4 {
2139 (*self).shl(*rhs)
2140 }
2141}
2142
2143impl Shl<i8> for &I64Vec4 {
2144 type Output = I64Vec4;
2145 #[inline]
2146 fn shl(self, rhs: i8) -> I64Vec4 {
2147 (*self).shl(rhs)
2148 }
2149}
2150
2151impl ShlAssign<i8> for I64Vec4 {
2152 #[inline]
2153 fn shl_assign(&mut self, rhs: i8) {
2154 *self = self.shl(rhs);
2155 }
2156}
2157
2158impl ShlAssign<&i8> for I64Vec4 {
2159 #[inline]
2160 fn shl_assign(&mut self, rhs: &i8) {
2161 self.shl_assign(*rhs);
2162 }
2163}
2164
2165impl Shr<i8> for I64Vec4 {
2166 type Output = Self;
2167 #[inline]
2168 fn shr(self, rhs: i8) -> Self::Output {
2169 Self {
2170 x: self.x.shr(rhs),
2171 y: self.y.shr(rhs),
2172 z: self.z.shr(rhs),
2173 w: self.w.shr(rhs),
2174 }
2175 }
2176}
2177
2178impl Shr<&i8> for I64Vec4 {
2179 type Output = Self;
2180 #[inline]
2181 fn shr(self, rhs: &i8) -> Self {
2182 self.shr(*rhs)
2183 }
2184}
2185
2186impl Shr<&i8> for &I64Vec4 {
2187 type Output = I64Vec4;
2188 #[inline]
2189 fn shr(self, rhs: &i8) -> I64Vec4 {
2190 (*self).shr(*rhs)
2191 }
2192}
2193
2194impl Shr<i8> for &I64Vec4 {
2195 type Output = I64Vec4;
2196 #[inline]
2197 fn shr(self, rhs: i8) -> I64Vec4 {
2198 (*self).shr(rhs)
2199 }
2200}
2201
2202impl ShrAssign<i8> for I64Vec4 {
2203 #[inline]
2204 fn shr_assign(&mut self, rhs: i8) {
2205 *self = self.shr(rhs);
2206 }
2207}
2208
2209impl ShrAssign<&i8> for I64Vec4 {
2210 #[inline]
2211 fn shr_assign(&mut self, rhs: &i8) {
2212 self.shr_assign(*rhs);
2213 }
2214}
2215
2216impl Shl<i16> for I64Vec4 {
2217 type Output = Self;
2218 #[inline]
2219 fn shl(self, rhs: i16) -> Self::Output {
2220 Self {
2221 x: self.x.shl(rhs),
2222 y: self.y.shl(rhs),
2223 z: self.z.shl(rhs),
2224 w: self.w.shl(rhs),
2225 }
2226 }
2227}
2228
2229impl Shl<&i16> for I64Vec4 {
2230 type Output = Self;
2231 #[inline]
2232 fn shl(self, rhs: &i16) -> Self {
2233 self.shl(*rhs)
2234 }
2235}
2236
2237impl Shl<&i16> for &I64Vec4 {
2238 type Output = I64Vec4;
2239 #[inline]
2240 fn shl(self, rhs: &i16) -> I64Vec4 {
2241 (*self).shl(*rhs)
2242 }
2243}
2244
2245impl Shl<i16> for &I64Vec4 {
2246 type Output = I64Vec4;
2247 #[inline]
2248 fn shl(self, rhs: i16) -> I64Vec4 {
2249 (*self).shl(rhs)
2250 }
2251}
2252
2253impl ShlAssign<i16> for I64Vec4 {
2254 #[inline]
2255 fn shl_assign(&mut self, rhs: i16) {
2256 *self = self.shl(rhs);
2257 }
2258}
2259
2260impl ShlAssign<&i16> for I64Vec4 {
2261 #[inline]
2262 fn shl_assign(&mut self, rhs: &i16) {
2263 self.shl_assign(*rhs);
2264 }
2265}
2266
2267impl Shr<i16> for I64Vec4 {
2268 type Output = Self;
2269 #[inline]
2270 fn shr(self, rhs: i16) -> Self::Output {
2271 Self {
2272 x: self.x.shr(rhs),
2273 y: self.y.shr(rhs),
2274 z: self.z.shr(rhs),
2275 w: self.w.shr(rhs),
2276 }
2277 }
2278}
2279
2280impl Shr<&i16> for I64Vec4 {
2281 type Output = Self;
2282 #[inline]
2283 fn shr(self, rhs: &i16) -> Self {
2284 self.shr(*rhs)
2285 }
2286}
2287
2288impl Shr<&i16> for &I64Vec4 {
2289 type Output = I64Vec4;
2290 #[inline]
2291 fn shr(self, rhs: &i16) -> I64Vec4 {
2292 (*self).shr(*rhs)
2293 }
2294}
2295
2296impl Shr<i16> for &I64Vec4 {
2297 type Output = I64Vec4;
2298 #[inline]
2299 fn shr(self, rhs: i16) -> I64Vec4 {
2300 (*self).shr(rhs)
2301 }
2302}
2303
2304impl ShrAssign<i16> for I64Vec4 {
2305 #[inline]
2306 fn shr_assign(&mut self, rhs: i16) {
2307 *self = self.shr(rhs);
2308 }
2309}
2310
2311impl ShrAssign<&i16> for I64Vec4 {
2312 #[inline]
2313 fn shr_assign(&mut self, rhs: &i16) {
2314 self.shr_assign(*rhs);
2315 }
2316}
2317
2318impl Shl<i32> for I64Vec4 {
2319 type Output = Self;
2320 #[inline]
2321 fn shl(self, rhs: i32) -> Self::Output {
2322 Self {
2323 x: self.x.shl(rhs),
2324 y: self.y.shl(rhs),
2325 z: self.z.shl(rhs),
2326 w: self.w.shl(rhs),
2327 }
2328 }
2329}
2330
2331impl Shl<&i32> for I64Vec4 {
2332 type Output = Self;
2333 #[inline]
2334 fn shl(self, rhs: &i32) -> Self {
2335 self.shl(*rhs)
2336 }
2337}
2338
2339impl Shl<&i32> for &I64Vec4 {
2340 type Output = I64Vec4;
2341 #[inline]
2342 fn shl(self, rhs: &i32) -> I64Vec4 {
2343 (*self).shl(*rhs)
2344 }
2345}
2346
2347impl Shl<i32> for &I64Vec4 {
2348 type Output = I64Vec4;
2349 #[inline]
2350 fn shl(self, rhs: i32) -> I64Vec4 {
2351 (*self).shl(rhs)
2352 }
2353}
2354
2355impl ShlAssign<i32> for I64Vec4 {
2356 #[inline]
2357 fn shl_assign(&mut self, rhs: i32) {
2358 *self = self.shl(rhs);
2359 }
2360}
2361
2362impl ShlAssign<&i32> for I64Vec4 {
2363 #[inline]
2364 fn shl_assign(&mut self, rhs: &i32) {
2365 self.shl_assign(*rhs);
2366 }
2367}
2368
2369impl Shr<i32> for I64Vec4 {
2370 type Output = Self;
2371 #[inline]
2372 fn shr(self, rhs: i32) -> Self::Output {
2373 Self {
2374 x: self.x.shr(rhs),
2375 y: self.y.shr(rhs),
2376 z: self.z.shr(rhs),
2377 w: self.w.shr(rhs),
2378 }
2379 }
2380}
2381
2382impl Shr<&i32> for I64Vec4 {
2383 type Output = Self;
2384 #[inline]
2385 fn shr(self, rhs: &i32) -> Self {
2386 self.shr(*rhs)
2387 }
2388}
2389
2390impl Shr<&i32> for &I64Vec4 {
2391 type Output = I64Vec4;
2392 #[inline]
2393 fn shr(self, rhs: &i32) -> I64Vec4 {
2394 (*self).shr(*rhs)
2395 }
2396}
2397
2398impl Shr<i32> for &I64Vec4 {
2399 type Output = I64Vec4;
2400 #[inline]
2401 fn shr(self, rhs: i32) -> I64Vec4 {
2402 (*self).shr(rhs)
2403 }
2404}
2405
2406impl ShrAssign<i32> for I64Vec4 {
2407 #[inline]
2408 fn shr_assign(&mut self, rhs: i32) {
2409 *self = self.shr(rhs);
2410 }
2411}
2412
2413impl ShrAssign<&i32> for I64Vec4 {
2414 #[inline]
2415 fn shr_assign(&mut self, rhs: &i32) {
2416 self.shr_assign(*rhs);
2417 }
2418}
2419
2420impl Shl<i64> for I64Vec4 {
2421 type Output = Self;
2422 #[inline]
2423 fn shl(self, rhs: i64) -> Self::Output {
2424 Self {
2425 x: self.x.shl(rhs),
2426 y: self.y.shl(rhs),
2427 z: self.z.shl(rhs),
2428 w: self.w.shl(rhs),
2429 }
2430 }
2431}
2432
2433impl Shl<&i64> for I64Vec4 {
2434 type Output = Self;
2435 #[inline]
2436 fn shl(self, rhs: &i64) -> Self {
2437 self.shl(*rhs)
2438 }
2439}
2440
2441impl Shl<&i64> for &I64Vec4 {
2442 type Output = I64Vec4;
2443 #[inline]
2444 fn shl(self, rhs: &i64) -> I64Vec4 {
2445 (*self).shl(*rhs)
2446 }
2447}
2448
2449impl Shl<i64> for &I64Vec4 {
2450 type Output = I64Vec4;
2451 #[inline]
2452 fn shl(self, rhs: i64) -> I64Vec4 {
2453 (*self).shl(rhs)
2454 }
2455}
2456
2457impl ShlAssign<i64> for I64Vec4 {
2458 #[inline]
2459 fn shl_assign(&mut self, rhs: i64) {
2460 *self = self.shl(rhs);
2461 }
2462}
2463
2464impl ShlAssign<&i64> for I64Vec4 {
2465 #[inline]
2466 fn shl_assign(&mut self, rhs: &i64) {
2467 self.shl_assign(*rhs);
2468 }
2469}
2470
2471impl Shr<i64> for I64Vec4 {
2472 type Output = Self;
2473 #[inline]
2474 fn shr(self, rhs: i64) -> Self::Output {
2475 Self {
2476 x: self.x.shr(rhs),
2477 y: self.y.shr(rhs),
2478 z: self.z.shr(rhs),
2479 w: self.w.shr(rhs),
2480 }
2481 }
2482}
2483
2484impl Shr<&i64> for I64Vec4 {
2485 type Output = Self;
2486 #[inline]
2487 fn shr(self, rhs: &i64) -> Self {
2488 self.shr(*rhs)
2489 }
2490}
2491
2492impl Shr<&i64> for &I64Vec4 {
2493 type Output = I64Vec4;
2494 #[inline]
2495 fn shr(self, rhs: &i64) -> I64Vec4 {
2496 (*self).shr(*rhs)
2497 }
2498}
2499
2500impl Shr<i64> for &I64Vec4 {
2501 type Output = I64Vec4;
2502 #[inline]
2503 fn shr(self, rhs: i64) -> I64Vec4 {
2504 (*self).shr(rhs)
2505 }
2506}
2507
2508impl ShrAssign<i64> for I64Vec4 {
2509 #[inline]
2510 fn shr_assign(&mut self, rhs: i64) {
2511 *self = self.shr(rhs);
2512 }
2513}
2514
2515impl ShrAssign<&i64> for I64Vec4 {
2516 #[inline]
2517 fn shr_assign(&mut self, rhs: &i64) {
2518 self.shr_assign(*rhs);
2519 }
2520}
2521
2522impl Shl<u8> for I64Vec4 {
2523 type Output = Self;
2524 #[inline]
2525 fn shl(self, rhs: u8) -> Self::Output {
2526 Self {
2527 x: self.x.shl(rhs),
2528 y: self.y.shl(rhs),
2529 z: self.z.shl(rhs),
2530 w: self.w.shl(rhs),
2531 }
2532 }
2533}
2534
2535impl Shl<&u8> for I64Vec4 {
2536 type Output = Self;
2537 #[inline]
2538 fn shl(self, rhs: &u8) -> Self {
2539 self.shl(*rhs)
2540 }
2541}
2542
2543impl Shl<&u8> for &I64Vec4 {
2544 type Output = I64Vec4;
2545 #[inline]
2546 fn shl(self, rhs: &u8) -> I64Vec4 {
2547 (*self).shl(*rhs)
2548 }
2549}
2550
2551impl Shl<u8> for &I64Vec4 {
2552 type Output = I64Vec4;
2553 #[inline]
2554 fn shl(self, rhs: u8) -> I64Vec4 {
2555 (*self).shl(rhs)
2556 }
2557}
2558
2559impl ShlAssign<u8> for I64Vec4 {
2560 #[inline]
2561 fn shl_assign(&mut self, rhs: u8) {
2562 *self = self.shl(rhs);
2563 }
2564}
2565
2566impl ShlAssign<&u8> for I64Vec4 {
2567 #[inline]
2568 fn shl_assign(&mut self, rhs: &u8) {
2569 self.shl_assign(*rhs);
2570 }
2571}
2572
2573impl Shr<u8> for I64Vec4 {
2574 type Output = Self;
2575 #[inline]
2576 fn shr(self, rhs: u8) -> Self::Output {
2577 Self {
2578 x: self.x.shr(rhs),
2579 y: self.y.shr(rhs),
2580 z: self.z.shr(rhs),
2581 w: self.w.shr(rhs),
2582 }
2583 }
2584}
2585
2586impl Shr<&u8> for I64Vec4 {
2587 type Output = Self;
2588 #[inline]
2589 fn shr(self, rhs: &u8) -> Self {
2590 self.shr(*rhs)
2591 }
2592}
2593
2594impl Shr<&u8> for &I64Vec4 {
2595 type Output = I64Vec4;
2596 #[inline]
2597 fn shr(self, rhs: &u8) -> I64Vec4 {
2598 (*self).shr(*rhs)
2599 }
2600}
2601
2602impl Shr<u8> for &I64Vec4 {
2603 type Output = I64Vec4;
2604 #[inline]
2605 fn shr(self, rhs: u8) -> I64Vec4 {
2606 (*self).shr(rhs)
2607 }
2608}
2609
2610impl ShrAssign<u8> for I64Vec4 {
2611 #[inline]
2612 fn shr_assign(&mut self, rhs: u8) {
2613 *self = self.shr(rhs);
2614 }
2615}
2616
2617impl ShrAssign<&u8> for I64Vec4 {
2618 #[inline]
2619 fn shr_assign(&mut self, rhs: &u8) {
2620 self.shr_assign(*rhs);
2621 }
2622}
2623
2624impl Shl<u16> for I64Vec4 {
2625 type Output = Self;
2626 #[inline]
2627 fn shl(self, rhs: u16) -> Self::Output {
2628 Self {
2629 x: self.x.shl(rhs),
2630 y: self.y.shl(rhs),
2631 z: self.z.shl(rhs),
2632 w: self.w.shl(rhs),
2633 }
2634 }
2635}
2636
2637impl Shl<&u16> for I64Vec4 {
2638 type Output = Self;
2639 #[inline]
2640 fn shl(self, rhs: &u16) -> Self {
2641 self.shl(*rhs)
2642 }
2643}
2644
2645impl Shl<&u16> for &I64Vec4 {
2646 type Output = I64Vec4;
2647 #[inline]
2648 fn shl(self, rhs: &u16) -> I64Vec4 {
2649 (*self).shl(*rhs)
2650 }
2651}
2652
2653impl Shl<u16> for &I64Vec4 {
2654 type Output = I64Vec4;
2655 #[inline]
2656 fn shl(self, rhs: u16) -> I64Vec4 {
2657 (*self).shl(rhs)
2658 }
2659}
2660
2661impl ShlAssign<u16> for I64Vec4 {
2662 #[inline]
2663 fn shl_assign(&mut self, rhs: u16) {
2664 *self = self.shl(rhs);
2665 }
2666}
2667
2668impl ShlAssign<&u16> for I64Vec4 {
2669 #[inline]
2670 fn shl_assign(&mut self, rhs: &u16) {
2671 self.shl_assign(*rhs);
2672 }
2673}
2674
2675impl Shr<u16> for I64Vec4 {
2676 type Output = Self;
2677 #[inline]
2678 fn shr(self, rhs: u16) -> Self::Output {
2679 Self {
2680 x: self.x.shr(rhs),
2681 y: self.y.shr(rhs),
2682 z: self.z.shr(rhs),
2683 w: self.w.shr(rhs),
2684 }
2685 }
2686}
2687
2688impl Shr<&u16> for I64Vec4 {
2689 type Output = Self;
2690 #[inline]
2691 fn shr(self, rhs: &u16) -> Self {
2692 self.shr(*rhs)
2693 }
2694}
2695
2696impl Shr<&u16> for &I64Vec4 {
2697 type Output = I64Vec4;
2698 #[inline]
2699 fn shr(self, rhs: &u16) -> I64Vec4 {
2700 (*self).shr(*rhs)
2701 }
2702}
2703
2704impl Shr<u16> for &I64Vec4 {
2705 type Output = I64Vec4;
2706 #[inline]
2707 fn shr(self, rhs: u16) -> I64Vec4 {
2708 (*self).shr(rhs)
2709 }
2710}
2711
2712impl ShrAssign<u16> for I64Vec4 {
2713 #[inline]
2714 fn shr_assign(&mut self, rhs: u16) {
2715 *self = self.shr(rhs);
2716 }
2717}
2718
2719impl ShrAssign<&u16> for I64Vec4 {
2720 #[inline]
2721 fn shr_assign(&mut self, rhs: &u16) {
2722 self.shr_assign(*rhs);
2723 }
2724}
2725
2726impl Shl<u32> for I64Vec4 {
2727 type Output = Self;
2728 #[inline]
2729 fn shl(self, rhs: u32) -> Self::Output {
2730 Self {
2731 x: self.x.shl(rhs),
2732 y: self.y.shl(rhs),
2733 z: self.z.shl(rhs),
2734 w: self.w.shl(rhs),
2735 }
2736 }
2737}
2738
2739impl Shl<&u32> for I64Vec4 {
2740 type Output = Self;
2741 #[inline]
2742 fn shl(self, rhs: &u32) -> Self {
2743 self.shl(*rhs)
2744 }
2745}
2746
2747impl Shl<&u32> for &I64Vec4 {
2748 type Output = I64Vec4;
2749 #[inline]
2750 fn shl(self, rhs: &u32) -> I64Vec4 {
2751 (*self).shl(*rhs)
2752 }
2753}
2754
2755impl Shl<u32> for &I64Vec4 {
2756 type Output = I64Vec4;
2757 #[inline]
2758 fn shl(self, rhs: u32) -> I64Vec4 {
2759 (*self).shl(rhs)
2760 }
2761}
2762
2763impl ShlAssign<u32> for I64Vec4 {
2764 #[inline]
2765 fn shl_assign(&mut self, rhs: u32) {
2766 *self = self.shl(rhs);
2767 }
2768}
2769
2770impl ShlAssign<&u32> for I64Vec4 {
2771 #[inline]
2772 fn shl_assign(&mut self, rhs: &u32) {
2773 self.shl_assign(*rhs);
2774 }
2775}
2776
2777impl Shr<u32> for I64Vec4 {
2778 type Output = Self;
2779 #[inline]
2780 fn shr(self, rhs: u32) -> Self::Output {
2781 Self {
2782 x: self.x.shr(rhs),
2783 y: self.y.shr(rhs),
2784 z: self.z.shr(rhs),
2785 w: self.w.shr(rhs),
2786 }
2787 }
2788}
2789
2790impl Shr<&u32> for I64Vec4 {
2791 type Output = Self;
2792 #[inline]
2793 fn shr(self, rhs: &u32) -> Self {
2794 self.shr(*rhs)
2795 }
2796}
2797
2798impl Shr<&u32> for &I64Vec4 {
2799 type Output = I64Vec4;
2800 #[inline]
2801 fn shr(self, rhs: &u32) -> I64Vec4 {
2802 (*self).shr(*rhs)
2803 }
2804}
2805
2806impl Shr<u32> for &I64Vec4 {
2807 type Output = I64Vec4;
2808 #[inline]
2809 fn shr(self, rhs: u32) -> I64Vec4 {
2810 (*self).shr(rhs)
2811 }
2812}
2813
2814impl ShrAssign<u32> for I64Vec4 {
2815 #[inline]
2816 fn shr_assign(&mut self, rhs: u32) {
2817 *self = self.shr(rhs);
2818 }
2819}
2820
2821impl ShrAssign<&u32> for I64Vec4 {
2822 #[inline]
2823 fn shr_assign(&mut self, rhs: &u32) {
2824 self.shr_assign(*rhs);
2825 }
2826}
2827
2828impl Shl<u64> for I64Vec4 {
2829 type Output = Self;
2830 #[inline]
2831 fn shl(self, rhs: u64) -> Self::Output {
2832 Self {
2833 x: self.x.shl(rhs),
2834 y: self.y.shl(rhs),
2835 z: self.z.shl(rhs),
2836 w: self.w.shl(rhs),
2837 }
2838 }
2839}
2840
2841impl Shl<&u64> for I64Vec4 {
2842 type Output = Self;
2843 #[inline]
2844 fn shl(self, rhs: &u64) -> Self {
2845 self.shl(*rhs)
2846 }
2847}
2848
2849impl Shl<&u64> for &I64Vec4 {
2850 type Output = I64Vec4;
2851 #[inline]
2852 fn shl(self, rhs: &u64) -> I64Vec4 {
2853 (*self).shl(*rhs)
2854 }
2855}
2856
2857impl Shl<u64> for &I64Vec4 {
2858 type Output = I64Vec4;
2859 #[inline]
2860 fn shl(self, rhs: u64) -> I64Vec4 {
2861 (*self).shl(rhs)
2862 }
2863}
2864
2865impl ShlAssign<u64> for I64Vec4 {
2866 #[inline]
2867 fn shl_assign(&mut self, rhs: u64) {
2868 *self = self.shl(rhs);
2869 }
2870}
2871
2872impl ShlAssign<&u64> for I64Vec4 {
2873 #[inline]
2874 fn shl_assign(&mut self, rhs: &u64) {
2875 self.shl_assign(*rhs);
2876 }
2877}
2878
2879impl Shr<u64> for I64Vec4 {
2880 type Output = Self;
2881 #[inline]
2882 fn shr(self, rhs: u64) -> Self::Output {
2883 Self {
2884 x: self.x.shr(rhs),
2885 y: self.y.shr(rhs),
2886 z: self.z.shr(rhs),
2887 w: self.w.shr(rhs),
2888 }
2889 }
2890}
2891
2892impl Shr<&u64> for I64Vec4 {
2893 type Output = Self;
2894 #[inline]
2895 fn shr(self, rhs: &u64) -> Self {
2896 self.shr(*rhs)
2897 }
2898}
2899
2900impl Shr<&u64> for &I64Vec4 {
2901 type Output = I64Vec4;
2902 #[inline]
2903 fn shr(self, rhs: &u64) -> I64Vec4 {
2904 (*self).shr(*rhs)
2905 }
2906}
2907
2908impl Shr<u64> for &I64Vec4 {
2909 type Output = I64Vec4;
2910 #[inline]
2911 fn shr(self, rhs: u64) -> I64Vec4 {
2912 (*self).shr(rhs)
2913 }
2914}
2915
2916impl ShrAssign<u64> for I64Vec4 {
2917 #[inline]
2918 fn shr_assign(&mut self, rhs: u64) {
2919 *self = self.shr(rhs);
2920 }
2921}
2922
2923impl ShrAssign<&u64> for I64Vec4 {
2924 #[inline]
2925 fn shr_assign(&mut self, rhs: &u64) {
2926 self.shr_assign(*rhs);
2927 }
2928}
2929
2930impl Shl<IVec4> for I64Vec4 {
2931 type Output = Self;
2932 #[inline]
2933 fn shl(self, rhs: IVec4) -> Self {
2934 Self {
2935 x: self.x.shl(rhs.x),
2936 y: self.y.shl(rhs.y),
2937 z: self.z.shl(rhs.z),
2938 w: self.w.shl(rhs.w),
2939 }
2940 }
2941}
2942
2943impl Shl<&IVec4> for I64Vec4 {
2944 type Output = Self;
2945 #[inline]
2946 fn shl(self, rhs: &IVec4) -> Self {
2947 self.shl(*rhs)
2948 }
2949}
2950
2951impl Shl<&IVec4> for &I64Vec4 {
2952 type Output = I64Vec4;
2953 #[inline]
2954 fn shl(self, rhs: &IVec4) -> I64Vec4 {
2955 (*self).shl(*rhs)
2956 }
2957}
2958
2959impl Shl<IVec4> for &I64Vec4 {
2960 type Output = I64Vec4;
2961 #[inline]
2962 fn shl(self, rhs: IVec4) -> I64Vec4 {
2963 (*self).shl(rhs)
2964 }
2965}
2966
2967impl Shr<IVec4> for I64Vec4 {
2968 type Output = Self;
2969 #[inline]
2970 fn shr(self, rhs: IVec4) -> Self {
2971 Self {
2972 x: self.x.shr(rhs.x),
2973 y: self.y.shr(rhs.y),
2974 z: self.z.shr(rhs.z),
2975 w: self.w.shr(rhs.w),
2976 }
2977 }
2978}
2979
2980impl Shr<&IVec4> for I64Vec4 {
2981 type Output = Self;
2982 #[inline]
2983 fn shr(self, rhs: &IVec4) -> Self {
2984 self.shr(*rhs)
2985 }
2986}
2987
2988impl Shr<&IVec4> for &I64Vec4 {
2989 type Output = I64Vec4;
2990 #[inline]
2991 fn shr(self, rhs: &IVec4) -> I64Vec4 {
2992 (*self).shr(*rhs)
2993 }
2994}
2995
2996impl Shr<IVec4> for &I64Vec4 {
2997 type Output = I64Vec4;
2998 #[inline]
2999 fn shr(self, rhs: IVec4) -> I64Vec4 {
3000 (*self).shr(rhs)
3001 }
3002}
3003
3004impl Shl<UVec4> for I64Vec4 {
3005 type Output = Self;
3006 #[inline]
3007 fn shl(self, rhs: UVec4) -> Self {
3008 Self {
3009 x: self.x.shl(rhs.x),
3010 y: self.y.shl(rhs.y),
3011 z: self.z.shl(rhs.z),
3012 w: self.w.shl(rhs.w),
3013 }
3014 }
3015}
3016
3017impl Shl<&UVec4> for I64Vec4 {
3018 type Output = Self;
3019 #[inline]
3020 fn shl(self, rhs: &UVec4) -> Self {
3021 self.shl(*rhs)
3022 }
3023}
3024
3025impl Shl<&UVec4> for &I64Vec4 {
3026 type Output = I64Vec4;
3027 #[inline]
3028 fn shl(self, rhs: &UVec4) -> I64Vec4 {
3029 (*self).shl(*rhs)
3030 }
3031}
3032
3033impl Shl<UVec4> for &I64Vec4 {
3034 type Output = I64Vec4;
3035 #[inline]
3036 fn shl(self, rhs: UVec4) -> I64Vec4 {
3037 (*self).shl(rhs)
3038 }
3039}
3040
3041impl Shr<UVec4> for I64Vec4 {
3042 type Output = Self;
3043 #[inline]
3044 fn shr(self, rhs: UVec4) -> Self {
3045 Self {
3046 x: self.x.shr(rhs.x),
3047 y: self.y.shr(rhs.y),
3048 z: self.z.shr(rhs.z),
3049 w: self.w.shr(rhs.w),
3050 }
3051 }
3052}
3053
3054impl Shr<&UVec4> for I64Vec4 {
3055 type Output = Self;
3056 #[inline]
3057 fn shr(self, rhs: &UVec4) -> Self {
3058 self.shr(*rhs)
3059 }
3060}
3061
3062impl Shr<&UVec4> for &I64Vec4 {
3063 type Output = I64Vec4;
3064 #[inline]
3065 fn shr(self, rhs: &UVec4) -> I64Vec4 {
3066 (*self).shr(*rhs)
3067 }
3068}
3069
3070impl Shr<UVec4> for &I64Vec4 {
3071 type Output = I64Vec4;
3072 #[inline]
3073 fn shr(self, rhs: UVec4) -> I64Vec4 {
3074 (*self).shr(rhs)
3075 }
3076}
3077
impl Index<usize> for I64Vec4 {
    type Output = i64;
    // Maps indices 0..=3 to the x/y/z/w fields; any other index panics.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for I64Vec4 {
    // Mutable counterpart of `Index`: same 0..=3 mapping and panic behavior.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
3104
3105impl fmt::Display for I64Vec4 {
3106 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3107 write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
3108 }
3109}
3110
3111impl fmt::Debug for I64Vec4 {
3112 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
3113 fmt.debug_tuple(stringify!(I64Vec4))
3114 .field(&self.x)
3115 .field(&self.y)
3116 .field(&self.z)
3117 .field(&self.w)
3118 .finish()
3119 }
3120}
3121
3122impl From<[i64; 4]> for I64Vec4 {
3123 #[inline]
3124 fn from(a: [i64; 4]) -> Self {
3125 Self::new(a[0], a[1], a[2], a[3])
3126 }
3127}
3128
3129impl From<I64Vec4> for [i64; 4] {
3130 #[inline]
3131 fn from(v: I64Vec4) -> Self {
3132 [v.x, v.y, v.z, v.w]
3133 }
3134}
3135
3136impl From<(i64, i64, i64, i64)> for I64Vec4 {
3137 #[inline]
3138 fn from(t: (i64, i64, i64, i64)) -> Self {
3139 Self::new(t.0, t.1, t.2, t.3)
3140 }
3141}
3142
3143impl From<I64Vec4> for (i64, i64, i64, i64) {
3144 #[inline]
3145 fn from(v: I64Vec4) -> Self {
3146 (v.x, v.y, v.z, v.w)
3147 }
3148}
3149
3150impl From<(I64Vec3, i64)> for I64Vec4 {
3151 #[inline]
3152 fn from((v, w): (I64Vec3, i64)) -> Self {
3153 Self::new(v.x, v.y, v.z, w)
3154 }
3155}
3156
3157impl From<(i64, I64Vec3)> for I64Vec4 {
3158 #[inline]
3159 fn from((x, v): (i64, I64Vec3)) -> Self {
3160 Self::new(x, v.x, v.y, v.z)
3161 }
3162}
3163
3164impl From<(I64Vec2, i64, i64)> for I64Vec4 {
3165 #[inline]
3166 fn from((v, z, w): (I64Vec2, i64, i64)) -> Self {
3167 Self::new(v.x, v.y, z, w)
3168 }
3169}
3170
3171impl From<(I64Vec2, I64Vec2)> for I64Vec4 {
3172 #[inline]
3173 fn from((v, u): (I64Vec2, I64Vec2)) -> Self {
3174 Self::new(v.x, v.y, u.x, u.y)
3175 }
3176}
3177
3178impl From<I8Vec4> for I64Vec4 {
3179 #[inline]
3180 fn from(v: I8Vec4) -> Self {
3181 Self::new(
3182 i64::from(v.x),
3183 i64::from(v.y),
3184 i64::from(v.z),
3185 i64::from(v.w),
3186 )
3187 }
3188}
3189
3190impl From<U8Vec4> for I64Vec4 {
3191 #[inline]
3192 fn from(v: U8Vec4) -> Self {
3193 Self::new(
3194 i64::from(v.x),
3195 i64::from(v.y),
3196 i64::from(v.z),
3197 i64::from(v.w),
3198 )
3199 }
3200}
3201
3202impl From<I16Vec4> for I64Vec4 {
3203 #[inline]
3204 fn from(v: I16Vec4) -> Self {
3205 Self::new(
3206 i64::from(v.x),
3207 i64::from(v.y),
3208 i64::from(v.z),
3209 i64::from(v.w),
3210 )
3211 }
3212}
3213
3214impl From<U16Vec4> for I64Vec4 {
3215 #[inline]
3216 fn from(v: U16Vec4) -> Self {
3217 Self::new(
3218 i64::from(v.x),
3219 i64::from(v.y),
3220 i64::from(v.z),
3221 i64::from(v.w),
3222 )
3223 }
3224}
3225
3226impl From<IVec4> for I64Vec4 {
3227 #[inline]
3228 fn from(v: IVec4) -> Self {
3229 Self::new(
3230 i64::from(v.x),
3231 i64::from(v.y),
3232 i64::from(v.z),
3233 i64::from(v.w),
3234 )
3235 }
3236}
3237
3238impl From<UVec4> for I64Vec4 {
3239 #[inline]
3240 fn from(v: UVec4) -> Self {
3241 Self::new(
3242 i64::from(v.x),
3243 i64::from(v.y),
3244 i64::from(v.z),
3245 i64::from(v.w),
3246 )
3247 }
3248}
3249
3250impl TryFrom<U64Vec4> for I64Vec4 {
3251 type Error = core::num::TryFromIntError;
3252
3253 #[inline]
3254 fn try_from(v: U64Vec4) -> Result<Self, Self::Error> {
3255 Ok(Self::new(
3256 i64::try_from(v.x)?,
3257 i64::try_from(v.y)?,
3258 i64::try_from(v.z)?,
3259 i64::try_from(v.w)?,
3260 ))
3261 }
3262}
3263
3264impl TryFrom<USizeVec4> for I64Vec4 {
3265 type Error = core::num::TryFromIntError;
3266
3267 #[inline]
3268 fn try_from(v: USizeVec4) -> Result<Self, Self::Error> {
3269 Ok(Self::new(
3270 i64::try_from(v.x)?,
3271 i64::try_from(v.y)?,
3272 i64::try_from(v.z)?,
3273 i64::try_from(v.w)?,
3274 ))
3275 }
3276}
3277
3278impl From<BVec4> for I64Vec4 {
3279 #[inline]
3280 fn from(v: BVec4) -> Self {
3281 Self::new(
3282 i64::from(v.x),
3283 i64::from(v.y),
3284 i64::from(v.z),
3285 i64::from(v.w),
3286 )
3287 }
3288}
3289
3290#[cfg(not(feature = "scalar-math"))]
3291impl From<BVec4A> for I64Vec4 {
3292 #[inline]
3293 fn from(v: BVec4A) -> Self {
3294 let bool_array: [bool; 4] = v.into();
3295 Self::new(
3296 i64::from(bool_array[0]),
3297 i64::from(bool_array[1]),
3298 i64::from(bool_array[2]),
3299 i64::from(bool_array[3]),
3300 )
3301 }
3302}