1use crate::{BVec2, I16Vec2, I64Vec3, I8Vec2, IVec2, U16Vec2, U64Vec2, U8Vec2, USizeVec2, UVec2};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[inline(always)]
11#[must_use]
12pub const fn i64vec2(x: i64, y: i64) -> I64Vec2 {
13 I64Vec2::new(x, y)
14}
15
/// A 2-dimensional vector with `i64` elements.
///
/// `#[repr(C)]` on non-SPIR-V targets (two consecutive `i64` fields); uses the
/// `simd` repr on SPIR-V, and 16-byte alignment when the `cuda` feature is on.
#[cfg_attr(not(target_arch = "spirv"), derive(Hash))]
#[derive(Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
    all(feature = "bytemuck", not(target_arch = "spirv")),
    derive(bytemuck::Pod, bytemuck::Zeroable)
)]
#[cfg_attr(feature = "cuda", repr(align(16)))]
#[cfg_attr(not(target_arch = "spirv"), repr(C))]
#[cfg_attr(target_arch = "spirv", repr(simd))]
pub struct I64Vec2 {
    /// The first element of the vector.
    pub x: i64,
    /// The second element of the vector.
    pub y: i64,
}
30
impl I64Vec2 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0);

    /// All ones.
    pub const ONE: Self = Self::splat(1);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1);

    /// All `i64::MIN`.
    pub const MIN: Self = Self::splat(i64::MIN);

    /// All `i64::MAX`.
    pub const MAX: Self = Self::splat(i64::MAX);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1, 0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0, 1);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1, 0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0, -1);

    /// The unit axes.
    pub const AXES: [Self; 2] = [Self::X, Self::Y];

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: i64, y: i64) -> Self {
        Self { x, y }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: i64) -> Self {
        Self { x: v, y: v }
    }

    /// Returns a vector containing each element of `self` modified by the
    /// mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(i64) -> i64,
    {
        Self::new(f(self.x), f(self.y))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`,
    /// selecting per element based on `mask`: a `true` lane takes the value
    /// from `if_true`, a `false` lane the value from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec2, if_true: Self, if_false: Self) -> Self {
        Self {
            x: if mask.test(0) { if_true.x } else { if_false.x },
            y: if mask.test(1) { if_true.y } else { if_false.y },
        }
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [i64; 2]) -> Self {
        Self::new(a[0], a[1])
    }

    /// Returns `[self.x, self.y]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [i64; 2] {
        [self.x, self.y]
    }

    /// Creates a vector from the first 2 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` has fewer than 2 elements.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[i64]) -> Self {
        assert!(slice.len() >= 2);
        Self::new(slice[0], slice[1])
    }

    /// Writes the elements of `self` to the first 2 elements of `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` has fewer than 2 elements.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [i64]) {
        slice[..2].copy_from_slice(&self.to_array());
    }

    /// Creates a 3D vector from `self` and the given `z` value.
    #[inline]
    #[must_use]
    pub const fn extend(self, z: i64) -> I64Vec3 {
        I64Vec3::new(self.x, self.y, z)
    }

    /// Creates a vector with `x` replaced by the given value and `y` unchanged.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: i64) -> Self {
        self.x = x;
        self
    }

    /// Creates a vector with `y` replaced by the given value and `x` unchanged.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: i64) -> Self {
        self.y = y;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
    /// Overflow behaves as for scalar `i64` arithmetic.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> i64 {
        (self.x * rhs.x) + (self.y * rhs.y)
    }

    /// Returns a vector where every element is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self::splat(self.dot(rhs))
    }

    /// Returns the element-wise minimum of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self {
            x: if self.x < rhs.x { self.x } else { rhs.x },
            y: if self.y < rhs.y { self.y } else { rhs.y },
        }
    }

    /// Returns the element-wise maximum of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self {
            x: if self.x > rhs.x { self.x } else { rhs.x },
            y: if self.y > rhs.y { self.y } else { rhs.y },
        }
    }

    /// Element-wise clamp of `self` into the range `[min, max]`.
    ///
    /// # Panics
    ///
    /// Panics if `min` is greater than `max` on any element, when the
    /// `glam_assert!` checks are enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the smallest element of `self`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> i64 {
        let min = |a, b| if a < b { a } else { b };
        min(self.x, self.y)
    }

    /// Returns the largest element of `self`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> i64 {
        let max = |a, b| if a > b { a } else { b };
        max(self.x, self.y)
    }

    /// Returns the index of the first occurrence of the smallest element
    /// (ties resolve to `x`, index 0).
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        if self.x <= self.y {
            0
        } else {
            1
        }
    }

    /// Returns the index of the first occurrence of the largest element
    /// (ties resolve to `x`, index 0).
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        if self.x >= self.y {
            0
        } else {
            1
        }
    }

    /// Returns the sum of all elements of `self`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> i64 {
        self.x + self.y
    }

    /// Returns the product of all elements of `self`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> i64 {
        self.x * self.y
    }

    /// Returns a mask with a `==` comparison result for each element.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y))
    }

    /// Returns a mask with a `!=` comparison result for each element.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y))
    }

    /// Returns a mask with a `>=` comparison result for each element.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y))
    }

    /// Returns a mask with a `>` comparison result for each element.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y))
    }

    /// Returns a mask with a `<=` comparison result for each element.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.le(&rhs.x), self.y.le(&rhs.y))
    }

    /// Returns a mask with a `<` comparison result for each element.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y))
    }

    /// Returns a vector containing the absolute value of each element
    /// (same overflow behavior as `i64::abs` for `i64::MIN`).
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self {
            x: self.x.abs(),
            y: self.y.abs(),
        }
    }

    /// Returns the element-wise sign of `self`: `0` for zero, `1` for
    /// positive, `-1` for negative (per `i64::signum`).
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        Self {
            x: self.x.signum(),
            y: self.y.signum(),
        }
    }

    /// Returns a bitmask of the sign bits of the elements:
    /// bit 0 is set if `x` is negative, bit 1 if `y` is negative.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        (self.x.is_negative() as u32) | ((self.y.is_negative() as u32) << 1)
    }

    /// Computes the squared length of `self` (`self . self`).
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> i64 {
        self.dot(self)
    }

    /// Computes the squared Euclidean distance between `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> i64 {
        (self - rhs).length_squared()
    }

    /// Element-wise Euclidean division (`i64::div_euclid`).
    ///
    /// # Panics
    ///
    /// Panics if any element of `rhs` is zero.
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(self.x.div_euclid(rhs.x), self.y.div_euclid(rhs.y))
    }

    /// Element-wise least-nonnegative remainder (`i64::rem_euclid`).
    ///
    /// # Panics
    ///
    /// Panics if any element of `rhs` is zero.
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(self.x.rem_euclid(rhs.x), self.y.rem_euclid(rhs.y))
    }

    /// Computes the manhattan (taxicab) distance between `self` and `rhs`:
    /// the sum of the per-element absolute differences.
    ///
    /// The sum of the two `u64` differences may overflow; see
    /// [`Self::checked_manhattan_distance`] for a non-panicking variant.
    #[inline]
    #[must_use]
    pub fn manhattan_distance(self, rhs: Self) -> u64 {
        self.x.abs_diff(rhs.x) + self.y.abs_diff(rhs.y)
    }

    /// Like [`Self::manhattan_distance`], but returns `None` if the sum of
    /// absolute differences would overflow `u64`.
    #[inline]
    #[must_use]
    pub fn checked_manhattan_distance(self, rhs: Self) -> Option<u64> {
        let d = self.x.abs_diff(rhs.x);
        d.checked_add(self.y.abs_diff(rhs.y))
    }

    /// Computes the chebyshev distance between `self` and `rhs`: the largest
    /// per-element absolute difference (cannot overflow).
    #[inline]
    #[must_use]
    pub fn chebyshev_distance(self, rhs: Self) -> u64 {
        // Leaves the intermediate array on the stack; max of two values.
        [self.x.abs_diff(rhs.x), self.y.abs_diff(rhs.y)]
            .into_iter()
            .max()
            .unwrap()
    }

    /// Returns `self` rotated by 90 degrees: `(-y, x)`.
    #[inline]
    #[must_use]
    pub fn perp(self) -> Self {
        Self {
            x: -self.y,
            y: self.x,
        }
    }

    /// The perpendicular dot product of `self` and `rhs`, also known as the
    /// wedge product, 2D cross product, or determinant.
    #[doc(alias = "wedge")]
    #[doc(alias = "cross")]
    #[doc(alias = "determinant")]
    #[inline]
    #[must_use]
    pub fn perp_dot(self, rhs: Self) -> i64 {
        (self.x * rhs.y) - (self.y * rhs.x)
    }

    /// Multiplies `self` and `rhs` as complex numbers (`x + y*i`), which
    /// rotates and scales `self` by the angle and length of `rhs`.
    #[inline]
    #[must_use]
    pub fn rotate(self, rhs: Self) -> Self {
        Self {
            x: self.x * rhs.x - self.y * rhs.y,
            y: self.y * rhs.x + self.x * rhs.y,
        }
    }

    /// Casts all elements of `self` to `f32`.
    #[inline]
    #[must_use]
    pub fn as_vec2(&self) -> crate::Vec2 {
        crate::Vec2::new(self.x as f32, self.y as f32)
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec2(&self) -> crate::DVec2 {
        crate::DVec2::new(self.x as f64, self.y as f64)
    }

    /// Casts all elements of `self` to `i8` (out-of-range values wrap, as
    /// with scalar `as` casts).
    #[inline]
    #[must_use]
    pub fn as_i8vec2(&self) -> crate::I8Vec2 {
        crate::I8Vec2::new(self.x as i8, self.y as i8)
    }

    /// Casts all elements of `self` to `u8` (out-of-range values wrap).
    #[inline]
    #[must_use]
    pub fn as_u8vec2(&self) -> crate::U8Vec2 {
        crate::U8Vec2::new(self.x as u8, self.y as u8)
    }

    /// Casts all elements of `self` to `i16` (out-of-range values wrap).
    #[inline]
    #[must_use]
    pub fn as_i16vec2(&self) -> crate::I16Vec2 {
        crate::I16Vec2::new(self.x as i16, self.y as i16)
    }

    /// Casts all elements of `self` to `u16` (out-of-range values wrap).
    #[inline]
    #[must_use]
    pub fn as_u16vec2(&self) -> crate::U16Vec2 {
        crate::U16Vec2::new(self.x as u16, self.y as u16)
    }

    /// Casts all elements of `self` to `i32` (out-of-range values wrap).
    #[inline]
    #[must_use]
    pub fn as_ivec2(&self) -> crate::IVec2 {
        crate::IVec2::new(self.x as i32, self.y as i32)
    }

    /// Casts all elements of `self` to `u32` (out-of-range values wrap).
    #[inline]
    #[must_use]
    pub fn as_uvec2(&self) -> crate::UVec2 {
        crate::UVec2::new(self.x as u32, self.y as u32)
    }

    /// Casts all elements of `self` to `u64` (negative values wrap).
    #[inline]
    #[must_use]
    pub fn as_u64vec2(&self) -> crate::U64Vec2 {
        crate::U64Vec2::new(self.x as u64, self.y as u64)
    }

    /// Casts all elements of `self` to `usize` (negative values wrap).
    #[inline]
    #[must_use]
    pub fn as_usizevec2(&self) -> crate::USizeVec2 {
        crate::USizeVec2::new(self.x as usize, self.y as usize)
    }

    /// Element-wise checked addition; `None` if any element overflows.
    /// (Written with `match` because `?` is not allowed in `const fn`.)
    #[inline]
    #[must_use]
    pub const fn checked_add(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_add(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_add(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Element-wise checked subtraction; `None` if any element overflows.
    #[inline]
    #[must_use]
    pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_sub(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_sub(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Element-wise checked multiplication; `None` if any element overflows.
    #[inline]
    #[must_use]
    pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_mul(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_mul(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Element-wise checked division; `None` if any element of `rhs` is zero
    /// or the division overflows (`i64::MIN / -1`).
    #[inline]
    #[must_use]
    pub const fn checked_div(self, rhs: Self) -> Option<Self> {
        let x = match self.x.checked_div(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_div(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Element-wise wrapping (two's-complement) addition.
    #[inline]
    #[must_use]
    pub const fn wrapping_add(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_add(rhs.x),
            y: self.y.wrapping_add(rhs.y),
        }
    }

    /// Element-wise wrapping (two's-complement) subtraction.
    #[inline]
    #[must_use]
    pub const fn wrapping_sub(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_sub(rhs.x),
            y: self.y.wrapping_sub(rhs.y),
        }
    }

    /// Element-wise wrapping (two's-complement) multiplication.
    #[inline]
    #[must_use]
    pub const fn wrapping_mul(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_mul(rhs.x),
            y: self.y.wrapping_mul(rhs.y),
        }
    }

    /// Element-wise wrapping division (per `i64::wrapping_div`; panics if any
    /// element of `rhs` is zero).
    #[inline]
    #[must_use]
    pub const fn wrapping_div(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_div(rhs.x),
            y: self.y.wrapping_div(rhs.y),
        }
    }

    /// Element-wise saturating addition (clamps at `i64::MIN`/`i64::MAX`).
    #[inline]
    #[must_use]
    pub const fn saturating_add(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_add(rhs.x),
            y: self.y.saturating_add(rhs.y),
        }
    }

    /// Element-wise saturating subtraction (clamps at `i64::MIN`/`i64::MAX`).
    #[inline]
    #[must_use]
    pub const fn saturating_sub(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_sub(rhs.x),
            y: self.y.saturating_sub(rhs.y),
        }
    }

    /// Element-wise saturating multiplication (clamps at `i64::MIN`/`i64::MAX`).
    #[inline]
    #[must_use]
    pub const fn saturating_mul(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_mul(rhs.x),
            y: self.y.saturating_mul(rhs.y),
        }
    }

    /// Element-wise saturating division (per `i64::saturating_div`; panics if
    /// any element of `rhs` is zero).
    #[inline]
    #[must_use]
    pub const fn saturating_div(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_div(rhs.x),
            y: self.y.saturating_div(rhs.y),
        }
    }

    /// Element-wise checked addition of an unsigned vector; `None` on overflow.
    #[inline]
    #[must_use]
    pub const fn checked_add_unsigned(self, rhs: U64Vec2) -> Option<Self> {
        let x = match self.x.checked_add_unsigned(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_add_unsigned(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Element-wise checked subtraction of an unsigned vector; `None` on overflow.
    #[inline]
    #[must_use]
    pub const fn checked_sub_unsigned(self, rhs: U64Vec2) -> Option<Self> {
        let x = match self.x.checked_sub_unsigned(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_sub_unsigned(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Element-wise wrapping addition of an unsigned vector.
    #[inline]
    #[must_use]
    pub const fn wrapping_add_unsigned(self, rhs: U64Vec2) -> Self {
        Self {
            x: self.x.wrapping_add_unsigned(rhs.x),
            y: self.y.wrapping_add_unsigned(rhs.y),
        }
    }

    /// Element-wise wrapping subtraction of an unsigned vector.
    #[inline]
    #[must_use]
    pub const fn wrapping_sub_unsigned(self, rhs: U64Vec2) -> Self {
        Self {
            x: self.x.wrapping_sub_unsigned(rhs.x),
            y: self.y.wrapping_sub_unsigned(rhs.y),
        }
    }

    /// Element-wise saturating addition of an unsigned vector.
    #[inline]
    #[must_use]
    pub const fn saturating_add_unsigned(self, rhs: U64Vec2) -> Self {
        Self {
            x: self.x.saturating_add_unsigned(rhs.x),
            y: self.y.saturating_add_unsigned(rhs.y),
        }
    }

    /// Element-wise saturating subtraction of an unsigned vector.
    #[inline]
    #[must_use]
    pub const fn saturating_sub_unsigned(self, rhs: U64Vec2) -> Self {
        Self {
            x: self.x.saturating_sub_unsigned(rhs.x),
            y: self.y.saturating_sub_unsigned(rhs.y),
        }
    }
}
807
808impl Default for I64Vec2 {
809 #[inline(always)]
810 fn default() -> Self {
811 Self::ZERO
812 }
813}
814
815impl Div for I64Vec2 {
816 type Output = Self;
817 #[inline]
818 fn div(self, rhs: Self) -> Self {
819 Self {
820 x: self.x.div(rhs.x),
821 y: self.y.div(rhs.y),
822 }
823 }
824}
825
826impl Div<&Self> for I64Vec2 {
827 type Output = Self;
828 #[inline]
829 fn div(self, rhs: &Self) -> Self {
830 self.div(*rhs)
831 }
832}
833
834impl Div<&I64Vec2> for &I64Vec2 {
835 type Output = I64Vec2;
836 #[inline]
837 fn div(self, rhs: &I64Vec2) -> I64Vec2 {
838 (*self).div(*rhs)
839 }
840}
841
842impl Div<I64Vec2> for &I64Vec2 {
843 type Output = I64Vec2;
844 #[inline]
845 fn div(self, rhs: I64Vec2) -> I64Vec2 {
846 (*self).div(rhs)
847 }
848}
849
850impl DivAssign for I64Vec2 {
851 #[inline]
852 fn div_assign(&mut self, rhs: Self) {
853 self.x.div_assign(rhs.x);
854 self.y.div_assign(rhs.y);
855 }
856}
857
858impl DivAssign<&Self> for I64Vec2 {
859 #[inline]
860 fn div_assign(&mut self, rhs: &Self) {
861 self.div_assign(*rhs);
862 }
863}
864
865impl Div<i64> for I64Vec2 {
866 type Output = Self;
867 #[inline]
868 fn div(self, rhs: i64) -> Self {
869 Self {
870 x: self.x.div(rhs),
871 y: self.y.div(rhs),
872 }
873 }
874}
875
876impl Div<&i64> for I64Vec2 {
877 type Output = Self;
878 #[inline]
879 fn div(self, rhs: &i64) -> Self {
880 self.div(*rhs)
881 }
882}
883
884impl Div<&i64> for &I64Vec2 {
885 type Output = I64Vec2;
886 #[inline]
887 fn div(self, rhs: &i64) -> I64Vec2 {
888 (*self).div(*rhs)
889 }
890}
891
892impl Div<i64> for &I64Vec2 {
893 type Output = I64Vec2;
894 #[inline]
895 fn div(self, rhs: i64) -> I64Vec2 {
896 (*self).div(rhs)
897 }
898}
899
900impl DivAssign<i64> for I64Vec2 {
901 #[inline]
902 fn div_assign(&mut self, rhs: i64) {
903 self.x.div_assign(rhs);
904 self.y.div_assign(rhs);
905 }
906}
907
908impl DivAssign<&i64> for I64Vec2 {
909 #[inline]
910 fn div_assign(&mut self, rhs: &i64) {
911 self.div_assign(*rhs);
912 }
913}
914
915impl Div<I64Vec2> for i64 {
916 type Output = I64Vec2;
917 #[inline]
918 fn div(self, rhs: I64Vec2) -> I64Vec2 {
919 I64Vec2 {
920 x: self.div(rhs.x),
921 y: self.div(rhs.y),
922 }
923 }
924}
925
926impl Div<&I64Vec2> for i64 {
927 type Output = I64Vec2;
928 #[inline]
929 fn div(self, rhs: &I64Vec2) -> I64Vec2 {
930 self.div(*rhs)
931 }
932}
933
934impl Div<&I64Vec2> for &i64 {
935 type Output = I64Vec2;
936 #[inline]
937 fn div(self, rhs: &I64Vec2) -> I64Vec2 {
938 (*self).div(*rhs)
939 }
940}
941
942impl Div<I64Vec2> for &i64 {
943 type Output = I64Vec2;
944 #[inline]
945 fn div(self, rhs: I64Vec2) -> I64Vec2 {
946 (*self).div(rhs)
947 }
948}
949
950impl Mul for I64Vec2 {
951 type Output = Self;
952 #[inline]
953 fn mul(self, rhs: Self) -> Self {
954 Self {
955 x: self.x.mul(rhs.x),
956 y: self.y.mul(rhs.y),
957 }
958 }
959}
960
961impl Mul<&Self> for I64Vec2 {
962 type Output = Self;
963 #[inline]
964 fn mul(self, rhs: &Self) -> Self {
965 self.mul(*rhs)
966 }
967}
968
969impl Mul<&I64Vec2> for &I64Vec2 {
970 type Output = I64Vec2;
971 #[inline]
972 fn mul(self, rhs: &I64Vec2) -> I64Vec2 {
973 (*self).mul(*rhs)
974 }
975}
976
977impl Mul<I64Vec2> for &I64Vec2 {
978 type Output = I64Vec2;
979 #[inline]
980 fn mul(self, rhs: I64Vec2) -> I64Vec2 {
981 (*self).mul(rhs)
982 }
983}
984
985impl MulAssign for I64Vec2 {
986 #[inline]
987 fn mul_assign(&mut self, rhs: Self) {
988 self.x.mul_assign(rhs.x);
989 self.y.mul_assign(rhs.y);
990 }
991}
992
993impl MulAssign<&Self> for I64Vec2 {
994 #[inline]
995 fn mul_assign(&mut self, rhs: &Self) {
996 self.mul_assign(*rhs);
997 }
998}
999
1000impl Mul<i64> for I64Vec2 {
1001 type Output = Self;
1002 #[inline]
1003 fn mul(self, rhs: i64) -> Self {
1004 Self {
1005 x: self.x.mul(rhs),
1006 y: self.y.mul(rhs),
1007 }
1008 }
1009}
1010
1011impl Mul<&i64> for I64Vec2 {
1012 type Output = Self;
1013 #[inline]
1014 fn mul(self, rhs: &i64) -> Self {
1015 self.mul(*rhs)
1016 }
1017}
1018
1019impl Mul<&i64> for &I64Vec2 {
1020 type Output = I64Vec2;
1021 #[inline]
1022 fn mul(self, rhs: &i64) -> I64Vec2 {
1023 (*self).mul(*rhs)
1024 }
1025}
1026
1027impl Mul<i64> for &I64Vec2 {
1028 type Output = I64Vec2;
1029 #[inline]
1030 fn mul(self, rhs: i64) -> I64Vec2 {
1031 (*self).mul(rhs)
1032 }
1033}
1034
1035impl MulAssign<i64> for I64Vec2 {
1036 #[inline]
1037 fn mul_assign(&mut self, rhs: i64) {
1038 self.x.mul_assign(rhs);
1039 self.y.mul_assign(rhs);
1040 }
1041}
1042
1043impl MulAssign<&i64> for I64Vec2 {
1044 #[inline]
1045 fn mul_assign(&mut self, rhs: &i64) {
1046 self.mul_assign(*rhs);
1047 }
1048}
1049
1050impl Mul<I64Vec2> for i64 {
1051 type Output = I64Vec2;
1052 #[inline]
1053 fn mul(self, rhs: I64Vec2) -> I64Vec2 {
1054 I64Vec2 {
1055 x: self.mul(rhs.x),
1056 y: self.mul(rhs.y),
1057 }
1058 }
1059}
1060
1061impl Mul<&I64Vec2> for i64 {
1062 type Output = I64Vec2;
1063 #[inline]
1064 fn mul(self, rhs: &I64Vec2) -> I64Vec2 {
1065 self.mul(*rhs)
1066 }
1067}
1068
1069impl Mul<&I64Vec2> for &i64 {
1070 type Output = I64Vec2;
1071 #[inline]
1072 fn mul(self, rhs: &I64Vec2) -> I64Vec2 {
1073 (*self).mul(*rhs)
1074 }
1075}
1076
1077impl Mul<I64Vec2> for &i64 {
1078 type Output = I64Vec2;
1079 #[inline]
1080 fn mul(self, rhs: I64Vec2) -> I64Vec2 {
1081 (*self).mul(rhs)
1082 }
1083}
1084
1085impl Add for I64Vec2 {
1086 type Output = Self;
1087 #[inline]
1088 fn add(self, rhs: Self) -> Self {
1089 Self {
1090 x: self.x.add(rhs.x),
1091 y: self.y.add(rhs.y),
1092 }
1093 }
1094}
1095
1096impl Add<&Self> for I64Vec2 {
1097 type Output = Self;
1098 #[inline]
1099 fn add(self, rhs: &Self) -> Self {
1100 self.add(*rhs)
1101 }
1102}
1103
1104impl Add<&I64Vec2> for &I64Vec2 {
1105 type Output = I64Vec2;
1106 #[inline]
1107 fn add(self, rhs: &I64Vec2) -> I64Vec2 {
1108 (*self).add(*rhs)
1109 }
1110}
1111
1112impl Add<I64Vec2> for &I64Vec2 {
1113 type Output = I64Vec2;
1114 #[inline]
1115 fn add(self, rhs: I64Vec2) -> I64Vec2 {
1116 (*self).add(rhs)
1117 }
1118}
1119
1120impl AddAssign for I64Vec2 {
1121 #[inline]
1122 fn add_assign(&mut self, rhs: Self) {
1123 self.x.add_assign(rhs.x);
1124 self.y.add_assign(rhs.y);
1125 }
1126}
1127
1128impl AddAssign<&Self> for I64Vec2 {
1129 #[inline]
1130 fn add_assign(&mut self, rhs: &Self) {
1131 self.add_assign(*rhs);
1132 }
1133}
1134
1135impl Add<i64> for I64Vec2 {
1136 type Output = Self;
1137 #[inline]
1138 fn add(self, rhs: i64) -> Self {
1139 Self {
1140 x: self.x.add(rhs),
1141 y: self.y.add(rhs),
1142 }
1143 }
1144}
1145
1146impl Add<&i64> for I64Vec2 {
1147 type Output = Self;
1148 #[inline]
1149 fn add(self, rhs: &i64) -> Self {
1150 self.add(*rhs)
1151 }
1152}
1153
1154impl Add<&i64> for &I64Vec2 {
1155 type Output = I64Vec2;
1156 #[inline]
1157 fn add(self, rhs: &i64) -> I64Vec2 {
1158 (*self).add(*rhs)
1159 }
1160}
1161
1162impl Add<i64> for &I64Vec2 {
1163 type Output = I64Vec2;
1164 #[inline]
1165 fn add(self, rhs: i64) -> I64Vec2 {
1166 (*self).add(rhs)
1167 }
1168}
1169
1170impl AddAssign<i64> for I64Vec2 {
1171 #[inline]
1172 fn add_assign(&mut self, rhs: i64) {
1173 self.x.add_assign(rhs);
1174 self.y.add_assign(rhs);
1175 }
1176}
1177
1178impl AddAssign<&i64> for I64Vec2 {
1179 #[inline]
1180 fn add_assign(&mut self, rhs: &i64) {
1181 self.add_assign(*rhs);
1182 }
1183}
1184
1185impl Add<I64Vec2> for i64 {
1186 type Output = I64Vec2;
1187 #[inline]
1188 fn add(self, rhs: I64Vec2) -> I64Vec2 {
1189 I64Vec2 {
1190 x: self.add(rhs.x),
1191 y: self.add(rhs.y),
1192 }
1193 }
1194}
1195
1196impl Add<&I64Vec2> for i64 {
1197 type Output = I64Vec2;
1198 #[inline]
1199 fn add(self, rhs: &I64Vec2) -> I64Vec2 {
1200 self.add(*rhs)
1201 }
1202}
1203
1204impl Add<&I64Vec2> for &i64 {
1205 type Output = I64Vec2;
1206 #[inline]
1207 fn add(self, rhs: &I64Vec2) -> I64Vec2 {
1208 (*self).add(*rhs)
1209 }
1210}
1211
1212impl Add<I64Vec2> for &i64 {
1213 type Output = I64Vec2;
1214 #[inline]
1215 fn add(self, rhs: I64Vec2) -> I64Vec2 {
1216 (*self).add(rhs)
1217 }
1218}
1219
1220impl Sub for I64Vec2 {
1221 type Output = Self;
1222 #[inline]
1223 fn sub(self, rhs: Self) -> Self {
1224 Self {
1225 x: self.x.sub(rhs.x),
1226 y: self.y.sub(rhs.y),
1227 }
1228 }
1229}
1230
1231impl Sub<&Self> for I64Vec2 {
1232 type Output = Self;
1233 #[inline]
1234 fn sub(self, rhs: &Self) -> Self {
1235 self.sub(*rhs)
1236 }
1237}
1238
1239impl Sub<&I64Vec2> for &I64Vec2 {
1240 type Output = I64Vec2;
1241 #[inline]
1242 fn sub(self, rhs: &I64Vec2) -> I64Vec2 {
1243 (*self).sub(*rhs)
1244 }
1245}
1246
1247impl Sub<I64Vec2> for &I64Vec2 {
1248 type Output = I64Vec2;
1249 #[inline]
1250 fn sub(self, rhs: I64Vec2) -> I64Vec2 {
1251 (*self).sub(rhs)
1252 }
1253}
1254
1255impl SubAssign for I64Vec2 {
1256 #[inline]
1257 fn sub_assign(&mut self, rhs: Self) {
1258 self.x.sub_assign(rhs.x);
1259 self.y.sub_assign(rhs.y);
1260 }
1261}
1262
1263impl SubAssign<&Self> for I64Vec2 {
1264 #[inline]
1265 fn sub_assign(&mut self, rhs: &Self) {
1266 self.sub_assign(*rhs);
1267 }
1268}
1269
1270impl Sub<i64> for I64Vec2 {
1271 type Output = Self;
1272 #[inline]
1273 fn sub(self, rhs: i64) -> Self {
1274 Self {
1275 x: self.x.sub(rhs),
1276 y: self.y.sub(rhs),
1277 }
1278 }
1279}
1280
1281impl Sub<&i64> for I64Vec2 {
1282 type Output = Self;
1283 #[inline]
1284 fn sub(self, rhs: &i64) -> Self {
1285 self.sub(*rhs)
1286 }
1287}
1288
1289impl Sub<&i64> for &I64Vec2 {
1290 type Output = I64Vec2;
1291 #[inline]
1292 fn sub(self, rhs: &i64) -> I64Vec2 {
1293 (*self).sub(*rhs)
1294 }
1295}
1296
1297impl Sub<i64> for &I64Vec2 {
1298 type Output = I64Vec2;
1299 #[inline]
1300 fn sub(self, rhs: i64) -> I64Vec2 {
1301 (*self).sub(rhs)
1302 }
1303}
1304
1305impl SubAssign<i64> for I64Vec2 {
1306 #[inline]
1307 fn sub_assign(&mut self, rhs: i64) {
1308 self.x.sub_assign(rhs);
1309 self.y.sub_assign(rhs);
1310 }
1311}
1312
1313impl SubAssign<&i64> for I64Vec2 {
1314 #[inline]
1315 fn sub_assign(&mut self, rhs: &i64) {
1316 self.sub_assign(*rhs);
1317 }
1318}
1319
1320impl Sub<I64Vec2> for i64 {
1321 type Output = I64Vec2;
1322 #[inline]
1323 fn sub(self, rhs: I64Vec2) -> I64Vec2 {
1324 I64Vec2 {
1325 x: self.sub(rhs.x),
1326 y: self.sub(rhs.y),
1327 }
1328 }
1329}
1330
1331impl Sub<&I64Vec2> for i64 {
1332 type Output = I64Vec2;
1333 #[inline]
1334 fn sub(self, rhs: &I64Vec2) -> I64Vec2 {
1335 self.sub(*rhs)
1336 }
1337}
1338
1339impl Sub<&I64Vec2> for &i64 {
1340 type Output = I64Vec2;
1341 #[inline]
1342 fn sub(self, rhs: &I64Vec2) -> I64Vec2 {
1343 (*self).sub(*rhs)
1344 }
1345}
1346
1347impl Sub<I64Vec2> for &i64 {
1348 type Output = I64Vec2;
1349 #[inline]
1350 fn sub(self, rhs: I64Vec2) -> I64Vec2 {
1351 (*self).sub(rhs)
1352 }
1353}
1354
1355impl Rem for I64Vec2 {
1356 type Output = Self;
1357 #[inline]
1358 fn rem(self, rhs: Self) -> Self {
1359 Self {
1360 x: self.x.rem(rhs.x),
1361 y: self.y.rem(rhs.y),
1362 }
1363 }
1364}
1365
1366impl Rem<&Self> for I64Vec2 {
1367 type Output = Self;
1368 #[inline]
1369 fn rem(self, rhs: &Self) -> Self {
1370 self.rem(*rhs)
1371 }
1372}
1373
1374impl Rem<&I64Vec2> for &I64Vec2 {
1375 type Output = I64Vec2;
1376 #[inline]
1377 fn rem(self, rhs: &I64Vec2) -> I64Vec2 {
1378 (*self).rem(*rhs)
1379 }
1380}
1381
1382impl Rem<I64Vec2> for &I64Vec2 {
1383 type Output = I64Vec2;
1384 #[inline]
1385 fn rem(self, rhs: I64Vec2) -> I64Vec2 {
1386 (*self).rem(rhs)
1387 }
1388}
1389
1390impl RemAssign for I64Vec2 {
1391 #[inline]
1392 fn rem_assign(&mut self, rhs: Self) {
1393 self.x.rem_assign(rhs.x);
1394 self.y.rem_assign(rhs.y);
1395 }
1396}
1397
1398impl RemAssign<&Self> for I64Vec2 {
1399 #[inline]
1400 fn rem_assign(&mut self, rhs: &Self) {
1401 self.rem_assign(*rhs);
1402 }
1403}
1404
1405impl Rem<i64> for I64Vec2 {
1406 type Output = Self;
1407 #[inline]
1408 fn rem(self, rhs: i64) -> Self {
1409 Self {
1410 x: self.x.rem(rhs),
1411 y: self.y.rem(rhs),
1412 }
1413 }
1414}
1415
1416impl Rem<&i64> for I64Vec2 {
1417 type Output = Self;
1418 #[inline]
1419 fn rem(self, rhs: &i64) -> Self {
1420 self.rem(*rhs)
1421 }
1422}
1423
1424impl Rem<&i64> for &I64Vec2 {
1425 type Output = I64Vec2;
1426 #[inline]
1427 fn rem(self, rhs: &i64) -> I64Vec2 {
1428 (*self).rem(*rhs)
1429 }
1430}
1431
1432impl Rem<i64> for &I64Vec2 {
1433 type Output = I64Vec2;
1434 #[inline]
1435 fn rem(self, rhs: i64) -> I64Vec2 {
1436 (*self).rem(rhs)
1437 }
1438}
1439
1440impl RemAssign<i64> for I64Vec2 {
1441 #[inline]
1442 fn rem_assign(&mut self, rhs: i64) {
1443 self.x.rem_assign(rhs);
1444 self.y.rem_assign(rhs);
1445 }
1446}
1447
1448impl RemAssign<&i64> for I64Vec2 {
1449 #[inline]
1450 fn rem_assign(&mut self, rhs: &i64) {
1451 self.rem_assign(*rhs);
1452 }
1453}
1454
1455impl Rem<I64Vec2> for i64 {
1456 type Output = I64Vec2;
1457 #[inline]
1458 fn rem(self, rhs: I64Vec2) -> I64Vec2 {
1459 I64Vec2 {
1460 x: self.rem(rhs.x),
1461 y: self.rem(rhs.y),
1462 }
1463 }
1464}
1465
1466impl Rem<&I64Vec2> for i64 {
1467 type Output = I64Vec2;
1468 #[inline]
1469 fn rem(self, rhs: &I64Vec2) -> I64Vec2 {
1470 self.rem(*rhs)
1471 }
1472}
1473
1474impl Rem<&I64Vec2> for &i64 {
1475 type Output = I64Vec2;
1476 #[inline]
1477 fn rem(self, rhs: &I64Vec2) -> I64Vec2 {
1478 (*self).rem(*rhs)
1479 }
1480}
1481
1482impl Rem<I64Vec2> for &i64 {
1483 type Output = I64Vec2;
1484 #[inline]
1485 fn rem(self, rhs: I64Vec2) -> I64Vec2 {
1486 (*self).rem(rhs)
1487 }
1488}
1489
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[i64; 2]> for I64Vec2 {
    #[inline]
    fn as_ref(&self) -> &[i64; 2] {
        // SAFETY: outside SPIR-V targets `I64Vec2` is `#[repr(C)]` with
        // exactly two `i64` fields, so its layout matches `[i64; 2]`; the
        // returned borrow is tied to `&self` by the signature's lifetime.
        unsafe { &*(self as *const Self as *const [i64; 2]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[i64; 2]> for I64Vec2 {
    #[inline]
    fn as_mut(&mut self) -> &mut [i64; 2] {
        // SAFETY: same `repr(C)` layout argument as `AsRef`; exclusivity of
        // the returned reference comes from taking `&mut self`.
        unsafe { &mut *(self as *mut Self as *mut [i64; 2]) }
    }
}
1505
1506impl Sum for I64Vec2 {
1507 #[inline]
1508 fn sum<I>(iter: I) -> Self
1509 where
1510 I: Iterator<Item = Self>,
1511 {
1512 iter.fold(Self::ZERO, Self::add)
1513 }
1514}
1515
1516impl<'a> Sum<&'a Self> for I64Vec2 {
1517 #[inline]
1518 fn sum<I>(iter: I) -> Self
1519 where
1520 I: Iterator<Item = &'a Self>,
1521 {
1522 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1523 }
1524}
1525
1526impl Product for I64Vec2 {
1527 #[inline]
1528 fn product<I>(iter: I) -> Self
1529 where
1530 I: Iterator<Item = Self>,
1531 {
1532 iter.fold(Self::ONE, Self::mul)
1533 }
1534}
1535
1536impl<'a> Product<&'a Self> for I64Vec2 {
1537 #[inline]
1538 fn product<I>(iter: I) -> Self
1539 where
1540 I: Iterator<Item = &'a Self>,
1541 {
1542 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1543 }
1544}
1545
1546impl Neg for I64Vec2 {
1547 type Output = Self;
1548 #[inline]
1549 fn neg(self) -> Self {
1550 Self {
1551 x: self.x.neg(),
1552 y: self.y.neg(),
1553 }
1554 }
1555}
1556
1557impl Neg for &I64Vec2 {
1558 type Output = I64Vec2;
1559 #[inline]
1560 fn neg(self) -> I64Vec2 {
1561 (*self).neg()
1562 }
1563}
1564
1565impl Not for I64Vec2 {
1566 type Output = Self;
1567 #[inline]
1568 fn not(self) -> Self {
1569 Self {
1570 x: self.x.not(),
1571 y: self.y.not(),
1572 }
1573 }
1574}
1575
1576impl Not for &I64Vec2 {
1577 type Output = I64Vec2;
1578 #[inline]
1579 fn not(self) -> I64Vec2 {
1580 (*self).not()
1581 }
1582}
1583
1584impl BitAnd for I64Vec2 {
1585 type Output = Self;
1586 #[inline]
1587 fn bitand(self, rhs: Self) -> Self::Output {
1588 Self {
1589 x: self.x.bitand(rhs.x),
1590 y: self.y.bitand(rhs.y),
1591 }
1592 }
1593}
1594
1595impl BitAnd<&Self> for I64Vec2 {
1596 type Output = Self;
1597 #[inline]
1598 fn bitand(self, rhs: &Self) -> Self {
1599 self.bitand(*rhs)
1600 }
1601}
1602
1603impl BitAnd<&I64Vec2> for &I64Vec2 {
1604 type Output = I64Vec2;
1605 #[inline]
1606 fn bitand(self, rhs: &I64Vec2) -> I64Vec2 {
1607 (*self).bitand(*rhs)
1608 }
1609}
1610
1611impl BitAnd<I64Vec2> for &I64Vec2 {
1612 type Output = I64Vec2;
1613 #[inline]
1614 fn bitand(self, rhs: I64Vec2) -> I64Vec2 {
1615 (*self).bitand(rhs)
1616 }
1617}
1618
1619impl BitAndAssign for I64Vec2 {
1620 #[inline]
1621 fn bitand_assign(&mut self, rhs: Self) {
1622 *self = self.bitand(rhs);
1623 }
1624}
1625
1626impl BitAndAssign<&Self> for I64Vec2 {
1627 #[inline]
1628 fn bitand_assign(&mut self, rhs: &Self) {
1629 self.bitand_assign(*rhs);
1630 }
1631}
1632
1633impl BitOr for I64Vec2 {
1634 type Output = Self;
1635 #[inline]
1636 fn bitor(self, rhs: Self) -> Self::Output {
1637 Self {
1638 x: self.x.bitor(rhs.x),
1639 y: self.y.bitor(rhs.y),
1640 }
1641 }
1642}
1643
1644impl BitOr<&Self> for I64Vec2 {
1645 type Output = Self;
1646 #[inline]
1647 fn bitor(self, rhs: &Self) -> Self {
1648 self.bitor(*rhs)
1649 }
1650}
1651
1652impl BitOr<&I64Vec2> for &I64Vec2 {
1653 type Output = I64Vec2;
1654 #[inline]
1655 fn bitor(self, rhs: &I64Vec2) -> I64Vec2 {
1656 (*self).bitor(*rhs)
1657 }
1658}
1659
1660impl BitOr<I64Vec2> for &I64Vec2 {
1661 type Output = I64Vec2;
1662 #[inline]
1663 fn bitor(self, rhs: I64Vec2) -> I64Vec2 {
1664 (*self).bitor(rhs)
1665 }
1666}
1667
1668impl BitOrAssign for I64Vec2 {
1669 #[inline]
1670 fn bitor_assign(&mut self, rhs: Self) {
1671 *self = self.bitor(rhs);
1672 }
1673}
1674
1675impl BitOrAssign<&Self> for I64Vec2 {
1676 #[inline]
1677 fn bitor_assign(&mut self, rhs: &Self) {
1678 self.bitor_assign(*rhs);
1679 }
1680}
1681
1682impl BitXor for I64Vec2 {
1683 type Output = Self;
1684 #[inline]
1685 fn bitxor(self, rhs: Self) -> Self::Output {
1686 Self {
1687 x: self.x.bitxor(rhs.x),
1688 y: self.y.bitxor(rhs.y),
1689 }
1690 }
1691}
1692
1693impl BitXor<&Self> for I64Vec2 {
1694 type Output = Self;
1695 #[inline]
1696 fn bitxor(self, rhs: &Self) -> Self {
1697 self.bitxor(*rhs)
1698 }
1699}
1700
1701impl BitXor<&I64Vec2> for &I64Vec2 {
1702 type Output = I64Vec2;
1703 #[inline]
1704 fn bitxor(self, rhs: &I64Vec2) -> I64Vec2 {
1705 (*self).bitxor(*rhs)
1706 }
1707}
1708
1709impl BitXor<I64Vec2> for &I64Vec2 {
1710 type Output = I64Vec2;
1711 #[inline]
1712 fn bitxor(self, rhs: I64Vec2) -> I64Vec2 {
1713 (*self).bitxor(rhs)
1714 }
1715}
1716
1717impl BitXorAssign for I64Vec2 {
1718 #[inline]
1719 fn bitxor_assign(&mut self, rhs: Self) {
1720 *self = self.bitxor(rhs);
1721 }
1722}
1723
1724impl BitXorAssign<&Self> for I64Vec2 {
1725 #[inline]
1726 fn bitxor_assign(&mut self, rhs: &Self) {
1727 self.bitxor_assign(*rhs);
1728 }
1729}
1730
1731impl BitAnd<i64> for I64Vec2 {
1732 type Output = Self;
1733 #[inline]
1734 fn bitand(self, rhs: i64) -> Self::Output {
1735 Self {
1736 x: self.x.bitand(rhs),
1737 y: self.y.bitand(rhs),
1738 }
1739 }
1740}
1741
1742impl BitAnd<&i64> for I64Vec2 {
1743 type Output = Self;
1744 #[inline]
1745 fn bitand(self, rhs: &i64) -> Self {
1746 self.bitand(*rhs)
1747 }
1748}
1749
1750impl BitAnd<&i64> for &I64Vec2 {
1751 type Output = I64Vec2;
1752 #[inline]
1753 fn bitand(self, rhs: &i64) -> I64Vec2 {
1754 (*self).bitand(*rhs)
1755 }
1756}
1757
1758impl BitAnd<i64> for &I64Vec2 {
1759 type Output = I64Vec2;
1760 #[inline]
1761 fn bitand(self, rhs: i64) -> I64Vec2 {
1762 (*self).bitand(rhs)
1763 }
1764}
1765
1766impl BitAndAssign<i64> for I64Vec2 {
1767 #[inline]
1768 fn bitand_assign(&mut self, rhs: i64) {
1769 *self = self.bitand(rhs);
1770 }
1771}
1772
1773impl BitAndAssign<&i64> for I64Vec2 {
1774 #[inline]
1775 fn bitand_assign(&mut self, rhs: &i64) {
1776 self.bitand_assign(*rhs);
1777 }
1778}
1779
1780impl BitOr<i64> for I64Vec2 {
1781 type Output = Self;
1782 #[inline]
1783 fn bitor(self, rhs: i64) -> Self::Output {
1784 Self {
1785 x: self.x.bitor(rhs),
1786 y: self.y.bitor(rhs),
1787 }
1788 }
1789}
1790
1791impl BitOr<&i64> for I64Vec2 {
1792 type Output = Self;
1793 #[inline]
1794 fn bitor(self, rhs: &i64) -> Self {
1795 self.bitor(*rhs)
1796 }
1797}
1798
1799impl BitOr<&i64> for &I64Vec2 {
1800 type Output = I64Vec2;
1801 #[inline]
1802 fn bitor(self, rhs: &i64) -> I64Vec2 {
1803 (*self).bitor(*rhs)
1804 }
1805}
1806
1807impl BitOr<i64> for &I64Vec2 {
1808 type Output = I64Vec2;
1809 #[inline]
1810 fn bitor(self, rhs: i64) -> I64Vec2 {
1811 (*self).bitor(rhs)
1812 }
1813}
1814
1815impl BitOrAssign<i64> for I64Vec2 {
1816 #[inline]
1817 fn bitor_assign(&mut self, rhs: i64) {
1818 *self = self.bitor(rhs);
1819 }
1820}
1821
1822impl BitOrAssign<&i64> for I64Vec2 {
1823 #[inline]
1824 fn bitor_assign(&mut self, rhs: &i64) {
1825 self.bitor_assign(*rhs);
1826 }
1827}
1828
1829impl BitXor<i64> for I64Vec2 {
1830 type Output = Self;
1831 #[inline]
1832 fn bitxor(self, rhs: i64) -> Self::Output {
1833 Self {
1834 x: self.x.bitxor(rhs),
1835 y: self.y.bitxor(rhs),
1836 }
1837 }
1838}
1839
1840impl BitXor<&i64> for I64Vec2 {
1841 type Output = Self;
1842 #[inline]
1843 fn bitxor(self, rhs: &i64) -> Self {
1844 self.bitxor(*rhs)
1845 }
1846}
1847
1848impl BitXor<&i64> for &I64Vec2 {
1849 type Output = I64Vec2;
1850 #[inline]
1851 fn bitxor(self, rhs: &i64) -> I64Vec2 {
1852 (*self).bitxor(*rhs)
1853 }
1854}
1855
1856impl BitXor<i64> for &I64Vec2 {
1857 type Output = I64Vec2;
1858 #[inline]
1859 fn bitxor(self, rhs: i64) -> I64Vec2 {
1860 (*self).bitxor(rhs)
1861 }
1862}
1863
1864impl BitXorAssign<i64> for I64Vec2 {
1865 #[inline]
1866 fn bitxor_assign(&mut self, rhs: i64) {
1867 *self = self.bitxor(rhs);
1868 }
1869}
1870
1871impl BitXorAssign<&i64> for I64Vec2 {
1872 #[inline]
1873 fn bitxor_assign(&mut self, rhs: &i64) {
1874 self.bitxor_assign(*rhs);
1875 }
1876}
1877
1878impl Shl<i8> for I64Vec2 {
1879 type Output = Self;
1880 #[inline]
1881 fn shl(self, rhs: i8) -> Self::Output {
1882 Self {
1883 x: self.x.shl(rhs),
1884 y: self.y.shl(rhs),
1885 }
1886 }
1887}
1888
1889impl Shl<&i8> for I64Vec2 {
1890 type Output = Self;
1891 #[inline]
1892 fn shl(self, rhs: &i8) -> Self {
1893 self.shl(*rhs)
1894 }
1895}
1896
1897impl Shl<&i8> for &I64Vec2 {
1898 type Output = I64Vec2;
1899 #[inline]
1900 fn shl(self, rhs: &i8) -> I64Vec2 {
1901 (*self).shl(*rhs)
1902 }
1903}
1904
1905impl Shl<i8> for &I64Vec2 {
1906 type Output = I64Vec2;
1907 #[inline]
1908 fn shl(self, rhs: i8) -> I64Vec2 {
1909 (*self).shl(rhs)
1910 }
1911}
1912
1913impl ShlAssign<i8> for I64Vec2 {
1914 #[inline]
1915 fn shl_assign(&mut self, rhs: i8) {
1916 *self = self.shl(rhs);
1917 }
1918}
1919
1920impl ShlAssign<&i8> for I64Vec2 {
1921 #[inline]
1922 fn shl_assign(&mut self, rhs: &i8) {
1923 self.shl_assign(*rhs);
1924 }
1925}
1926
1927impl Shr<i8> for I64Vec2 {
1928 type Output = Self;
1929 #[inline]
1930 fn shr(self, rhs: i8) -> Self::Output {
1931 Self {
1932 x: self.x.shr(rhs),
1933 y: self.y.shr(rhs),
1934 }
1935 }
1936}
1937
1938impl Shr<&i8> for I64Vec2 {
1939 type Output = Self;
1940 #[inline]
1941 fn shr(self, rhs: &i8) -> Self {
1942 self.shr(*rhs)
1943 }
1944}
1945
1946impl Shr<&i8> for &I64Vec2 {
1947 type Output = I64Vec2;
1948 #[inline]
1949 fn shr(self, rhs: &i8) -> I64Vec2 {
1950 (*self).shr(*rhs)
1951 }
1952}
1953
1954impl Shr<i8> for &I64Vec2 {
1955 type Output = I64Vec2;
1956 #[inline]
1957 fn shr(self, rhs: i8) -> I64Vec2 {
1958 (*self).shr(rhs)
1959 }
1960}
1961
1962impl ShrAssign<i8> for I64Vec2 {
1963 #[inline]
1964 fn shr_assign(&mut self, rhs: i8) {
1965 *self = self.shr(rhs);
1966 }
1967}
1968
1969impl ShrAssign<&i8> for I64Vec2 {
1970 #[inline]
1971 fn shr_assign(&mut self, rhs: &i8) {
1972 self.shr_assign(*rhs);
1973 }
1974}
1975
1976impl Shl<i16> for I64Vec2 {
1977 type Output = Self;
1978 #[inline]
1979 fn shl(self, rhs: i16) -> Self::Output {
1980 Self {
1981 x: self.x.shl(rhs),
1982 y: self.y.shl(rhs),
1983 }
1984 }
1985}
1986
1987impl Shl<&i16> for I64Vec2 {
1988 type Output = Self;
1989 #[inline]
1990 fn shl(self, rhs: &i16) -> Self {
1991 self.shl(*rhs)
1992 }
1993}
1994
1995impl Shl<&i16> for &I64Vec2 {
1996 type Output = I64Vec2;
1997 #[inline]
1998 fn shl(self, rhs: &i16) -> I64Vec2 {
1999 (*self).shl(*rhs)
2000 }
2001}
2002
2003impl Shl<i16> for &I64Vec2 {
2004 type Output = I64Vec2;
2005 #[inline]
2006 fn shl(self, rhs: i16) -> I64Vec2 {
2007 (*self).shl(rhs)
2008 }
2009}
2010
2011impl ShlAssign<i16> for I64Vec2 {
2012 #[inline]
2013 fn shl_assign(&mut self, rhs: i16) {
2014 *self = self.shl(rhs);
2015 }
2016}
2017
2018impl ShlAssign<&i16> for I64Vec2 {
2019 #[inline]
2020 fn shl_assign(&mut self, rhs: &i16) {
2021 self.shl_assign(*rhs);
2022 }
2023}
2024
2025impl Shr<i16> for I64Vec2 {
2026 type Output = Self;
2027 #[inline]
2028 fn shr(self, rhs: i16) -> Self::Output {
2029 Self {
2030 x: self.x.shr(rhs),
2031 y: self.y.shr(rhs),
2032 }
2033 }
2034}
2035
2036impl Shr<&i16> for I64Vec2 {
2037 type Output = Self;
2038 #[inline]
2039 fn shr(self, rhs: &i16) -> Self {
2040 self.shr(*rhs)
2041 }
2042}
2043
2044impl Shr<&i16> for &I64Vec2 {
2045 type Output = I64Vec2;
2046 #[inline]
2047 fn shr(self, rhs: &i16) -> I64Vec2 {
2048 (*self).shr(*rhs)
2049 }
2050}
2051
2052impl Shr<i16> for &I64Vec2 {
2053 type Output = I64Vec2;
2054 #[inline]
2055 fn shr(self, rhs: i16) -> I64Vec2 {
2056 (*self).shr(rhs)
2057 }
2058}
2059
2060impl ShrAssign<i16> for I64Vec2 {
2061 #[inline]
2062 fn shr_assign(&mut self, rhs: i16) {
2063 *self = self.shr(rhs);
2064 }
2065}
2066
2067impl ShrAssign<&i16> for I64Vec2 {
2068 #[inline]
2069 fn shr_assign(&mut self, rhs: &i16) {
2070 self.shr_assign(*rhs);
2071 }
2072}
2073
2074impl Shl<i32> for I64Vec2 {
2075 type Output = Self;
2076 #[inline]
2077 fn shl(self, rhs: i32) -> Self::Output {
2078 Self {
2079 x: self.x.shl(rhs),
2080 y: self.y.shl(rhs),
2081 }
2082 }
2083}
2084
2085impl Shl<&i32> for I64Vec2 {
2086 type Output = Self;
2087 #[inline]
2088 fn shl(self, rhs: &i32) -> Self {
2089 self.shl(*rhs)
2090 }
2091}
2092
2093impl Shl<&i32> for &I64Vec2 {
2094 type Output = I64Vec2;
2095 #[inline]
2096 fn shl(self, rhs: &i32) -> I64Vec2 {
2097 (*self).shl(*rhs)
2098 }
2099}
2100
2101impl Shl<i32> for &I64Vec2 {
2102 type Output = I64Vec2;
2103 #[inline]
2104 fn shl(self, rhs: i32) -> I64Vec2 {
2105 (*self).shl(rhs)
2106 }
2107}
2108
2109impl ShlAssign<i32> for I64Vec2 {
2110 #[inline]
2111 fn shl_assign(&mut self, rhs: i32) {
2112 *self = self.shl(rhs);
2113 }
2114}
2115
2116impl ShlAssign<&i32> for I64Vec2 {
2117 #[inline]
2118 fn shl_assign(&mut self, rhs: &i32) {
2119 self.shl_assign(*rhs);
2120 }
2121}
2122
2123impl Shr<i32> for I64Vec2 {
2124 type Output = Self;
2125 #[inline]
2126 fn shr(self, rhs: i32) -> Self::Output {
2127 Self {
2128 x: self.x.shr(rhs),
2129 y: self.y.shr(rhs),
2130 }
2131 }
2132}
2133
2134impl Shr<&i32> for I64Vec2 {
2135 type Output = Self;
2136 #[inline]
2137 fn shr(self, rhs: &i32) -> Self {
2138 self.shr(*rhs)
2139 }
2140}
2141
2142impl Shr<&i32> for &I64Vec2 {
2143 type Output = I64Vec2;
2144 #[inline]
2145 fn shr(self, rhs: &i32) -> I64Vec2 {
2146 (*self).shr(*rhs)
2147 }
2148}
2149
2150impl Shr<i32> for &I64Vec2 {
2151 type Output = I64Vec2;
2152 #[inline]
2153 fn shr(self, rhs: i32) -> I64Vec2 {
2154 (*self).shr(rhs)
2155 }
2156}
2157
2158impl ShrAssign<i32> for I64Vec2 {
2159 #[inline]
2160 fn shr_assign(&mut self, rhs: i32) {
2161 *self = self.shr(rhs);
2162 }
2163}
2164
2165impl ShrAssign<&i32> for I64Vec2 {
2166 #[inline]
2167 fn shr_assign(&mut self, rhs: &i32) {
2168 self.shr_assign(*rhs);
2169 }
2170}
2171
2172impl Shl<i64> for I64Vec2 {
2173 type Output = Self;
2174 #[inline]
2175 fn shl(self, rhs: i64) -> Self::Output {
2176 Self {
2177 x: self.x.shl(rhs),
2178 y: self.y.shl(rhs),
2179 }
2180 }
2181}
2182
2183impl Shl<&i64> for I64Vec2 {
2184 type Output = Self;
2185 #[inline]
2186 fn shl(self, rhs: &i64) -> Self {
2187 self.shl(*rhs)
2188 }
2189}
2190
2191impl Shl<&i64> for &I64Vec2 {
2192 type Output = I64Vec2;
2193 #[inline]
2194 fn shl(self, rhs: &i64) -> I64Vec2 {
2195 (*self).shl(*rhs)
2196 }
2197}
2198
2199impl Shl<i64> for &I64Vec2 {
2200 type Output = I64Vec2;
2201 #[inline]
2202 fn shl(self, rhs: i64) -> I64Vec2 {
2203 (*self).shl(rhs)
2204 }
2205}
2206
2207impl ShlAssign<i64> for I64Vec2 {
2208 #[inline]
2209 fn shl_assign(&mut self, rhs: i64) {
2210 *self = self.shl(rhs);
2211 }
2212}
2213
2214impl ShlAssign<&i64> for I64Vec2 {
2215 #[inline]
2216 fn shl_assign(&mut self, rhs: &i64) {
2217 self.shl_assign(*rhs);
2218 }
2219}
2220
2221impl Shr<i64> for I64Vec2 {
2222 type Output = Self;
2223 #[inline]
2224 fn shr(self, rhs: i64) -> Self::Output {
2225 Self {
2226 x: self.x.shr(rhs),
2227 y: self.y.shr(rhs),
2228 }
2229 }
2230}
2231
2232impl Shr<&i64> for I64Vec2 {
2233 type Output = Self;
2234 #[inline]
2235 fn shr(self, rhs: &i64) -> Self {
2236 self.shr(*rhs)
2237 }
2238}
2239
2240impl Shr<&i64> for &I64Vec2 {
2241 type Output = I64Vec2;
2242 #[inline]
2243 fn shr(self, rhs: &i64) -> I64Vec2 {
2244 (*self).shr(*rhs)
2245 }
2246}
2247
2248impl Shr<i64> for &I64Vec2 {
2249 type Output = I64Vec2;
2250 #[inline]
2251 fn shr(self, rhs: i64) -> I64Vec2 {
2252 (*self).shr(rhs)
2253 }
2254}
2255
2256impl ShrAssign<i64> for I64Vec2 {
2257 #[inline]
2258 fn shr_assign(&mut self, rhs: i64) {
2259 *self = self.shr(rhs);
2260 }
2261}
2262
2263impl ShrAssign<&i64> for I64Vec2 {
2264 #[inline]
2265 fn shr_assign(&mut self, rhs: &i64) {
2266 self.shr_assign(*rhs);
2267 }
2268}
2269
2270impl Shl<u8> for I64Vec2 {
2271 type Output = Self;
2272 #[inline]
2273 fn shl(self, rhs: u8) -> Self::Output {
2274 Self {
2275 x: self.x.shl(rhs),
2276 y: self.y.shl(rhs),
2277 }
2278 }
2279}
2280
2281impl Shl<&u8> for I64Vec2 {
2282 type Output = Self;
2283 #[inline]
2284 fn shl(self, rhs: &u8) -> Self {
2285 self.shl(*rhs)
2286 }
2287}
2288
2289impl Shl<&u8> for &I64Vec2 {
2290 type Output = I64Vec2;
2291 #[inline]
2292 fn shl(self, rhs: &u8) -> I64Vec2 {
2293 (*self).shl(*rhs)
2294 }
2295}
2296
2297impl Shl<u8> for &I64Vec2 {
2298 type Output = I64Vec2;
2299 #[inline]
2300 fn shl(self, rhs: u8) -> I64Vec2 {
2301 (*self).shl(rhs)
2302 }
2303}
2304
2305impl ShlAssign<u8> for I64Vec2 {
2306 #[inline]
2307 fn shl_assign(&mut self, rhs: u8) {
2308 *self = self.shl(rhs);
2309 }
2310}
2311
2312impl ShlAssign<&u8> for I64Vec2 {
2313 #[inline]
2314 fn shl_assign(&mut self, rhs: &u8) {
2315 self.shl_assign(*rhs);
2316 }
2317}
2318
2319impl Shr<u8> for I64Vec2 {
2320 type Output = Self;
2321 #[inline]
2322 fn shr(self, rhs: u8) -> Self::Output {
2323 Self {
2324 x: self.x.shr(rhs),
2325 y: self.y.shr(rhs),
2326 }
2327 }
2328}
2329
2330impl Shr<&u8> for I64Vec2 {
2331 type Output = Self;
2332 #[inline]
2333 fn shr(self, rhs: &u8) -> Self {
2334 self.shr(*rhs)
2335 }
2336}
2337
2338impl Shr<&u8> for &I64Vec2 {
2339 type Output = I64Vec2;
2340 #[inline]
2341 fn shr(self, rhs: &u8) -> I64Vec2 {
2342 (*self).shr(*rhs)
2343 }
2344}
2345
2346impl Shr<u8> for &I64Vec2 {
2347 type Output = I64Vec2;
2348 #[inline]
2349 fn shr(self, rhs: u8) -> I64Vec2 {
2350 (*self).shr(rhs)
2351 }
2352}
2353
2354impl ShrAssign<u8> for I64Vec2 {
2355 #[inline]
2356 fn shr_assign(&mut self, rhs: u8) {
2357 *self = self.shr(rhs);
2358 }
2359}
2360
2361impl ShrAssign<&u8> for I64Vec2 {
2362 #[inline]
2363 fn shr_assign(&mut self, rhs: &u8) {
2364 self.shr_assign(*rhs);
2365 }
2366}
2367
2368impl Shl<u16> for I64Vec2 {
2369 type Output = Self;
2370 #[inline]
2371 fn shl(self, rhs: u16) -> Self::Output {
2372 Self {
2373 x: self.x.shl(rhs),
2374 y: self.y.shl(rhs),
2375 }
2376 }
2377}
2378
2379impl Shl<&u16> for I64Vec2 {
2380 type Output = Self;
2381 #[inline]
2382 fn shl(self, rhs: &u16) -> Self {
2383 self.shl(*rhs)
2384 }
2385}
2386
2387impl Shl<&u16> for &I64Vec2 {
2388 type Output = I64Vec2;
2389 #[inline]
2390 fn shl(self, rhs: &u16) -> I64Vec2 {
2391 (*self).shl(*rhs)
2392 }
2393}
2394
2395impl Shl<u16> for &I64Vec2 {
2396 type Output = I64Vec2;
2397 #[inline]
2398 fn shl(self, rhs: u16) -> I64Vec2 {
2399 (*self).shl(rhs)
2400 }
2401}
2402
2403impl ShlAssign<u16> for I64Vec2 {
2404 #[inline]
2405 fn shl_assign(&mut self, rhs: u16) {
2406 *self = self.shl(rhs);
2407 }
2408}
2409
2410impl ShlAssign<&u16> for I64Vec2 {
2411 #[inline]
2412 fn shl_assign(&mut self, rhs: &u16) {
2413 self.shl_assign(*rhs);
2414 }
2415}
2416
2417impl Shr<u16> for I64Vec2 {
2418 type Output = Self;
2419 #[inline]
2420 fn shr(self, rhs: u16) -> Self::Output {
2421 Self {
2422 x: self.x.shr(rhs),
2423 y: self.y.shr(rhs),
2424 }
2425 }
2426}
2427
2428impl Shr<&u16> for I64Vec2 {
2429 type Output = Self;
2430 #[inline]
2431 fn shr(self, rhs: &u16) -> Self {
2432 self.shr(*rhs)
2433 }
2434}
2435
2436impl Shr<&u16> for &I64Vec2 {
2437 type Output = I64Vec2;
2438 #[inline]
2439 fn shr(self, rhs: &u16) -> I64Vec2 {
2440 (*self).shr(*rhs)
2441 }
2442}
2443
2444impl Shr<u16> for &I64Vec2 {
2445 type Output = I64Vec2;
2446 #[inline]
2447 fn shr(self, rhs: u16) -> I64Vec2 {
2448 (*self).shr(rhs)
2449 }
2450}
2451
2452impl ShrAssign<u16> for I64Vec2 {
2453 #[inline]
2454 fn shr_assign(&mut self, rhs: u16) {
2455 *self = self.shr(rhs);
2456 }
2457}
2458
2459impl ShrAssign<&u16> for I64Vec2 {
2460 #[inline]
2461 fn shr_assign(&mut self, rhs: &u16) {
2462 self.shr_assign(*rhs);
2463 }
2464}
2465
2466impl Shl<u32> for I64Vec2 {
2467 type Output = Self;
2468 #[inline]
2469 fn shl(self, rhs: u32) -> Self::Output {
2470 Self {
2471 x: self.x.shl(rhs),
2472 y: self.y.shl(rhs),
2473 }
2474 }
2475}
2476
2477impl Shl<&u32> for I64Vec2 {
2478 type Output = Self;
2479 #[inline]
2480 fn shl(self, rhs: &u32) -> Self {
2481 self.shl(*rhs)
2482 }
2483}
2484
2485impl Shl<&u32> for &I64Vec2 {
2486 type Output = I64Vec2;
2487 #[inline]
2488 fn shl(self, rhs: &u32) -> I64Vec2 {
2489 (*self).shl(*rhs)
2490 }
2491}
2492
2493impl Shl<u32> for &I64Vec2 {
2494 type Output = I64Vec2;
2495 #[inline]
2496 fn shl(self, rhs: u32) -> I64Vec2 {
2497 (*self).shl(rhs)
2498 }
2499}
2500
2501impl ShlAssign<u32> for I64Vec2 {
2502 #[inline]
2503 fn shl_assign(&mut self, rhs: u32) {
2504 *self = self.shl(rhs);
2505 }
2506}
2507
2508impl ShlAssign<&u32> for I64Vec2 {
2509 #[inline]
2510 fn shl_assign(&mut self, rhs: &u32) {
2511 self.shl_assign(*rhs);
2512 }
2513}
2514
2515impl Shr<u32> for I64Vec2 {
2516 type Output = Self;
2517 #[inline]
2518 fn shr(self, rhs: u32) -> Self::Output {
2519 Self {
2520 x: self.x.shr(rhs),
2521 y: self.y.shr(rhs),
2522 }
2523 }
2524}
2525
2526impl Shr<&u32> for I64Vec2 {
2527 type Output = Self;
2528 #[inline]
2529 fn shr(self, rhs: &u32) -> Self {
2530 self.shr(*rhs)
2531 }
2532}
2533
2534impl Shr<&u32> for &I64Vec2 {
2535 type Output = I64Vec2;
2536 #[inline]
2537 fn shr(self, rhs: &u32) -> I64Vec2 {
2538 (*self).shr(*rhs)
2539 }
2540}
2541
2542impl Shr<u32> for &I64Vec2 {
2543 type Output = I64Vec2;
2544 #[inline]
2545 fn shr(self, rhs: u32) -> I64Vec2 {
2546 (*self).shr(rhs)
2547 }
2548}
2549
2550impl ShrAssign<u32> for I64Vec2 {
2551 #[inline]
2552 fn shr_assign(&mut self, rhs: u32) {
2553 *self = self.shr(rhs);
2554 }
2555}
2556
2557impl ShrAssign<&u32> for I64Vec2 {
2558 #[inline]
2559 fn shr_assign(&mut self, rhs: &u32) {
2560 self.shr_assign(*rhs);
2561 }
2562}
2563
2564impl Shl<u64> for I64Vec2 {
2565 type Output = Self;
2566 #[inline]
2567 fn shl(self, rhs: u64) -> Self::Output {
2568 Self {
2569 x: self.x.shl(rhs),
2570 y: self.y.shl(rhs),
2571 }
2572 }
2573}
2574
2575impl Shl<&u64> for I64Vec2 {
2576 type Output = Self;
2577 #[inline]
2578 fn shl(self, rhs: &u64) -> Self {
2579 self.shl(*rhs)
2580 }
2581}
2582
2583impl Shl<&u64> for &I64Vec2 {
2584 type Output = I64Vec2;
2585 #[inline]
2586 fn shl(self, rhs: &u64) -> I64Vec2 {
2587 (*self).shl(*rhs)
2588 }
2589}
2590
2591impl Shl<u64> for &I64Vec2 {
2592 type Output = I64Vec2;
2593 #[inline]
2594 fn shl(self, rhs: u64) -> I64Vec2 {
2595 (*self).shl(rhs)
2596 }
2597}
2598
2599impl ShlAssign<u64> for I64Vec2 {
2600 #[inline]
2601 fn shl_assign(&mut self, rhs: u64) {
2602 *self = self.shl(rhs);
2603 }
2604}
2605
2606impl ShlAssign<&u64> for I64Vec2 {
2607 #[inline]
2608 fn shl_assign(&mut self, rhs: &u64) {
2609 self.shl_assign(*rhs);
2610 }
2611}
2612
2613impl Shr<u64> for I64Vec2 {
2614 type Output = Self;
2615 #[inline]
2616 fn shr(self, rhs: u64) -> Self::Output {
2617 Self {
2618 x: self.x.shr(rhs),
2619 y: self.y.shr(rhs),
2620 }
2621 }
2622}
2623
2624impl Shr<&u64> for I64Vec2 {
2625 type Output = Self;
2626 #[inline]
2627 fn shr(self, rhs: &u64) -> Self {
2628 self.shr(*rhs)
2629 }
2630}
2631
2632impl Shr<&u64> for &I64Vec2 {
2633 type Output = I64Vec2;
2634 #[inline]
2635 fn shr(self, rhs: &u64) -> I64Vec2 {
2636 (*self).shr(*rhs)
2637 }
2638}
2639
2640impl Shr<u64> for &I64Vec2 {
2641 type Output = I64Vec2;
2642 #[inline]
2643 fn shr(self, rhs: u64) -> I64Vec2 {
2644 (*self).shr(rhs)
2645 }
2646}
2647
2648impl ShrAssign<u64> for I64Vec2 {
2649 #[inline]
2650 fn shr_assign(&mut self, rhs: u64) {
2651 *self = self.shr(rhs);
2652 }
2653}
2654
2655impl ShrAssign<&u64> for I64Vec2 {
2656 #[inline]
2657 fn shr_assign(&mut self, rhs: &u64) {
2658 self.shr_assign(*rhs);
2659 }
2660}
2661
2662impl Shl<IVec2> for I64Vec2 {
2663 type Output = Self;
2664 #[inline]
2665 fn shl(self, rhs: IVec2) -> Self {
2666 Self {
2667 x: self.x.shl(rhs.x),
2668 y: self.y.shl(rhs.y),
2669 }
2670 }
2671}
2672
2673impl Shl<&IVec2> for I64Vec2 {
2674 type Output = Self;
2675 #[inline]
2676 fn shl(self, rhs: &IVec2) -> Self {
2677 self.shl(*rhs)
2678 }
2679}
2680
2681impl Shl<&IVec2> for &I64Vec2 {
2682 type Output = I64Vec2;
2683 #[inline]
2684 fn shl(self, rhs: &IVec2) -> I64Vec2 {
2685 (*self).shl(*rhs)
2686 }
2687}
2688
2689impl Shl<IVec2> for &I64Vec2 {
2690 type Output = I64Vec2;
2691 #[inline]
2692 fn shl(self, rhs: IVec2) -> I64Vec2 {
2693 (*self).shl(rhs)
2694 }
2695}
2696
2697impl Shr<IVec2> for I64Vec2 {
2698 type Output = Self;
2699 #[inline]
2700 fn shr(self, rhs: IVec2) -> Self {
2701 Self {
2702 x: self.x.shr(rhs.x),
2703 y: self.y.shr(rhs.y),
2704 }
2705 }
2706}
2707
2708impl Shr<&IVec2> for I64Vec2 {
2709 type Output = Self;
2710 #[inline]
2711 fn shr(self, rhs: &IVec2) -> Self {
2712 self.shr(*rhs)
2713 }
2714}
2715
2716impl Shr<&IVec2> for &I64Vec2 {
2717 type Output = I64Vec2;
2718 #[inline]
2719 fn shr(self, rhs: &IVec2) -> I64Vec2 {
2720 (*self).shr(*rhs)
2721 }
2722}
2723
2724impl Shr<IVec2> for &I64Vec2 {
2725 type Output = I64Vec2;
2726 #[inline]
2727 fn shr(self, rhs: IVec2) -> I64Vec2 {
2728 (*self).shr(rhs)
2729 }
2730}
2731
2732impl Shl<UVec2> for I64Vec2 {
2733 type Output = Self;
2734 #[inline]
2735 fn shl(self, rhs: UVec2) -> Self {
2736 Self {
2737 x: self.x.shl(rhs.x),
2738 y: self.y.shl(rhs.y),
2739 }
2740 }
2741}
2742
2743impl Shl<&UVec2> for I64Vec2 {
2744 type Output = Self;
2745 #[inline]
2746 fn shl(self, rhs: &UVec2) -> Self {
2747 self.shl(*rhs)
2748 }
2749}
2750
2751impl Shl<&UVec2> for &I64Vec2 {
2752 type Output = I64Vec2;
2753 #[inline]
2754 fn shl(self, rhs: &UVec2) -> I64Vec2 {
2755 (*self).shl(*rhs)
2756 }
2757}
2758
2759impl Shl<UVec2> for &I64Vec2 {
2760 type Output = I64Vec2;
2761 #[inline]
2762 fn shl(self, rhs: UVec2) -> I64Vec2 {
2763 (*self).shl(rhs)
2764 }
2765}
2766
2767impl Shr<UVec2> for I64Vec2 {
2768 type Output = Self;
2769 #[inline]
2770 fn shr(self, rhs: UVec2) -> Self {
2771 Self {
2772 x: self.x.shr(rhs.x),
2773 y: self.y.shr(rhs.y),
2774 }
2775 }
2776}
2777
2778impl Shr<&UVec2> for I64Vec2 {
2779 type Output = Self;
2780 #[inline]
2781 fn shr(self, rhs: &UVec2) -> Self {
2782 self.shr(*rhs)
2783 }
2784}
2785
2786impl Shr<&UVec2> for &I64Vec2 {
2787 type Output = I64Vec2;
2788 #[inline]
2789 fn shr(self, rhs: &UVec2) -> I64Vec2 {
2790 (*self).shr(*rhs)
2791 }
2792}
2793
2794impl Shr<UVec2> for &I64Vec2 {
2795 type Output = I64Vec2;
2796 #[inline]
2797 fn shr(self, rhs: UVec2) -> I64Vec2 {
2798 (*self).shr(rhs)
2799 }
2800}
2801
2802impl Index<usize> for I64Vec2 {
2803 type Output = i64;
2804 #[inline]
2805 fn index(&self, index: usize) -> &Self::Output {
2806 match index {
2807 0 => &self.x,
2808 1 => &self.y,
2809 _ => panic!("index out of bounds"),
2810 }
2811 }
2812}
2813
2814impl IndexMut<usize> for I64Vec2 {
2815 #[inline]
2816 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
2817 match index {
2818 0 => &mut self.x,
2819 1 => &mut self.y,
2820 _ => panic!("index out of bounds"),
2821 }
2822 }
2823}
2824
2825impl fmt::Display for I64Vec2 {
2826 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2827 write!(f, "[{}, {}]", self.x, self.y)
2828 }
2829}
2830
2831impl fmt::Debug for I64Vec2 {
2832 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2833 fmt.debug_tuple(stringify!(I64Vec2))
2834 .field(&self.x)
2835 .field(&self.y)
2836 .finish()
2837 }
2838}
2839
2840impl From<[i64; 2]> for I64Vec2 {
2841 #[inline]
2842 fn from(a: [i64; 2]) -> Self {
2843 Self::new(a[0], a[1])
2844 }
2845}
2846
2847impl From<I64Vec2> for [i64; 2] {
2848 #[inline]
2849 fn from(v: I64Vec2) -> Self {
2850 [v.x, v.y]
2851 }
2852}
2853
2854impl From<(i64, i64)> for I64Vec2 {
2855 #[inline]
2856 fn from(t: (i64, i64)) -> Self {
2857 Self::new(t.0, t.1)
2858 }
2859}
2860
2861impl From<I64Vec2> for (i64, i64) {
2862 #[inline]
2863 fn from(v: I64Vec2) -> Self {
2864 (v.x, v.y)
2865 }
2866}
2867
2868impl From<I8Vec2> for I64Vec2 {
2869 #[inline]
2870 fn from(v: I8Vec2) -> Self {
2871 Self::new(i64::from(v.x), i64::from(v.y))
2872 }
2873}
2874
2875impl From<U8Vec2> for I64Vec2 {
2876 #[inline]
2877 fn from(v: U8Vec2) -> Self {
2878 Self::new(i64::from(v.x), i64::from(v.y))
2879 }
2880}
2881
2882impl From<I16Vec2> for I64Vec2 {
2883 #[inline]
2884 fn from(v: I16Vec2) -> Self {
2885 Self::new(i64::from(v.x), i64::from(v.y))
2886 }
2887}
2888
2889impl From<U16Vec2> for I64Vec2 {
2890 #[inline]
2891 fn from(v: U16Vec2) -> Self {
2892 Self::new(i64::from(v.x), i64::from(v.y))
2893 }
2894}
2895
2896impl From<IVec2> for I64Vec2 {
2897 #[inline]
2898 fn from(v: IVec2) -> Self {
2899 Self::new(i64::from(v.x), i64::from(v.y))
2900 }
2901}
2902
2903impl From<UVec2> for I64Vec2 {
2904 #[inline]
2905 fn from(v: UVec2) -> Self {
2906 Self::new(i64::from(v.x), i64::from(v.y))
2907 }
2908}
2909
2910impl TryFrom<U64Vec2> for I64Vec2 {
2911 type Error = core::num::TryFromIntError;
2912
2913 #[inline]
2914 fn try_from(v: U64Vec2) -> Result<Self, Self::Error> {
2915 Ok(Self::new(i64::try_from(v.x)?, i64::try_from(v.y)?))
2916 }
2917}
2918
2919impl TryFrom<USizeVec2> for I64Vec2 {
2920 type Error = core::num::TryFromIntError;
2921
2922 #[inline]
2923 fn try_from(v: USizeVec2) -> Result<Self, Self::Error> {
2924 Ok(Self::new(i64::try_from(v.x)?, i64::try_from(v.y)?))
2925 }
2926}
2927
2928impl From<BVec2> for I64Vec2 {
2929 #[inline]
2930 fn from(v: BVec2) -> Self {
2931 Self::new(i64::from(v.x), i64::from(v.y))
2932 }
2933}