rustc_codegen_spirv/builder/intrinsics.rs

// HACK(eddyb) avoids rewriting all of the imports (see `lib.rs` and `build.rs`).
use crate::maybe_pqp_cg_ssa as rustc_codegen_ssa;

use super::Builder;
use crate::abi::ConvSpirvType;
use crate::builder_spirv::{SpirvValue, SpirvValueExt};
use crate::codegen_cx::CodegenCx;
use crate::custom_insts::CustomInst;
use crate::spirv_type::SpirvType;
use rspirv::dr::Operand;
use rspirv::spirv::GLOp;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{BuilderMethods, IntrinsicCallBuilderMethods};
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::{FnDef, Instance, Ty, TyKind, TypingEnv};
use rustc_middle::{bug, ty};
use rustc_span::Span;
use rustc_span::sym;

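/// Returns `(bit_width, is_signed)` for an integer type, falling back to the
/// target's pointer width for `isize`/`usize` (whose `bit_width()` is `None`).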
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        TyKind::Int(t) => Some((
            t.bit_width()
                .unwrap_or(cx.tcx.sess.target.pointer_width as u64),
            true,
        )),
        TyKind::Uint(t) => Some((
            t.bit_width()
                .unwrap_or(cx.tcx.sess.target.pointer_width as u64),
            false,
        )),
        _ => None,
    }
}

impl Builder<'_, '_> {
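    /// Implements `copysign` by bit manipulation: the result takes its value
    /// bits from `val` and its sign bit from `sign`, i.e.
    /// `(val & !SIGN_BIT) | (sign & SIGN_BIT)`, with both floats bitcast to an
    /// unsigned integer of the same width for the masking.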
    pub fn copysign(&mut self, val: SpirvValue, sign: SpirvValue) -> SpirvValue {
        let width = match self.lookup_type(val.ty) {
            SpirvType::Float(width) => width,
            other => bug!(
                "copysign must have float argument, not {}",
                other.debug(val.ty, self)
            ),
        };
        let int_ty = SpirvType::Integer(width, false).def(self.span(), self);
        let [mask_sign, mask_value] = {
            let sign_bit = 1u128.checked_shl(width - 1).unwrap();
            let value_mask = sign_bit - 1;
            [sign_bit, value_mask].map(|v| self.constant_int(int_ty, v))
        };
        let val_bits = self.bitcast(val, int_ty);
        let sign_bits = self.bitcast(sign, int_ty);
        let val_masked = self.and(val_bits, mask_value);
        let sign_masked = self.and(sign_bits, mask_sign);
        let result_bits = self.or(val_masked, sign_masked);
        self.bitcast(result_bits, val.ty)
    }
}

impl<'a, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: Instance<'tcx>,
        args: &[OperandRef<'tcx, Self::Value>],
        result: PlaceRef<'tcx, Self::Value>,
        _span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let callee_ty = instance.ty(self.tcx, TypingEnv::fully_monomorphized());

        let (def_id, fn_args) = match *callee_ty.kind() {
            FnDef(def_id, fn_args) => (def_id, fn_args),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(self.tcx);
        let sig = self
            .tcx
            .normalize_erasing_late_bound_regions(TypingEnv::fully_monomorphized(), sig);
        let arg_tys = sig.inputs();
        let name = self.tcx.item_name(def_id);

        let ret_ty = self.layout_of(sig.output()).spirv_type(self.span(), self);

        let value = match name {
            sym::likely | sym::unlikely => {
                // Ignore these for now.
                args[0].immediate()
            }

            sym::breakpoint => {
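                // SPIR-V has no breakpoint/trap instruction, so the closest
                // available behavior is to treat a breakpoint as an abort.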
                self.abort();
                assert!(result.layout.ty.is_unit());
                return Ok(());
            }

            sym::volatile_load | sym::unaligned_volatile_load => {
                let ptr = args[0].immediate();
                let layout = self.layout_of(fn_args.type_at(0));
                let load = self.volatile_load(layout.spirv_type(self.span(), self), ptr);
                if !result.layout.is_zst() {
                    self.store(load, result.val.llval, result.val.align);
                }
                return Ok(());
            }

            sym::prefetch_read_data
            | sym::prefetch_write_data
            | sym::prefetch_read_instruction
            | sym::prefetch_write_instruction => {
                // ignore
                assert!(result.layout.ty.is_unit());
                return Ok(());
            }

            sym::saturating_add => {
                assert_eq!(arg_tys[0], arg_tys[1]);
                let result = match arg_tys[0].kind() {
                    TyKind::Int(_) | TyKind::Uint(_) => {
                        self.add(args[0].immediate(), args[1].immediate())
                    }
                    TyKind::Float(_) => self.fadd(args[0].immediate(), args[1].immediate()),
                    other => self.fatal(format!(
                        "Unimplemented saturating_add intrinsic type: {other:#?}"
                    )),
                };
                // TODO: Implement this
                self.zombie(result.def(self), "saturating_add is not implemented yet");
                result
            }
            sym::saturating_sub => {
                assert_eq!(arg_tys[0], arg_tys[1]);
                let result = match arg_tys[0].kind() {
                    TyKind::Int(_) | TyKind::Uint(_) => {
                        self.sub(args[0].immediate(), args[1].immediate())
                    }
                    TyKind::Float(_) => self.fsub(args[0].immediate(), args[1].immediate()),
                    other => self.fatal(format!(
                        "Unimplemented saturating_sub intrinsic type: {other:#?}"
                    )),
                };
                // TODO: Implement this
                self.zombie(result.def(self), "saturating_sub is not implemented yet");
                result
            }

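            // Most of the float intrinsics below map directly onto
            // GLSL.std.450 extended instructions via `gl_op`.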
            sym::sqrtf32 | sym::sqrtf64 | sym::sqrtf128 => {
                self.gl_op(GLOp::Sqrt, ret_ty, [args[0].immediate()])
            }
            sym::powif32 | sym::powif64 | sym::powif128 => {
                let float = self.sitofp(args[1].immediate(), args[0].immediate().ty);
                self.gl_op(GLOp::Pow, ret_ty, [args[0].immediate(), float])
            }
            sym::sinf32 | sym::sinf64 | sym::sinf128 => {
                self.gl_op(GLOp::Sin, ret_ty, [args[0].immediate()])
            }
            sym::cosf32 | sym::cosf64 | sym::cosf128 => {
                self.gl_op(GLOp::Cos, ret_ty, [args[0].immediate()])
            }
            sym::powf32 | sym::powf64 | sym::powf128 => self.gl_op(
                GLOp::Pow,
                ret_ty,
                [args[0].immediate(), args[1].immediate()],
            ),
            sym::expf32 | sym::expf64 | sym::expf128 => {
                self.gl_op(GLOp::Exp, ret_ty, [args[0].immediate()])
            }
            sym::exp2f32 | sym::exp2f64 | sym::exp2f128 => {
                self.gl_op(GLOp::Exp2, ret_ty, [args[0].immediate()])
            }
            sym::logf32 | sym::logf64 | sym::logf128 => {
                self.gl_op(GLOp::Log, ret_ty, [args[0].immediate()])
            }
            sym::log2f32 | sym::log2f64 | sym::log2f128 => {
                self.gl_op(GLOp::Log2, ret_ty, [args[0].immediate()])
            }
            sym::log10f32 | sym::log10f64 | sym::log10f128 => {
                // The GLSL.std.450 instruction set has no log10, so use
                // log10(x) == (1 / ln(10)) * ln(x)
                let mul = self.constant_float(args[0].immediate().ty, 1.0 / 10.0f64.ln());
                let ln = self.gl_op(GLOp::Log, ret_ty, [args[0].immediate()]);
                self.fmul(mul, ln)
            }
            sym::fmaf32 | sym::fmaf64 | sym::fmaf128 => self.gl_op(
                GLOp::Fma,
                ret_ty,
                [
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                ],
            ),
            sym::fabsf32 | sym::fabsf64 | sym::fabsf128 => {
                self.gl_op(GLOp::FAbs, ret_ty, [args[0].immediate()])
            }
            sym::minnumf32 | sym::minnumf64 | sym::minnumf128 => self.gl_op(
                GLOp::FMin,
                ret_ty,
                [args[0].immediate(), args[1].immediate()],
            ),
            sym::maxnumf32 | sym::maxnumf64 | sym::maxnumf128 => self.gl_op(
                GLOp::FMax,
                ret_ty,
                [args[0].immediate(), args[1].immediate()],
            ),
            sym::copysignf32 | sym::copysignf64 | sym::copysignf128 => {
                let val = args[0].immediate();
                let sign = args[1].immediate();
                self.copysign(val, sign)
            }
            sym::floorf32 | sym::floorf64 | sym::floorf128 => {
                self.gl_op(GLOp::Floor, ret_ty, [args[0].immediate()])
            }
            sym::ceilf32 | sym::ceilf64 | sym::ceilf128 => {
                self.gl_op(GLOp::Ceil, ret_ty, [args[0].immediate()])
            }
            sym::truncf32 | sym::truncf64 | sym::truncf128 => {
                self.gl_op(GLOp::Trunc, ret_ty, [args[0].immediate()])
            }
            sym::round_ties_even_f32 | sym::round_ties_even_f64 | sym::round_ties_even_f128 => {
                self.gl_op(GLOp::RoundEven, ret_ty, [args[0].immediate()])
            }
            sym::roundf32 | sym::roundf64 | sym::roundf128 => {
                self.gl_op(GLOp::Round, ret_ty, [args[0].immediate()])
            }

            sym::rotate_left | sym::rotate_right => {
                let is_left = name == sym::rotate_left;
                let val = args[0].immediate();
                let shift = args[1].immediate();
                self.rotate(val, shift, is_left)
            }

            sym::ctlz => self.count_leading_trailing_zeros(args[0].immediate(), false, false),
            sym::ctlz_nonzero => {
                self.count_leading_trailing_zeros(args[0].immediate(), false, true)
            }
            sym::cttz => self.count_leading_trailing_zeros(args[0].immediate(), true, false),
            sym::cttz_nonzero => self.count_leading_trailing_zeros(args[0].immediate(), true, true),

            sym::ctpop => self.count_ones(args[0].immediate()),
            sym::bitreverse => self.bit_reverse(args[0].immediate()),
            sym::black_box => {
                // TODO(LegNeato): do something more sophisticated to prevent DCE
                self.tcx
                    .dcx()
                    .warn("black_box intrinsic does not prevent optimization in Rust GPU");

                let layout = self.layout_of(arg_tys[0]);
                let llty = layout.spirv_type(self.span(), self);

                match args[0].val {
                    // Scalars pass through unchanged
                    OperandValue::Immediate(v) => v,
                    // Pack scalar pairs to a single SSA aggregate
                    OperandValue::Pair(..) => args[0].immediate_or_packed_pair(self),
                    // Lvalues get loaded
                    OperandValue::Ref(place) => self.load(llty, place.llval, place.align),
                    // ZSTs become undef of the right type
                    OperandValue::ZeroSized => self.undef(llty),
                }
            }
            sym::bswap => {
                // https://github.com/KhronosGroup/SPIRV-LLVM/pull/221/files
                // TODO: Definitely add tests to make sure this impl is right.
                let arg = args[0].immediate();
                let (width, is_signed) = int_type_width_signed(arg_tys[0], self)
                    .expect("bswap must have an integer argument");

                // Cast to unsigned type for byte-swapping
                let unsigned_ty: u32 =
                    SpirvType::Integer(width.try_into().unwrap(), false).def(self.span(), self);
                let unsigned_arg = if is_signed {
                    self.bitcast(arg, unsigned_ty)
                } else {
                    arg
                };

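                // Swap bytes by shifting each byte into its mirrored position,
                // masking off everything else, and OR-ing the pieces together,
                // e.g. for 32 bits:
                // (x << 24) | ((x << 8) & 0xFF0000) | ((x >> 8) & 0xFF00) | (x >> 24)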
                let swapped = match width {
                    8 => unsigned_arg,
                    16 => {
                        let offset8 = self.constant_u16(self.span(), 8);
                        let tmp1 = self.shl(unsigned_arg, offset8);
                        let tmp2 = self.lshr(unsigned_arg, offset8);
                        self.or(tmp1, tmp2)
                    }
                    32 => {
                        let offset8 = self.constant_u32(self.span(), 8);
                        let offset24 = self.constant_u32(self.span(), 24);
                        let mask16 = self.constant_u32(self.span(), 0xFF00);
                        let mask24 = self.constant_u32(self.span(), 0xFF0000);
                        let tmp4 = self.shl(unsigned_arg, offset24);
                        let tmp3 = self.shl(unsigned_arg, offset8);
                        let tmp2 = self.lshr(unsigned_arg, offset8);
                        let tmp1 = self.lshr(unsigned_arg, offset24);
                        let tmp3 = self.and(tmp3, mask24);
                        let tmp2 = self.and(tmp2, mask16);
                        let res1 = self.or(tmp1, tmp2);
                        let res2 = self.or(tmp3, tmp4);
                        self.or(res1, res2)
                    }
                    64 => {
                        let offset8 = self.constant_u64(self.span(), 8);
                        let offset24 = self.constant_u64(self.span(), 24);
                        let offset40 = self.constant_u64(self.span(), 40);
                        let offset56 = self.constant_u64(self.span(), 56);
                        let mask16 = self.constant_u64(self.span(), 0xff00);
                        let mask24 = self.constant_u64(self.span(), 0xff0000);
                        let mask32 = self.constant_u64(self.span(), 0xff000000);
                        let mask40 = self.constant_u64(self.span(), 0xff00000000);
                        let mask48 = self.constant_u64(self.span(), 0xff0000000000);
                        let mask56 = self.constant_u64(self.span(), 0xff000000000000);
                        let tmp8 = self.shl(unsigned_arg, offset56);
                        let tmp7 = self.shl(unsigned_arg, offset40);
                        let tmp6 = self.shl(unsigned_arg, offset24);
                        let tmp5 = self.shl(unsigned_arg, offset8);
                        let tmp4 = self.lshr(unsigned_arg, offset8);
                        let tmp3 = self.lshr(unsigned_arg, offset24);
                        let tmp2 = self.lshr(unsigned_arg, offset40);
                        let tmp1 = self.lshr(unsigned_arg, offset56);
                        let tmp7 = self.and(tmp7, mask56);
                        let tmp6 = self.and(tmp6, mask48);
                        let tmp5 = self.and(tmp5, mask40);
                        let tmp4 = self.and(tmp4, mask32);
                        let tmp3 = self.and(tmp3, mask24);
                        let tmp2 = self.and(tmp2, mask16);
                        let res1 = self.or(tmp8, tmp7);
                        let res2 = self.or(tmp6, tmp5);
                        let res3 = self.or(tmp4, tmp3);
                        let res4 = self.or(tmp2, tmp1);
                        let res1 = self.or(res1, res2);
                        let res3 = self.or(res3, res4);
                        self.or(res1, res3)
                    }
                    other => self.undef_zombie(
                        ret_ty,
                        &format!("bswap not implemented for int width {other}"),
                    ),
                };

                // Cast back to the original signed type if necessary
                if is_signed {
                    self.bitcast(swapped, arg.ty)
                } else {
                    swapped
                }
            }

            sym::compare_bytes => self.undef_zombie(ret_ty, "memcmp not implemented"),

            _ => {
                // Call the fallback body instead of generating the intrinsic code
                return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
            }
        };

        if result.layout.ty.is_bool() {
            let val = self.from_immediate(value);
            self.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            // FIXME(eddyb) upstream uses `self.store_to_place(value, result.val);`,
            // which AFAICT does not handle packed pairs explicitly, meaning it
            // can/will store e.g. LLVM `{A, B}` values, which is legal (in LLVM),
            // but seems suboptimal (or even risky with e.g. layout randomization).
            OperandRef::from_immediate_or_packed_pair(self, value, result.layout)
                .val
                .store(self, result);
        }
        Ok(())
    }

    fn abort(&mut self) {
        self.abort_with_kind_and_message_debug_printf("abort", "intrinsics::abort() called", []);
    }

    // FIXME(eddyb) `assume` is not implemented atm, so all of its forms should
    // avoid computing its (potentially illegal) bool input in the first place.
    fn assume(&mut self, _val: Self::Value) {}

    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
        // TODO: llvm.expect
        cond
    }

    fn type_checked_load(
        &mut self,
        _llvtable: Self::Value,
        _vtable_byte_offset: u64,
        _typeid: Self::Metadata,
    ) -> Self::Value {
        todo!()
    }

    fn va_start(&mut self, _val: Self::Value) -> Self::Value {
        todo!()
    }

    fn va_end(&mut self, _val: Self::Value) -> Self::Value {
        todo!()
    }
}

impl Builder<'_, '_> {
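    /// Implements `ctpop` (`count_ones`) via `OpBitCount`, widening 8/16-bit
    /// values to 32 bits first; the result is always a `u32`, matching the
    /// intrinsic's return type.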
    pub fn count_ones(&mut self, arg: SpirvValue) -> SpirvValue {
        let ty = arg.ty;
        match self.cx.lookup_type(ty) {
            SpirvType::Integer(bits, false) => {
                let u32 = SpirvType::Integer(32, false).def(self.span(), self);

                match bits {
                    8 | 16 => {
                        let arg = arg.def(self);
                        let arg = self.emit().u_convert(u32, None, arg).unwrap();
                        self.emit().bit_count(u32, None, arg).unwrap()
                    }
                    32 => self.emit().bit_count(u32, None, arg.def(self)).unwrap(),
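                    // Split the u64 into two 32-bit halves, popcount each
                    // half, and add the two counts.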
                    64 => {
                        let u32_32 = self.constant_u32(self.span(), 32).def(self);
                        let arg = arg.def(self);
                        let lower = self.emit().u_convert(u32, None, arg).unwrap();
                        let higher = self
                            .emit()
                            .shift_right_logical(ty, None, arg, u32_32)
                            .unwrap();
                        let higher = self.emit().u_convert(u32, None, higher).unwrap();

                        let lower_bits = self.emit().bit_count(u32, None, lower).unwrap();
                        let higher_bits = self.emit().bit_count(u32, None, higher).unwrap();
                        self.emit()
                            .i_add(u32, None, lower_bits, higher_bits)
                            .unwrap()
                    }
                    _ => {
                        return self.undef_zombie(
                            ty,
                            &format!("count_ones() on unsupported {ty:?} bit integer type"),
                        );
                    }
                }
                .with_type(u32)
            }
            _ => self.fatal(format!(
                "count_ones() expected an unsigned integer type, got {:?}",
                self.cx.lookup_type(ty)
            )),
        }
    }

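    /// Implements `bitreverse` via `OpBitReverse`. 8/16-bit values are widened
    /// to 32 bits, reversed, then shifted back down; 64-bit values are split
    /// into 32-bit halves that are reversed individually and swapped.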
    pub fn bit_reverse(&mut self, arg: SpirvValue) -> SpirvValue {
        let ty = arg.ty;
        match self.cx.lookup_type(ty) {
            SpirvType::Integer(bits, false) => {
                let u32 = SpirvType::Integer(32, false).def(self.span(), self);
                let uint = SpirvType::Integer(bits, false).def(self.span(), self);

                match bits {
                    8 | 16 => {
                        let arg = arg.def(self);
                        let arg = self.emit().u_convert(u32, None, arg).unwrap();

                        let reverse = self.emit().bit_reverse(u32, None, arg).unwrap();
                        let shift = self.constant_u32(self.span(), 32 - bits).def(self);
                        let reverse = self
                            .emit()
                            .shift_right_logical(u32, None, reverse, shift)
                            .unwrap();
                        self.emit().u_convert(uint, None, reverse).unwrap()
                    }
                    32 => self.emit().bit_reverse(u32, None, arg.def(self)).unwrap(),
                    64 => {
                        let u32_32 = self.constant_u32(self.span(), 32).def(self);
                        let arg = arg.def(self);
                        let lower = self.emit().u_convert(u32, None, arg).unwrap();
                        let higher = self
                            .emit()
                            .shift_right_logical(ty, None, arg, u32_32)
                            .unwrap();
                        let higher = self.emit().u_convert(u32, None, higher).unwrap();

                        // Note that `higher` and `lower` are swapped: reversing
                        // the lower half produces the upper half of the result,
                        // and vice versa.
                        let higher_bits = self.emit().bit_reverse(u32, None, lower).unwrap();
                        let lower_bits = self.emit().bit_reverse(u32, None, higher).unwrap();

                        let higher_bits = self.emit().u_convert(uint, None, higher_bits).unwrap();
                        let higher_bits = self
                            .emit()
                            .shift_left_logical(uint, None, higher_bits, u32_32)
                            .unwrap();
                        let lower_bits = self.emit().u_convert(uint, None, lower_bits).unwrap();

                        self.emit()
                            .bitwise_or(ty, None, lower_bits, higher_bits)
                            .unwrap()
                    }
                    _ => {
                        return self.undef_zombie(
                            ty,
                            &format!("bit_reverse() on unsupported {ty:?} bit integer type"),
                        );
                    }
                }
                .with_type(ty)
            }
            _ => self.fatal(format!(
                "bit_reverse() expected an unsigned integer type, got {:?}",
                self.cx.lookup_type(ty)
            )),
        }
    }

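    /// Implements `ctlz`/`cttz` (and their `_nonzero` variants) using the
    /// GLSL.std.450 `FindILsb`/`FindUMsb` instructions, which only operate on
    /// 32-bit values: narrower types are widened first, and 64-bit values are
    /// handled per 32-bit half with a select between them (the `offset`
    /// parameter of `find_xsb` accounts for the width difference). When
    /// `non_zero` is false, a final select maps a zero input to `bits`.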
    pub fn count_leading_trailing_zeros(
        &mut self,
        arg: SpirvValue,
        trailing: bool,
        non_zero: bool,
    ) -> SpirvValue {
        let ty = arg.ty;
        match self.cx.lookup_type(ty) {
            SpirvType::Integer(bits, false) => {
                let bool = SpirvType::Bool.def(self.span(), self);
                let u32 = SpirvType::Integer(32, false).def(self.span(), self);

                let glsl = self.ext_inst.borrow_mut().import_glsl(self);
                let find_xsb = |this: &mut Self, arg, offset: i32| {
                    if trailing {
                        let lsb = this
                            .emit()
                            .ext_inst(
                                u32,
                                None,
                                glsl,
                                GLOp::FindILsb as u32,
                                [Operand::IdRef(arg)],
                            )
                            .unwrap();
                        if offset == 0 {
                            lsb
                        } else {
                            let const_offset = this.constant_i32(this.span(), offset).def(this);
                            this.emit().i_add(u32, None, const_offset, lsb).unwrap()
                        }
                    } else {
                        // The value is always unsigned at this point (signed
                        // integers are bitcast to unsigned before recursing),
                        // so use FindUMsb.
                        let msb_bit = this
                            .emit()
                            .ext_inst(
                                u32,
                                None,
                                glsl,
                                GLOp::FindUMsb as u32,
                                [Operand::IdRef(arg)],
                            )
                            .unwrap();
                        // FindUMsb returns the index of the most significant set bit,
                        // not the number of leading zeros of this u32:
                        // leading zeros = 31 - msb index
                        let const_offset = this.constant_i32(this.span(), 31 - offset).def(this);
                        this.emit().i_sub(u32, None, const_offset, msb_bit).unwrap()
                    }
                };

                let converted = match bits {
                    8 | 16 => {
                        let arg = self.emit().u_convert(u32, None, arg.def(self)).unwrap();
                        if trailing {
                            find_xsb(self, arg, 0)
                        } else {
                            find_xsb(self, arg, 32 - bits as i32)
                        }
                    }
                    32 => find_xsb(self, arg.def(self), 0),
                    64 => {
                        let u32_0 = self.constant_int(u32, 0).def(self);
                        let u32_32 = self.constant_u32(self.span(), 32).def(self);

                        let arg = arg.def(self);
                        let lower = self.emit().u_convert(u32, None, arg).unwrap();
                        let higher = self
                            .emit()
                            .shift_right_logical(ty, None, arg, u32_32)
                            .unwrap();
                        let higher = self.emit().u_convert(u32, None, higher).unwrap();

                        if trailing {
                            let use_lower = self.emit().i_equal(bool, None, lower, u32_0).unwrap();
                            let lower_bits = find_xsb(self, lower, 0);
                            let higher_bits = find_xsb(self, higher, 32);
                            self.emit()
                                .select(u32, None, use_lower, higher_bits, lower_bits)
                                .unwrap()
                        } else {
                            let use_higher =
                                self.emit().i_equal(bool, None, higher, u32_0).unwrap();
                            let lower_bits = find_xsb(self, lower, -32);
                            let higher_bits = find_xsb(self, higher, 0);
                            self.emit()
                                .select(u32, None, use_higher, lower_bits, higher_bits)
                                .unwrap()
                        }
                    }
                    _ => {
                        return self.undef_zombie(ty, &format!(
                            "count_leading_trailing_zeros() on unsupported {ty:?} bit integer type"
                        ));
                    }
                };

                if non_zero {
                    converted
                } else {
                    let int_0 = self.constant_int(ty, 0).def(self);
                    let int_bits = self.constant_int(u32, bits as u128).def(self);
                    let is_0 = self
                        .emit()
                        .i_equal(bool, None, arg.def(self), int_0)
                        .unwrap();
                    self.emit()
                        .select(u32, None, is_0, int_bits, converted)
                        .unwrap()
                }
                .with_type(u32)
            }
            SpirvType::Integer(bits, true) => {
                // rustc wants `[i8, i16, i32, i64]::leading_zeros()` with `non_zero: true`
                // for some reason. It is unclear how these are reachable, and marking them
                // as zombies made none of our compiletests fail.
                let unsigned = SpirvType::Integer(bits, false).def(self.span(), self);
                let arg = self
                    .emit()
                    .bitcast(unsigned, None, arg.def(self))
                    .unwrap()
                    .with_type(unsigned);
                let result = self.count_leading_trailing_zeros(arg, trailing, non_zero);
                self.emit()
                    .bitcast(ty, None, result.def(self))
                    .unwrap()
                    .with_type(ty)
            }
            e => {
                self.fatal(format!(
                    "count_leading_trailing_zeros(trailing: {trailing}, non_zero: {non_zero}) expected an integer type, got {e:?}",
                ));
            }
        }
    }

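    /// SPIR-V has no `abort`/`trap` instruction, so this emits our custom
    /// `Abort` instruction (a `kind` string plus a debugPrintf-style message),
    /// followed by `OpUnreachable`, then switches to a fresh dead block so
    /// callers can keep emitting instructions afterwards.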
    pub fn abort_with_kind_and_message_debug_printf(
        &mut self,
        kind: &str,
        message_debug_printf_fmt_str: impl Into<String>,
        message_debug_printf_args: impl IntoIterator<Item = SpirvValue>,
    ) {
        // FIXME(eddyb) this should be cached more efficiently.
        let void_ty = SpirvType::Void.def(rustc_span::DUMMY_SP, self);

        // HACK(eddyb) there is no `abort` or `trap` instruction in SPIR-V,
        // so the best thing we can do is use our own custom instruction.
        let kind_id = self.emit().string(kind);
        let message_debug_printf_fmt_str_id = self.emit().string(message_debug_printf_fmt_str);
        self.custom_inst(
            void_ty,
            CustomInst::Abort {
                kind: Operand::IdRef(kind_id),
                message_debug_printf: [message_debug_printf_fmt_str_id]
                    .into_iter()
                    .chain(
                        message_debug_printf_args
                            .into_iter()
                            .map(|arg| arg.def(self)),
                    )
                    .map(Operand::IdRef)
                    .collect(),
            },
        );
        self.unreachable();

        // HACK(eddyb) we still need an active block in case the user of this
        // `Builder` will continue to emit instructions after the `.abort()`.
        let post_abort_dead_bb = self.append_sibling_block("post_abort_dead");
        self.switch_to_block(post_abort_dead_bb);
    }
}