rustc_codegen_spirv/codegen_cx/constant.rs

// HACK(eddyb) avoids rewriting all of the imports (see `lib.rs` and `build.rs`).
use crate::maybe_pqp_cg_ssa as rustc_codegen_ssa;

use super::CodegenCx;
use crate::abi::ConvSpirvType;
use crate::builder_spirv::{SpirvConst, SpirvValue, SpirvValueExt, SpirvValueKind};
use crate::spirv_type::SpirvType;
use itertools::Itertools as _;
use rspirv::spirv::Word;
use rustc_abi::{self as abi, AddressSpace, Float, HasDataLayout, Integer, Primitive, Size};
use rustc_codegen_ssa::traits::{ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods};
use rustc_middle::mir::interpret::{AllocError, ConstAllocation, GlobalAlloc, Scalar, alloc_range};
use rustc_middle::ty::layout::LayoutOf;
use rustc_span::{DUMMY_SP, Span};

impl<'tcx> CodegenCx<'tcx> {
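    /// Define (or look up) a SPIR-V constant of type `ty` with value `val`.
    ///
    /// Constants are interned by the underlying builder, so repeated calls
    /// with the same `(ty, val)` pair should return the same `SpirvValue`.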
    pub fn def_constant(&self, ty: Word, val: SpirvConst<'_, 'tcx>) -> SpirvValue {
        self.builder.def_constant_cx(ty, val, self)
    }

    pub fn constant_u8(&self, span: Span, val: u8) -> SpirvValue {
        self.constant_int_from_native_unsigned(span, val)
    }

    pub fn constant_i8(&self, span: Span, val: i8) -> SpirvValue {
        self.constant_int_from_native_signed(span, val)
    }

    pub fn constant_i16(&self, span: Span, val: i16) -> SpirvValue {
        self.constant_int_from_native_signed(span, val)
    }

    pub fn constant_u16(&self, span: Span, val: u16) -> SpirvValue {
        self.constant_int_from_native_unsigned(span, val)
    }

    pub fn constant_i32(&self, span: Span, val: i32) -> SpirvValue {
        self.constant_int_from_native_signed(span, val)
    }

    pub fn constant_u32(&self, span: Span, val: u32) -> SpirvValue {
        self.constant_int_from_native_unsigned(span, val)
    }

    pub fn constant_i64(&self, span: Span, val: i64) -> SpirvValue {
        self.constant_int_from_native_signed(span, val)
    }

    pub fn constant_u64(&self, span: Span, val: u64) -> SpirvValue {
        self.constant_int_from_native_unsigned(span, val)
    }

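    /// Define an unsigned integer constant whose SPIR-V width matches the
    /// native Rust type of `val` (e.g. a `u16` argument becomes a 16-bit
    /// `OpTypeInt`): `size_of_val` sees the original type, before `.into()`
    /// widens the value itself to `u128`.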
    fn constant_int_from_native_unsigned(&self, span: Span, val: impl Into<u128>) -> SpirvValue {
        let size = Size::from_bytes(std::mem::size_of_val(&val));
        let ty = SpirvType::Integer(size.bits() as u32, false).def(span, self);
        self.constant_int(ty, val.into())
    }

    fn constant_int_from_native_signed(&self, span: Span, val: impl Into<i128>) -> SpirvValue {
        let size = Size::from_bytes(std::mem::size_of_val(&val));
        let ty = SpirvType::Integer(size.bits() as u32, true).def(span, self);
        self.constant_int(ty, val.into() as u128)
    }

    pub fn constant_int(&self, ty: Word, val: u128) -> SpirvValue {
        self.def_constant(ty, SpirvConst::Scalar(val))
    }

    pub fn constant_f32(&self, span: Span, val: f32) -> SpirvValue {
        let ty = SpirvType::Float(32).def(span, self);
        self.def_constant(ty, SpirvConst::Scalar(val.to_bits().into()))
    }

    pub fn constant_f64(&self, span: Span, val: f64) -> SpirvValue {
        let ty = SpirvType::Float(64).def(span, self);
        self.def_constant(ty, SpirvConst::Scalar(val.to_bits().into()))
    }

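    /// Define a float constant of an existing SPIR-V float type `ty`,
    /// narrowing the `f64` value with `as` when `ty` is 32-bit (so values
    /// beyond `f32` precision round, as usual for `as` casts).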
    pub fn constant_float(&self, ty: Word, val: f64) -> SpirvValue {
        match self.lookup_type(ty) {
            // FIXME(eddyb) use `rustc_apfloat` to support all float sizes.
            SpirvType::Float(32) => {
                self.def_constant(ty, SpirvConst::Scalar((val as f32).to_bits().into()))
            }
            SpirvType::Float(64) => self.def_constant(ty, SpirvConst::Scalar(val.to_bits().into())),
            other => self.tcx.dcx().fatal(format!(
                "constant_float does not support type {}",
                other.debug(ty, self)
            )),
        }
    }

    pub fn constant_bool(&self, span: Span, val: bool) -> SpirvValue {
        let ty = SpirvType::Bool.def(span, self);
        self.def_constant(ty, SpirvConst::Scalar(val as u128))
    }

    pub fn constant_composite(&self, ty: Word, fields: impl Iterator<Item = Word>) -> SpirvValue {
        // FIXME(eddyb) use `AccumulateVec`s just like `rustc` itself does.
        self.def_constant(ty, SpirvConst::Composite(&fields.collect::<Vec<_>>()))
    }

    pub fn constant_null(&self, ty: Word) -> SpirvValue {
        self.def_constant(ty, SpirvConst::Null)
    }

    pub fn undef(&self, ty: Word) -> SpirvValue {
        self.def_constant(ty, SpirvConst::Undef)
    }
}

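// `ConstCodegenMethods` is the `rustc_codegen_ssa` trait through which rustc
// requests constants from the backend; these mostly forward to the inherent
// helpers above, with `DUMMY_SP` since no span is available at this level.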
impl ConstCodegenMethods for CodegenCx<'_> {
    fn const_null(&self, t: Self::Type) -> Self::Value {
        self.constant_null(t)
    }
    fn const_undef(&self, ty: Self::Type) -> Self::Value {
        self.undef(ty)
    }
    fn const_poison(&self, ty: Self::Type) -> Self::Value {
        // No distinction between undef and poison.
        self.const_undef(ty)
    }
    fn const_int(&self, t: Self::Type, i: i64) -> Self::Value {
        self.constant_int(t, i as u128)
    }
    fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value {
        self.constant_int(t, i.into())
    }
    fn const_uint_big(&self, t: Self::Type, i: u128) -> Self::Value {
        self.constant_int(t, i)
    }
    fn const_bool(&self, val: bool) -> Self::Value {
        self.constant_bool(DUMMY_SP, val)
    }
    fn const_i8(&self, i: i8) -> Self::Value {
        self.constant_i8(DUMMY_SP, i)
    }
    fn const_i16(&self, i: i16) -> Self::Value {
        self.constant_i16(DUMMY_SP, i)
    }
    fn const_i32(&self, i: i32) -> Self::Value {
        self.constant_i32(DUMMY_SP, i)
    }
    fn const_u32(&self, i: u32) -> Self::Value {
        self.constant_u32(DUMMY_SP, i)
    }
    fn const_u64(&self, i: u64) -> Self::Value {
        self.constant_u64(DUMMY_SP, i)
    }
    fn const_u128(&self, i: u128) -> Self::Value {
        let ty = SpirvType::Integer(128, false).def(DUMMY_SP, self);
        self.const_uint_big(ty, i)
    }
    fn const_usize(&self, i: u64) -> Self::Value {
        let ptr_size = self.tcx.data_layout.pointer_size.bits() as u32;
        let t = SpirvType::Integer(ptr_size, false).def(DUMMY_SP, self);
        self.constant_int(t, i.into())
    }
    fn const_u8(&self, i: u8) -> Self::Value {
        self.constant_u8(DUMMY_SP, i)
    }
    fn const_real(&self, t: Self::Type, val: f64) -> Self::Value {
        self.constant_float(t, val)
    }

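    /// Lower a string literal to a `(pointer, length)` pair: the pointer is
    /// an interned `SpirvConst::PtrTo` over a byte-composite of the UTF-8
    /// contents, and the length is a pointer-sized integer. E.g.
    /// `const_str("ab")` should yield a pointer to the composite
    /// `[97u8, 98u8]` alongside a `usize` constant `2`.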
    fn const_str(&self, s: &str) -> (Self::Value, Self::Value) {
        let len = s.len();
        let str_ty = self
            .layout_of(self.tcx.types.str_)
            .spirv_type(DUMMY_SP, self);
        (
            self.def_constant(
                self.type_ptr_to(str_ty),
                SpirvConst::PtrTo {
                    pointee: self
                        .constant_composite(
                            str_ty,
                            s.bytes().map(|b| self.const_u8(b).def_cx(self)),
                        )
                        .def_cx(self),
                },
            ),
            self.const_usize(len as u64),
        )
    }
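    /// Build an anonymous struct constant from `elts`, with field offsets
    /// computed by `crate::abi::auto_struct_layout` (note that `_packed` is
    /// currently ignored).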
    fn const_struct(&self, elts: &[Self::Value], _packed: bool) -> Self::Value {
        // Presumably this will get bitcast to the right type?
        // FIXME(eddyb) use `AccumulateVec`s just like `rustc` itself does.
        let field_types = elts.iter().map(|f| f.ty).collect::<Vec<_>>();
        let (field_offsets, size, align) = crate::abi::auto_struct_layout(self, &field_types);
        let struct_ty = SpirvType::Adt {
            def_id: None,
            size,
            align,
            field_types: &field_types,
            field_offsets: &field_offsets,
            field_names: None,
        }
        .def(DUMMY_SP, self);
        self.constant_composite(struct_ty, elts.iter().map(|f| f.def_cx(self)))
    }
    fn const_vector(&self, elts: &[Self::Value]) -> Self::Value {
        let vector_ty = SpirvType::Vector {
            element: elts[0].ty,
            count: elts.len() as u32,
        }
        .def(DUMMY_SP, self);
        self.constant_composite(vector_ty, elts.iter().map(|elt| elt.def_cx(self)))
    }

    fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64> {
        self.builder.lookup_const_scalar(v)?.try_into().ok()
    }
    // FIXME(eddyb) what's the purpose of the `sign_ext` argument, and can it
    // differ from the signedness of `v`?
    fn const_to_opt_u128(&self, v: Self::Value, _sign_ext: bool) -> Option<u128> {
        self.builder.lookup_const_scalar(v)
    }

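    /// Lower a fully-evaluated `mir::interpret::Scalar` to a SPIR-V constant:
    /// plain bits become a `SpirvConst::Scalar`, while pointers are resolved
    /// through `tcx.global_alloc` to the address of their backing allocation
    /// (interned memory, function, vtable, or static), plus a constant byte
    /// offset.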
    fn scalar_to_backend(
        &self,
        scalar: Scalar,
        layout: abi::Scalar,
        ty: Self::Type,
    ) -> Self::Value {
        match scalar {
            Scalar::Int(int) => {
                assert_eq!(int.size(), layout.primitive().size(self));
                let data = int.to_uint(int.size());

                if let Primitive::Pointer(_) = layout.primitive() {
                    if data == 0 {
                        self.constant_null(ty)
                    } else {
                        let result = self.undef(ty);
                        self.zombie_no_span(
                            result.def_cx(self),
                            "pointer has non-null integer address",
                        );
                        result
                    }
                } else {
                    self.def_constant(ty, SpirvConst::Scalar(data))
                }
            }
            Scalar::Ptr(ptr, _) => {
                let (prov, offset) = ptr.into_parts();
                let alloc_id = prov.alloc_id();
                let (base_addr, _base_addr_space) = match self.tcx.global_alloc(alloc_id) {
                    GlobalAlloc::Memory(alloc) => {
                        let pointee = match self.lookup_type(ty) {
                            SpirvType::Pointer { pointee } => pointee,
                            other => self.tcx.dcx().fatal(format!(
                                "GlobalAlloc::Memory type not implemented: {}",
                                other.debug(ty, self)
                            )),
                        };
                        // FIXME(eddyb) always use `const_data_from_alloc`, and
                        // defer the actual `try_read_from_const_alloc` step.
                        let init = self
                            .try_read_from_const_alloc(alloc, pointee)
                            .unwrap_or_else(|| self.const_data_from_alloc(alloc));
                        let value = self.static_addr_of(init, alloc.inner().align, None);
                        (value, AddressSpace::DATA)
                    }
                    GlobalAlloc::Function { instance } => (
                        self.get_fn_addr(instance),
                        self.data_layout().instruction_address_space,
                    ),
                    GlobalAlloc::VTable(vty, dyn_ty) => {
                        let alloc = self
                            .tcx
                            .global_alloc(self.tcx.vtable_allocation((
                                vty,
                                dyn_ty.principal().map(|principal| {
                                    self.tcx.instantiate_bound_regions_with_erased(principal)
                                }),
                            )))
                            .unwrap_memory();
                        let pointee = match self.lookup_type(ty) {
                            SpirvType::Pointer { pointee } => pointee,
                            other => self.tcx.dcx().fatal(format!(
                                "GlobalAlloc::VTable type not implemented: {}",
                                other.debug(ty, self)
                            )),
                        };
                        // FIXME(eddyb) always use `const_data_from_alloc`, and
                        // defer the actual `try_read_from_const_alloc` step.
                        let init = self
                            .try_read_from_const_alloc(alloc, pointee)
                            .unwrap_or_else(|| self.const_data_from_alloc(alloc));
                        let value = self.static_addr_of(init, alloc.inner().align, None);
                        (value, AddressSpace::DATA)
                    }
                    GlobalAlloc::Static(def_id) => {
                        assert!(self.tcx.is_static(def_id));
                        assert!(!self.tcx.is_thread_local_static(def_id));
                        (self.get_static(def_id), AddressSpace::DATA)
                    }
                };
                self.const_bitcast(self.const_ptr_byte_offset(base_addr, offset), ty)
            }
        }
    }

    // HACK(eddyb) this uses a symbolic `ConstDataFromAlloc`, to allow deferring
    // the actual value generation until after a pointer to this value is cast
    // to its final type (e.g. the type it will be loaded as).
    // FIXME(eddyb) replace this with `qptr` handling of constant data.
    fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value {
        // HACK(eddyb) the `ConstCodegenMethods` trait no longer guarantees the
        // lifetime that `alloc` is interned for, but since it *is* interned,
        // we can cheaply recover it (see also the `ty::Lift` infrastructure).
        let alloc = self.tcx.lift(alloc).unwrap();

        let void_type = SpirvType::Void.def(DUMMY_SP, self);
        self.def_constant(void_type, SpirvConst::ConstDataFromAlloc(alloc))
    }

    fn const_ptr_byte_offset(&self, val: Self::Value, offset: Size) -> Self::Value {
        if offset == Size::ZERO {
            val
        } else {
            // FIXME(eddyb) implement via `OpSpecConstantOp`.
            // FIXME(eddyb) this zombies the original value without creating a new one.
            let result = val;
            self.zombie_no_span(result.def_cx(self), "const_ptr_byte_offset");
            result
        }
    }
}

impl<'tcx> CodegenCx<'tcx> {
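    /// Reinterpret the constant `val` as (pointer) type `ty`. Apart from the
    /// `const_data_from_alloc` special case below, an actual type change is
    /// not currently implementable and zombies the resulting value.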
    pub fn const_bitcast(&self, val: SpirvValue, ty: Word) -> SpirvValue {
        // HACK(eddyb) special-case `const_data_from_alloc` + `static_addr_of`
        // as the old `from_const_alloc` (now `OperandRef::from_const_alloc`).
        if let SpirvValueKind::IllegalConst(_) = val.kind
            && let Some(SpirvConst::PtrTo { pointee }) = self.builder.lookup_const(val)
            && let Some(SpirvConst::ConstDataFromAlloc(alloc)) =
                self.builder.lookup_const_by_id(pointee)
            && let SpirvType::Pointer { pointee } = self.lookup_type(ty)
            && let Some(init) = self.try_read_from_const_alloc(alloc, pointee)
        {
            return self.static_addr_of(init, alloc.inner().align, None);
        }

        if val.ty == ty {
            val
        } else {
            // FIXME(eddyb) implement via `OpSpecConstantOp`.
            // FIXME(eddyb) this zombies the original value without creating a new one.
            let result = val.def_cx(self).with_type(ty);
            self.zombie_no_span(result.def_cx(self), "const_bitcast");
            result
        }
    }

    // This function comes from `ty::layout`'s `layout_of_uncached`,
    // where it's named `scalar_unit`.
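    // E.g. for a 32-bit integer this yields a scalar with the full valid
    // range `0..=u32::MAX` (i.e. no niche).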
    pub fn primitive_to_scalar(&self, value: Primitive) -> abi::Scalar {
        let bits = value.size(self.data_layout()).bits();
        assert!(bits <= 128);
        abi::Scalar::Initialized {
            value,
            valid_range: abi::WrappingRange {
                start: 0,
                end: (!0 >> (128 - bits)),
            },
        }
    }

    /// Attempt to read a whole constant of type `ty` from `alloc`, returning
    /// it only if its size covers the entirety of `alloc`.
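    ///
    /// E.g. a `u32` read from a 4-byte allocation consumes it exactly and is
    /// returned, while a `u32` read from an 8-byte allocation yields `None`.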
    //
    // FIXME(eddyb) should this use something like `Result<_, PartialRead>`?
    pub fn try_read_from_const_alloc(
        &self,
        alloc: ConstAllocation<'tcx>,
        ty: Word,
    ) -> Option<SpirvValue> {
        let (result, read_size) = self.read_from_const_alloc_at(alloc, ty, Size::ZERO);
        (read_size == alloc.inner().size()).then_some(result)
    }

    // HACK(eddyb) the `Size` returned is the equivalent of `size_of_val` on
    // the returned constant: `ty.sizeof()` is either `Some(read_size)`, or
    // `None` (i.e. `ty` is unsized), in which case only the returned `Size`
    // records how much was read from `alloc` to build the constant value.
    #[tracing::instrument(level = "trace", skip(self), fields(ty = ?self.debug_type(ty), offset))]
    fn read_from_const_alloc_at(
        &self,
        alloc: ConstAllocation<'tcx>,
        ty: Word,
        offset: Size,
    ) -> (SpirvValue, Size) {
        let ty_def = self.lookup_type(ty);
        match ty_def {
            SpirvType::Bool
            | SpirvType::Integer(..)
            | SpirvType::Float(_)
            | SpirvType::Pointer { .. } => {
                let size = ty_def.sizeof(self).unwrap();
                let primitive = match ty_def {
                    SpirvType::Bool => Primitive::Int(Integer::fit_unsigned(0), false),
                    SpirvType::Integer(int_size, int_signedness) => Primitive::Int(
                        match int_size {
                            8 => Integer::I8,
                            16 => Integer::I16,
                            32 => Integer::I32,
                            64 => Integer::I64,
                            128 => Integer::I128,
                            other => {
                                self.tcx
                                    .dcx()
                                    .fatal(format!("invalid size for integer: {other}"));
                            }
                        },
                        int_signedness,
                    ),
                    SpirvType::Float(float_size) => Primitive::Float(match float_size {
                        16 => Float::F16,
                        32 => Float::F32,
                        64 => Float::F64,
                        128 => Float::F128,
                        other => {
                            self.tcx
                                .dcx()
                                .fatal(format!("invalid size for float: {other}"));
                        }
                    }),
                    SpirvType::Pointer { .. } => Primitive::Pointer(AddressSpace::DATA),
                    _ => unreachable!(),
                };

                let range = alloc_range(offset, size);
                let read_provenance = matches!(primitive, Primitive::Pointer(_));

                let mut primitive = primitive;
                let mut read_result = alloc.inner().read_scalar(self, range, read_provenance);

                // HACK(eddyb) while reading a pointer as an integer will fail,
                // the pointer itself can be read as a pointer, and then passed
                // to `scalar_to_backend`, which will `const_bitcast` it to `ty`.
                if read_result.is_err()
                    && !read_provenance
                    && let read_ptr_result @ Ok(Scalar::Ptr(ptr, _)) = alloc
                        .inner()
                        .read_scalar(self, range, /* read_provenance */ true)
                {
                    let (prov, _offset) = ptr.into_parts();
                    primitive = Primitive::Pointer(
                        self.tcx.global_alloc(prov.alloc_id()).address_space(self),
                    );
                    read_result = read_ptr_result;
                }

                let scalar_or_zombie = match read_result {
                    Ok(scalar) => {
                        Ok(self.scalar_to_backend(scalar, self.primitive_to_scalar(primitive), ty))
                    }

                    // FIXME(eddyb) could some of these use e.g. `const_bitcast`?
                    // (or, in general, assembling one constant out of several)
                    Err(err) => match err {
                        // The scalar is only `undef` if the entire byte range
                        // it covers is completely uninitialized - all other
                        // failure modes of `read_scalar` are various errors.
                        AllocError::InvalidUninitBytes(_) => {
                            let uninit_range = alloc
                                .inner()
                                .init_mask()
                                .is_range_initialized(range)
                                .unwrap_err();
                            let uninit_size = {
                                let [start, end] = [uninit_range.start, uninit_range.end()]
                                    .map(|x| x.clamp(range.start, range.end()));
                                end - start
                            };
                            if uninit_size == size {
                                Ok(self.undef(ty))
                            } else {
                                Err(format!(
                                    "overlaps {} uninitialized bytes",
                                    uninit_size.bytes()
                                ))
                            }
                        }
                        AllocError::ReadPointerAsInt(_) => Err("overlaps pointer bytes".into()),
                        AllocError::ReadPartialPointer(_) => {
                            Err("partially overlaps another pointer".into())
                        }

                        // HACK(eddyb) these should never happen when using
                        // `read_scalar`, but better not outright crash.
                        AllocError::ScalarSizeMismatch(_)
                        | AllocError::OverwritePartialPointer(_) => {
                            Err(format!("unrecognized `AllocError::{err:?}`"))
                        }
                    },
                };
                let result = scalar_or_zombie.unwrap_or_else(|reason| {
                    let result = self.undef(ty);
                    self.zombie_no_span(
                        result.def_cx(self),
                        &format!("unsupported `{}` constant: {reason}", self.debug_type(ty)),
                    );
                    result
                });
                (result, size)
            }
            SpirvType::Adt {
                field_types,
                field_offsets,
                ..
            } => {
                // HACK(eddyb) this accounts for unsized `struct`s, and allows
                // detecting gaps *only* at the end of the type, but is cheap.
                let mut tail_read_range = ..Size::ZERO;
                let result = self.constant_composite(
                    ty,
                    field_types
                        .iter()
                        .zip_eq(field_offsets.iter())
                        .map(|(&f_ty, &f_offset)| {
                            let (f, f_size) =
                                self.read_from_const_alloc_at(alloc, f_ty, offset + f_offset);
                            tail_read_range.end =
                                tail_read_range.end.max(offset + f_offset + f_size);
                            f.def_cx(self)
                        }),
                );

                let ty_size = ty_def.sizeof(self);

                // HACK(eddyb) catch non-padding holes in e.g. `enum` values.
                if let Some(ty_size) = ty_size
                    && let Some(tail_gap) = (ty_size.bytes())
                        .checked_sub(tail_read_range.end.align_to(ty_def.alignof(self)).bytes())
                    && tail_gap > 0
                {
                    self.zombie_no_span(
                        result.def_cx(self),
                        &format!(
                            "undersized `{}` constant (at least {tail_gap} bytes may be missing)",
                            self.debug_type(ty)
                        ),
                    );
                }

                (result, ty_size.unwrap_or(tail_read_range.end))
            }
            SpirvType::Vector { element, .. }
            | SpirvType::Matrix { element, .. }
            | SpirvType::Array { element, .. }
            | SpirvType::RuntimeArray { element } => {
                let stride = self.lookup_type(element).sizeof(self).unwrap();

                let count = match ty_def {
                    SpirvType::Vector { count, .. } | SpirvType::Matrix { count, .. } => {
                        u64::from(count)
                    }
                    SpirvType::Array { count, .. } => {
                        u64::try_from(self.builder.lookup_const_scalar(count).unwrap()).unwrap()
                    }
                    SpirvType::RuntimeArray { .. } => {
                        (alloc.inner().size() - offset).bytes() / stride.bytes()
                    }
                    _ => unreachable!(),
                };

                let result = self.constant_composite(
                    ty,
                    (0..count).map(|i| {
                        let (e, e_size) =
                            self.read_from_const_alloc_at(alloc, element, offset + i * stride);
                        assert_eq!(e_size, stride);
                        e.def_cx(self)
                    }),
                );

                // HACK(eddyb) `align_to` can only cause an increase for `Vector`,
                // because its `size`/`align` are rounded up to a power of two
                // (for now, at least, even if eventually that should go away).
                let read_size = (count * stride).align_to(ty_def.alignof(self));

                if let Some(ty_size) = ty_def.sizeof(self) {
                    assert_eq!(read_size, ty_size);
                }

                if let SpirvType::RuntimeArray { .. } = ty_def {
                    // FIXME(eddyb) values of this type should never be created,
                    // the only reasonable encoding of e.g. `&str` consts should
                    // be `&[u8; N]` consts, with the `static_addr_of` pointer
                    // (*not* the value it points to) cast to `&str`, afterwards.
                    self.zombie_no_span(
                        result.def_cx(self),
                        &format!("unsupported unsized `{}` constant", self.debug_type(ty)),
                    );
                }

                (result, read_size)
            }

            SpirvType::Void
            | SpirvType::Function { .. }
            | SpirvType::Image { .. }
            | SpirvType::Sampler
            | SpirvType::SampledImage { .. }
            | SpirvType::InterfaceBlock { .. }
            | SpirvType::AccelerationStructureKhr
            | SpirvType::RayQueryKhr => {
                let result = self.undef(ty);
                self.zombie_no_span(
                    result.def_cx(self),
                    &format!(
                        "cannot reinterpret Rust constant data as a `{}` value",
                        self.debug_type(ty)
                    ),
                );
                (result, ty_def.sizeof(self).unwrap_or(Size::ZERO))
            }
        }
    }
}