
Commit 201686a

Auto merge of rust-lang#122053 - erikdesjardins:alloca, r=nikic
Stop using LLVM struct types for alloca

The alloca type has no semantic meaning; only the size (and the alignment, which we specify explicitly) matters. Using `[N x i8]` is a more direct way to specify that we want `N` bytes, and avoids relying on LLVM's struct layout. It is likely that a future LLVM version will change to an untyped alloca representation.

Split out from rust-lang#121577.

r? `@ghost`
2 parents ab71ee7 + d46b688 commit 201686a
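
A minimal, self-contained sketch of the idea (an editor's illustration, not code from the commit): a stack slot is fully described by a byte count and an alignment, so layout-identical types get the same `[N x i8]` slot. The `Align64` stand-in mirrors the enum used in the codegen tests below; `alloca_ir_for` is hypothetical.

```rust
use std::mem::{align_of, size_of};

// Stand-in for a 64-byte-aligned Rust type, like the `Align64` enum in the
// codegen tests updated by this commit.
#[allow(dead_code)]
#[repr(C, align(64))]
struct Align64(u32);

// What the LLVM backend emits after this change for any sized layout: no
// named struct type, just `[size x i8]` plus an explicit alignment.
fn alloca_ir_for<T>() -> String {
    format!("alloca [{} x i8], align {}", size_of::<T>(), align_of::<T>())
}

fn main() {
    println!("{}", alloca_ir_for::<Align64>());  // alloca [64 x i8], align 64
    println!("{}", alloca_ir_for::<[u8; 64]>()); // the identical slot
}
```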

34 files changed: +218 −188 lines

compiler/rustc_codegen_gcc/src/builder.rs

+4 −16

@@ -898,26 +898,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.gcc_checked_binop(oop, typ, lhs, rhs)
     }

-    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
-        // FIXME(antoyo): this check that we don't call get_aligned() a second time on a type.
-        // Ideally, we shouldn't need to do this check.
-        let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type {
-            ty
-        } else {
-            ty.get_aligned(align.bytes())
-        };
+    fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> {
+        let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes());
         // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
         self.stack_var_count.set(self.stack_var_count.get() + 1);
-        self.current_func()
-            .new_local(
-                self.location,
-                aligned_type,
-                &format!("stack_var_{}", self.stack_var_count.get()),
-            )
-            .get_address(self.location)
+        self.current_func().new_local(None, ty, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
     }

-    fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+    fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
         unimplemented!();
     }

compiler/rustc_codegen_gcc/src/intrinsic/mod.rs

+1 −1

@@ -531,7 +531,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
         // We instead thus allocate some scratch space...
         let scratch_size = cast.size(bx);
         let scratch_align = cast.align(bx);
-        let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
+        let llscratch = bx.alloca(scratch_size, scratch_align);
         bx.lifetime_start(llscratch, scratch_size);

         // ... where we first store the value...

compiler/rustc_codegen_gcc/src/intrinsic/simd.rs

+2 −2

@@ -18,7 +18,7 @@ use rustc_middle::span_bug;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, Ty};
 use rustc_span::{sym, Span, Symbol};
-use rustc_target::abi::Align;
+use rustc_target::abi::{Align, Size};

 use crate::builder::Builder;
 #[cfg(not(feature = "master"))]
@@ -558,7 +558,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         let ze = bx.zext(result, bx.type_ix(expected_bytes * 8));

         // Convert the integer to a byte array
-        let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+        let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
         bx.store(ze, ptr, Align::ONE);
         let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
         let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));

compiler/rustc_codegen_llvm/src/abi.rs

+1 −1

@@ -227,7 +227,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
         // when passed by value, making it larger.
         let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
         // Allocate some scratch space...
-        let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+        let llscratch = bx.alloca(scratch_size, scratch_align);
         bx.lifetime_start(llscratch, scratch_size);
         // ...store the value...
         bx.store(val, llscratch, scratch_align);
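
The scratch-slot pattern above needs only the cast's size, not its LLVM type. A rough safe-Rust analogue of the store-then-reload round trip (an editor's illustration, assuming a `u64` being rewritten as a hypothetical `[2 x u32]` cast type):

```rust
// Model of the cast-ABI round trip: store a value into an untyped byte
// buffer of the cast's size, then read it back under the cast type.
fn round_trip_through_scratch(val: u64) -> [u32; 2] {
    // bx.alloca(scratch_size, scratch_align): just scratch_size bytes.
    let mut scratch = [0u8; 8];
    // bx.store(val, llscratch, scratch_align), native-endian.
    scratch.copy_from_slice(&val.to_ne_bytes());
    // ...and the load back under the cast type.
    [
        u32::from_ne_bytes(scratch[0..4].try_into().unwrap()),
        u32::from_ne_bytes(scratch[4..8].try_into().unwrap()),
    ]
}

fn main() {
    println!("{:x?}", round_trip_through_scratch(0x1122_3344_5566_7788));
}
```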

compiler/rustc_codegen_llvm/src/builder.rs

+4 −3

@@ -468,20 +468,21 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         val
     }

-    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+    fn alloca(&mut self, size: Size, align: Align) -> &'ll Value {
         let mut bx = Builder::with_cx(self.cx);
         bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
+        let ty = self.cx().type_array(self.cx().type_i8(), size.bytes());
         unsafe {
             let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }
     }

-    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
+    fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
             let alloca =
-                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
+                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }

compiler/rustc_codegen_llvm/src/intrinsic.rs

+10 −10

@@ -18,7 +18,7 @@ use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
 use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, Span, Symbol};
-use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size};
 use rustc_target::spec::{HasTargetSpec, PanicStrategy};

 use std::cmp::Ordering;
@@ -638,8 +638,9 @@ fn codegen_msvc_try<'ll>(
     // }
     //
     // More information can be found in libstd's seh.rs implementation.
+    let ptr_size = bx.tcx().data_layout.pointer_size;
     let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-    let slot = bx.alloca(bx.type_ptr(), ptr_align);
+    let slot = bx.alloca(ptr_size, ptr_align);
     let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
     bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

@@ -909,15 +910,14 @@ fn codegen_emcc_try<'ll>(

     // We need to pass two values to catch_func (ptr and is_rust_panic), so
     // create an alloca and pass a pointer to that.
+    let ptr_size = bx.tcx().data_layout.pointer_size;
     let ptr_align = bx.tcx().data_layout.pointer_align.abi;
     let i8_align = bx.tcx().data_layout.i8_align.abi;
-    let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
-    let catch_data = bx.alloca(catch_data_type, ptr_align);
-    let catch_data_0 =
-        bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
-    bx.store(ptr, catch_data_0, ptr_align);
-    let catch_data_1 =
-        bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+    // Required in order for there to be no padding between the fields.
+    assert!(i8_align <= ptr_align);
+    let catch_data = bx.alloca(2 * ptr_size, ptr_align);
+    bx.store(ptr, catch_data, ptr_align);
+    let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
     bx.store(is_rust_panic, catch_data_1, i8_align);

     let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
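
The second hunk replaces a `{ ptr, i1 }` struct with hand-computed offsets inside a `2 * ptr_size` byte slot. A runnable sketch of that layout arithmetic (an editor's illustration; the concrete numbers assume a 64-bit target):

```rust
// Both fields live in one 2 * ptr_size byte slot at hand-computed offsets.
fn main() {
    let ptr_size: u64 = 8;
    let ptr_align: u64 = 8;
    let i8_align: u64 = 1;

    // The same invariant the new code asserts: because the second field's
    // alignment does not exceed the first's, offset `ptr_size` needs no
    // padding, matching the old struct layout byte for byte.
    assert!(i8_align <= ptr_align);

    let slot_bytes = 2 * ptr_size; // bx.alloca(2 * ptr_size, ptr_align)
    let ptr_offset = 0u64;         // bx.store(ptr, catch_data, ptr_align)
    let flag_offset = ptr_size;    // bx.inbounds_ptradd(catch_data, ptr_size)
    println!("slot {slot_bytes} B: ptr at +{ptr_offset}, is_rust_panic at +{flag_offset}");
}
```
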
@@ -1363,7 +1363,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));

         // Convert the integer to a byte array
-        let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+        let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
         bx.store(ze, ptr, Align::ONE);
         let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
         return Ok(bx.load(array_ty, ptr, Align::ONE));
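
For the bitmask conversion above, the alloca no longer names an `iN` integer type; it only reserves `expected_bytes` of memory, and the store/load pair reinterprets the integer as bytes. A safe analogue (editor's illustration, assuming `expected_bytes == 8`):

```rust
// The zero-extended mask is stored to memory and reloaded as a byte array;
// with native-endian stores this is exactly an integer-to-bytes conversion.
fn mask_to_byte_array(ze: u64) -> [u8; 8] {
    ze.to_ne_bytes()
}

fn main() {
    // Lanes 0 and 2 set.
    println!("{:?}", mask_to_byte_array(0b101));
}
```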

compiler/rustc_codegen_ssa/src/base.rs

+1 −1

@@ -508,7 +508,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         let ptr_size = bx.tcx().data_layout.pointer_size;
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let arg_argc = bx.const_int(cx.type_isize(), 2);
-        let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), ptr_align);
+        let arg_argv = bx.alloca(2 * ptr_size, ptr_align);
         bx.store(param_handle, arg_argv, ptr_align);
         let arg_argv_el1 = bx.inbounds_ptradd(arg_argv, bx.const_usize(ptr_size.bytes()));
         bx.store(param_system_table, arg_argv_el1, ptr_align);

compiler/rustc_codegen_ssa/src/mir/block.rs

+1 −1

@@ -1514,7 +1514,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // when passed by value, making it larger.
                 let copy_bytes = cmp::min(scratch_size.bytes(), arg.layout.size.bytes());
                 // Allocate some scratch space...
-                let llscratch = bx.alloca(bx.cast_backend_type(cast), scratch_align);
+                let llscratch = bx.alloca(scratch_size, scratch_align);
                 bx.lifetime_start(llscratch, scratch_size);
                 // ...memcpy the value...
                 bx.memcpy(

compiler/rustc_codegen_ssa/src/mir/operand.rs

+2 −2

@@ -327,7 +327,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             let llfield_ty = bx.cx().backend_type(field);

             // Can't bitcast an aggregate, so round trip through memory.
-            let llptr = bx.alloca(llfield_ty, field.align.abi);
+            let llptr = bx.alloca(field.size, field.align.abi);
             bx.store(*llval, llptr, field.align.abi);
             *llval = bx.load(llfield_ty, llptr, field.align.abi);
         }
@@ -470,7 +470,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         let align_minus_1 = bx.sub(align, one);
         let size_extra = bx.add(size, align_minus_1);
         let min_align = Align::ONE;
-        let alloca = bx.byte_array_alloca(size_extra, min_align);
+        let alloca = bx.dynamic_alloca(size_extra, min_align);
         let address = bx.ptrtoint(alloca, bx.type_isize());
         let neg_address = bx.neg(address);
         let offset = bx.and(neg_address, align_minus_1);
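
The surrounding code (unchanged here apart from the rename) aligns the align-1 runtime allocation by hand: over-allocate by `align - 1`, then round the address up. The arithmetic as a runnable sketch (editor's illustration):

```rust
// Round an address up to a power-of-two alignment, exactly as the codegen
// sequence does: offset = (-address) & (align - 1); aligned = address + offset.
fn align_up(address: usize, align: usize) -> usize {
    assert!(align.is_power_of_two());
    let align_minus_1 = align - 1;
    let offset = address.wrapping_neg() & align_minus_1;
    address + offset
}

fn main() {
    assert_eq!(align_up(0x1001, 16), 0x1010);
    assert_eq!(align_up(0x1000, 16), 0x1000); // already aligned: offset is 0
    // Why over-allocate by align - 1: the worst case needs exactly that slack.
    let (size, align) = (10, 16);
    assert!(align_up(0x1001, align) + size <= 0x1001 + size + (align - 1));
    println!("ok");
}
```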

compiler/rustc_codegen_ssa/src/mir/place.rs

+1 −1

@@ -81,7 +81,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         align: Align,
     ) -> Self {
         assert!(layout.is_sized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(bx.cx().backend_type(layout), align);
+        let tmp = bx.alloca(layout.size, align);
         Self::new_sized_aligned(tmp, layout, align)
     }

compiler/rustc_codegen_ssa/src/traits/builder.rs

+2 −2

@@ -144,8 +144,8 @@ pub trait BuilderMethods<'a, 'tcx>:
     }
     fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;

-    fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
-    fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
+    fn alloca(&mut self, size: Size, align: Align) -> Self::Value;
+    fn dynamic_alloca(&mut self, size: Self::Value, align: Align) -> Self::Value;

     fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
     fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
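
A toy model of the revised trait pair, using stand-in types rather than the real rustc ones, showing how a backend can lower both methods to untyped byte allocas (the `IrBuilder` here is hypothetical; the real backends are in the diffs above):

```rust
// Stand-ins for Self::Value / Size / Align; not the rustc definitions.
trait BuilderMethods {
    type Value;
    fn alloca(&mut self, size_bytes: u64, align: u64) -> Self::Value;
    fn dynamic_alloca(&mut self, size: Self::Value, align: u64) -> Self::Value;
}

struct IrBuilder {
    next_id: u32,
}

impl BuilderMethods for IrBuilder {
    type Value = String;

    // Fixed-size slots become `[N x i8]`, as in the LLVM and GCC backends.
    fn alloca(&mut self, size_bytes: u64, align: u64) -> String {
        self.next_id += 1;
        format!("%{} = alloca [{size_bytes} x i8], align {align}", self.next_id)
    }

    // Runtime-sized slots use an element-count alloca over i8, mirroring
    // LLVMBuildArrayAlloca(i8, size).
    fn dynamic_alloca(&mut self, size: String, align: u64) -> String {
        self.next_id += 1;
        format!("%{} = alloca i8, i64 {size}, align {align}", self.next_id)
    }
}

fn main() {
    let mut bx = IrBuilder { next_id: 0 };
    println!("{}", bx.alloca(48, 16));                   // %1 = alloca [48 x i8], align 16
    println!("{}", bx.dynamic_alloca("%len".into(), 1)); // %2 = alloca i8, i64 %len, align 1
}
```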

tests/assembly/stack-protector/stack-protector-heuristics-effect.rs

+15 −40

@@ -11,6 +11,11 @@
 //@ compile-flags: -C opt-level=2 -Z merge-functions=disabled
 //@ min-llvm-version: 17.0.2

+// NOTE: the heuristics for stack smash protection inappropriately rely on types in LLVM IR,
+// despite those types having no semantic meaning. This means that the `basic` and `strong`
+// settings do not behave in a coherent way. This is a known issue in LLVM.
+// See comments on https://github.com/rust-lang/rust/issues/114903.
+
 #![crate_type = "lib"]

 #![allow(incomplete_features)]
@@ -39,23 +44,9 @@ pub fn array_char(f: fn(*const char)) {
     f(&b as *const _);
     f(&c as *const _);

-    // Any type of local array variable leads to stack protection with the
-    // "strong" heuristic. The 'basic' heuristic only adds stack protection to
-    // functions with local array variables of a byte-sized type, however. Since
-    // 'char' is 4 bytes in Rust, this function is not protected by the 'basic'
-    // heuristic
-    //
-    // (This test *also* takes the address of the local stack variables. We
-    // cannot know that this isn't what triggers the `strong` heuristic.
-    // However, the test strategy of passing the address of a stack array to an
-    // external function is sufficient to trigger the `basic` heuristic (see
-    // test `array_u8_large()`). Since the `basic` heuristic only checks for the
-    // presence of stack-local array variables, we can be confident that this
-    // test also captures this part of the `strong` heuristic specification.)
-
     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }
@@ -163,26 +154,11 @@ pub fn local_string_addr_taken(f: fn(&String)) {
     f(&x);

     // Taking the address of the local variable `x` leads to stack smash
-    // protection with the `strong` heuristic, but not with the `basic`
-    // heuristic. It does not matter that the reference is not mut.
-    //
-    // An interesting note is that a similar function in C++ *would* be
-    // protected by the `basic` heuristic, because `std::string` has a char
-    // array internally as a small object optimization:
-    // ```
-    // cat <<EOF | clang++ -O2 -fstack-protector -S -x c++ - -o - | grep stack_chk
-    // #include <string>
-    // void f(void (*g)(const std::string&)) {
-    //     std::string x;
-    //     g(x);
-    // }
-    // EOF
-    // ```
-    //
+    // protection. It does not matter that the reference is not mut.

     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }
@@ -233,8 +209,8 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {
     // Even though the local variable conceptually doesn't have its address
     // taken, it's so large that the "move" is implemented with a reference to a
     // stack-local variable in the ABI. Consequently, this function *is*
-    // protected by the `strong` heuristic. This is also the case for
-    // rvalue-references in C++, regardless of struct size:
+    // protected. This is also the case for rvalue-references in C++,
+    // regardless of struct size:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -248,7 +224,7 @@ pub fn local_large_var_moved(f: fn(Gigastruct)) {

     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }
@@ -261,9 +237,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
     // A new instance of `Gigastruct` is passed to `f()`, without any apparent
     // connection to this stack frame. Still, since instances of `Gigastruct`
     // are sufficiently large, it is allocated in the caller stack frame and
-    // passed as a pointer. As such, this function is *also* protected by the
-    // `strong` heuristic, just like `local_large_var_moved`. This is also the
-    // case for pass-by-value of sufficiently large structs in C++:
+    // passed as a pointer. As such, this function is *also* protected, just
+    // like `local_large_var_moved`. This is also the case for pass-by-value
+    // of sufficiently large structs in C++:
     // ```
     // cat <<EOF | clang++ -O2 -fstack-protector-strong -S -x c++ - -o - | grep stack_chk
     // #include <cstdint>
@@ -275,10 +251,9 @@ pub fn local_large_var_cloned(f: fn(Gigastruct)) {
     // EOF
     // ```

-
     // all: __stack_chk_fail
     // strong: __stack_chk_fail
-    // basic-NOT: __stack_chk_fail
+    // basic: __stack_chk_fail
     // none-NOT: __stack_chk_fail
     // missing-NOT: __stack_chk_fail
 }

tests/codegen/align-byval-alignment-mismatch.rs

+3 −3

@@ -56,7 +56,7 @@ extern "C" {
 #[no_mangle]
 pub unsafe fn rust_to_c_increases_alignment(x: Align1) {
     // i686-linux: start:
-    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align1, align 4
+    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 4
     // i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 4 {{.*}}[[ALLOCA]], ptr {{.*}}align 1 {{.*}}%x
     // i686-linux-NEXT: call void @extern_c_align1({{.+}} [[ALLOCA]])

@@ -90,7 +90,7 @@ pub unsafe extern "C" fn c_to_rust_decreases_alignment(x: Align1) {
 #[no_mangle]
 pub unsafe extern "C" fn c_to_rust_increases_alignment(x: Align16) {
     // i686-linux: start:
-    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
+    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
     // i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
     // i686-linux-NEXT: call void @extern_rust_align16({{.+}} [[ALLOCA]])

@@ -116,7 +116,7 @@ pub unsafe extern "C" fn c_to_rust_ref_decreases_alignment(x: Align1) {
 #[no_mangle]
 pub unsafe extern "C" fn c_to_rust_ref_increases_alignment(x: Align16) {
     // i686-linux: start:
-    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca %Align16, align 16
+    // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 16
     // i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 16 {{.*}}[[ALLOCA]], ptr {{.*}}align 4 {{.*}}%0
     // i686-linux-NEXT: call void @extern_rust_ref_align16({{.+}} [[ALLOCA]])

tests/codegen/align-byval.rs

+6 −6

@@ -106,20 +106,20 @@ pub struct ForceAlign16 {
 pub unsafe fn call_na1(x: NaturalAlign1) {
     // CHECK: start:

-    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
     // m68k: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])

-    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 1
     // wasm: call void @natural_align_1({{.*}}byval([2 x i8]) align 1{{.*}} [[ALLOCA]])

     // x86_64-linux: call void @natural_align_1(i16

     // x86_64-windows: call void @natural_align_1(i16

-    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
     // i686-linux: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])

-    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca [2 x i8], align 4
     // i686-windows: call void @natural_align_1({{.*}}byval([2 x i8]) align 4{{.*}} [[ALLOCA]])
     natural_align_1(x);
 }
@@ -134,10 +134,10 @@ pub unsafe fn call_na2(x: NaturalAlign2) {
     // x86_64-linux-NEXT: call void @natural_align_2
     // x86_64-windows-NEXT: call void @natural_align_2

-    // i686-linux: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
+    // i686-linux: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
     // i686-linux: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])

-    // i686-windows: [[ALLOCA:%[0-9]+]] = alloca %NaturalAlign2, align 4
+    // i686-windows: [[ALLOCA:%[0-9]+]] = alloca [34 x i8], align 4
     // i686-windows: call void @natural_align_2({{.*}}byval([34 x i8]) align 4{{.*}} [[ALLOCA]])
     natural_align_2(x);
 }

tests/codegen/align-enum.rs

+2 −2

@@ -18,7 +18,7 @@ pub struct Nested64 {
 // CHECK-LABEL: @align64
 #[no_mangle]
 pub fn align64(a: u32) -> Align64 {
-    // CHECK: %a64 = alloca %Align64, align 64
+    // CHECK: %a64 = alloca [64 x i8], align 64
     // CHECK: call void @llvm.memcpy.{{.*}}(ptr align 64 %{{.*}}, ptr align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false)
     let a64 = Align64::A(a);
     a64
@@ -27,7 +27,7 @@ pub fn align64(a: u32) -> Align64 {
 // CHECK-LABEL: @nested64
 #[no_mangle]
 pub fn nested64(a: u8, b: u32, c: u16) -> Nested64 {
-    // CHECK: %n64 = alloca %Nested64, align 64
+    // CHECK: %n64 = alloca [128 x i8], align 64
     let n64 = Nested64 { a, b: Align64::B(b), c };
     n64
 }
