author    Rafael Espindola <rafael.espindola@gmail.com>  2011-10-26 21:16:41 +0000
committer Rafael Espindola <rafael.espindola@gmail.com>  2011-10-26 21:16:41 +0000
commit    66bf7430f5586188944e45514abbb04fe3383107 (patch)
tree      d52594a39bb7362ee28be0c9b59811fb2c006039
parent    758a519a22b469ce8e2b8d0bf7a72813e87710d4 (diff)
Fixes an issue reported by -verify-machineinstrs.

Patch by Sanjoy Das.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@143064 91177308-0d34-0410-b5e6-96231b3b80d8
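Background: before register allocation, machine IR is in SSA form, so -verify-machineinstrs requires every virtual register to have exactly one definition. The old lowering defined tmpSPVReg twice: once by the COPY from the physical stack pointer and again as the destination of the two-address SUB. The patch gives the subtraction a fresh virtual register, SPLimitVReg. A minimal sketch of the corrected pattern (a hypothetical standalone helper, not the actual EmitLoweredSegAlloca; it hard-codes the 32-bit opcode):

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "X86InstrInfo.h"

    using namespace llvm;

    // Emit "newSP = SP - size" without redefining any virtual register.
    static unsigned emitNewSP(MachineBasicBlock *BB, DebugLoc DL,
                              const TargetInstrInfo *TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterClass *AddrRegClass,
                              unsigned physSPReg, unsigned sizeVReg) {
      unsigned tmpSPVReg   = MRI.createVirtualRegister(AddrRegClass);
      unsigned SPLimitVReg = MRI.createVirtualRegister(AddrRegClass);
      // %tmpSP = COPY %esp  -- the sole definition of tmpSPVReg.
      BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg)
          .addReg(physSPReg);
      // %SPLimit = SUB32rr %tmpSP, %size  -- the destination is a fresh
      // vreg, so no register is defined twice (the pre-patch code reused
      // tmpSPVReg here, which the verifier rejected).
      BuildMI(BB, DL, TII->get(X86::SUB32rr), SPLimitVReg)
          .addReg(tmpSPVReg).addReg(sizeVReg);
      return SPLimitVReg;
    }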
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp    9
-rw-r--r--  lib/Target/X86/X86InstrCompiler.td    4
-rw-r--r--  test/CodeGen/X86/segmented-stacks.ll  15
3 files changed, 17 insertions, 11 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 7dec7c4e33..ce2ef92802 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -11784,6 +11784,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
   unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
     bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
     tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
+    SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
     sizeVReg = MI->getOperand(1).getReg(),
     physSPReg = Is64Bit ? X86::RSP : X86::ESP;
@@ -11801,19 +11802,19 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
   // Add code to the main basic block to check if the stack limit has been hit,
   // and if so, jump to mallocMBB otherwise to bumpMBB.
   BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
-  BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), tmpSPVReg)
+  BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
     .addReg(tmpSPVReg).addReg(sizeVReg);
   BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr))
     .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg)
-    .addReg(tmpSPVReg);
+    .addReg(SPLimitVReg);
   BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB);
 
   // bumpMBB simply decreases the stack pointer, since we know the current
   // stacklet has enough space.
   BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
-    .addReg(tmpSPVReg);
+    .addReg(SPLimitVReg);
   BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
-    .addReg(tmpSPVReg);
+    .addReg(SPLimitVReg);
   BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);
 
   // Calls into a routine in libgcc to allocate more space from the heap.
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index 5f9bf16507..e0cf669bee 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -112,14 +112,14 @@ let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
 // allocated by bumping the stack pointer. Otherwise memory is allocated from
 // the heap.
 
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP, EAX] in
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
 def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                       "# variable sized alloca for segmented stacks",
                       [(set GR32:$dst,
                          (X86SegAlloca GR32:$size))]>,
                     Requires<[In32BitMode]>;
 
-let Defs = [RAX, RSP, EFLAGS], Uses = [RSP, RAX] in
+let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
 def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                       "# variable sized alloca for segmented stacks",
                       [(set GR64:$dst,
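The shrunken Uses lists follow from the same cleanup: the allocation size reaches the pseudo through its explicit GR32/GR64 $size operand (read as MI->getOperand(1) in the lowering above), so the implicit reads of EAX/RAX were never real, and declaring an implicit use with no reaching definition is presumably part of what -verify-machineinstrs flagged.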
diff --git a/test/CodeGen/X86/segmented-stacks.ll b/test/CodeGen/X86/segmented-stacks.ll
index 3f0067e292..4871daaa01 100644
--- a/test/CodeGen/X86/segmented-stacks.ll
+++ b/test/CodeGen/X86/segmented-stacks.ll
@@ -30,6 +30,10 @@ false:
 ; X32-NEXT: addl $8, %esp
 ; X32-NEXT: ret
 
+; X32:      movl %esp, %eax
+; X32-NEXT: subl %ecx, %eax
+; X32-NEXT: cmpl %eax, %gs:48
+
 ; X32:      movl %eax, %esp
 
 ; X32:      subl $12, %esp
@@ -47,14 +51,15 @@ false:
 ; X64-NEXT: callq __morestack
 ; X64-NEXT: ret
 
-; X64:      movq %rsp, %rax
-; X64-NEXT: subq %rcx, %rax
-; X64-NEXT: cmpq %rax, %fs:112
+; X64:      movq %rsp, %rdi
+; X64-NEXT: subq %rax, %rdi
+; X64-NEXT: cmpq %rdi, %fs:112
 
-; X64:      movq %rax, %rsp
+; X64:      movq %rdi, %rsp
 
-; X64:      movq %rcx, %rdi
+; X64:      movq %rax, %rdi
 ; X64-NEXT: callq __morestack_allocate_stack_space
+; X64-NEXT: movq %rax, %rdi
 
 }
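The CHECK updates track the register assignments that fall out of the extra virtual register: on x86-64 the computed new stack pointer now lands in %rdi instead of %rax and the size in %rax instead of %rcx, and the added movq %rax, %rdi after the call is presumably the copy of __morestack_allocate_stack_space's return value into the register the join block reads. %gs:48 and %fs:112 are the per-thread stack-limit slots segmented stacks use on Linux for 32- and 64-bit code respectively.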