author    Bob Wilson <bob.wilson@apple.com>    2010-03-13 01:08:20 +0000
committer Bob Wilson <bob.wilson@apple.com>    2010-03-13 01:08:20 +0000
commit    815baebe1c8dc02accf128ae10dff9a1742d3244 (patch)
tree      e33fba11b18d0987b5512337f77c9e8334d889c9 /lib/Target/ARM/ARMLoadStoreOptimizer.cpp
parent    afc33fa6d5f16089e7747ad761ae2d498ce43e90 (diff)
Change ARM ld/st multiple instructions to have variant instructions for
writebacks to the address register.

This gets rid of the hack that the first register on the list was the magic
writeback register operand. There was an implicit constraint that if that
operand was not reg0 it had to match the base register operand. The post-RA
scheduler's antidependency breaker did not understand that constraint and
sometimes changed one without the other. This also fixes Radar 7495976 and
should help the verifier work better for ARM code. There are now new ld/st
instructions with explicit writeback operands and explicit constraints that
tie those registers together.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@98409 91177308-0d34-0410-b5e6-96231b3b80d8
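For illustration only (not part of the commit): a rough sketch of how the new
updating form is built, reusing the names that appear in the diff below (Base,
BaseKill, Mode, Pred, PredReg, NumRegs, and a Regs list of transfer registers
are assumed from the surrounding pass). The writeback result is now an explicit
def of the base register, rather than the old magic first-list-register
operand; the constraint tying the two base operands together lives in the new
instruction definitions.

    // Build an updating load-multiple (LDM_UPD). The first operand is an
    // explicit writeback def of the base register; the second is the base
    // address operand it is constrained to match. The old non-updating form
    // instead carried a magic reg0-or-base operand in its register list.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::LDM_UPD))
        .addReg(Base, getDefRegState(true))          // writeback result
        .addReg(Base, getKillRegState(BaseKill))     // base address
        .addImm(ARM_AM::getAM4ModeImm(Mode, true))   // sub-mode, writeback bit set
        .addImm(Pred).addReg(PredReg);               // predicate operands
    for (unsigned i = 0; i != NumRegs; ++i)          // transfer register list
      MIB.addReg(Regs[i].first, getDefRegState(true) |
                                getKillRegState(Regs[i].second));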
Diffstat (limited to 'lib/Target/ARM/ARMLoadStoreOptimizer.cpp')
-rw-r--r--   lib/Target/ARM/ARMLoadStoreOptimizer.cpp   183
1 file changed, 99 insertions, 84 deletions
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 808999c3b3..77dd466761 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -254,7 +254,6 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
.addReg(Base, getKillRegState(BaseKill))
.addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs))
.addImm(Pred).addReg(PredReg);
- MIB.addReg(0); // Add optional writeback (0 for now).
for (unsigned i = 0; i != NumRegs; ++i)
MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
| getKillRegState(Regs[i].second));
@@ -443,7 +442,7 @@ static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
case ARM::STM:
case ARM::t2LDM:
case ARM::t2STM:
- return (MI->getNumOperands() - 5) * 4;
+ return (MI->getNumOperands() - 4) * 4;
case ARM::VLDMS:
case ARM::VSTMS:
case ARM::VLDMD:
@@ -452,6 +451,21 @@ static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
}
}
+static unsigned getUpdatingLSMultipleOpcode(unsigned Opc) {
+ switch (Opc) {
+ case ARM::LDM: return ARM::LDM_UPD;
+ case ARM::STM: return ARM::STM_UPD;
+ case ARM::t2LDM: return ARM::t2LDM_UPD;
+ case ARM::t2STM: return ARM::t2STM_UPD;
+ case ARM::VLDMS: return ARM::VLDMS_UPD;
+ case ARM::VLDMD: return ARM::VLDMD_UPD;
+ case ARM::VSTMS: return ARM::VSTMS_UPD;
+ case ARM::VSTMD: return ARM::VSTMD_UPD;
+ default: llvm_unreachable("Unhandled opcode!");
+ }
+ return 0;
+}
+
/// MergeBaseUpdateLSMultiple - Fold proceeding/trailing inc/dec of base
/// register into the LDM/STM/VLDM{D|S}/VSTM{D|S} op when possible:
///
@@ -470,117 +484,119 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &I) {
MachineInstr *MI = MBBI;
unsigned Base = MI->getOperand(0).getReg();
+ bool BaseKill = MI->getOperand(0).isKill();
unsigned Bytes = getLSMultipleTransferSize(MI);
unsigned PredReg = 0;
ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
int Opcode = MI->getOpcode();
+ DebugLoc dl = MI->getDebugLoc();
bool isAM4 = (Opcode == ARM::LDM || Opcode == ARM::t2LDM ||
Opcode == ARM::STM || Opcode == ARM::t2STM);
- if (isAM4) {
- if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
- return false;
+ bool DoMerge = false;
+ ARM_AM::AMSubMode Mode = ARM_AM::ia;
+ unsigned Offset = 0;
- // Can't use the updating AM4 sub-mode if the base register is also a dest
+ if (isAM4) {
+ // Can't use an updating ld/st if the base register is also a dest
// register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
if (MI->getOperand(i).getReg() == Base)
return false;
}
+ assert(!ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()));
+ Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
+ } else {
+ // VLDM{D|S}, VSTM{D|S} addressing mode 5 ops.
+ assert(!ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()));
+ Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
+ Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
+ }
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
- if (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
+ // Try merging with the previous instruction.
+ if (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
+ if (isAM4) {
if (Mode == ARM_AM::ia &&
isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
- MI->getOperand(4).setReg(Base);
- MI->getOperand(4).setIsDef();
- MBB.erase(PrevMBBI);
- return true;
- } else if (Mode == ARM_AM::ib &&
+ DoMerge = true;
+ Mode = ARM_AM::db;
+ } else if (isAM4 && Mode == ARM_AM::ib &&
isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- MBB.erase(PrevMBBI);
- return true;
+ DoMerge = true;
+ Mode = ARM_AM::da;
+ }
+ } else {
+ if (Mode == ARM_AM::ia &&
+ isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
+ Mode = ARM_AM::db;
+ DoMerge = true;
}
}
+ if (DoMerge)
+ MBB.erase(PrevMBBI);
+ }
- if (MBBI != MBB.end()) {
- MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
+ // Try merging with the next instruction.
+ if (!DoMerge && MBBI != MBB.end()) {
+ MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
+ if (isAM4) {
if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- if (NextMBBI == I) {
- Advance = true;
- ++I;
- }
- MBB.erase(NextMBBI);
- return true;
+ DoMerge = true;
} else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
isMatchingDecrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- if (NextMBBI == I) {
- Advance = true;
- ++I;
- }
- MBB.erase(NextMBBI);
- return true;
+ DoMerge = true;
}
- }
- } else {
- // VLDM{D|S}, VSTM{D|S} addressing mode 5 ops.
- if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
- return false;
-
- ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
- unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
- if (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
+ } else {
if (Mode == ARM_AM::ia &&
- isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- MBB.erase(PrevMBBI);
- return true;
+ isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
+ DoMerge = true;
}
}
-
- if (MBBI != MBB.end()) {
- MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
- if (Mode == ARM_AM::ia &&
- isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- if (NextMBBI == I) {
- Advance = true;
- ++I;
- }
- MBB.erase(NextMBBI);
+ if (DoMerge) {
+ if (NextMBBI == I) {
+ Advance = true;
+ ++I;
}
- return true;
+ MBB.erase(NextMBBI);
}
}
- return false;
+ if (!DoMerge)
+ return false;
+
+ unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
+ .addReg(Base, getDefRegState(true)) // WB base register
+ .addReg(Base, getKillRegState(BaseKill));
+ if (isAM4) {
+ // [t2]LDM_UPD, [t2]STM_UPD
+ MIB.addImm(ARM_AM::getAM4ModeImm(Mode, true))
+ .addImm(Pred).addReg(PredReg);
+ } else {
+ // VLDM[SD}_UPD, VSTM[SD]_UPD
+ MIB.addImm(ARM_AM::getAM5Opc(Mode, true, Offset))
+ .addImm(Pred).addReg(PredReg);
+ }
+ // Transfer the rest of operands.
+ for (unsigned OpNum = 4, e = MI->getNumOperands(); OpNum != e; ++OpNum)
+ MIB.addOperand(MI->getOperand(OpNum));
+ // Transfer memoperands.
+ (*MIB).setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ MBB.erase(MBBI);
+ return true;
}
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
switch (Opc) {
case ARM::LDR: return ARM::LDR_PRE;
case ARM::STR: return ARM::STR_PRE;
- case ARM::VLDRS: return ARM::VLDMS;
- case ARM::VLDRD: return ARM::VLDMD;
- case ARM::VSTRS: return ARM::VSTMS;
- case ARM::VSTRD: return ARM::VSTMD;
+ case ARM::VLDRS: return ARM::VLDMS_UPD;
+ case ARM::VLDRD: return ARM::VLDMD_UPD;
+ case ARM::VSTRS: return ARM::VSTMS_UPD;
+ case ARM::VSTRD: return ARM::VSTMD_UPD;
case ARM::t2LDRi8:
case ARM::t2LDRi12:
return ARM::t2LDR_PRE;
@@ -596,10 +612,10 @@ static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
switch (Opc) {
case ARM::LDR: return ARM::LDR_POST;
case ARM::STR: return ARM::STR_POST;
- case ARM::VLDRS: return ARM::VLDMS;
- case ARM::VLDRD: return ARM::VLDMD;
- case ARM::VSTRS: return ARM::VSTMS;
- case ARM::VSTRD: return ARM::VSTMD;
+ case ARM::VLDRS: return ARM::VLDMS_UPD;
+ case ARM::VLDRD: return ARM::VLDMD_UPD;
+ case ARM::VSTRS: return ARM::VSTMS_UPD;
+ case ARM::VSTRD: return ARM::VSTMD_UPD;
case ARM::t2LDRi8:
case ARM::t2LDRi12:
return ARM::t2LDR_POST;
@@ -699,13 +715,13 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
Offset = AddSub == ARM_AM::sub ? -Bytes : Bytes;
if (isAM5) {
- // VLDM[SD}, VSTM[SD]
+ // VLDM[SD}_UPD, VSTM[SD]_UPD
MachineOperand &MO = MI->getOperand(0);
BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
+ .addReg(Base, getDefRegState(true)) // WB base register
.addReg(Base, getKillRegState(isLd ? BaseKill : false))
.addImm(Offset)
.addImm(Pred).addReg(PredReg)
- .addReg(Base, getDefRegState(true)) // WB base register
.addReg(MO.getReg(), (isLd ? getDefRegState(true) :
getKillRegState(MO.isKill())));
} else if (isLd) {
@@ -909,7 +925,6 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
.addReg(BaseReg, getKillRegState(BaseKill))
.addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
.addImm(Pred).addReg(PredReg)
- .addReg(0)
.addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
.addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
++NumLDRD2LDM;
@@ -918,7 +933,6 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
.addReg(BaseReg, getKillRegState(BaseKill))
.addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
.addImm(Pred).addReg(PredReg)
- .addReg(0)
.addReg(EvenReg,
getKillRegState(EvenDeadKill) | getUndefRegState(EvenUndef))
.addReg(OddReg,
@@ -1156,7 +1170,8 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
if (MBBI != MBB.begin() &&
(MBBI->getOpcode() == ARM::BX_RET || MBBI->getOpcode() == ARM::tBX_RET)) {
MachineInstr *PrevMI = prior(MBBI);
- if (PrevMI->getOpcode() == ARM::LDM || PrevMI->getOpcode() == ARM::t2LDM) {
+ if (PrevMI->getOpcode() == ARM::LDM_UPD ||
+ PrevMI->getOpcode() == ARM::t2LDM_UPD) {
MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
if (MO.getReg() != ARM::LR)
return false;