Diffstat (limited to 'lib/Target/PowerPC/PPCInstrInfo.cpp')
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.cpp | 56
 1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index d8f6015dce..594c59b257 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -314,6 +314,8 @@ void PPCInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
     BuildMI(MBB, MI, get(PPC::MCRF), DestReg).addReg(SrcReg);
   } else if (DestRC == PPC::VRRCRegisterClass) {
     BuildMI(MBB, MI, get(PPC::VOR), DestReg).addReg(SrcReg).addReg(SrcReg);
+  } else if (DestRC == PPC::CRBITRCRegisterClass) {
+    BuildMI(MBB, MI, get(PPC::CROR), DestReg).addReg(SrcReg).addReg(SrcReg);
   } else {
     cerr << "Attempt to copy register that is not GPR or FPR";
     abort();
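
The copy for a CR bit works because CROR computes crD = crA | crB over single condition-register bits; naming the same source bit twice makes the OR a plain bit copy, the same idiom as the VOR self-copy used for vector registers above. A minimal sketch of that degenerate case (illustrative semantics only, not LLVM API):

    // CROR d, s, s computes d = s | s, i.e. d = s: OR with itself is a copy.
    static inline bool emulateCROR(bool crA, bool crB) { return crA | crB; }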
@@ -379,6 +381,35 @@ static bool StoreRegToStackSlot(const TargetInstrInfo &TII,
                                          .addReg(PPC::R0, false, false, isKill),
                                          FrameIdx));
     }
+  } else if (RC == PPC::CRBITRCRegisterClass) {
+    // FIXME: We use CRi here because there is no mtcrf on a single bit.
+    // Since the backend currently only uses CR1EQ as an individual bit,
+    // this should not cause any bugs. If other CR bits are ever used
+    // individually, the following code may be invalid.
+    if (SrcReg >= PPC::CR0LT && SrcReg <= PPC::CR0UN)
+      return StoreRegToStackSlot(TII, PPC::CR0, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
+    if (SrcReg >= PPC::CR1LT && SrcReg <= PPC::CR1UN)
+      return StoreRegToStackSlot(TII, PPC::CR1, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
+    if (SrcReg >= PPC::CR2LT && SrcReg <= PPC::CR2UN)
+      return StoreRegToStackSlot(TII, PPC::CR2, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
+    if (SrcReg >= PPC::CR3LT && SrcReg <= PPC::CR3UN)
+      return StoreRegToStackSlot(TII, PPC::CR3, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
+    if (SrcReg >= PPC::CR4LT && SrcReg <= PPC::CR4UN)
+      return StoreRegToStackSlot(TII, PPC::CR4, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
+    if (SrcReg >= PPC::CR5LT && SrcReg <= PPC::CR5UN)
+      return StoreRegToStackSlot(TII, PPC::CR5, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
+    if (SrcReg >= PPC::CR6LT && SrcReg <= PPC::CR6UN)
+      return StoreRegToStackSlot(TII, PPC::CR6, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
+    if (SrcReg >= PPC::CR7LT && SrcReg <= PPC::CR7UN)
+      return StoreRegToStackSlot(TII, PPC::CR7, isKill, FrameIdx,
+                                 PPC::CRRCRegisterClass, NewMIs);
   } else if (RC == PPC::VRRCRegisterClass) {
     // We don't have indexed addressing for vector loads.  Emit:
     // R0 = ADDI FI#
@@ -501,6 +532,31 @@ static void LoadRegFromStackSlot(const TargetInstrInfo &TII,
     }
     NewMIs.push_back(BuildMI(TII.get(PPC::MTCRF), DestReg).addReg(PPC::R0));
+  } else if (RC == PPC::CRBITRCRegisterClass) {
+    if (DestReg >= PPC::CR0LT && DestReg <= PPC::CR0UN)
+      return LoadRegFromStackSlot(TII, PPC::CR0, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
+    if (DestReg >= PPC::CR1LT && DestReg <= PPC::CR1UN)
+      return LoadRegFromStackSlot(TII, PPC::CR1, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
+    if (DestReg >= PPC::CR2LT && DestReg <= PPC::CR2UN)
+      return LoadRegFromStackSlot(TII, PPC::CR2, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
+    if (DestReg >= PPC::CR3LT && DestReg <= PPC::CR3UN)
+      return LoadRegFromStackSlot(TII, PPC::CR3, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
+    if (DestReg >= PPC::CR4LT && DestReg <= PPC::CR4UN)
+      return LoadRegFromStackSlot(TII, PPC::CR4, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
+    if (DestReg >= PPC::CR5LT && DestReg <= PPC::CR5UN)
+      return LoadRegFromStackSlot(TII, PPC::CR5, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
+    if (DestReg >= PPC::CR6LT && DestReg <= PPC::CR6UN)
+      return LoadRegFromStackSlot(TII, PPC::CR6, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
+    if (DestReg >= PPC::CR7LT && DestReg <= PPC::CR7UN)
+      return LoadRegFromStackSlot(TII, PPC::CR7, FrameIdx,
+                                  PPC::CRRCRegisterClass, NewMIs);
   } else if (RC == PPC::VRRCRegisterClass) {
     // We don't have indexed addressing for vector loads.  Emit:
     // R0 = ADDI FI#
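
For comparison, with the hypothetical getCRBitParent helper sketched after the store hunk, this whole reload chain would reduce to a single lookup (a sketch, not the committed code):

    // Hypothetical: collapse the eight range checks via the sketched helper.
    if (unsigned CR = getCRBitParent(DestReg))
      return LoadRegFromStackSlot(TII, CR, FrameIdx,
                                  PPC::CRRCRegisterClass, NewMIs);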