-rw-r--r--  lib/Target/Mips/MipsAsmPrinter.cpp    17
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp  11
-rw-r--r--  lib/Target/Mips/MipsISelLowering.h     2
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.td       7
-rw-r--r--  test/CodeGen/Mips/unalignedload.ll    16
5 files changed, 51 insertions, 2 deletions
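In brief, this patch adds basic support for unaligned i32 memory accesses to the Mips backend: a new allowsUnalignedMemoryAccesses hook keeps under-aligned i32 loads and stores out of legalization's expansion path, byval argument lowering now propagates the argument's real alignment instead of assuming word alignment, and the asm printer rewrites under-aligned LW/SW into the ulw/usw assembler macros. A new CodeGen test exercises the byval case.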
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 049a939340..4013d6c110 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
@@ -54,6 +55,22 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MipsMCInstLower MCInstLowering(Mang, *MF, *this);
MCInst TmpInst0;
MCInstLowering.Lower(MI, TmpInst0);
+ unsigned Opc = MI->getOpcode();
+
+ // Rewrite LW/SW with under-aligned memory operands as ULW/USW.
+ // FIXME: expand other unaligned memory accesses too.
+ if ((Opc == Mips::LW || Opc == Mips::SW) && !MI->memoperands_empty() &&
+ (*MI->memoperands_begin())->getAlignment() < 4) {
+ MCInst Directive;
+ Directive.setOpcode(Mips::MACRO);
+ OutStreamer.EmitInstruction(Directive);
+ TmpInst0.setOpcode(Opc == Mips::LW ? Mips::ULW : Mips::USW);
+ OutStreamer.EmitInstruction(TmpInst0);
+ Directive.setOpcode(Mips::NOMACRO);
+ OutStreamer.EmitInstruction(Directive);
+ return;
+ }
+
OutStreamer.EmitInstruction(TmpInst0);
}
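The MACRO/NOMACRO pseudos emit the .set macro and .set nomacro assembler directives, so the ulw/usw printed between them are assembler macros that the assembler may expand (conventionally into an lwl/lwr or swl/swr pair). As a rough sketch of the input this hunk handles (hypothetical, not part of this commit), an i32 load with alignment below 4 is still selected as an ordinary LW but now printed as ulw:

    ; Hypothetical input for llc -march=mips exercising the new ULW path.
    ; The load is selected as LW; its align-1 memory operand then makes
    ; EmitInstruction print it as ulw between .set macro/.set nomacro.
    define i32 @load_u32(i32* %p) nounwind {
    entry:
      %v = load i32* %p, align 1
      ret i32 %v
    }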
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 54fa2d40b0..28ab854c38 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -195,6 +195,11 @@ MipsTargetLowering(MipsTargetMachine &TM)
setExceptionSelectorRegister(Mips::A1);
}
+bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
+ // FIXME: allow unaligned memory accesses for other types too.
+ return VT.getSimpleVT().SimpleTy == MVT::i32;
+}
+
MVT::SimpleValueType MipsTargetLowering::getSetCCResultType(EVT VT) const {
return MVT::i32;
}
@@ -1685,6 +1690,7 @@ WriteByValArg(SDValue& Chain, DebugLoc dl,
unsigned NumWords = (Flags.getByValSize() + 3) / 4;
unsigned LastWord = FirstWord + NumWords;
unsigned CurWord;
+ unsigned ByValAlign = Flags.getByValAlign();
// copy the first 4 words of byval arg to registers A0 - A3
for (CurWord = FirstWord; CurWord < std::min(LastWord, O32IntRegsSize);
@@ -1694,7 +1700,8 @@ WriteByValArg(SDValue& Chain, DebugLoc dl,
MVT::i32));
SDValue LoadVal = DAG.getLoad(MVT::i32, dl, Chain, LoadPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, std::min(ByValAlign,
+ (unsigned)4));
MemOpChains.push_back(LoadVal.getValue(1));
unsigned DstReg = O32IntRegs[CurWord];
RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
@@ -1710,7 +1717,7 @@ WriteByValArg(SDValue& Chain, DebugLoc dl,
SDValue Dst = DAG.getFrameIndex(LastFI, PtrType);
Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
DAG.getConstant(SizeInBytes, MVT::i32),
- /*Align*/4,
+ /*Align*/ByValAlign,
/*isVolatile=*/false, /*AlwaysInline=*/false,
MachinePointerInfo(0), MachinePointerInfo(0));
MemOpChains.push_back(Chain);
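Two things change in this file: the new allowsUnalignedMemoryAccesses hook stops legalization from expanding under-aligned i32 loads and stores into byte-level sequences, and WriteByValArg threads the byval argument's actual alignment into both the per-word register loads and the residual memcpy instead of hard-coding word alignment. Stores go through the same hook; a minimal hypothetical example that should now be selected as a plain SW with an align-2 memory operand and printed as usw:

    ; Hypothetical input for llc -march=mips exercising the USW path.
    define void @store_u32(i32* %p, i32 %v) nounwind {
    entry:
      store i32 %v, i32* %p, align 2
      ret void
    }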
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index cfb96ee7f6..2d8661987a 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -95,6 +95,8 @@ namespace llvm {
public:
explicit MipsTargetLowering(MipsTargetMachine &TM);
+ virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
+
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index d43961e9c7..0680c36b6f 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -474,6 +474,13 @@ let usesCustomInserter = 1 in {
def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, "32">;
}
+// Unaligned memory load and store (assembler macros).
+// MipsAsmPrinter::EmitInstruction rewrites under-aligned LW/SW to these.
+def ULW :
+ MipsPseudo<(outs CPURegs:$dst), (ins mem:$addr), "ulw\t$dst, $addr", []>;
+def USW :
+ MipsPseudo<(outs), (ins CPURegs:$dst, mem:$addr), "usw\t$dst, $addr", []>;
+
//===----------------------------------------------------------------------===//
// Instruction definition
//===----------------------------------------------------------------------===//
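Both definitions carry an empty pattern list, so instruction selection never creates ULW or USW on its own; they exist only so MipsAsmPrinter::EmitInstruction can swap opcodes on an already-lowered MCInst. The assembler then expands each macro, conventionally into an lwl/lwr (or swl/swr) pair, which is why the emission site re-enables macro mode around them.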
diff --git a/test/CodeGen/Mips/unalignedload.ll b/test/CodeGen/Mips/unalignedload.ll
new file mode 100644
index 0000000000..072f0574e7
--- /dev/null
+++ b/test/CodeGen/Mips/unalignedload.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=mips < %s | FileCheck %s
+
+%struct.S2 = type { %struct.S1, %struct.S1 }
+%struct.S1 = type { i8, i8 }
+
+@s2 = common global %struct.S2 zeroinitializer, align 1
+
+define void @foo1() nounwind {
+entry:
+; CHECK: ulw ${{[0-9]+}}, 2
+
+ tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2* @s2, i32 0, i32 1)) nounwind
+ ret void
+}
+
+declare void @foo2(%struct.S1* byval)
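The CHECK pattern pins the offset rather than the register: the byval copy of the second %struct.S1 loads one word from s2+2, and since min(ByValAlign, 4) is below 4 here, that load is emitted as ulw. A byval argument aligned to 4 or more would take the min(ByValAlign, 4) = 4 path and keep a plain lw.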