author    Tim Northover <Tim.Northover@arm.com>  2013-02-05 13:24:56 +0000
committer Tim Northover <Tim.Northover@arm.com>  2013-02-05 13:24:56 +0000
commit    dfe076af9879eb68a7b8331f9c02eecf563d85be (patch)
tree      e1c1993543cc51da36b9cfc99ca0e7104a28ef33
parent    19254c49a8752fe8c6fa648a6eb29f20a1f62c8b (diff)
Fix formatting in AArch64 backend.
This should fix three purely whitespace issues:

  + 80 column violations.
  + Tab characters.
  + TableGen brace placement.

No functional changes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174370 91177308-0d34-0410-b5e6-96231b3b80d8
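The TableGen brace placement change accounts for most of the churn in the .td files below. As an illustration only (not taken from the patch; the class names here are made up), the convention being adopted keeps the opening brace on the class header line instead of on a line of its own:

    // Sketch of the style change; "ExampleBase", "OldStyle" and "NewStyle"
    // are hypothetical names, not classes from the AArch64 backend.
    class ExampleBase {
      bits<32> Inst;
    }

    // Old style: opening brace on its own line.
    class OldStyle<dag outs, dag ins> : ExampleBase
    {
      dag OutOperandList = outs;
      dag InOperandList = ins;
    }

    // New style: opening brace at the end of the header line.
    class NewStyle<dag outs, dag ins> : ExampleBase {
      dag OutOperandList = outs;
      dag InOperandList = ins;
    }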
-rw-r--r--  lib/Target/AArch64/AArch64.td                              |   2
-rw-r--r--  lib/Target/AArch64/AArch64AsmPrinter.cpp                   |   6
-rw-r--r--  lib/Target/AArch64/AArch64ConstantIslandPass.cpp           |   9
-rw-r--r--  lib/Target/AArch64/AArch64FrameLowering.cpp                |  12
-rw-r--r--  lib/Target/AArch64/AArch64FrameLowering.h                  |   2
-rw-r--r--  lib/Target/AArch64/AArch64ISelDAGToDAG.cpp                 |   9
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp                 |  58
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.h                   |   4
-rw-r--r--  lib/Target/AArch64/AArch64InstrFormats.td                  | 159
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.cpp                    |   6
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.td                     | 943
-rw-r--r--  lib/Target/AArch64/AArch64MachineFunctionInfo.h            |   4
-rw-r--r--  lib/Target/AArch64/AArch64RegisterInfo.cpp                 |   8
-rw-r--r--  lib/Target/AArch64/AArch64RegisterInfo.td                  |   6
-rw-r--r--  lib/Target/AArch64/AArch64SelectionDAGInfo.h               |   2
-rw-r--r--  lib/Target/AArch64/AArch64TargetObjectFile.cpp             |   2
-rw-r--r--  lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp          |  12
-rw-r--r--  lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp    |  28
-rw-r--r--  lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h        |   2
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp      | 134
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp   |   2
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h            |   3
-rw-r--r--  lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp        |   2
-rw-r--r--  lib/Target/AArch64/Utils/AArch64BaseInfo.cpp               |   3
24 files changed, 577 insertions, 841 deletions
diff --git a/lib/Target/AArch64/AArch64.td b/lib/Target/AArch64/AArch64.td
index 750fec7931..0e4f5fb1c5 100644
--- a/lib/Target/AArch64/AArch64.td
+++ b/lib/Target/AArch64/AArch64.td
@@ -1,4 +1,4 @@
-//===- AArch64.td - Describe the AArch64 Target Machine ---------*- tblgen -*-==//
+//===- AArch64.td - Describe the AArch64 Target Machine -------*- tblgen -*-==//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.cpp b/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 63cc88f815..61839b6ba8 100644
--- a/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -34,7 +34,8 @@ AArch64AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
// expected to be created.
assert(MI->getNumOperands() == 4 && MI->getOperand(0).isReg()
&& MI->getOperand(1).isImm() && "unexpected custom DBG_VALUE");
- return MachineLocation(MI->getOperand(0).getReg(), MI->getOperand(1).getImm());
+ return MachineLocation(MI->getOperand(0).getReg(),
+ MI->getOperand(1).getImm());
}
/// Try to print a floating-point register as if it belonged to a specified
@@ -90,7 +91,8 @@ bool AArch64AsmPrinter::printSymbolicAddress(const MachineOperand &MO,
StringRef Name;
StringRef Modifier;
switch (MO.getType()) {
- default: llvm_unreachable("Unexpected operand for symbolic address constraint");
+ default:
+ llvm_unreachable("Unexpected operand for symbolic address constraint");
case MachineOperand::MO_GlobalAddress:
Name = Mang->getSymbol(MO.getGlobal())->getName();
diff --git a/lib/Target/AArch64/AArch64ConstantIslandPass.cpp b/lib/Target/AArch64/AArch64ConstantIslandPass.cpp
index f5e5c640cd..ab482bda6a 100644
--- a/lib/Target/AArch64/AArch64ConstantIslandPass.cpp
+++ b/lib/Target/AArch64/AArch64ConstantIslandPass.cpp
@@ -46,7 +46,8 @@ STATISTIC(NumCBrFixed, "Number of cond branches fixed");
// FIXME: This option should be removed once it has received sufficient testing.
static cl::opt<bool>
AlignConstantIslands("aarch64-align-constant-islands", cl::Hidden,
- cl::init(true), cl::desc("Align constant islands in code"));
+ cl::init(true),
+ cl::desc("Align constant islands in code"));
/// Return the worst case padding that could result from unknown offset bits.
/// This does not include alignment padding caused by known offset bits.
@@ -828,7 +829,8 @@ bool AArch64ConstantIslands::isWaterInRange(unsigned UserOffset,
bool AArch64ConstantIslands::isCPEntryInRange(MachineInstr *MI,
unsigned UserOffset,
MachineInstr *CPEMI,
- unsigned OffsetBits, bool DoDump) {
+ unsigned OffsetBits,
+ bool DoDump) {
unsigned CPEOffset = getOffsetOf(CPEMI);
if (DoDump) {
@@ -930,7 +932,8 @@ int AArch64ConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
// Removing CPEs can leave empty entries, skip
if (CPEs[i].CPEMI == NULL)
continue;
- if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getOffsetBits())) {
+ if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI,
+ U.getOffsetBits())) {
DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
<< CPEs[i].CPI << "\n");
// Point the CPUser node to the replacement
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 2301114219..24d1576e5a 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -180,7 +180,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) {
- MachineLocation Dst(MachineLocation::VirtualFP, MFI->getObjectOffset(I->getFrameIdx()));
+ MachineLocation Dst(MachineLocation::VirtualFP,
+ MFI->getObjectOffset(I->getFrameIdx()));
MachineLocation Src(I->getReg());
Moves.push_back(MachineMove(CSLabel, Dst, Src));
}
@@ -537,7 +538,8 @@ AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB,
State = RegState::Define;
}
- NewMI = BuildMI(MBB, MBBI, DL, TII.get(PossClasses[ClassIdx].SingleOpcode))
+ NewMI = BuildMI(MBB, MBBI, DL,
+ TII.get(PossClasses[ClassIdx].SingleOpcode))
.addReg(CSI[i].getReg(), State);
}
@@ -549,9 +551,9 @@ AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB,
Flags = isPrologue ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
- Flags,
- Pair ? TheClass.getSize() * 2 : TheClass.getSize(),
- MFI.getObjectAlignment(FrameIdx));
+ Flags,
+ Pair ? TheClass.getSize() * 2 : TheClass.getSize(),
+ MFI.getObjectAlignment(FrameIdx));
NewMI.addFrameIndex(FrameIdx)
.addImm(0) // address-register offset
diff --git a/lib/Target/AArch64/AArch64FrameLowering.h b/lib/Target/AArch64/AArch64FrameLowering.h
index dfa66ec236..a14c2bb791 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/lib/Target/AArch64/AArch64FrameLowering.h
@@ -29,7 +29,7 @@ private:
struct LoadStoreMethod {
const TargetRegisterClass *RegClass; // E.g. GPR64RegClass
- // The preferred instruction.
+ // The preferred instruction.
unsigned PairOpcode; // E.g. LSPair64_STR
// Sometimes only a single register can be handled at once.
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 28f152c249..c9335557e2 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -72,7 +72,8 @@ public:
bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);
- bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);
+ bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
+ unsigned RegWidth);
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
@@ -130,8 +131,8 @@ AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
bool
AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps) {
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) {
switch (ConstraintCode) {
default: llvm_unreachable("Unrecognised AArch64 memory constraint");
case 'm':
@@ -152,7 +153,7 @@ AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
if (!Imm || !Imm->getValueAPF().isPosZero())
return false;
-
+
// Doesn't actually carry any information, but keeps TableGen quiet.
Dummy = CurDAG->getTargetConstant(0, MVT::i32);
return true;
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9b26b1fed9..2158b05f63 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -841,7 +841,8 @@ AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
DebugLoc DL, SDValue &Chain) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- AArch64MachineFunctionInfo *FuncInfo = MF.getInfo<AArch64MachineFunctionInfo>();
+ AArch64MachineFunctionInfo *FuncInfo
+ = MF.getInfo<AArch64MachineFunctionInfo>();
SmallVector<SDValue, 8> MemOps;
@@ -1045,10 +1046,11 @@ AArch64TargetLowering::LowerReturn(SDValue Chain,
SDValue Flag;
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
- // PCS: "If the type, T, of the result of a function is such that void func(T
- // arg) would require that arg be passed as a value in a register (or set of
- // registers) according to the rules in 5.4, then the result is returned in
- // the same registers as would be used for such an argument.
+ // PCS: "If the type, T, of the result of a function is such that
+ // void func(T arg) would require that arg be passed as a value in a
+ // register (or set of registers) according to the rules in 5.4, then the
+ // result is returned in the same registers as would be used for such an
+ // argument.
//
// Otherwise, the caller shall reserve a block of memory of sufficient
// size and alignment to hold the result. The address of the memory block
@@ -1166,7 +1168,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
if (!IsSibCall)
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
- SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP, getPointerTy());
+ SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
+ getPointerTy());
SmallVector<SDValue, 8> MemOpChains;
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
@@ -1874,9 +1877,10 @@ AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
if (Alignment == 0) {
const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
- if (GVPtrTy->getElementType()->isSized())
- Alignment = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
- else {
+ if (GVPtrTy->getElementType()->isSized()) {
+ Alignment
+ = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
+ } else {
// Be conservative if we can't guess, not that it really matters:
// functions and labels aren't valid for loads, and the methods used to
// actually calculate an address work with any alignment.
@@ -1954,7 +1958,8 @@ SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
Ops.push_back(Glue);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
+ Ops.size());
Glue = Chain.getValue(1);
// After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
@@ -1995,7 +2000,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
DAG.getTargetConstant(0, MVT::i32)), 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, TPOff, LoVar,
+ TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
+ TPOff, LoVar,
DAG.getTargetConstant(0, MVT::i32)), 0);
} else if (Model == TLSModel::GeneralDynamic) {
// Accesses used in this sequence go via the TLS descriptor which lives in
@@ -2005,7 +2011,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
AArch64II::MO_TLSDESC_LO12);
SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
- HiDesc, LoDesc, DAG.getConstant(8, MVT::i32));
+ HiDesc, LoDesc,
+ DAG.getConstant(8, MVT::i32));
SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
@@ -2027,7 +2034,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
AArch64II::MO_TLSDESC_LO12);
SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
- HiDesc, LoDesc, DAG.getConstant(8, MVT::i32));
+ HiDesc, LoDesc,
+ DAG.getConstant(8, MVT::i32));
SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
@@ -2040,7 +2048,8 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
DAG.getTargetConstant(0, MVT::i32)), 0);
- TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, TPOff, LoVar,
+ TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
+ TPOff, LoVar,
DAG.getTargetConstant(0, MVT::i32)), 0);
} else
llvm_unreachable("Unsupported TLS access model");
@@ -2123,7 +2132,8 @@ AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
DAG.getCondCode(CC));
- SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
+ SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
+ Op.getValueType(),
SetCC, IfTrue, IfFalse, A64cc);
if (Alternative != A64CC::Invalid) {
@@ -2231,7 +2241,8 @@ AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// The layout of the va_list struct is specified in the AArch64 Procedure Call
// Standard, section B.3.
MachineFunction &MF = DAG.getMachineFunction();
- AArch64MachineFunctionInfo *FuncInfo = MF.getInfo<AArch64MachineFunctionInfo>();
+ AArch64MachineFunctionInfo *FuncInfo
+ = MF.getInfo<AArch64MachineFunctionInfo>();
DebugLoc DL = Op.getDebugLoc();
SDValue Chain = Op.getOperand(0);
@@ -2365,7 +2376,7 @@ static SDValue PerformANDCombine(SDNode *N,
}
static SDValue PerformATOMIC_FENCECombine(SDNode *FenceNode,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI) {
// An atomic operation followed by an acquiring atomic fence can be reduced to
// an acquiring load. The atomic operation provides a convenient pointer to
// load from. If the original operation was a load anyway we can actually
@@ -2407,7 +2418,7 @@ static SDValue PerformATOMIC_FENCECombine(SDNode *FenceNode,
}
static SDValue PerformATOMIC_STORECombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI) {
// A releasing atomic fence followed by an atomic store can be combined into a
// single store operation.
SelectionDAG &DAG = DCI.DAG;
@@ -2821,7 +2832,8 @@ AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
}
// FIXME: Ump, Utf, Usa, Ush
- // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes, whatever they may be
+ // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
+ // whatever they may be
// Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
// Usa: An absolute symbolic address
// Ush: The high part (bits 32:12) of a pc-relative symbolic address
@@ -2893,7 +2905,8 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(),
GA->getValueType(0));
- } else if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
+ } else if (const BlockAddressSDNode *BA
+ = dyn_cast<BlockAddressSDNode>(Op)) {
Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
BA->getValueType(0));
} else if (const ExternalSymbolSDNode *ES
@@ -2924,8 +2937,9 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
}
std::pair<unsigned, const TargetRegisterClass*>
-AArch64TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+AArch64TargetLowering::getRegForInlineAsmConstraint(
+ const std::string &Constraint,
+ EVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index ec4e432302..4960d286e9 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -161,8 +161,8 @@ public:
SelectionDAG& DAG) const;
/// Finds the incoming stack arguments which overlap the given fixed stack
- /// object and incorporates their load into the current chain. This prevents an
- /// upcoming store from clobbering the stack argument before it's used.
+ /// object and incorporates their load into the current chain. This prevents
+ /// an upcoming store from clobbering the stack argument before it's used.
SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
MachineFrameInfo *MFI, int ClobberedFI) const;
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index ce663960d4..c6aa265638 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -16,8 +16,7 @@
// architecture.
class A64Inst<dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin>
- : Instruction
-{
+ : Instruction {
// All A64 instructions are 32-bit. This field will be filled in
// graually going down the hierarchy.
field bits<32> Inst;
@@ -40,8 +39,7 @@ class A64Inst<dag outs, dag ins, string asmstr, list<dag> patterns,
let Itinerary = itin;
}
-class PseudoInst<dag outs, dag ins, list<dag> patterns> : Instruction
-{
+class PseudoInst<dag outs, dag ins, list<dag> patterns> : Instruction {
let Namespace = "AArch64";
let OutOperandList = outs;
@@ -54,8 +52,7 @@ class PseudoInst<dag outs, dag ins, list<dag> patterns> : Instruction
// Represents a pseudo-instruction that represents a single A64 instruction for
// whatever reason, the eventual result will be a 32-bit real instruction.
class A64PseudoInst<dag outs, dag ins, list<dag> patterns>
- : PseudoInst<outs, ins, patterns>
-{
+ : PseudoInst<outs, ins, patterns> {
let Size = 4;
}
@@ -70,8 +67,7 @@ class A64PseudoExpand<dag outs, dag ins, list<dag> patterns, dag Result>
class A64InstRd<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin>
-{
+ : A64Inst<outs, ins, asmstr, patterns, itin> {
bits<5> Rd;
let Inst{4-0} = Rd;
@@ -79,8 +75,7 @@ class A64InstRd<dag outs, dag ins, string asmstr,
class A64InstRt<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin>
-{
+ : A64Inst<outs, ins, asmstr, patterns, itin> {
bits<5> Rt;
let Inst{4-0} = Rt;
@@ -89,8 +84,7 @@ class A64InstRt<dag outs, dag ins, string asmstr,
class A64InstRdn<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRd<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRd<outs, ins, asmstr, patterns, itin> {
// Inherit rdt
bits<5> Rn;
@@ -99,8 +93,7 @@ class A64InstRdn<dag outs, dag ins, string asmstr,
class A64InstRtn<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRt<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRt<outs, ins, asmstr, patterns, itin> {
// Inherit rdt
bits<5> Rn;
@@ -110,8 +103,7 @@ class A64InstRtn<dag outs, dag ins, string asmstr,
// Instructions taking Rt,Rt2,Rn
class A64InstRtt2n<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRtn<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRtn<outs, ins, asmstr, patterns, itin> {
bits<5> Rt2;
let Inst{14-10} = Rt2;
@@ -119,8 +111,7 @@ class A64InstRtt2n<dag outs, dag ins, string asmstr,
class A64InstRdnm<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
bits<5> Rm;
let Inst{20-16} = Rm;
@@ -135,8 +126,7 @@ class A64InstRdnm<dag outs, dag ins, string asmstr,
class A64I_addsubext<bit sf, bit op, bit S, bits<2> opt, bits<3> option,
dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
bits<3> Imm3;
let Inst{31} = sf;
@@ -156,8 +146,7 @@ class A64I_addsubext<bit sf, bit op, bit S, bits<2> opt, bits<3> option,
class A64I_addsubimm<bit sf, bit op, bit S, bits<2> shift,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
bits<12> Imm12;
let Inst{31} = sf;
@@ -172,8 +161,7 @@ class A64I_addsubimm<bit sf, bit op, bit S, bits<2> shift,
class A64I_addsubshift<bit sf, bit op, bit S, bits<2> shift,
dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
bits<6> Imm6;
let Inst{31} = sf;
@@ -192,8 +180,7 @@ class A64I_addsubshift<bit sf, bit op, bit S, bits<2> shift,
class A64I_addsubcarry<bit sf, bit op, bit S, bits<6> opcode2,
dag outs, dag ins, string asmstr, list<dag> patterns,
InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
let Inst{31} = sf;
let Inst{30} = op;
let Inst{29} = S;
@@ -209,8 +196,7 @@ class A64I_addsubcarry<bit sf, bit op, bit S, bits<6> opcode2,
class A64I_bitfield<bit sf, bits<2> opc, bit n,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
bits<6> ImmR;
bits<6> ImmS;
@@ -228,8 +214,7 @@ class A64I_bitfield<bit sf, bits<2> opc, bit n,
class A64I_cmpbr<bit sf, bit op,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRt<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRt<outs, ins, asmstr, patterns, itin> {
bits<19> Label;
let Inst{31} = sf;
@@ -243,8 +228,7 @@ class A64I_cmpbr<bit sf, bit op,
class A64I_condbr<bit o1, bit o0,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin>
-{
+ : A64Inst<outs, ins, asmstr, patterns, itin> {
bits<19> Label;
bits<4> Cond;
@@ -259,8 +243,7 @@ class A64I_condbr<bit o1, bit o0,
class A64I_condcmpimm<bit sf, bit op, bit o2, bit o3, bit s,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin>
-{
+ : A64Inst<outs, ins, asmstr, patterns, itin> {
bits<5> Rn;
bits<5> UImm5;
bits<4> NZCVImm;
@@ -283,8 +266,7 @@ class A64I_condcmpimm<bit sf, bit op, bit o2, bit o3, bit s,
class A64I_condcmpreg<bit sf, bit op, bit o2, bit o3, bit s,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin>
-{
+ : A64Inst<outs, ins, asmstr, patterns, itin> {
bits<5> Rn;
bits<5> Rm;
bits<4> NZCVImm;
@@ -308,8 +290,7 @@ class A64I_condcmpreg<bit sf, bit op, bit o2, bit o3, bit s,
class A64I_condsel<bit sf, bit op, bit s, bits<2> op2,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
bits<4> Cond;
let Inst{31} = sf;
@@ -327,8 +308,7 @@ class A64I_condsel<bit sf, bit op, bit s, bits<2> op2,
class A64I_dp_1src<bit sf, bit S, bits<5> opcode2, bits<6> opcode,
string asmstr, dag outs, dag ins,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
let Inst{31} = sf;
let Inst{30} = 0b1;
let Inst{29} = S;
@@ -341,8 +321,7 @@ class A64I_dp_1src<bit sf, bit S, bits<5> opcode2, bits<6> opcode,
class A64I_dp_2src<bit sf, bits<6> opcode, bit S,
string asmstr, dag outs, dag ins,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
let Inst{31} = sf;
let Inst{30} = 0b0;
let Inst{29} = S;
@@ -355,8 +334,7 @@ class A64I_dp_2src<bit sf, bits<6> opcode, bit S,
class A64I_dp3<bit sf, bits<6> opcode,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
bits<5> Ra;
let Inst{31} = sf;
@@ -374,8 +352,7 @@ class A64I_dp3<bit sf, bits<6> opcode,
class A64I_exception<bits<3> opc, bits<3> op2, bits<2> ll,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin>
-{
+ : A64Inst<outs, ins, asmstr, patterns, itin> {
bits<16> UImm16;
let Inst{31-24} = 0b11010100;
@@ -389,8 +366,7 @@ class A64I_exception<bits<3> opc, bits<3> op2, bits<2> ll,
class A64I_extract<bit sf, bits<3> op, bit n,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
bits<6> LSB;
let Inst{31} = sf;
@@ -408,8 +384,7 @@ class A64I_extract<bit sf, bits<3> op, bit n,
class A64I_fpcmp<bit m, bit s, bits<2> type, bits<2> op, bits<5> opcode2,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64Inst<outs, ins, asmstr, patterns, itin>
-{
+ : A64Inst<outs, ins, asmstr, patterns, itin> {
bits<5> Rn;
bits<5> Rm;
@@ -430,8 +405,7 @@ class A64I_fpcmp<bit m, bit s, bits<2> type, bits<2> op, bits<5> opcode2,
class A64I_fpccmp<bit m, bit s, bits<2> type, bit op,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
bits<5> Rn;
bits<5> Rm;
bits<4> NZCVImm;
@@ -455,8 +429,7 @@ class A64I_fpccmp<bit m, bit s, bits<2> type, bit op,
class A64I_fpcondsel<bit m, bit s, bits<2> type,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
bits<4> Cond;
let Inst{31} = m;
@@ -477,8 +450,7 @@ class A64I_fpcondsel<bit m, bit s, bits<2> type,
class A64I_fpdp1<bit m, bit s, bits<2> type, bits<6> opcode,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdn<outs, ins, asmstr, patterns, itin> {
let Inst{31} = m;
let Inst{30} = 0b0;
let Inst{29} = s;
@@ -495,8 +467,7 @@ class A64I_fpdp1<bit m, bit s, bits<2> type, bits<6> opcode,
class A64I_fpdp2<bit m, bit s, bits<2> type, bits<4> opcode,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
let Inst{31} = m;
let Inst{30} = 0b0;
let Inst{29} = s;
@@ -514,8 +485,7 @@ class A64I_fpdp2<bit m, bit s, bits<2> type, bits<4> opcode,
class A64I_fpdp3<bit m, bit s, bits<2> type, bit o1, bit o0,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdnm<outs, ins, asmstr, patterns, itin>
-{
+ : A64InstRdnm<outs, ins, asmstr, patterns, itin> {
bits<5> Ra;
let Inst{31} = m;
@@ -535,8 +505,7 @@ class A64I_fpdp3<bit m, bit s, bits<2> type, bit o1, bit o0,
class A64I_fpfixed<bit sf, bit s, bits<2> type, bits<2> mode, bits<3> opcode,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
- : A64InstRdn<outs, ins, asmstr, patterns, itin>