diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 4b74775188501..b39d1d472763b 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -1990,9 +1990,6 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args, // Enable interprocedural register allocation by default on NanoMips if (Triple.isNanoMips()) { - CmdArgs.push_back("-mllvm"); - CmdArgs.push_back("-enable-ipra"); - // Change inlining thresholds. if (Arg *A = Args.getLastArg(options::OPT_O_Group)) { if (A->getOption().matches(options::OPT_O)) { diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h index 6148f968cdbaa..a332378a970cd 100644 --- a/llvm/include/llvm/BinaryFormat/ELF.h +++ b/llvm/include/llvm/BinaryFormat/ELF.h @@ -318,6 +318,7 @@ enum { EM_RISCV = 243, // RISC-V EM_LANAI = 244, // Lanai 32-bit processor EM_BPF = 247, // Linux kernel bpf virtual machine + EM_NANOMIPS = 249, // MIPS Tech nanoMIPS architecture EM_VE = 251, // NEC SX-Aurora VE EM_CSKY = 252, // C-SKY 32-bit processor }; @@ -593,6 +594,38 @@ enum { ODK_PAGESIZE = 11 // Page size information }; +// ELF Relocation types for Mips +enum { +#include "ELFRelocs/NanoMips.def" +}; + +// NanoMips specific e_flags +enum : unsigned { + // File may be relaxed by the linker. + EF_NANOMIPS_LINKRELAX = 0x00000001, + // File contains position independent code. + EF_NANOMIPS_PIC = 0x00000002, + // Indicates code compiled for a 64-bit machine in 32-bit mode + // (regs are 32-bits wide). + EF_NANOMIPS_32BITMODE = 0x00000004, + // Indicate that all data access in this object is GP-relative + EF_NANOMIPS_PID = 0x00000008, + // Indicate that this object does not use absolute addressing. + EF_NANOMIPS_PCREL = 0x00000010, + // Four bit nanoMIPS architecture field. + EF_NANOMIPS_ARCH = 0xf0000000, + // -march=32r6 code. + E_NANOMIPS_ARCH_32R6 = 0x00000000, + // -march=64r6 code. + E_NANOMIPS_ARCH_64R6 = 0x10000000, + // The ABI of the file. 
+ EF_NANOMIPS_ABI = 0x0000f000, + // nanoMIPS ABI in 32 bit mode. + E_NANOMIPS_ABI_P32 = 0x00001000, + // nanoMIPS ABI in 64 bit mode. + E_NANOMIPS_ABI_P64 = 0x00002000 +}; + // Hexagon-specific e_flags enum { // Object processor version flags, bits[11:0] diff --git a/llvm/include/llvm/BinaryFormat/ELFRelocs/NanoMips.def b/llvm/include/llvm/BinaryFormat/ELFRelocs/NanoMips.def new file mode 100644 index 0000000000000..af905ecb25fd0 --- /dev/null +++ b/llvm/include/llvm/BinaryFormat/ELFRelocs/NanoMips.def @@ -0,0 +1,81 @@ +#ifndef ELF_RELOC +#error "ELF_RELOC must be defined" +#endif + +ELF_RELOC(R_NANOMIPS_NONE, 0) +ELF_RELOC(R_NANOMIPS_32, 1) +ELF_RELOC(R_NANOMIPS_64, 2) +ELF_RELOC(R_NANOMIPS_NEG, 3) +ELF_RELOC(R_NANOMIPS_ASHIFTR_1, 4) +ELF_RELOC(R_NANOMIPS_UNSIGNED_8, 5) +ELF_RELOC(R_NANOMIPS_SIGNED_8, 6) +ELF_RELOC(R_NANOMIPS_UNSIGNED_16, 7) +ELF_RELOC(R_NANOMIPS_SIGNED_16, 8) +ELF_RELOC(R_NANOMIPS_RELATIVE, 9) +ELF_RELOC(R_NANOMIPS_GLOBAL, 10) +ELF_RELOC(R_NANOMIPS_JUMP_SLOT, 11) +ELF_RELOC(R_NANOMIPS_IRELATIVE, 12) +ELF_RELOC(R_NANOMIPS_PC25_S1, 13) +ELF_RELOC(R_NANOMIPS_PC21_S1, 14) +ELF_RELOC(R_NANOMIPS_PC14_S1, 15) +ELF_RELOC(R_NANOMIPS_PC11_S1, 16) +ELF_RELOC(R_NANOMIPS_PC10_S1, 17) +ELF_RELOC(R_NANOMIPS_PC7_S1, 18) +ELF_RELOC(R_NANOMIPS_PC4_S1, 19) +ELF_RELOC(R_NANOMIPS_GPREL19_S2, 20) +ELF_RELOC(R_NANOMIPS_GPREL18_S3, 21) +ELF_RELOC(R_NANOMIPS_GPREL18, 22) +ELF_RELOC(R_NANOMIPS_GPREL17_S1, 23) +ELF_RELOC(R_NANOMIPS_GPREL16_S2, 24) +ELF_RELOC(R_NANOMIPS_GPREL7_S2, 25) +ELF_RELOC(R_NANOMIPS_GPREL_HI20, 26) +ELF_RELOC(R_NANOMIPS_PCHI20, 27) +ELF_RELOC(R_NANOMIPS_HI20, 28) +ELF_RELOC(R_NANOMIPS_LO12, 29) +ELF_RELOC(R_NANOMIPS_GPREL_I32, 30) +ELF_RELOC(R_NANOMIPS_PC_I32, 31) +ELF_RELOC(R_NANOMIPS_I32, 32) +ELF_RELOC(R_NANOMIPS_GOT_DISP, 33) +ELF_RELOC(R_NANOMIPS_GOTPC_I32, 34) +ELF_RELOC(R_NANOMIPS_GOTPC_HI20, 35) +ELF_RELOC(R_NANOMIPS_GOT_LO12, 36) +ELF_RELOC(R_NANOMIPS_GOT_CALL, 37) +ELF_RELOC(R_NANOMIPS_GOT_PAGE, 38) +ELF_RELOC(R_NANOMIPS_GOT_OFST, 39) 
+ELF_RELOC(R_NANOMIPS_LO4_S2, 40) +ELF_RELOC(R_NANOMIPS_RESERVED1, 41) +ELF_RELOC(R_NANOMIPS_GPREL_LO12, 42) +ELF_RELOC(R_NANOMIPS_SCN_DISP, 43) +ELF_RELOC(R_NANOMIPS_COPY, 44) +ELF_RELOC(R_NANOMIPS_ALIGN, 64) +ELF_RELOC(R_NANOMIPS_FILL, 65) +ELF_RELOC(R_NANOMIPS_MAX, 66) +ELF_RELOC(R_NANOMIPS_INSN32, 67) +ELF_RELOC(R_NANOMIPS_FIXED, 68) +ELF_RELOC(R_NANOMIPS_NORELAX, 69) +ELF_RELOC(R_NANOMIPS_RELAX, 70) +ELF_RELOC(R_NANOMIPS_SAVERESTORE, 71) +ELF_RELOC(R_NANOMIPS_INSN16, 72) +ELF_RELOC(R_NANOMIPS_JALR32, 73) +ELF_RELOC(R_NANOMIPS_JALR16, 74) +ELF_RELOC(R_NANOMIPS_JUMPTABLE_LOAD, 75) +ELF_RELOC(R_NANOMIPS_FRAME_REG, 76) +ELF_RELOC(R_NANOMIPS_TLS_DTPMOD, 80) +ELF_RELOC(R_NANOMIPS_TLS_DTPREL, 81) +ELF_RELOC(R_NANOMIPS_TLS_TPREL, 82) +ELF_RELOC(R_NANOMIPS_TLS_GD, 83) +ELF_RELOC(R_NANOMIPS_TLS_GD_I32, 84) +ELF_RELOC(R_NANOMIPS_TLS_LD, 85) +ELF_RELOC(R_NANOMIPS_TLS_LD_I32, 86) +ELF_RELOC(R_NANOMIPS_TLS_DTPREL12, 87) +ELF_RELOC(R_NANOMIPS_TLS_DTPREL16, 88) +ELF_RELOC(R_NANOMIPS_TLS_DTPREL_I32, 89) +ELF_RELOC(R_NANOMIPS_TLS_GOTTPREL, 90) +ELF_RELOC(R_NANOMIPS_TLS_GOTTPREL_PC_I32, 91) +ELF_RELOC(R_NANOMIPS_TLS_TPREL12, 92) +ELF_RELOC(R_NANOMIPS_TLS_TPREL16, 93) +ELF_RELOC(R_NANOMIPS_TLS_TPREL_I32, 94) +ELF_RELOC(R_NANOMIPS_PC32, 248) +ELF_RELOC(R_NANOMIPS_EH, 249) +ELF_RELOC(R_NANOMIPS_GNU_VTINHERIT, 253) +ELF_RELOC(R_NANOMIPS_GNU_VTENTRY, 254) diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h index 92ce5b737090c..8abecb861c518 100644 --- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h +++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h @@ -565,6 +565,13 @@ class TargetRegisterInfo : public MCRegisterInfo { virtual bool isCalleeSavedPhysReg(MCRegister PhysReg, const MachineFunction &MF) const; + // Return true if the register is needed for returning from the + // function and so must be preserved in the callee even if preserved + // by the caller + virtual bool isNeededForReturn(MCRegister PhysReg, const 
MachineFunction &MF) const { + return false; + } + /// Prior to adding the live-out mask to a stackmap or patchpoint /// instruction, provide the target the opportunity to adjust it (mainly to /// remove pseudo-registers that should be ignored). diff --git a/llvm/include/llvm/module.modulemap b/llvm/include/llvm/module.modulemap index 848fb266374ef..6b0cddc3153fb 100644 --- a/llvm/include/llvm/module.modulemap +++ b/llvm/include/llvm/module.modulemap @@ -72,6 +72,7 @@ module LLVM_BinaryFormat { textual header "BinaryFormat/ELFRelocs/Lanai.def" textual header "BinaryFormat/ELFRelocs/M68k.def" textual header "BinaryFormat/ELFRelocs/Mips.def" + textual header "BinaryFormat/ELFRelocs/NanoMips.def" textual header "BinaryFormat/ELFRelocs/MSP430.def" textual header "BinaryFormat/ELFRelocs/PowerPC64.def" textual header "BinaryFormat/ELFRelocs/PowerPC.def" diff --git a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp index b0594ec086b28..fafe719c5fa8c 100644 --- a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp +++ b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp @@ -87,10 +87,9 @@ void TargetFrameLowering::determineCalleeSaves(MachineFunction &MF, // When interprocedural register allocation is enabled caller saved registers // are preferred over callee saved registers. - if (MF.getTarget().Options.EnableIPRA && - isSafeForNoCSROpt(MF.getFunction()) && - isProfitableForNoCSROpt(MF.getFunction())) - return; + bool NoCSR = (MF.getTarget().Options.EnableIPRA && + isSafeForNoCSROpt(MF.getFunction()) && + isProfitableForNoCSROpt(MF.getFunction())); // Get the callee saved register list... const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs(); @@ -119,10 +118,12 @@ void TargetFrameLowering::determineCalleeSaves(MachineFunction &MF, // Functions which call __builtin_unwind_init get all their registers saved. 
bool CallsUnwindInit = MF.callsUnwindInit(); const MachineRegisterInfo &MRI = MF.getRegInfo(); + const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); for (unsigned i = 0; CSRegs[i]; ++i) { unsigned Reg = CSRegs[i]; if (CallsUnwindInit || MRI.isPhysRegModified(Reg)) - SavedRegs.set(Reg); + if (!NoCSR || !MRI.isAllocatable(Reg) || RI->isNeededForReturn(Reg, MF)) + SavedRegs.set(Reg); } } diff --git a/llvm/lib/MC/MCAsmStreamer.cpp b/llvm/lib/MC/MCAsmStreamer.cpp index 72f4ee3f33beb..8e4a215d50435 100644 --- a/llvm/lib/MC/MCAsmStreamer.cpp +++ b/llvm/lib/MC/MCAsmStreamer.cpp @@ -2166,7 +2166,7 @@ void MCAsmStreamer::AddEncodingComment(const MCInst &Inst, for (unsigned j = 0; j != Info.TargetSize; ++j) { unsigned Index = F.getOffset() * 8 + Info.TargetOffset + j; assert(Index < Code.size() * 8 && "Invalid offset in fixup!"); - FixupMap[Index] = 1 + i; + FixupMap[Index] = 1 + i; } } diff --git a/llvm/lib/Object/ELF.cpp b/llvm/lib/Object/ELF.cpp index ca2ed4449120d..bcb44172ef78f 100644 --- a/llvm/lib/Object/ELF.cpp +++ b/llvm/lib/Object/ELF.cpp @@ -51,8 +51,15 @@ StringRef llvm::object::getELFRelocationTypeName(uint32_t Machine, break; } break; + case ELF::EM_NANOMIPS: + switch (Type) { +#include "llvm/BinaryFormat/ELFRelocs/NanoMips.def" + default: + break; + } + break; case ELF::EM_AARCH64: - switch (Type) { + switch (Type) { #include "llvm/BinaryFormat/ELFRelocs/AArch64.def" default: break; @@ -182,6 +189,7 @@ uint32_t llvm::object::getELFRelativeRelocationType(uint32_t Machine) { case ELF::EM_IAMCU: return ELF::R_386_RELATIVE; case ELF::EM_MIPS: + case ELF::EM_NANOMIPS: break; case ELF::EM_AARCH64: return ELF::R_AARCH64_RELATIVE; diff --git a/llvm/lib/ObjectYAML/ELFYAML.cpp b/llvm/lib/ObjectYAML/ELFYAML.cpp index 50821544a687f..f0d65ca195893 100644 --- a/llvm/lib/ObjectYAML/ELFYAML.cpp +++ b/llvm/lib/ObjectYAML/ELFYAML.cpp @@ -787,6 +787,9 @@ void ScalarEnumerationTraits::enumeration( case ELF::EM_MIPS: #include
"llvm/BinaryFormat/ELFRelocs/Mips.def" break; + case ELF::EM_NANOMIPS: +#include "llvm/BinaryFormat/ELFRelocs/NanoMips.def" + break; case ELF::EM_HEXAGON: #include "llvm/BinaryFormat/ELFRelocs/Hexagon.def" break; diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index b87b674aed3eb..40d84911c5206 100644 --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -22,6 +22,7 @@ #include "llvm/ADT/Twine.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstrDesc.h" @@ -194,6 +195,7 @@ class MipsAsmParser : public MCTargetAsmParser { bool ParseDirective(AsmToken DirectiveID) override; OperandMatchResultTy parseMemOperand(OperandVector &Operands); + OperandMatchResultTy parseMemNMRX(OperandVector &Operands); OperandMatchResultTy matchAnyRegisterNameWithoutDollar(OperandVector &Operands, StringRef Identifier, SMLoc S); @@ -207,6 +209,7 @@ class MipsAsmParser : public MCTargetAsmParser { OperandMatchResultTy parseJumpTarget(OperandVector &Operands); OperandMatchResultTy parseInvNum(OperandVector &Operands); OperandMatchResultTy parseRegisterList(OperandVector &Operands); + OperandMatchResultTy parseNMRegisterList(OperandVector &Operands); bool searchSymbolAlias(OperandVector &Operands); @@ -409,6 +412,7 @@ class MipsAsmParser : public MCTargetAsmParser { bool parseDirectiveTpRelDWord(); bool parseDirectiveModule(); bool parseDirectiveModuleFP(); + bool parseDirectiveLinkRelax(); bool parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI, StringRef Directive); @@ -519,6 +523,12 @@ class MipsAsmParser : public MCTargetAsmParser { Match_RequiresPosSizeRange0_32, Match_RequiresPosSizeRange33_64, Match_RequiresPosSizeUImm6, + Match_RequiresDstRegPair, + Match_RequiresSrcRegPair, + Match_RequiresFirstOpLT, + Match_RequiresFirstOpGE, + 
Match_RequiresBaseGP, + Match_RequiresBaseSP, #define GET_OPERAND_DIAGNOSTIC_TYPES #include "MipsGenAsmMatcher.inc" #undef GET_OPERAND_DIAGNOSTIC_TYPES @@ -598,6 +608,7 @@ class MipsAsmParser : public MCTargetAsmParser { bool isABI_N32() const { return ABI.IsN32(); } bool isABI_N64() const { return ABI.IsN64(); } bool isABI_O32() const { return ABI.IsO32(); } + bool isABI_P32() const { return ABI.IsP32(); } bool isABI_FPXX() const { return getSTI().getFeatureBits()[Mips::FeatureFPXX]; } @@ -770,17 +781,26 @@ class MipsAsmParser : public MCTargetAsmParser { case AsmToken::PercentGp_Rel: return MipsMCExpr::create(MipsMCExpr::MEK_GPREL, E, Ctx); case AsmToken::PercentHi: - return MipsMCExpr::create(MipsMCExpr::MEK_HI, E, Ctx); + if (hasNanoMips()) + return MipsMCExpr::create(MipsMCExpr::MEK_HI20, E, Ctx); + else + return MipsMCExpr::create(MipsMCExpr::MEK_HI, E, Ctx); case AsmToken::PercentHigher: return MipsMCExpr::create(MipsMCExpr::MEK_HIGHER, E, Ctx); case AsmToken::PercentHighest: return MipsMCExpr::create(MipsMCExpr::MEK_HIGHEST, E, Ctx); case AsmToken::PercentLo: - return MipsMCExpr::create(MipsMCExpr::MEK_LO, E, Ctx); + if (hasNanoMips()) + return MipsMCExpr::create(MipsMCExpr::MEK_LO12, E, Ctx); + else + return MipsMCExpr::create(MipsMCExpr::MEK_LO, E, Ctx); case AsmToken::PercentNeg: return MipsMCExpr::create(MipsMCExpr::MEK_NEG, E, Ctx); case AsmToken::PercentPcrel_Hi: - return MipsMCExpr::create(MipsMCExpr::MEK_PCREL_HI16, E, Ctx); + if (hasNanoMips()) + return MipsMCExpr::create(MipsMCExpr::MEK_PCREL_HI, E, Ctx); + else + return MipsMCExpr::create(MipsMCExpr::MEK_PCREL_HI16, E, Ctx); case AsmToken::PercentPcrel_Lo: return MipsMCExpr::create(MipsMCExpr::MEK_PCREL_LO16, E, Ctx); case AsmToken::PercentTlsgd: @@ -923,6 +943,32 @@ class MipsOperand : public MCParsedAsmOperand { return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index); } + /// Coerce the register to GPR32 and return the real register for the current + /// target. 
+ unsigned getGPRNM32Reg() const { + assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && "Invalid access!"); + unsigned ClassID = Mips::GPRNM32RegClassID; + return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index); + } + + // Find the next (or previous) i'th register in sequence for NanoMips + unsigned getGPRNM32RegNext(int i = 1) const { + assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && + ((signed)RegIdx.Index + i >= 0) && (RegIdx.Index + i <= 31) && + "Invalid access!"); + unsigned ClassID = Mips::GPRNM32RegClassID; + return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegIdx.Index + i); + } + + unsigned getGPRNM4ZeroReg() const { + assert(isRegIdx() && (RegIdx.Kind & RegKind_GPR) && "Invalid access!"); + unsigned ClassID = Mips::GPRNM32RegClassID; + unsigned RegNo = RegIdx.Index; + if (RegNo == 0) + RegNo = 11; + return RegIdx.RegInfo->getRegClass(ClassID).getRegister(RegNo); + } + + /// Coerce the register to GPR64 and return the real register for the current + /// target.
unsigned getGPR64Reg() const { @@ -1107,6 +1153,16 @@ class MipsOperand : public MCParsedAsmOperand { Inst.addOperand(MCOperand::createReg(getGPRMM16Reg())); } + void addGPRNM32AsmRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createReg(getGPRNM32Reg())); + } + + void addGPRNM4ZAsmRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createReg(getGPRNM4ZeroReg())); + } + /// Render the operand to an MCInst as a GPR64 /// Asserts if the wrong number of operands are requested, or the operand /// is not a k_RegisterIndex compatible with RegKind_GPR @@ -1229,6 +1285,30 @@ class MipsOperand : public MCParsedAsmOperand { addConstantSImmOperands(Inst, N); } + void addSym32Operands(MCInst &Inst, unsigned N) const { + if (isImm() && !isConstantImm()) { + addExpr(Inst, getImm()); + return; + } + addConstantSImmOperands<32, 0, 0>(Inst, N); + } + + void addSym32PCRelOperands(MCInst &Inst, unsigned N) const { + if (isImm() && !isConstantImm()) { + addExpr(Inst, getImm()); + return; + } + addConstantSImmOperands<32, 0, 0>(Inst, N); + } + + void addSym32GPRelOperands(MCInst &Inst, unsigned N) const { + if (isImm() && !isConstantImm()) { + addExpr(Inst, getImm()); + return; + } + addConstantSImmOperands<32, 0, 0>(Inst, N); + } + template void addUImmOperands(MCInst &Inst, unsigned N) const { if (isImm() && !isConstantImm()) { @@ -1265,6 +1345,15 @@ class MipsOperand : public MCParsedAsmOperand { addExpr(Inst, Expr); } + void addNMMemOperands(MCInst &Inst, unsigned N) const { + assert(N == 2 && "Invalid number of operands!"); + + Inst.addOperand(MCOperand::createReg(getMemBase()->getGPRNM32Reg())); + + const MCExpr *Expr = getMemOff(); + addExpr(Inst, Expr); + } + void addMicroMipsMemOperands(MCInst &Inst, unsigned N) const { assert(N == 2 && "Invalid number of operands!"); @@ -1307,6 +1396,10 @@ class MipsOperand : public 
MCParsedAsmOperand { return isConstantImm() ? isInt(getConstantImm()) : isImm(); } + template bool isNegImm() const { + return (isConstantImm() && getConstantImm() < 0 && isUInt(-getConstantImm())); + } + template bool isUImm() const { return isConstantImm() ? isUInt(getConstantImm()) : isImm(); } @@ -1326,6 +1419,23 @@ class MipsOperand : public MCParsedAsmOperand { getConstantImm() <= Top; } + template bool isConstantSImmRange() const { + return isConstantImm() && getConstantImm() >= Bottom && + getConstantImm() <= Top; + } + + bool isConstantUImmMask() const { + return (isConstantImm() && + ((getConstantImm() >= 0 && getConstantImm() < 11) || + (getConstantImm() == 0xff) || (getConstantImm() == 0xffff) || + (getConstantImm() == 0xe) || (getConstantImm() == 0xf))); + } + + template bool isConstantNegImmRange() const { + return isConstantImm() && getConstantImm() >= -Bottom && + getConstantImm() <= -Top; + } + bool isToken() const override { // Note: It's not possible to pretend that other operand kinds are tokens. // The matcher emitter checks tokens first. 
@@ -1338,6 +1448,60 @@ class MipsOperand : public MCParsedAsmOperand { return isMem() && isa(getMemOff()); } + template + bool isMemWithUimmOffset() const { + if (!isMem()) + return false; + if (!getMemBase()->isGPRAsmReg()) + return false; + if ((isConstantMemOff() && + isShiftedUInt(getConstantMemOff()))) + return true; + MCValue Res; + bool IsReloc = getMemOff()->evaluateAsRelocatable(Res, nullptr, nullptr); + return IsReloc && isShiftedUInt(Res.getConstant()); + } + + template + bool isMemWithBaseUimmOffset() const { + MCValue Res; + if (!isMem()) + return false; + if (!getMemBase()->isGPRAsmReg() + || !MipsMCRegisterClasses[ClassID].contains(getMemBase()->getGPRNM32Reg())) + return false; + if (isConstantMemOff()) + return isShiftedUInt(getConstantMemOff()); + if (ClassID == Mips::GPRNM32RegClassID) + return getMemOff()->evaluateAsRelocatable(Res, nullptr, nullptr); + else + return false; + } + + template + bool isMemWithBaseSimmOffset() const { + MCValue Res; + if (!isMem()) + return false; + if (!getMemBase()->isGPRAsmReg() + || !MipsMCRegisterClasses[ClassID].contains(getMemBase()->getGPRNM32Reg())) + return false; + if (isConstantMemOff()) + return isShiftedInt(getConstantMemOff()); + return false; + } + + bool isMemNMRX() const { + if (!isMem()) + return false; + if (!getMemBase()->isGPRAsmReg() + || !MipsMCRegisterClasses[Mips::GPRNM32RegClassID].contains(getConstantMemOff())) + return false; + return true; + } + // Allow relocation operators.
template bool isMemWithSimmOffset() const { @@ -1389,12 +1554,76 @@ class MipsOperand : public MCParsedAsmOperand { && (getMemBase()->getGPR32Reg() == Mips::GP); } + bool isConstantUImm3Shift() const { + return (isConstantImm() && getConstantImm() > 0 && getConstantImm() <= 8); + } + + MipsOperand *getMemBase() const { + assert((Kind == k_Memory) && "Invalid access!"); + return Mem.Base; + } + + const MCExpr *getMemOff() const { + assert((Kind == k_Memory) && "Invalid access!"); + return Mem.Off; + } + + int64_t getConstantMemOff() const { + return static_cast(getMemOff())->getValue(); + } + + template bool isMemWithUimmOffsetGP() const { + if (isMem() && getMemBase()->isRegIdx() && (getMemBase()->getGPR32Reg() == Mips::GP)) { + MCValue Res; + bool IsReloc = getMemOff()->evaluateAsRelocatable(Res, nullptr, nullptr); + + + if (isConstantMemOff()) + return (isUInt(getConstantMemOff()) && + (getConstantMemOff() % Align == 0)); + else + return IsReloc; + } + return false; + } + + template bool isMemWithUimmOffsetSP() const { + return isMem() && isConstantMemOff() && isUInt(getConstantMemOff()) + && (getConstantMemOff() % Align == 0) && getMemBase()->isRegIdx() + && (getMemBase()->getGPR32Reg() == Mips::SP); + } + template bool isScaledUImm() const { return isConstantImm() && isShiftedUInt(getConstantImm()); } + bool isSym32() const { + MCValue Res; + bool Success; + if (Kind != k_Immediate) + return false; + Success = getImm()->evaluateAsRelocatable(Res, nullptr, nullptr); + if (Success && Res.getRefKind() == MipsMCExpr::MEK_None) + return Success; + return Success; + } + + bool isSym32PCRel() const { + return isSym32(); + } + + bool isSym32GPRel() const { + MCValue Res; + bool Success; + + Success = getImm()->evaluateAsRelocatable(Res, nullptr, nullptr); + if (Success && Res.getRefKind() == MipsMCExpr::MEK_GPREL) + return Success; + return false; + } + template bool isScaledSImm() const { if (isConstantImm() && @@ -1445,6 +1674,31 @@ class MipsOperand : public 
MCParsedAsmOperand { bool isRegList() const { return Kind == k_RegList; } + bool isNMRegList16() const { + if (!isRegList()) + return false; + + // List must not be empty + unsigned Size = RegList.List->size(); + if (Size == 0) + return true; + + // List must start with $fp or $ra + unsigned R0 = RegList.List->front(); + unsigned R1 = RegList.List->back(); + if (!(R0 == Mips::FP_NM || R0 == Mips::RA_NM)) + return false; + + // If $gp is specified, it must be part of a + // contiguous sequence of registers + if (R0 == Mips::FP_NM && R1 == Mips::GP_NM) + return (Size == 15); + if (R0 == Mips::RA_NM && R1 == Mips::GP_NM) + return (Size == 14); + + return true; + } + StringRef getToken() const { assert(Kind == k_Token && "Invalid access!"); return StringRef(Tok.Data, Tok.Length); @@ -1473,20 +1727,6 @@ class MipsOperand : public MCParsedAsmOperand { return Value; } - MipsOperand *getMemBase() const { - assert((Kind == k_Memory) && "Invalid access!"); - return Mem.Base; - } - - const MCExpr *getMemOff() const { - assert((Kind == k_Memory) && "Invalid access!"); - return Mem.Off; - } - - int64_t getConstantMemOff() const { - return static_cast(getMemOff())->getValue(); - } - const SmallVectorImpl &getRegList() const { assert((Kind == k_RegList) && "Invalid access!"); return *(RegList.List); @@ -1599,6 +1839,16 @@ class MipsOperand : public MCParsedAsmOperand { return Op; } + static std::unique_ptr + CreateRegListNM(SmallVectorImpl &Regs, SMLoc StartLoc, SMLoc EndLoc, + MipsAsmParser &Parser) { + auto Op = std::make_unique(k_RegList, Parser); + Op->RegList.List = new SmallVector(Regs.begin(), Regs.end()); + Op->StartLoc = StartLoc; + Op->EndLoc = EndLoc; + return Op; + } + bool isGPRZeroAsmReg() const { return isRegIdx() && RegIdx.Kind & RegKind_GPR && RegIdx.Index == 0; } @@ -1695,6 +1945,100 @@ class MipsOperand : public MCParsedAsmOperand { return isRegIdx() && RegIdx.Kind & RegKind_MSACtrl && RegIdx.Index <= 7; } + bool isNM16AsmReg() const { + if (!(isRegIdx() && 
RegIdx.Kind)) + return false; + return ((RegIdx.Index >= 4 && RegIdx.Index <= 7) + || (RegIdx.Index >= 16 && RegIdx.Index <= 19)); + + } + + bool isNM16ZeroAsmReg() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + volatile unsigned RegNo = RegIdx.Index; + return ((RegNo == 0) || + (RegNo >= 4 && RegNo <= 7) || + (RegNo >= 17 && RegNo <= 19)); + } + + bool isNM4AsmReg() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + return ((RegIdx.Index >= 4 && RegIdx.Index <= 11) + || (RegIdx.Index >= 16 && RegIdx.Index <= 23)); + + } + + bool isNM4ZeroAsmReg() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + return ((RegIdx.Index == 0) || + (RegIdx.Index >= 4 && RegIdx.Index <= 10) || + (RegIdx.Index >= 16 && RegIdx.Index <= 23)); + } + + bool isNM2R1AsmReg() const { + volatile unsigned RegNo = RegIdx.Index; + if (!(isRegIdx() && RegIdx.Kind)) + return false; + return (RegNo >= 4 && RegNo <= 7); + } + + bool isNM2R2AsmReg() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + return (RegIdx.Index >= 5 && RegIdx.Index <= 8); + } + + bool isNM1R1AsmReg() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + return (RegIdx.Index == 4 || RegIdx.Index == 5); + } + + enum NM_REG_TYPE { + NMR, + NMR_NZ, + NMR_3, + NMR_3Z, + NMR_4, + NMR_4Z, + NMR_2R1, + NMR_2R2, + NMR_1R1 + }; + + template + bool isGPRNMAsmReg() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + switch (rt) { + case Mips::GPRNMGPRegClassID: + return (RegIdx.Index == 28); + case Mips::GPRNMSPRegClassID: + return (RegIdx.Index == 29); + case Mips::GPRNM32NZRegClassID: + return (RegIdx.Index > 0 && RegIdx.Index < 32); + case Mips::GPRNM3RegClassID: + return isNM16AsmReg(); + case Mips::GPRNM3ZRegClassID: + return isNM16ZeroAsmReg(); + case Mips::GPRNM4RegClassID: + return isNM4AsmReg(); + case Mips::GPRNM4ZRegClassID: + return isNM4ZeroAsmReg(); + case Mips::GPRNM2R1RegClassID: + return isNM2R1AsmReg(); + case Mips::GPRNM2R2RegClassID: + return 
isNM2R2AsmReg(); + case Mips::GPRNM1R1RegClassID: + return isNM1R1AsmReg(); + default: + return (RegIdx.Index < 32); + } + } + /// getStartLoc - Get the location of the first token of this operand. SMLoc getStartLoc() const override { return StartLoc; } /// getEndLoc - Get the location of the last token of this operand. @@ -1849,13 +2193,25 @@ static bool needsExpandMemInst(MCInst &Inst) { const MCOperandInfo &OpInfo = MCID.OpInfo[NumOp - 1]; if (OpInfo.OperandType != MCOI::OPERAND_MEMORY && OpInfo.OperandType != MCOI::OPERAND_UNKNOWN && - OpInfo.OperandType != MipsII::OPERAND_MEM_SIMM9) + OpInfo.OperandType != MipsII::OPERAND_MEM_SIMM9 && + OpInfo.OperandType != NanoMips::OPERAND_NM_GPREL21 && + OpInfo.OperandType != NanoMips::OPERAND_NM_GPREL18 && + OpInfo.OperandType != NanoMips::OPERAND_NM_GPREL9 && + OpInfo.OperandType != NanoMips::OPERAND_NM_SPREL7) return false; MCOperand &Op = Inst.getOperand(NumOp - 1); if (Op.isImm()) { if (OpInfo.OperandType == MipsII::OPERAND_MEM_SIMM9) return !isInt<9>(Op.getImm()); + if (OpInfo.OperandType == NanoMips::OPERAND_NM_GPREL21) + return !isUInt<21>(Op.getImm()); + if (OpInfo.OperandType == NanoMips::OPERAND_NM_GPREL18) + return !isUInt<18>(Op.getImm()); + if (OpInfo.OperandType == NanoMips::OPERAND_NM_GPREL9) + return !isUInt<9>(Op.getImm()); + if (OpInfo.OperandType == NanoMips::OPERAND_NM_SPREL7) + return !isUInt<7>(Op.getImm()); // Offset can't exceed 16bit value. 
return !isInt<16>(Op.getImm()); } @@ -2165,6 +2521,10 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, case MipsII::OPERAND_MEM_SIMM9: expandMem9Inst(Inst, IDLoc, Out, STI, MCID.mayLoad()); break; + case NanoMips::OPERAND_NM_GPREL18: + case NanoMips::OPERAND_NM_GPREL21: + // These cases have no legal expansion + break; default: expandMem16Inst(Inst, IDLoc, Out, STI, MCID.mayLoad()); break; @@ -5768,6 +6128,10 @@ MipsAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst, return Match_Success; case Mips::DATI: case Mips::DAHI: + case Mips::ADDIURS5_NM: + case Mips::ADDIU48_NM: + case Mips::ADDu4x4_NM: + case Mips::MUL4x4_NM: if (static_cast(*Operands[1]) .isValidForTie(static_cast(*Operands[2]))) return Match_Success; @@ -5859,7 +6223,9 @@ unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) { if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) return Match_RequiresDifferentOperands; return Match_Success; - case Mips::DINS: { + case Mips::DINS: + case Mips::EXT_NM: + case Mips::INS_NM: { assert(Inst.getOperand(2).isImm() && Inst.getOperand(3).isImm() && "Operands must be immediates for dins!"); const signed Pos = Inst.getOperand(2).getImm(); @@ -5901,9 +6267,39 @@ unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) { case Mips::CRC32H: case Mips::CRC32CH: case Mips::CRC32W: case Mips::CRC32CW: case Mips::CRC32D: case Mips::CRC32CD: + case Mips::AND16_NM: case Mips::OR16_NM: + case Mips::XOR16_NM: if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) return Match_RequiresSameSrcAndDst; return Match_Success; + case Mips::MOVEP_NM: + if (Inst.getOperand(1).getReg() != Inst.getOperand(0).getReg() + 1) + return Match_RequiresDstRegPair; + return Match_Success; + case Mips::MOVEPREV_NM: + if (Inst.getOperand(3).getReg() != Inst.getOperand(2).getReg() + 1) + return Match_RequiresSrcRegPair; + return Match_Success; + case Mips::BEQC16_NM: + if (Inst.getOperand(0).getReg() >= Inst.getOperand(1).getReg()) + 
return Match_RequiresFirstOpLT; + return Match_Success; + case Mips::BNEC16_NM: + if (Inst.getOperand(0).getReg() < Inst.getOperand(1).getReg()) + return Match_RequiresFirstOpGE; + return Match_Success; + case Mips::ADDIUGPB_NM: + case Mips::ADDIUGPW_NM: + case Mips::ADDIUGP48_NM: + if (Inst.getOperand(1).getReg() != Mips::GP_NM) + return Match_RequiresBaseGP; + return Match_Success; + case Mips::SWSP16_NM: + case Mips::LWSP16_NM: + case Mips::ADDIUR1SP_NM: + if (Inst.getOperand(1).getReg() != Mips::SP_NM) + return Match_RequiresBaseSP; + return Match_Success; } uint64_t TSFlags = getInstDesc(Inst.getOpcode()).TSFlags; @@ -5932,8 +6328,10 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, uint64_t &ErrorInfo, bool MatchingInlineAsm) { MCInst Inst; - unsigned MatchResult = - MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm); + unsigned MatchResult; + int VariantId = getContext().getAsmInfo()->getAssemblerDialect(); + + MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, VariantId); switch (MatchResult) { case Match_Success: @@ -5971,6 +6369,16 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return Error(IDLoc, "invalid operand ($zero) for instruction"); case Match_RequiresSameSrcAndDst: return Error(IDLoc, "source and destination must match"); + case Match_RequiresDstRegPair: + return Error(IDLoc, "destination registers must be in sequence"); + case Match_RequiresSrcRegPair: + return Error(IDLoc, "source registers must be in sequence"); + case Match_RequiresFirstOpLT: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "first register operand(rs) must be less than second(rt)"); + case Match_RequiresFirstOpGE: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "first register operand(rs) must not be less than second(rt)"); case Match_NoFCCRegisterForCurrentISA: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "non-zero fcc register doesn't 
exist in current ISA level"); @@ -6072,6 +6480,9 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_SImm32_Relaxed: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected 32-bit signed immediate"); + case Match_SImm20s12: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected 20-bit signed immediate"); case Match_UImm32_Coerced: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected 32-bit immediate"); @@ -6120,6 +6531,17 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return Error(ErrorStart, "size plus position are not in the range 33 .. 64", SMRange(ErrorStart, ErrorEnd)); } + case Match_RequiresBaseGP: + return Error(IDLoc, "expected $gp as base register"); + case Match_RequiresBaseSP: + return Error(IDLoc, "expected $sp as base register"); + case Match_Sym32: + case Match_Sym32PCRel: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected symbol"); + case Match_Sym32GPRel: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected %gp_rel expression"); } llvm_unreachable("Implement any new match types added!"); @@ -6158,7 +6580,45 @@ MipsAsmParser::printWarningWithFixIt(const Twine &Msg, const Twine &FixMsg, int MipsAsmParser::matchCPURegisterName(StringRef Name) { int CC; - CC = StringSwitch(Name) + if (isABI_P32()) { + CC = StringSwitch(Name) + .Cases("zero", "r0", 0) + .Cases("at", "r1", "AT", "r1", 1) + .Cases("t4", "r2", 2) + .Cases("t5", "r3", 3) + .Cases("a0", "r4", 4) + .Cases("a1", "r5", 5) + .Cases("a2", "r6", 6) + .Cases("a3", "r7", 7) + .Cases("a4", "r8", 8) + .Cases("a5", "r9", 9) + .Cases("a6", "r10", 10) + .Cases("a7", "r11", 11) + .Cases("t0", "r12", 12) + .Cases("t1", "r13", 13) + .Cases("t2", "r14", 14) + .Cases("t3", "r15", 15) + .Cases("s0", "r16", 16) + .Cases("s1", "r17", 17) + .Cases("s2", "r18", 18) + .Cases("s3", "r19", 19) + .Cases("s4", "r20", 20) + .Cases("s5", "r21", 21) + .Cases("s6", "r22", 22) + 
.Cases("s7", "r23", 23) + .Cases("t8", "r24", 24) + .Cases("t9", "r25", 25) + .Cases("k0", "r26", 26) + .Cases("k1", "r27", 27) + .Cases("gp", "r28", 28) + .Cases("sp", "r29", 29) + .Cases("fp", "s8", "r30", 30) + .Cases("ra", "r31", 31) + .Default(-1); + return CC; + } + else { + CC = StringSwitch(Name) .Case("zero", 0) .Cases("at", "AT", 1) .Case("a0", 4) @@ -6193,6 +6653,7 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) { .Case("t8", 24) .Case("t9", 25) .Default(-1); + } if (!(isABI_N32() || isABI_N64())) return CC; @@ -6573,6 +7034,78 @@ MipsAsmParser::parseMemOperand(OperandVector &Operands) { return MatchOperand_Success; } +// Parse register indexed memory operand - $rs($rt) +OperandMatchResultTy +MipsAsmParser::parseMemNMRX(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); + LLVM_DEBUG(dbgs() << "parseMemRx\n"); + const MCExpr *IdVal = nullptr; + SMLoc S; + bool isParenExpr = false; + OperandMatchResultTy Res = MatchOperand_NoMatch; + + S = Parser.getTok().getLoc(); + + if (getLexer().getKind() == AsmToken::LParen) { + Parser.Lex(); + isParenExpr = true; + } + SmallVector, 1> Reg; + if ((Res = parseAnyRegister(Reg)) != MatchOperand_Success) + return Res; + else { + // Register encoded as immediate to fit struct MemOp + MipsOperand &RegOpnd = static_cast(*Reg[0]); + IdVal = MCConstantExpr::create(RegOpnd.getGPRNM32Reg(), getContext()); + } + + if (Parser.getTok().isNot(AsmToken::LParen)) { + if (Parser.getTok().is(AsmToken::EndOfStatement)) { + SMLoc E = + SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + + // Zero register assumed, add a memory operand with ZERO as its base. + // "Base" will be managed by k_Memory. 
+ auto Base = MipsOperand::createGPRReg(0, "0", getContext().getRegisterInfo(), S, E, *this); + Operands.push_back( + MipsOperand::CreateMem(std::move(Base), IdVal, S, E, *this)); + return MatchOperand_Success; + } + else { + Error(Parser.getTok().getLoc(), "'(' expected"); + return MatchOperand_ParseFail; + } + } + else + Parser.Lex(); // Eat the '(' token. + + Res = parseAnyRegister(Operands); + if (Res != MatchOperand_Success) + return Res; + + if (Parser.getTok().isNot(AsmToken::RParen)) { + Error(Parser.getTok().getLoc(), "')' expected"); + return MatchOperand_ParseFail; + } + + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + + Parser.Lex(); // Eat the ')' token. + + if (!IdVal) + IdVal = MCConstantExpr::create(0, getContext()); + + // Replace the register operand with the memory operand. + std::unique_ptr op( + static_cast(Operands.back().release())); + // Remove the register from the operands. + // "op" will be managed by k_Memory. + Operands.pop_back(); + // Add the memory operand. 
+ Operands.push_back(MipsOperand::CreateMem(std::move(op), IdVal, S, E, *this)); + return MatchOperand_Success; +} + bool MipsAsmParser::searchSymbolAlias(OperandVector &Operands) { MCAsmParser &Parser = getParser(); MCSymbol *Sym = getContext().lookupSymbol(Parser.getTok().getIdentifier()); @@ -6875,6 +7408,112 @@ MipsAsmParser::parseRegisterList(OperandVector &Operands) { return MatchOperand_Success; } +OperandMatchResultTy +MipsAsmParser::parseNMRegisterList(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); + SmallVector Regs; + unsigned RegNo; + unsigned PrevReg = Mips::NoRegister; + bool RegRange = false; + SmallVector, 16> TmpOperands; + MipsOperand *RegFirst; + + SMLoc S = Parser.getTok().getLoc(); + + while (parseAnyRegister(TmpOperands) == MatchOperand_Success) { + SMLoc E = getLexer().getLoc(); + MipsOperand &Reg = static_cast(*TmpOperands.back()); + RegNo = Reg.getGPRNM32Reg(); + + if (RegRange) { + unsigned i = 1; + PrevReg = RegFirst->getGPRNM32RegNext(i++); + while (PrevReg != RegNo) { + if (PrevReg == Mips::RA_NM) { + Error(E, "invalid register range"); + return MatchOperand_ParseFail; + } + Regs.push_back(PrevReg); + PrevReg = RegFirst->getGPRNM32RegNext(i++); + } + RegRange = false; + } + else if (PrevReg == Mips::RA_NM && RegNo != Mips::S0_NM) { + Error(E, "register sequence must continue at $s0 after $ra"); + return MatchOperand_ParseFail; + } + else if (Regs.size() == Regs.capacity()) { + Error(E, "too many registers in list"); + return MatchOperand_ParseFail; + } else if ((PrevReg != Mips::RA_NM) && + (PrevReg != Mips::NoRegister) && + (Reg.getGPRNM32RegNext(-1) != PrevReg) && + (RegNo != Mips::GP_NM)) { + Error(E, "consecutive register numbers expected"); + return MatchOperand_ParseFail; + } + + Regs.push_back(RegNo); + + if (Parser.getTok().is(AsmToken::Minus)) { + RegRange = true; + RegFirst = &Reg; + } + + if (Parser.getTok().is(AsmToken::EndOfStatement)) + break; + + if (Parser.getTok().isNot(AsmToken::Minus) && + 
Parser.getTok().isNot(AsmToken::Comma)) { + Error(E, "',' or '-' expected"); + return MatchOperand_ParseFail; + } + + Lex(); // Consume comma or minus + if (Parser.getTok().isNot(AsmToken::Dollar)) + break; + + PrevReg = RegNo; + } + + /* Parse an alternate format with count and start-reg instead + of register list */ + if (Regs.size() == 0) { + if (getLexer().getTok().is(AsmToken::Integer)) { + unsigned Count = Parser.getTok().getIntVal(); + Lex(); + SMLoc E = Parser.getTok().getLoc(); + if (Count > 16) { + Error(E, "too many registers in list"); + return MatchOperand_ParseFail; + } + if (Count > 0) { + if (Parser.getTok().isNot(AsmToken::Minus) && + Parser.getTok().isNot(AsmToken::Comma)) { + Error(E, "',' or '-' expected"); + return MatchOperand_ParseFail; + } + Lex(); // Consume comma or minus + E = Parser.getTok().getLoc(); + if (parseAnyRegister(TmpOperands) == MatchOperand_Success) { + MipsOperand &Reg = static_cast(*TmpOperands.back()); + unsigned i = 1; + while (i < Count) + Regs.push_back(Reg.getGPRNM32RegNext(i++)); + } + else + return MatchOperand_ParseFail; + } + } + else + return MatchOperand_ParseFail; + } + + SMLoc E = Parser.getTok().getLoc(); + Operands.push_back(MipsOperand::CreateRegListNM(Regs, S, E, *this)); + return MatchOperand_Success; +} + /// Sometimes (i.e. load/stores) the operand may be followed immediately by /// either this. 
/// ::= '(', register, ')' @@ -6941,7 +7580,7 @@ bool MipsAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, getTargetStreamer().forbidModuleDirective(); // Check if we have valid mnemonic - if (!mnemonicIsValid(Name, 0)) { + if (!mnemonicIsValid(Name, 0) && !mnemonicIsValid(Name, 1)) { FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits()); std::string Suggestion = MipsMnemonicSpellCheck(Name, FBS); return Error(NameLoc, "unknown instruction" + Suggestion); @@ -7101,7 +7740,8 @@ bool MipsAsmParser::parseSetNoReorderDirective() { return false; } AssemblerOptions.back()->setNoReorder(); - getTargetStreamer().emitDirectiveSetNoReorder(); + if (!hasNanoMips()) + getTargetStreamer().emitDirectiveSetNoReorder(); Parser.Lex(); // Consume the EndOfStatement. return false; } @@ -7871,6 +8511,14 @@ bool MipsAsmParser::parseDirectiveNaN() { return false; } + +bool MipsAsmParser::parseDirectiveLinkRelax() { + getParser().Lex(); + setModuleFeatureBits(Mips::FeatureRelax, "relax"); + getTargetStreamer().emitDirectiveLinkRelax(); + return false; +} + bool MipsAsmParser::parseDirectiveSet() { const AsmToken &Tok = getParser().getTok(); StringRef IdVal = Tok.getString(); @@ -7910,59 +8558,61 @@ bool MipsAsmParser::parseDirectiveSet() { return parseSetMacroDirective(); if (IdVal == "nomacro") return parseSetNoMacroDirective(); - if (IdVal == "mips16") - return parseSetMips16Directive(); - if (IdVal == "nomips16") - return parseSetNoMips16Directive(); - if (IdVal == "nomicromips") { - clearFeatureBits(Mips::FeatureMicroMips, "micromips"); - getTargetStreamer().emitDirectiveSetNoMicroMips(); - getParser().eatToEndOfStatement(); - return false; - } - if (IdVal == "micromips") { - if (hasMips64r6()) { - Error(Loc, ".set micromips directive is not supported with MIPS64R6"); + if (!hasNanoMips()) { + if (IdVal == "mips16") + return parseSetMips16Directive(); + if (IdVal == "nomips16") + return parseSetNoMips16Directive(); + if (IdVal == 
"nomicromips") { + clearFeatureBits(Mips::FeatureMicroMips, "micromips"); + getTargetStreamer().emitDirectiveSetNoMicroMips(); + getParser().eatToEndOfStatement(); return false; } - return parseSetFeature(Mips::FeatureMicroMips); - } - if (IdVal == "mips0") - return parseSetMips0Directive(); - if (IdVal == "mips1") - return parseSetFeature(Mips::FeatureMips1); - if (IdVal == "mips2") - return parseSetFeature(Mips::FeatureMips2); - if (IdVal == "mips3") - return parseSetFeature(Mips::FeatureMips3); - if (IdVal == "mips4") - return parseSetFeature(Mips::FeatureMips4); - if (IdVal == "mips5") - return parseSetFeature(Mips::FeatureMips5); - if (IdVal == "mips32") - return parseSetFeature(Mips::FeatureMips32); - if (IdVal == "mips32r2") - return parseSetFeature(Mips::FeatureMips32r2); - if (IdVal == "mips32r3") - return parseSetFeature(Mips::FeatureMips32r3); - if (IdVal == "mips32r5") - return parseSetFeature(Mips::FeatureMips32r5); - if (IdVal == "mips32r6") - return parseSetFeature(Mips::FeatureMips32r6); - if (IdVal == "mips64") - return parseSetFeature(Mips::FeatureMips64); - if (IdVal == "mips64r2") - return parseSetFeature(Mips::FeatureMips64r2); - if (IdVal == "mips64r3") - return parseSetFeature(Mips::FeatureMips64r3); - if (IdVal == "mips64r5") - return parseSetFeature(Mips::FeatureMips64r5); - if (IdVal == "mips64r6") { - if (inMicroMipsMode()) { - Error(Loc, "MIPS64R6 is not supported with microMIPS"); - return false; + if (IdVal == "micromips") { + if (hasMips64r6()) { + Error(Loc, ".set micromips directive is not supported with MIPS64R6"); + return false; + } + return parseSetFeature(Mips::FeatureMicroMips); + } + if (IdVal == "mips0") + return parseSetMips0Directive(); + if (IdVal == "mips1") + return parseSetFeature(Mips::FeatureMips1); + if (IdVal == "mips2") + return parseSetFeature(Mips::FeatureMips2); + if (IdVal == "mips3") + return parseSetFeature(Mips::FeatureMips3); + if (IdVal == "mips4") + return parseSetFeature(Mips::FeatureMips4); + if (IdVal 
== "mips5") + return parseSetFeature(Mips::FeatureMips5); + if (IdVal == "mips32") + return parseSetFeature(Mips::FeatureMips32); + if (IdVal == "mips32r2") + return parseSetFeature(Mips::FeatureMips32r2); + if (IdVal == "mips32r3") + return parseSetFeature(Mips::FeatureMips32r3); + if (IdVal == "mips32r5") + return parseSetFeature(Mips::FeatureMips32r5); + if (IdVal == "mips32r6") + return parseSetFeature(Mips::FeatureMips32r6); + if (IdVal == "mips64") + return parseSetFeature(Mips::FeatureMips64); + if (IdVal == "mips64r2") + return parseSetFeature(Mips::FeatureMips64r2); + if (IdVal == "mips64r3") + return parseSetFeature(Mips::FeatureMips64r3); + if (IdVal == "mips64r5") + return parseSetFeature(Mips::FeatureMips64r5); + if (IdVal == "mips64r6") { + if (inMicroMipsMode()) { + Error(Loc, "MIPS64R6 is not supported with microMIPS"); + return false; + } + return parseSetFeature(Mips::FeatureMips64r6); } - return parseSetFeature(Mips::FeatureMips64r6); } if (IdVal == "dsp") return parseSetFeature(Mips::FeatureDSP); @@ -8221,6 +8871,7 @@ bool MipsAsmParser::parseSSectionDirective(StringRef Section, unsigned Type) { /// ::= .module novirt /// ::= .module ginv /// ::= .module noginv +/// ::= .module pcrel bool MipsAsmParser::parseDirectiveModule() { MCAsmParser &Parser = getParser(); MCAsmLexer &Lexer = getLexer(); @@ -8452,6 +9103,21 @@ bool MipsAsmParser::parseDirectiveModule() { return false; } + return false; // parseDirectiveModule has finished successfully. + } else if (Option == "pcrel") { + // clearModuleFeatureBits(Mips::FeatureGINV, "ginv"); + + // If printing assembly, use the recently updated ABI Flags information. + // If generating ELF, don't do anything (the .MIPS.abiflags section gets + // emitted later). + getTargetStreamer().emitDirectiveModulePcRel(); + + // If this is not the end of the statement, report an error. 
+ if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token, expected end of statement"); + return false; + } + return false; // parseDirectiveModule has finished successfully. } else { return Error(L, "'" + Twine(Option) + "' is not a valid .module option."); @@ -8746,6 +9412,11 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { return false; } + if (IDVal == ".linkrelax") { + parseDirectiveLinkRelax(); + return false; + } + if (IDVal == ".mask" || IDVal == ".fmask") { // .mask bitmask, frame_offset // bitmask: One bit for each register used. @@ -8905,6 +9576,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeMipsAsmParser() { RegisterMCAsmParser Y(getTheMipselTarget()); RegisterMCAsmParser A(getTheMips64Target()); RegisterMCAsmParser B(getTheMips64elTarget()); + RegisterMCAsmParser C(getTheNanoMipsTarget()); } #define GET_REGISTER_MATCHER @@ -8918,6 +9590,7 @@ bool MipsAsmParser::mnemonicIsValid(StringRef Mnemonic, unsigned VariantID) { switch (VariantID) { default: llvm_unreachable("invalid variant!"); case 0: Start = std::begin(MatchTable0); End = std::end(MatchTable0); break; + case 1: Start = std::begin(MatchTable1); End = std::end(MatchTable1); break; } // Search the table. 
auto MnemonicRange = std::equal_range(Start, End, Mnemonic, LessOpcode()); diff --git a/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp index 6f197e4245613..94742e7776917 100644 --- a/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp +++ b/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp @@ -39,12 +39,14 @@ namespace { class MipsDisassembler : public MCDisassembler { bool IsMicroMips; + bool IsNanoMips; bool IsBigEndian; public: MipsDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx, bool IsBigEndian) : MCDisassembler(STI, Ctx), IsMicroMips(STI.getFeatureBits()[Mips::FeatureMicroMips]), + IsNanoMips(STI.getFeatureBits()[Mips::FeatureNanoMips]), IsBigEndian(IsBigEndian) {} bool hasMips2() const { return STI.getFeatureBits()[Mips::FeatureMips2]; } @@ -109,6 +111,61 @@ static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeGPRNM3RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNM4RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNMSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNMGPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNM3ZRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNM4ZRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNM32NZRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNM32RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus 
DecodeGPRNM2R1RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNM2R2RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeGPRNM1R1RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + static DecodeStatus DecodePtrRegisterClass(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -261,6 +318,14 @@ static DecodeStatus DecodeBranchTarget26MM(MCInst &Inst, uint64_t Address, const void *Decoder); +// DecodeBranchTargetMM - Decode nanoMIPS branch offset, which is +// shifted left by 1 bit. +template +static DecodeStatus DecodeBranchTargetNM(MCInst &Inst, + unsigned Offset, + uint64_t Address, + const void *Decoder); + // DecodeJumpTargetMM - Decode microMIPS jump target, which is // shifted left by 1 bit. static DecodeStatus DecodeJumpTargetMM(MCInst &Inst, @@ -280,6 +345,22 @@ static DecodeStatus DecodeMem(MCInst &Inst, uint64_t Address, const void *Decoder); +template +static DecodeStatus DecodeMemNM(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeMemNMRX(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeMemNM4x4(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder); + static DecodeStatus DecodeMemEVA(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -425,6 +506,9 @@ static DecodeStatus DecodeInsSize(MCInst &Inst, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeSimm32(MCInst &Inst, unsigned Insn, + uint64_t Address, const void *Decoder); + static DecodeStatus DecodeSimm19Lsl2(MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder); @@ -549,6 +633,33 @@ static DecodeStatus DecodeMovePRegPair(MCInst &Inst, unsigned RegPair, static DecodeStatus DecodeMovePOperands(MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder); +static 
DecodeStatus DecodeImmM1To126(MCInst &Inst, unsigned Value, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeUImm4Mask(MCInst &Inst, unsigned Value, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeUImm3Shift(MCInst &Inst, unsigned Value, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeNMRegListOperand(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeNMRegList16Operand(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder); + +static DecodeStatus DecodeNegImm12(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder); + static MCDisassembler *createMipsDisassembler( const Target &T, const MCSubtargetInfo &STI, @@ -573,6 +684,8 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeMipsDisassembler() { createMipsDisassembler); TargetRegistry::RegisterMCDisassembler(getTheMips64elTarget(), createMipselDisassembler); + TargetRegistry::RegisterMCDisassembler(getTheNanoMipsTarget(), + createMipselDisassembler); } #include "MipsGenDisassemblerTables.inc" @@ -1216,6 +1329,31 @@ static DecodeStatus readInstruction32(ArrayRef Bytes, uint64_t Address, return MCDisassembler::Success; } + +/// Read six bytes from the ArrayRef and return a 48-bit word sorted +/// according to the given endianness. +static DecodeStatus readInstruction48(ArrayRef Bytes, uint64_t Address, + uint64_t &Size, uint64_t &Insn, + bool IsBigEndian = false, bool IsNanoMips = true) { + // We want to read exactly 6 Bytes of little-endian data in nanoMIPS mode. + if (Bytes.size() < 6 || IsBigEndian || !IsNanoMips) { + Size = 0; + return MCDisassembler::Fail; + } + + // High 16 bits of a 32-bit microMIPS instruction (where the opcode is) + // always precede the low 16 bits in the instruction stream (that is, they + // are placed at lower addresses in the instruction stream). 
+ // + // nanoMIPS byte ordering: + // Little-endian: 1 | 0 | 3 | 2 | 5 | 4 + + Insn = (Bytes[0] << 0) | (Bytes[1] << 8); + Insn = ((Insn << 32) | (Bytes[4] << 0) | (Bytes[5] << 8) | (Bytes[2] << 16) + | (Bytes[3] << 24)); + return MCDisassembler::Success; +} + DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size, ArrayRef Bytes, uint64_t Address, @@ -1224,6 +1362,59 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size, DecodeStatus Result; Size = 0; + if (IsNanoMips) { + uint64_t Insn2; + Result = readInstruction16(Bytes, Address, Size, Insn, IsBigEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + + LLVM_DEBUG( + dbgs() << "Trying NanoMips16 table (16-bit instructions):\n"); + // Calling the auto-generated decoder function for NanoMips + // 16-bit instructions. + Result = decodeInstruction(DecoderTableNanoMips16, Instr, Insn, + Address, this, STI); + if (Result != MCDisassembler::Fail) { + Size = 2; + return Result; + } + + Result = readInstruction32(Bytes, Address, Size, Insn, IsBigEndian, IsNanoMips); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + + LLVM_DEBUG( + dbgs() << "Trying NanoMips32 table (32-bit instructions):\n"); + // Calling the auto-generated decoder function. + Result = decodeInstruction(DecoderTableNanoMips32, Instr, Insn, + Address, this, STI); + if (Result != MCDisassembler::Fail) { + Size = 4; + return Result; + } + + Result = readInstruction48(Bytes, Address, Size, Insn2, IsBigEndian, IsNanoMips); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + + LLVM_DEBUG(dbgs() << "Trying NanoMips48 table (48-bit instructions):\n"); + // Calling the auto-generated decoder function. + Result = decodeInstruction(DecoderTableNanoMips48, Instr, Insn2, Address, + this, STI); + if (Result != MCDisassembler::Fail) { + Size = 6; + return Result; + } + + // This is an invalid instruction. Claim that the Size is 2 bytes. 
Since + // microMIPS instructions have a minimum alignment of 2, the next 2 bytes + // could form a valid instruction. The two bytes we rejected as an + // instruction could have actually beeen an inline constant pool that is + // unconditionally branched over. + Size = 2; + return MCDisassembler::Fail; + } + if (IsMicroMips) { Result = readInstruction16(Bytes, Address, Size, Insn, IsBigEndian); if (Result == MCDisassembler::Fail) @@ -1455,6 +1646,135 @@ static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, return MCDisassembler::Success; } +static DecodeStatus DecodeGPRNM3RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 7) + return MCDisassembler::Fail; + RegNo |= ((RegNo & 0x4) ^ 0x4) << 2; + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNMSPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + Inst.addOperand(MCOperand::createReg(Mips::SP_NM)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNMGPRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + Inst.addOperand(MCOperand::createReg(Mips::GP_NM)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNM3ZRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 7) + return MCDisassembler::Fail; + if (RegNo != 0) + RegNo |= ((RegNo & 0x4) ^ 0x4) << 2; + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNM4RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + RegNo &= ~0x8; + RegNo += (RegNo < 4 ? 
8 : 0); + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNM4ZRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + RegNo &= ~0x8; + if (RegNo == 3) + RegNo = 0; + else + RegNo += (RegNo < 3 ? 8 : 0); + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNM32NZRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo == 0) + return MCDisassembler::Fail; + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNM32RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNM2R1RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + RegNo += 4; + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + Inst.addOperand(MCOperand::createReg(Reg+1)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeGPRNM2R2RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + RegNo += 5; + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus 
DecodeGPRNM1R1RegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo != 0 && RegNo != 1) + return MCDisassembler::Fail; + RegNo += 4; + unsigned Reg = getReg(Decoder, Mips::GPRNM32RegClassID, RegNo); + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus DecodePtrRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, @@ -1551,6 +1871,76 @@ static DecodeStatus DecodeMem(MCInst &Inst, return MCDisassembler::Success; } +template +static DecodeStatus DecodeMemNM(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder) { + int Offset = (Insn & ((1 << Offbits) - 1)); + if (isSigned) + Offset = SignExtend32(Offset); + unsigned Base; + + switch (rt) { + case Mips::GPRNMGPRegClassID: + case Mips::GPRNMSPRegClassID: + Base = 0; + break; + case Mips::GPRNM3RegClassID: + Base = fieldFromInstruction(Insn, Offbits, 3); + break; + case Mips::GPRNM4RegClassID: + case Mips::GPRNM4ZRegClassID: + assert(false && "Unexpected register class."); + break; + default: + Base = fieldFromInstruction(Insn, Offbits, 5); + } + Base = getReg(Decoder, rt, Base); + +// if (Inst.getOpcode() == Mips::SC || +// Inst.getOpcode() == Mips::SCD) +// Inst.addOperand(MCOperand::createReg(Reg)); + + Inst.addOperand(MCOperand::createReg(Base)); + Inst.addOperand(MCOperand::createImm(Offset)); + + return MCDisassembler::Success; +} + +static DecodeStatus DecodeMemNMRX(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder) { + unsigned Offset = getReg(Decoder, Mips::GPRNM32RegClassID, + fieldFromInstruction(Insn, 0, 5)); + unsigned Base = getReg(Decoder, Mips::GPRNM32RegClassID, + fieldFromInstruction(Insn, 5, 5)); + + Inst.addOperand(MCOperand::createReg(Base)); + Inst.addOperand(MCOperand::createImm(Offset)); + + return MCDisassembler::Success; +} + + +static DecodeStatus DecodeMemNM4x4(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder) { + int 
Offset = (Insn & 0x8) | (Insn & 0x100) >> 6; + unsigned Base; + + Base = getReg(Decoder, Mips::GPRNM4RegClassID, + fieldFromInstruction(Insn, 0, 5) & ~0x8); + + Inst.addOperand(MCOperand::createReg(Base)); + Inst.addOperand(MCOperand::createImm(Offset)); + + return MCDisassembler::Success; +} + static DecodeStatus DecodeMemEVA(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -1936,6 +2326,7 @@ static DecodeStatus DecodeMemMMImm16(MCInst &Inst, return MCDisassembler::Success; } + static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -2301,6 +2692,17 @@ static DecodeStatus DecodeBranchTarget26MM(MCInst &Inst, return MCDisassembler::Success; } +template +static DecodeStatus DecodeBranchTargetNM(MCInst &Inst, + unsigned Offset, + uint64_t Address, + const void *Decoder) { + int32_t BranchOffset = SignExtend32(Offset << 1); + + Inst.addOperand(MCOperand::createImm(BranchOffset)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeJumpTargetMM(MCInst &Inst, unsigned Insn, uint64_t Address, @@ -2383,6 +2785,12 @@ static DecodeStatus DecodeInsSize(MCInst &Inst, return MCDisassembler::Success; } +static DecodeStatus DecodeSimm32(MCInst &Inst, unsigned Insn, + uint64_t Address, const void *Decoder) { + Inst.addOperand(MCOperand::createImm(Insn)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeSimm19Lsl2(MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder) { Inst.addOperand(MCOperand::createImm(SignExtend32<19>(Insn) * 4)); @@ -2637,3 +3045,84 @@ static DecodeStatus DecodeBlezGroupBranchMMR6(MCInst &MI, InsnType insn, return MCDisassembler::Success; } + +static DecodeStatus DecodeImmM1To126(MCInst &Inst, unsigned Value, + uint64_t Address, + const void *Decoder) { + if (Value == 127) + Inst.addOperand(MCOperand::createImm(-1)); + else + Inst.addOperand(MCOperand::createImm(Value)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeUImm4Mask(MCInst &Inst, unsigned Value, + uint64_t 
Address, + const void *Decoder) { + if (Value == 12) + Inst.addOperand(MCOperand::createImm(0xff)); + else if (Value == 13) + Inst.addOperand(MCOperand::createImm(0xffff)); + else + Inst.addOperand(MCOperand::createImm(Value)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeUImm3Shift(MCInst &Inst, unsigned Value, + uint64_t Address, + const void *Decoder) { + if (Value == 0) + Inst.addOperand(MCOperand::createImm(8)); + else + Inst.addOperand(MCOperand::createImm(Value)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeNMRegListOperand(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder) { + unsigned RegStart = fieldFromInstruction(Insn, 5, 5); + unsigned RegCount = fieldFromInstruction(Insn, 1, 4); + unsigned GP_bit = fieldFromInstruction(Insn, 0, 1); + unsigned i; + unsigned RegNo; + + Inst.addOperand(MCOperand::createReg(getReg(Decoder, + Mips::GPRNM32RegClassID, + RegStart))); + for (i = RegStart + 1; i < RegStart + RegCount; i++) { + if (i == RegStart + RegCount - 1 && GP_bit) + RegNo = 28; + else if (i > 31) + RegNo = 16 + (i % 32); // $ra+1 wraps to $s0 + else + RegNo = i; + Inst.addOperand(MCOperand::createReg(getReg(Decoder, + Mips::GPRNM32RegClassID, + RegNo))); + } + return MCDisassembler::Success; +} + +static DecodeStatus DecodeNMRegList16Operand(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder) { + unsigned RegStart = 30 + fieldFromInstruction(Insn, 4, 1); + unsigned RegCount = fieldFromInstruction(Insn, 0, 4); + // Re-encode the parameters for 32-bit instruction operand + // and call it's decoder + return DecodeNMRegListOperand(Inst, + (RegStart << 5) | (RegCount << 1), + Address, Decoder); +} + +static DecodeStatus DecodeNegImm12(MCInst &Inst, + unsigned Insn, + uint64_t Address, + const void *Decoder) { + unsigned Imm = fieldFromInstruction(Insn, 0, 12); + Inst.addOperand(MCOperand::createImm(-Imm)); + return MCDisassembler::Success; +} diff --git 
a/llvm/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp index 01b777f53cdb6..56595776ae8ce 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp @@ -111,7 +111,7 @@ unsigned MipsABIInfo::GetPtrAdduOp() const { } unsigned MipsABIInfo::GetPtrAddiuOp() const { - return ArePtrs64bit() ? Mips::DADDiu : IsP32() ? Mips::ADDiu_NM : Mips::ADDiu; + return ArePtrs64bit() ? Mips::DADDiu : IsP32() ? Mips::ADDIU_NM : Mips::ADDiu; } unsigned MipsABIInfo::GetPtrSubuOp() const { diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp index 94d338746a6c2..b9c4e42a54469 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp @@ -210,6 +210,80 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value, return 0; } break; + case Mips::fixup_NANOMIPS_PC25_S1: + // Forcing a signed division because Value can be negative. + Value = (int64_t)Value / 2; + // We now check if Value can be encoded as a 25-bit signed immediate. + if (!isInt<25>(Value)) { + Ctx.reportError(Fixup.getLoc(), "out of range PC25 fixup"); + return 0; + } + break; + case Mips::fixup_NANOMIPS_PC21_S1: + // Forcing a signed division because Value can be negative. + Value = (int64_t)Value / 2; + // We now check if Value can be encoded as a 21-bit signed immediate. + if (!isInt<21>(Value)) { + Ctx.reportError(Fixup.getLoc(), "out of range PC21 fixup"); + return 0; + } + break; + case Mips::fixup_NANOMIPS_PC14_S1: + // Forcing a signed division because Value can be negative. + Value = (int64_t)Value / 2; + // We now check if Value can be encoded as a 14-bit signed immediate. 
+ if (!isInt<14>(Value)) { + Ctx.reportError(Fixup.getLoc(), "out of range PC14 fixup"); + return 0; + } + break; + case Mips::fixup_NANOMIPS_PC11_S1: + // Forcing a signed division because Value can be negative. + Value = (int64_t)Value / 2; + // We now check if Value can be encoded as a 11-bit signed immediate. + if (!isInt<11>(Value)) { + Ctx.reportError(Fixup.getLoc(), "out of range PC11 fixup"); + return 0; + } + break; + case Mips::fixup_NANOMIPS_PC10_S1: + // Forcing a signed division because Value can be negative. + Value = (int64_t)Value / 2; + // We now check if Value can be encoded as a 10-bit signed immediate. + if (!isInt<10>(Value)) { + Ctx.reportError(Fixup.getLoc(), "out of range PC10 fixup"); + return 0; + } + break; + case Mips::fixup_NANOMIPS_PC7_S1: + // Forcing a signed division because Value can be negative. + Value = (int64_t)Value / 2; + // We now check if Value can be encoded as a 7-bit signed immediate. + if (!isInt<7>(Value)) { + Ctx.reportError(Fixup.getLoc(), "out of range PC7 fixup"); + return 0; + } + break; + case Mips::fixup_NANOMIPS_PC4_S1: + // Forcing a signed division because Value can be negative. + Value = (int64_t)Value / 2; + // We now check if Value can be encoded as a 4-bit signed immediate. 
+ if (!isInt<4>(Value)) { + Ctx.reportError(Fixup.getLoc(), "out of range PC4 fixup"); + return 0; + } + break; + case Mips::fixup_NANOMIPS_HI20: + case Mips::fixup_NANOMIPS_GPREL_HI20: + case Mips::fixup_NANOMIPS_PCHI20: + case Mips::fixup_NANOMIPS_GOTPC_HI20: + Value = (Value >> 12) & 0xfffff; + break; + case Mips::fixup_NANOMIPS_LO12: + case Mips::fixup_NANOMIPS_GPREL_LO12: + case Mips::fixup_NANOMIPS_GOT_LO12: + Value = Value & 0xfff; + break; } return Value; @@ -418,7 +492,79 @@ getFixupKindInfo(MCFixupKind Kind) const { { "fixup_Mips_SUB", 0, 64, 0 }, { "fixup_MICROMIPS_SUB", 0, 64, 0 }, { "fixup_Mips_JALR", 0, 32, 0 }, - { "fixup_MICROMIPS_JALR", 0, 32, 0 } + { "fixup_MICROMIPS_JALR", 0, 32, 0 }, + { "fixup_NANOMIPS_32", 0, 32, 0 }, + { "fixup_NANOMIPS_64", 0, 64, 0 }, + { "fixup_NANOMIPS_NEG", 0, 32, 0 }, + { "fixup_NANOMIPS_ASHIFTR_1", 0, 32, 0 }, + { "fixup_NANOMIPS_UNSIGNED_8", 0, 32, 0 }, + { "fixup_NANOMIPS_SIGNED_8", 0, 32, 0 }, + { "fixup_NANOMIPS_UNSIGNED_16", 0, 32, 0 }, + { "fixup_NANOMIPS_SIGNED_16", 0, 32, 0 }, + { "fixup_NANOMIPS_RELATIVE", 0, 32, 0 }, + { "fixup_NANOMIPS_GLOBAL", 0, 32, 0 }, + { "fixup_NANOMIPS_JUMP_SLOT", 0, 32, 0 }, + { "fixup_NANOMIPS_IRELATIVE", 0, 32, 0 }, + { "fixup_NANOMIPS_PC25_S1", 0, 25, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_PC21_S1", 0, 21, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_PC14_S1", 0, 14, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_PC11_S1", 0, 11, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_PC10_S1", 0, 10, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_PC7_S1", 0, 7, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_PC4_S1", 0, 4, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_GPREL19_S2", 0, 19, 0 }, + { "fixup_NANOMIPS_GPREL18_S3", 0, 18, 0 }, + { "fixup_NANOMIPS_GPREL18", 0, 18, 0 }, + { "fixup_NANOMIPS_GPREL17_S1", 0, 17, 0 }, + { "fixup_NANOMIPS_GPREL16_S2", 0, 16, 0 }, + { "fixup_NANOMIPS_GPREL7_S2", 0, 7, 0 }, + { "fixup_NANOMIPS_GPREL_HI20", 20, 
32, 0 }, + { "fixup_NANOMIPS_PCHI20", 0, 20, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_HI20", 0, 20, 0 }, + { "fixup_NANOMIPS_LO12", 0, 12, 0 }, + { "fixup_NANOMIPS_GPREL_I32", 0, 32, 0 }, + { "fixup_NANOMIPS_PC_I32", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_I32", 0, 32, 0 }, + { "fixup_NANOMIPS_GOT_DISP", 0, 32, 0 }, + { "fixup_NANOMIPS_GOTPC_I32", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_GOTPC_HI20", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_NANOMIPS_GOT_LO12", 0, 32, 0 }, + { "fixup_NANOMIPS_GOT_CALL", 0, 32, 0 }, + { "fixup_NANOMIPS_GOT_PAGE", 0, 32, 0 }, + { "fixup_NANOMIPS_GOT_OFST", 0, 32, 0 }, + { "fixup_NANOMIPS_LO4_S2", 0, 4, 0 }, + { "fixup_NANOMIPS_RESERVED1", 0, 32, 0 }, + { "fixup_NANOMIPS_GPREL_LO12", 0, 12, 0 }, + { "fixup_NANOMIPS_SCN_DISP", 0, 32, 0 }, + { "fixup_NANOMIPS_COPY", 0, 32, 0 }, + { "fixup_NANOMIPS_ALIGN", 0, 0, 0 }, + { "fixup_NANOMIPS_FILL", 0, 0, 0 }, + { "fixup_NANOMIPS_MAX", 0, 0, 0 }, + { "fixup_NANOMIPS_INSN32", 0, 0, 0 }, + { "fixup_NANOMIPS_FIXED", 0, 0, 0 }, + { "fixup_NANOMIPS_NORELAX", 0, 0, 0 }, + { "fixup_NANOMIPS_RELAX", 0, 0, 0 }, + { "fixup_NANOMIPS_SAVERESTORE", 0, 0, 0 }, + { "fixup_NANOMIPS_INSN16", 0, 0, 0 }, + { "fixup_NANOMIPS_JALR32", 0, 32, 0 }, + { "fixup_NANOMIPS_JALR16", 0, 32, 0 }, + { "fixup_NANOMIPS_JUMPTABLE_LOAD", 0, 32, 0 }, + { "fixup_NANOMIPS_FRAME_REG", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_DTPMOD", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_DTPREL", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_TPREL", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_GD", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_GD_I32", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_LD", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_LD_I32", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_DTPREL12", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_DTPREL16", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_DTPREL_I32", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_GOTTPREL", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_GOTTPREL_PC_I32", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, + { 
"fixup_NANOMIPS_TLS_TPREL12", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_TPREL16", 0, 32, 0 }, + { "fixup_NANOMIPS_TLS_TPREL_I32", 0, 32, 0 }, }; static_assert(array_lengthof(LittleEndianInfos) == Mips::NumTargetFixupKinds, "Not all MIPS little endian fixup kinds added!"); @@ -499,8 +645,9 @@ getFixupKindInfo(MCFixupKind Kind) const { { "fixup_Mips_JALR", 0, 32, 0 }, { "fixup_MICROMIPS_JALR", 0, 32, 0 } }; - static_assert(array_lengthof(BigEndianInfos) == Mips::NumTargetFixupKinds, - "Not all MIPS big endian fixup kinds added!"); +// static_assert(array_lengthof(BigEndianInfos) == +// Mips::NumTargetFixupKinds, +// "Not all MIPS big endian fixup kinds added!"); if (Kind < FirstTargetFixupKind) return MCAsmBackend::getFixupKindInfo(Kind); @@ -569,6 +716,29 @@ bool MipsAsmBackend::shouldForceRelocation(const MCAssembler &Asm, case Mips::fixup_MICROMIPS_TLS_TPREL_HI16: case Mips::fixup_MICROMIPS_TLS_TPREL_LO16: case Mips::fixup_MICROMIPS_JALR: + case Mips::fixup_NANOMIPS_PC25_S1: + case Mips::fixup_NANOMIPS_PC21_S1: + case Mips::fixup_NANOMIPS_PC14_S1: + case Mips::fixup_NANOMIPS_PC11_S1: + case Mips::fixup_NANOMIPS_PC10_S1: + case Mips::fixup_NANOMIPS_PC7_S1: + case Mips::fixup_NANOMIPS_PC4_S1: + case Mips::fixup_NANOMIPS_PCHI20: + case Mips::fixup_NANOMIPS_GOTPC_I32: + case Mips::fixup_NANOMIPS_GOTPC_HI20: + case Mips::fixup_NANOMIPS_TLS_GOTTPREL_PC_I32: + case Mips::fixup_NANOMIPS_COPY: + case Mips::fixup_NANOMIPS_ALIGN: + case Mips::fixup_NANOMIPS_FILL: + case Mips::fixup_NANOMIPS_MAX: + case Mips::fixup_NANOMIPS_INSN32: + case Mips::fixup_NANOMIPS_FIXED: + case Mips::fixup_NANOMIPS_NORELAX: + case Mips::fixup_NANOMIPS_SAVERESTORE: + case Mips::fixup_NANOMIPS_INSN16: + case Mips::fixup_NANOMIPS_JUMPTABLE_LOAD: + case Mips::fixup_NANOMIPS_JALR32: + case Mips::fixup_NANOMIPS_JALR16: return true; } } diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h index e4b2d3747a272..b6e8f6d6e4ba0 100644 --- 
a/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h @@ -139,6 +139,17 @@ namespace MipsII { OPERAND_LAST_MIPS_MEM_IMM = OPERAND_MEM_SIMM9 }; } + + namespace NanoMips { + enum OperandType : unsigned { + OPERAND_FIRST_NM_MEM_IMM = MipsII::OPERAND_LAST_MIPS_MEM_IMM+1, + OPERAND_NM_SPREL7 = OPERAND_FIRST_NM_MEM_IMM, + OPERAND_NM_GPREL9, + OPERAND_NM_GPREL18, + OPERAND_NM_GPREL21, + OPERAND_LAST_NM_MEM_IMM = OPERAND_NM_GPREL21 + }; + } } #endif diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp index 9c317e3f8840f..8ed266503c353 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp @@ -56,7 +56,8 @@ raw_ostream &operator<<(raw_ostream &OS, const MipsRelocationEntry &RHS) { class MipsELFObjectWriter : public MCELFObjectTargetWriter { public: - MipsELFObjectWriter(uint8_t OSABI, bool HasRelocationAddend, bool Is64); + MipsELFObjectWriter(uint8_t OSABI, bool HasRelocationAddend, + uint16_t EMachine, bool Is64); ~MipsELFObjectWriter() override = default; @@ -146,6 +147,10 @@ static unsigned getMatchingLoType(const ELFRelocationEntry &Reloc) { return ELF::R_MICROMIPS_LO16; if (Type == ELF::R_MIPS16_HI16) return ELF::R_MIPS16_LO16; + if (Type == ELF::R_NANOMIPS_HI20) + return ELF::R_NANOMIPS_LO12; + if (Type == ELF::R_NANOMIPS_PCHI20) + return ELF::R_NANOMIPS_LO12; if (Reloc.OriginalSymbol && Reloc.OriginalSymbol->getBinding() != ELF::STB_LOCAL) @@ -155,6 +160,8 @@ static unsigned getMatchingLoType(const ELFRelocationEntry &Reloc) { return ELF::R_MIPS_LO16; if (Type == ELF::R_MICROMIPS_GOT16) return ELF::R_MICROMIPS_LO16; + if (Type == ELF::R_NANOMIPS_GOTPC_HI20) + return ELF::R_NANOMIPS_GOT_LO12; if (Type == ELF::R_MIPS16_GOT16) return ELF::R_MIPS16_LO16; @@ -211,8 +218,113 @@ static void dumpRelocs(const char *Prefix, const Container &Relocs) { #endif 
MipsELFObjectWriter::MipsELFObjectWriter(uint8_t OSABI, - bool HasRelocationAddend, bool Is64) - : MCELFObjectTargetWriter(Is64, OSABI, ELF::EM_MIPS, HasRelocationAddend) {} + bool HasRelocationAddend, + uint16_t EMachine, bool Is64) + : MCELFObjectTargetWriter(Is64, OSABI, EMachine, HasRelocationAddend) {} + + +static unsigned getNanoMipsRelocType(MCContext &Ctx, + const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) { + // Determine the type of the relocation. + unsigned Kind = Fixup.getTargetKind(); + + if (IsPCRel) { + switch (Kind) { + case Mips::fixup_NANOMIPS_PC25_S1: + return ELF::R_NANOMIPS_PC25_S1; + case Mips::fixup_NANOMIPS_PC21_S1: + return ELF::R_NANOMIPS_PC21_S1; + case Mips::fixup_NANOMIPS_PC14_S1: + return ELF::R_NANOMIPS_PC14_S1; + case Mips::fixup_NANOMIPS_PC11_S1: + return ELF::R_NANOMIPS_PC11_S1; + case Mips::fixup_NANOMIPS_PC10_S1: + return ELF::R_NANOMIPS_PC10_S1; + case Mips::fixup_NANOMIPS_PC7_S1: + return ELF::R_NANOMIPS_PC7_S1; + case Mips::fixup_NANOMIPS_PC4_S1: + return ELF::R_NANOMIPS_PC4_S1; + case Mips::fixup_NANOMIPS_PCHI20: + return ELF::R_NANOMIPS_PCHI20; + case Mips::fixup_NANOMIPS_PC_I32: + return ELF::R_NANOMIPS_PC_I32; + case Mips::fixup_NANOMIPS_GOTPC_I32: + return ELF::R_NANOMIPS_GOTPC_I32; + case Mips::fixup_NANOMIPS_GOTPC_HI20: + return ELF::R_NANOMIPS_GOTPC_HI20; + } + + llvm_unreachable("invalid PC-relative fixup kind!"); + } + + switch (Kind) { + case Mips::fixup_NANOMIPS_32: return ELF::R_NANOMIPS_32; + case Mips::fixup_NANOMIPS_64: return ELF::R_NANOMIPS_64; + case Mips::fixup_NANOMIPS_NEG: return ELF::R_NANOMIPS_NEG; + case Mips::fixup_NANOMIPS_ASHIFTR_1: return ELF::R_NANOMIPS_ASHIFTR_1; + case Mips::fixup_NANOMIPS_UNSIGNED_8: return ELF::R_NANOMIPS_UNSIGNED_8; + case Mips::fixup_NANOMIPS_SIGNED_8: return ELF::R_NANOMIPS_SIGNED_8; + case Mips::fixup_NANOMIPS_UNSIGNED_16: return ELF::R_NANOMIPS_UNSIGNED_16; + case Mips::fixup_NANOMIPS_SIGNED_16: return ELF::R_NANOMIPS_SIGNED_16; + case 
Mips::fixup_NANOMIPS_RELATIVE: return ELF::R_NANOMIPS_RELATIVE; + case Mips::fixup_NANOMIPS_GLOBAL: return ELF::R_NANOMIPS_GLOBAL; + case Mips::fixup_NANOMIPS_JUMP_SLOT: return ELF::R_NANOMIPS_JUMP_SLOT; + case Mips::fixup_NANOMIPS_IRELATIVE: return ELF::R_NANOMIPS_IRELATIVE; + case Mips::fixup_NANOMIPS_GPREL19_S2: return ELF::R_NANOMIPS_GPREL19_S2; + case Mips::fixup_NANOMIPS_GPREL18_S3: return ELF::R_NANOMIPS_GPREL18_S3; + case Mips::fixup_NANOMIPS_GPREL18: return ELF::R_NANOMIPS_GPREL18; + case Mips::fixup_NANOMIPS_GPREL17_S1: return ELF::R_NANOMIPS_GPREL17_S1; + case Mips::fixup_NANOMIPS_GPREL16_S2: return ELF::R_NANOMIPS_GPREL16_S2; + case Mips::fixup_NANOMIPS_GPREL7_S2: return ELF::R_NANOMIPS_GPREL7_S2; + case Mips::fixup_NANOMIPS_GPREL_HI20: return ELF::R_NANOMIPS_GPREL_HI20; + case Mips::fixup_NANOMIPS_HI20: return ELF::R_NANOMIPS_HI20; + case Mips::fixup_NANOMIPS_LO12: return ELF::R_NANOMIPS_LO12; + case Mips::fixup_NANOMIPS_GPREL_I32: return ELF::R_NANOMIPS_GPREL_I32; + case Mips::fixup_NANOMIPS_I32: return ELF::R_NANOMIPS_I32; + case Mips::fixup_NANOMIPS_GOT_DISP: return ELF::R_NANOMIPS_GOT_DISP; + case Mips::fixup_NANOMIPS_GOT_LO12: return ELF::R_NANOMIPS_GOT_LO12; + case Mips::fixup_NANOMIPS_GOT_CALL: return ELF::R_NANOMIPS_GOT_CALL; + case Mips::fixup_NANOMIPS_GOT_PAGE: return ELF::R_NANOMIPS_GOT_PAGE; + case Mips::fixup_NANOMIPS_GOT_OFST: return ELF::R_NANOMIPS_GOT_OFST; + case Mips::fixup_NANOMIPS_LO4_S2: return ELF::R_NANOMIPS_LO4_S2; + case Mips::fixup_NANOMIPS_GPREL_LO12: return ELF::R_NANOMIPS_GPREL_LO12; + case Mips::fixup_NANOMIPS_COPY: return ELF::R_NANOMIPS_COPY; + case Mips::fixup_NANOMIPS_ALIGN: return ELF::R_NANOMIPS_ALIGN; + case Mips::fixup_NANOMIPS_FILL: return ELF::R_NANOMIPS_FILL; + case Mips::fixup_NANOMIPS_MAX: return ELF::R_NANOMIPS_MAX; + case Mips::fixup_NANOMIPS_INSN32: return ELF::R_NANOMIPS_INSN32; + case Mips::fixup_NANOMIPS_FIXED: return ELF::R_NANOMIPS_FIXED; + case Mips::fixup_NANOMIPS_NORELAX: return 
ELF::R_NANOMIPS_NORELAX; + case Mips::fixup_NANOMIPS_RELAX: return ELF::R_NANOMIPS_RELAX; + case Mips::fixup_NANOMIPS_SAVERESTORE: return ELF::R_NANOMIPS_SAVERESTORE; + case Mips::fixup_NANOMIPS_INSN16: return ELF::R_NANOMIPS_INSN16; + case Mips::fixup_NANOMIPS_JALR32: return ELF::R_NANOMIPS_JALR32; + case Mips::fixup_NANOMIPS_JALR16: return ELF::R_NANOMIPS_JALR16; + case Mips::fixup_NANOMIPS_JUMPTABLE_LOAD: return ELF::R_NANOMIPS_JUMPTABLE_LOAD; + case Mips::fixup_NANOMIPS_FRAME_REG: return ELF::R_NANOMIPS_FRAME_REG; + // TLS relocations. + case Mips::fixup_NANOMIPS_TLS_DTPMOD: return ELF::R_NANOMIPS_TLS_DTPMOD; + case Mips::fixup_NANOMIPS_TLS_DTPREL: return ELF::R_NANOMIPS_TLS_DTPREL; + case Mips::fixup_NANOMIPS_TLS_TPREL: return ELF::R_NANOMIPS_TLS_TPREL; + case Mips::fixup_NANOMIPS_TLS_GD: return ELF::R_NANOMIPS_TLS_GD; + case Mips::fixup_NANOMIPS_TLS_GD_I32: return ELF::R_NANOMIPS_TLS_GD_I32; + case Mips::fixup_NANOMIPS_TLS_LD: return ELF::R_NANOMIPS_TLS_LD; + case Mips::fixup_NANOMIPS_TLS_LD_I32: return ELF::R_NANOMIPS_TLS_LD_I32; + case Mips::fixup_NANOMIPS_TLS_DTPREL12: return ELF::R_NANOMIPS_TLS_DTPREL12; + case Mips::fixup_NANOMIPS_TLS_DTPREL16: return ELF::R_NANOMIPS_TLS_DTPREL16; + case Mips::fixup_NANOMIPS_TLS_DTPREL_I32: return ELF::R_NANOMIPS_TLS_DTPREL_I32; + case Mips::fixup_NANOMIPS_TLS_GOTTPREL: return ELF::R_NANOMIPS_TLS_GOTTPREL; + case Mips::fixup_NANOMIPS_TLS_GOTTPREL_PC_I32: return ELF::R_NANOMIPS_TLS_GOTTPREL_PC_I32; + case Mips::fixup_NANOMIPS_TLS_TPREL12: return ELF::R_NANOMIPS_TLS_TPREL12; + case Mips::fixup_NANOMIPS_TLS_TPREL16: return ELF::R_NANOMIPS_TLS_TPREL16; + case Mips::fixup_NANOMIPS_TLS_TPREL_I32: return ELF::R_NANOMIPS_TLS_TPREL_I32; + } + + llvm_unreachable("invalid fixup kind!"); +} + unsigned MipsELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target, @@ -220,26 +332,51 @@ unsigned MipsELFObjectWriter::getRelocType(MCContext &Ctx, bool IsPCRel) const { // Determine the type of the relocation. 
unsigned Kind = Fixup.getTargetKind(); - - switch (Kind) { - case FK_NONE: - return ELF::R_MIPS_NONE; - case FK_Data_1: - Ctx.reportError(Fixup.getLoc(), - "MIPS does not support one byte relocations"); - return ELF::R_MIPS_NONE; - case Mips::fixup_Mips_16: - case FK_Data_2: - return IsPCRel ? ELF::R_MIPS_PC16 : ELF::R_MIPS_16; - case Mips::fixup_Mips_32: - case FK_Data_4: - return IsPCRel ? ELF::R_MIPS_PC32 : ELF::R_MIPS_32; - case Mips::fixup_Mips_64: - case FK_Data_8: - return IsPCRel - ? setRTypes(ELF::R_MIPS_PC32, ELF::R_MIPS_64, ELF::R_MIPS_NONE) - : (unsigned)ELF::R_MIPS_64; - } + bool isNanoMips = Ctx.getTargetTriple().isNanoMips(); + + if (Kind >= Mips::FirstNanoMipsFixupKind + && Kind <= Mips::LastNanoMipsFixupKind) + return getNanoMipsRelocType(Ctx, Target, Fixup, IsPCRel); + + if (isNanoMips) + { + switch (Kind) { + case FK_NONE: + return ELF::R_NANOMIPS_NONE; + case FK_Data_1: + return ELF::R_NANOMIPS_UNSIGNED_8; + case FK_Data_2: + return ELF::R_NANOMIPS_UNSIGNED_16; + case FK_Data_4: + return ELF::R_NANOMIPS_32; + case FK_Data_8: + Ctx.reportError(Fixup.getLoc(), + "NanoMips does not support 8 byte relocations"); + return ELF::R_NANOMIPS_NONE; + } + } + else + { + switch (Kind) { + case FK_NONE: + return ELF::R_MIPS_NONE; + case FK_Data_1: + Ctx.reportError(Fixup.getLoc(), + "MIPS does not support one byte relocations"); + return ELF::R_MIPS_NONE; + case Mips::fixup_Mips_16: + case FK_Data_2: + return IsPCRel ? ELF::R_MIPS_PC16 : ELF::R_MIPS_16; + case Mips::fixup_Mips_32: + case FK_Data_4: + return IsPCRel ? ELF::R_MIPS_PC32 : ELF::R_MIPS_32; + case Mips::fixup_Mips_64: + case FK_Data_8: + return IsPCRel + ? 
setRTypes(ELF::R_MIPS_PC32, ELF::R_MIPS_64, ELF::R_MIPS_NONE) + : (unsigned)ELF::R_MIPS_64; + } + } if (IsPCRel) { switch (Kind) { @@ -512,6 +649,10 @@ bool MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym, needsRelocateWithSymbol(Sym, (Type >> 8) & 0xff) || needsRelocateWithSymbol(Sym, (Type >> 16) & 0xff); + // Conservative assumption for NanoMips, let the linker handle it! + if (getEMachine() == ELF::EM_NANOMIPS) + return true; + switch (Type) { default: errs() << Type << "\n"; @@ -664,7 +805,9 @@ std::unique_ptr llvm::createMipsELFObjectWriter(const Triple &TT, bool IsN32) { uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS()); bool IsN64 = TT.isArch64Bit() && !IsN32; - bool HasRelocationAddend = TT.isArch64Bit(); + bool HasRelocationAddend = (TT.isArch64Bit() || TT.getArch() == llvm::Triple::nanomips); + uint16_t EMachine = ((TT.getArch() == llvm::Triple::nanomips)? + ELF::EM_NANOMIPS : ELF::EM_MIPS); return std::make_unique(OSABI, HasRelocationAddend, - IsN64); + EMachine, IsN64); } diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h index b83d822bd8d03..992479803a002 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h @@ -222,6 +222,83 @@ namespace Mips { fixup_Mips_JALR, fixup_MICROMIPS_JALR, + // NanoMIPS relocations + FirstNanoMipsFixupKind, + fixup_NANOMIPS_32 = FirstNanoMipsFixupKind, + fixup_NANOMIPS_64, + fixup_NANOMIPS_NEG, + fixup_NANOMIPS_ASHIFTR_1, + fixup_NANOMIPS_UNSIGNED_8, + fixup_NANOMIPS_SIGNED_8, + fixup_NANOMIPS_UNSIGNED_16, + fixup_NANOMIPS_SIGNED_16, + fixup_NANOMIPS_RELATIVE, + fixup_NANOMIPS_GLOBAL, + fixup_NANOMIPS_JUMP_SLOT, + fixup_NANOMIPS_IRELATIVE, + fixup_NANOMIPS_PC25_S1, + fixup_NANOMIPS_PC21_S1, + fixup_NANOMIPS_PC14_S1, + fixup_NANOMIPS_PC11_S1, + fixup_NANOMIPS_PC10_S1, + fixup_NANOMIPS_PC7_S1, + fixup_NANOMIPS_PC4_S1, + fixup_NANOMIPS_GPREL19_S2, + 
fixup_NANOMIPS_GPREL18_S3, + fixup_NANOMIPS_GPREL18, + fixup_NANOMIPS_GPREL17_S1, + fixup_NANOMIPS_GPREL16_S2, + fixup_NANOMIPS_GPREL7_S2, + fixup_NANOMIPS_GPREL_HI20, + fixup_NANOMIPS_PCHI20, + fixup_NANOMIPS_HI20, + fixup_NANOMIPS_LO12, + fixup_NANOMIPS_GPREL_I32, + fixup_NANOMIPS_PC_I32, + fixup_NANOMIPS_I32, + fixup_NANOMIPS_GOT_DISP, + fixup_NANOMIPS_GOTPC_I32, + fixup_NANOMIPS_GOTPC_HI20, + fixup_NANOMIPS_GOT_LO12, + fixup_NANOMIPS_GOT_CALL, + fixup_NANOMIPS_GOT_PAGE, + fixup_NANOMIPS_GOT_OFST, + fixup_NANOMIPS_LO4_S2, + fixup_NANOMIPS_RESERVED1, + fixup_NANOMIPS_GPREL_LO12, + fixup_NANOMIPS_SCN_DISP, + fixup_NANOMIPS_COPY, + fixup_NANOMIPS_ALIGN, + fixup_NANOMIPS_FILL, + fixup_NANOMIPS_MAX, + fixup_NANOMIPS_INSN32, + fixup_NANOMIPS_FIXED, + fixup_NANOMIPS_NORELAX, + fixup_NANOMIPS_RELAX, + fixup_NANOMIPS_SAVERESTORE, + fixup_NANOMIPS_INSN16, + fixup_NANOMIPS_JALR32, + fixup_NANOMIPS_JALR16, + fixup_NANOMIPS_JUMPTABLE_LOAD, + fixup_NANOMIPS_FRAME_REG, + // TLS relocations. + fixup_NANOMIPS_TLS_DTPMOD, + fixup_NANOMIPS_TLS_DTPREL, + fixup_NANOMIPS_TLS_TPREL, + fixup_NANOMIPS_TLS_GD, + fixup_NANOMIPS_TLS_GD_I32, + fixup_NANOMIPS_TLS_LD, + fixup_NANOMIPS_TLS_LD_I32, + fixup_NANOMIPS_TLS_DTPREL12, + fixup_NANOMIPS_TLS_DTPREL16, + fixup_NANOMIPS_TLS_DTPREL_I32, + fixup_NANOMIPS_TLS_GOTTPREL, + fixup_NANOMIPS_TLS_GOTTPREL_PC_I32, + fixup_NANOMIPS_TLS_TPREL12, + fixup_NANOMIPS_TLS_TPREL16, + fixup_NANOMIPS_TLS_TPREL_I32, + LastNanoMipsFixupKind = fixup_NANOMIPS_TLS_TPREL_I32, + // Marker LastTargetFixupKind, NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind @@ -229,5 +306,4 @@ namespace Mips { } // namespace Mips } // namespace llvm - #endif diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsInstPrinter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsInstPrinter.cpp index eb49aa483343c..0d7308020b6e4 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsInstPrinter.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsInstPrinter.cpp @@ -106,6 +106,21 @@ void 
MipsInstPrinter::printInst(const MCInst *MI, uint64_t Address, printSaveRestore(MI, O); O << "\n"; return; + case Mips::RESTORE_NM: + O << "\trestore\t"; + printSaveRestore(MI, O); + return; + case Mips::SAVE16_NM: + case Mips::SAVE_NM: + O << "\tsave\t"; + printSaveRestore(MI, O); + return; + case Mips::RESTOREJRC16_NM: + case Mips::RESTOREJRC_NM: + O << "\trestore.jrc\t"; + printSaveRestore(MI, O); + O << "\n"; + return; } // Try to print any aliases first. @@ -175,7 +190,31 @@ printMemOperand(const MCInst *MI, int opNum, raw_ostream &O) { break; } - printOperand(MI, opNum+1, O); + // Index register is encoded as immediate value + // in case of nanoMIPS indexed instructions + switch (MI->getOpcode()) { + case Mips::LWX_NM: + case Mips::LWXS_NM: + case Mips::LBX_NM: + case Mips::LBUX_NM: + case Mips::LHX_NM: + case Mips::LHUX_NM: + case Mips::LHXS_NM: + case Mips::LHUXS_NM: + case Mips::SWX_NM: + case Mips::SWXS_NM: + case Mips::SBX_NM: + case Mips::SHX_NM: + case Mips::SHXS_NM: + if (!MI->getOperand(opNum+1).isReg()) { + printRegName(O, MI->getOperand(opNum+1).getImm()); + break; + } + // Fall through + default: + printOperand(MI, opNum+1, O); + break; + } O << "("; printOperand(MI, opNum, O); O << ")"; diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp index c1f1944941541..b957c025771b2 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp @@ -52,4 +52,6 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Triple &TheTriple, HasMipsExpressions = true; if (ABI.IsP32()) HasLEB128Directives = false; + if (TheTriple.isNanoMips()) + AssemblerDialect = 1; } diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp index 4b8b7ebd40a79..3b5529295ce52 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp @@ 
-140,7 +140,11 @@ void MipsMCCodeEmitter::emitInstruction(uint64_t Val, unsigned Size, // Little-endian byte ordering: // mips32r2: 4 | 3 | 2 | 1 // microMIPS: 2 | 1 | 4 | 3 - if (IsLittleEndian && Size == 4 && isMicroMips(STI)) { + if (IsLittleEndian && Size == 4 && (isMicroMips(STI) || isNanoMips(STI))) { + emitInstruction(Val >> 16, 2, STI, OS); + emitInstruction(Val, 2, STI, OS); + } else if (IsLittleEndian && Size == 6 && isNanoMips(STI)) { + emitInstruction(Val >> 32, 2, STI, OS); emitInstruction(Val >> 16, 2, STI, OS); emitInstruction(Val, 2, STI, OS); } else { @@ -184,14 +188,15 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS, } unsigned long N = Fixups.size(); - uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI); + uint64_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI); // Check for unimplemented opcodes. // Unfortunately in MIPS both NOP and SLL will come in with Binary == 0 // so we have to special check for them. const unsigned Opcode = TmpInst.getOpcode(); if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && - (Opcode != Mips::SLL_MM) && (Opcode != Mips::SLL_MMR6) && !Binary) + (Opcode != Mips::SLL_MM) && (Opcode != Mips::SLL_MMR6) && + (Opcode != Mips::SIGRIE_NM) && !Binary) llvm_unreachable("unimplemented opcode in encodeInstruction()"); int NewOpcode = -1; @@ -233,6 +238,8 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS, emitInstruction(Binary, Size, STI, OS); } + + /// getBranchTargetOpValue - Return binary encoding of the branch /// target operand. If the machine operand requires relocation, /// record the relocation and return zero. 
@@ -255,6 +262,43 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo, return 0; } +static unsigned +getNMRelocForSize(unsigned Size) { + switch (Size) { + default: llvm_unreachable("Unhandled fixup kind!"); + case 25: return Mips::fixup_NANOMIPS_PC25_S1; + case 21: return Mips::fixup_NANOMIPS_PC21_S1; + case 14: return Mips::fixup_NANOMIPS_PC14_S1; + case 11: return Mips::fixup_NANOMIPS_PC11_S1; + case 10: return Mips::fixup_NANOMIPS_PC10_S1; + case 7: return Mips::fixup_NANOMIPS_PC7_S1; + case 4: return Mips::fixup_NANOMIPS_PC4_S1; + } +} + +/// getBranchTargetOpValue - Return binary encoding of the branch +/// target operand. If the machine operand requires relocation, +/// record the relocation and return zero. +template +unsigned MipsMCCodeEmitter:: +getBranchTargetOpValueNM(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + + // If the destination is an immediate, divide by 2. + if (MO.isImm()) return MO.getImm() >> 1; + + assert(MO.isExpr() && + "getBranchTargetOpValue expects only expressions or immediates"); + + const MCExpr *FixupExpression = MCBinaryExpr::createAdd( + MO.getExpr(), MCConstantExpr::create(0, Ctx), Ctx); + Fixups.push_back(MCFixup::create(0, FixupExpression, + MCFixupKind(getNMRelocForSize(Bits)))); + return 0; +} + /// getBranchTargetOpValue1SImm16 - Return binary encoding of the branch /// target operand. If the machine operand requires relocation, /// record the relocation and return zero. @@ -516,6 +560,27 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, return 0; } +/// getJumpTargetOpValue - Return binary encoding of the jump +/// target operand. If the machine operand requires relocation, +/// record the relocation and return zero. 
+unsigned MipsMCCodeEmitter:: +getJumpTargetOpValueNM(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + // If the destination is an immediate, divide by 2. + if (MO.isImm()) return MO.getImm()>>1; + + assert(MO.isExpr() && + "getJumpTargetOpValue expects only expressions or an immediate"); + + const MCExpr *Expr = MO.getExpr(); + Fixups.push_back(MCFixup::create(0, Expr, + MCFixupKind(Mips::fixup_NANOMIPS_PC25_S1))); + return 0; +} + + unsigned MipsMCCodeEmitter:: getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, @@ -577,6 +642,20 @@ getUImm6Lsl2Encoding(const MCInst &MI, unsigned OpNo, return 0; } +unsigned MipsMCCodeEmitter:: +getSImm20Lsl12Encoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + unsigned Res = getMachineOpValue(MI, MO, Fixups, STI); + if (MO.isImm()) { + assert((Res & 0xfff) == 0); + return Res; + } + + return 0; +} + unsigned MipsMCCodeEmitter:: getSImm9AddiuspValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, @@ -590,6 +669,59 @@ getSImm9AddiuspValue(const MCInst &MI, unsigned OpNo, return 0; } +unsigned MipsMCCodeEmitter:: +getSymPCRel(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCExpr *Expr; + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isExpr()) { + Expr = MO.getExpr(); + Fixups.push_back(MCFixup::create(2, Expr, + MCFixupKind(Mips::fixup_NANOMIPS_PC_I32))); + } + else if (MO.isImm()) { + Expr = MCConstantExpr::create(static_cast(MO.getImm()), Ctx); + Fixups.push_back(MCFixup::create(2, Expr, + MCFixupKind(Mips::fixup_NANOMIPS_PC_I32))); + } + return 0; +} + +unsigned MipsMCCodeEmitter:: +getSymGPRel(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCExpr *Expr; + const MCOperand MO = 
MI.getOperand(OpNo); + + if (MO.isExpr()) { + Expr = MO.getExpr(); + const MipsMCExpr *MipsExpr = cast(Expr); + assert (MipsExpr->getKind() == MipsMCExpr::MEK_GPREL); + Expr = MO.getExpr(); + } + else if (MO.isImm()) + Expr = MCConstantExpr::create(static_cast(MO.getImm()), Ctx); + + Fixups.push_back(MCFixup::create(2, Expr, + MCFixupKind(Mips::fixup_NANOMIPS_GPREL_I32))); + return 0; +} + +unsigned MipsMCCodeEmitter:: +getSymAbs(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isExpr()) + Fixups.push_back(MCFixup::create(2, MO.getExpr(), + MCFixupKind(Mips::fixup_NANOMIPS_I32))); + else if (MO.isImm()) + return MO.getImm(); + return 0; +} + unsigned MipsMCCodeEmitter:: getExprOpValue(const MCExpr *Expr, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const { @@ -670,14 +802,18 @@ getExprOpValue(const MCExpr *Expr, SmallVectorImpl &Fixups, case MipsMCExpr::MEK_GPREL: FixupKind = Mips::fixup_Mips_GPREL16; break; + case MipsMCExpr::MEK_LO12: + FixupKind = Mips::fixup_NANOMIPS_LO12; + break; case MipsMCExpr::MEK_LO: // Check for %lo(%neg(%gp_rel(X))) if (MipsExpr->isGpOff()) FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GPOFF_LO : Mips::fixup_Mips_GPOFF_LO; else - FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16 - : Mips::fixup_Mips_LO16; + FixupKind = (isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16 + : (isNanoMips(STI) ? Mips::fixup_NANOMIPS_LO12 + : Mips::fixup_Mips_LO16)); break; case MipsMCExpr::MEK_HIGHEST: FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_HIGHEST @@ -687,18 +823,25 @@ getExprOpValue(const MCExpr *Expr, SmallVectorImpl &Fixups, FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_HIGHER : Mips::fixup_Mips_HIGHER; break; + case MipsMCExpr::MEK_HI20: + FixupKind = Mips::fixup_NANOMIPS_HI20; + break; case MipsMCExpr::MEK_HI: // Check for %hi(%neg(%gp_rel(X))) if (MipsExpr->isGpOff()) FixupKind = isMicroMips(STI) ? 
Mips::fixup_MICROMIPS_GPOFF_HI : Mips::fixup_Mips_GPOFF_HI; else - FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_HI16 - : Mips::fixup_Mips_HI16; + FixupKind = (isMicroMips(STI) ? Mips::fixup_MICROMIPS_HI16 + : (isNanoMips(STI) ? Mips::fixup_NANOMIPS_HI20 + : Mips::fixup_Mips_HI16)); break; case MipsMCExpr::MEK_PCREL_HI16: FixupKind = Mips::fixup_MIPS_PCHI16; break; + case MipsMCExpr::MEK_PCREL_HI: + FixupKind = Mips::fixup_NANOMIPS_PCHI20; + break; case MipsMCExpr::MEK_PCREL_LO16: FixupKind = Mips::fixup_MIPS_PCLO16; break; @@ -722,9 +865,6 @@ getExprOpValue(const MCExpr *Expr, SmallVectorImpl &Fixups, FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_SUB : Mips::fixup_Mips_SUB; break; - case MipsMCExpr::MEK_PCREL_HI: - llvm_unreachable("nanoMIPS: NYI"); - break; } Fixups.push_back(MCFixup::create(0, MipsExpr, MCFixupKind(FixupKind))); return 0; @@ -830,6 +970,22 @@ getMemEncodingMMSPImm5Lsl2(const MCInst &MI, unsigned OpNo, return OffBits & 0x1F; } +unsigned MipsMCCodeEmitter:: +getMemEncodingNMRX(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + // Register is encoded in bits 9-5, offset is encoded in bits 4-0. + assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI) << 5; + unsigned Reg = getMachineOpValue(MI, MI.getOperand(OpNo+1), + Fixups, STI); + unsigned OffBits = Ctx.getRegisterInfo()->getEncodingValue(Reg); + + + return RegBits | OffBits; +} + unsigned MipsMCCodeEmitter:: getMemEncodingMMGPImm7Lsl2(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, @@ -935,6 +1091,109 @@ getMemEncodingMMImm4sp(const MCInst &MI, unsigned OpNo, return ((OffBits >> 2) & 0x0F); } +unsigned MipsMCCodeEmitter:: +getMemEncodingNMImm12(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + // Base register is encoded in bits 20-16, offset is encoded in bits 15-0. 
+ assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI) << 12; + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI); + + return (OffBits & 0xFFF) | RegBits; +} + + +unsigned MipsMCCodeEmitter:: +getMemEncodingNMImm9(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + // Base register is encoded in bits 13-9, offset is encoded in bits 8-0. + assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI) << 9; + int OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI); + + return (OffBits & 0x1FF) | RegBits; +} + +unsigned MipsMCCodeEmitter:: +getMemEncodingNMImm6S2(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + // Base register is encoded in bits 11-7, offset is encoded in bits 6-0. + assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI) << 6; + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI); + + return (OffBits & 0x3C) | RegBits; +} + +unsigned MipsMCCodeEmitter:: +getMemEncodingNMImm3S1(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + // Base register is encoded in bits 11-7, offset is encoded in bits 6-0. + assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI) << 3; + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI); + + return (OffBits & 0x6) | RegBits; +} + +unsigned MipsMCCodeEmitter:: +getMemEncodingNMImm2(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + // Base register is encoded in bits 11-7, offset is encoded in bits 6-0. 
+ assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI) << 2; + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI); + + return (OffBits & 0x3) | RegBits; +} + +unsigned MipsMCCodeEmitter:: +getMemEncodingNMImm4S2(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + // Base register is encoded in bits 8-4, offset is encoded in bits 3-0. + assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI) << 4; + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI); + + return (OffBits & 0xc) | RegBits; +} + +unsigned MipsMCCodeEmitter:: +getMemEncodingNMGP(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + unsigned RegBits=getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI); + assert(MI.getOperand(OpNo).isReg() && RegBits == 28); + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), + Fixups, STI); + return OffBits; +} + +unsigned MipsMCCodeEmitter:: +getMemEncodingNMSP(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + unsigned RegBits=getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, + STI); + assert(MI.getOperand(OpNo).isReg() && RegBits == 29); + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), + Fixups, STI); + return OffBits; +} + // FIXME: should be called getMSBEncoding // unsigned @@ -1041,6 +1300,22 @@ MipsMCCodeEmitter::getUImm4AndValue(const MCInst &MI, unsigned OpNo, llvm_unreachable("Unexpected value"); } +unsigned +MipsMCCodeEmitter::getUImm4MaskEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + assert(MI.getOperand(OpNo).isImm()); + const MCOperand &MO = MI.getOperand(OpNo); + unsigned Value = MO.getImm(); + switch (Value) { + case 0xff: return 12; + case 
0xffff: return 13; + default: return Value; + } + llvm_unreachable("Unexpected value"); +} + + unsigned MipsMCCodeEmitter::getRegisterListOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, @@ -1137,4 +1412,111 @@ MipsMCCodeEmitter::getSimm23Lsl2Encoding(const MCInst &MI, unsigned OpNo, return Res >> 2; } +unsigned +MipsMCCodeEmitter::getGPRNM4x4ZeroReg(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + MCOperand Op = MI.getOperand(OpNo); + assert(Op.isReg() && "Operand of movep is not a register!"); + unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Op.getReg()); + switch (Op.getReg()) { + default: + return RegNo; + case Mips::ZERO_NM: return 11; + case Mips::A4_NM: + case Mips::A5_NM: + case Mips::A6_NM: + return RegNo - 8; + } +} + +unsigned +MipsMCCodeEmitter::getGPRNM4x4Reg(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + MCOperand Op = MI.getOperand(OpNo); + assert(Op.isReg() && "Operand of movep is not a register!"); + unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Op.getReg()); + switch (Op.getReg()) { + default: + return RegNo; + case Mips::A4_NM: + case Mips::A5_NM: + case Mips::A6_NM: + case Mips::A7_NM: + return RegNo - 8; + } +} + +unsigned +MipsMCCodeEmitter::getUImm3ShiftEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + assert(MI.getOperand(OpNo).isImm()); + const MCOperand &MO = MI.getOperand(OpNo); + unsigned Value = MO.getImm(); + if (Value == 8) + return 0; + else + return Value; + llvm_unreachable("Unexpected value"); +} + +unsigned +MipsMCCodeEmitter::getNMRegListEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + unsigned res = 0; + unsigned gp = 0; + if (!MI.getOperand(OpNo).isImm() || MI.getOperand(OpNo).getImm() != 0) + for (unsigned I = OpNo; I < MI.getNumOperands(); I++) { + unsigned Reg = 
MI.getOperand(I).getReg(); + unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Reg); + if (res == 0) + res |= (RegNo << 4); + if (RegNo == 28) + gp = 1; + res++; + } + return (res << 1 | gp); +} + +unsigned +MipsMCCodeEmitter::getNMRegList16Encoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + unsigned res = 0; + if (OpNo < MI.getNumOperands() && + (!MI.getOperand(OpNo).isImm() || MI.getOperand(OpNo).getImm() != 0)) { + res = 0x10; + for (unsigned I = OpNo; I < MI.getNumOperands(); I++) { + unsigned Reg = MI.getOperand(I).getReg(); + unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Reg); + if (RegNo == 30) + res &= 0xf; + res++; + } + } + return res; +} + +unsigned +MipsMCCodeEmitter::getNegImm12Encoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + int res = MI.getOperand(OpNo).getImm(); + return (unsigned) -res; +} + +unsigned +MipsMCCodeEmitter::getSImm32Encoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + assert(MO.isImm() && "getSImm32Encoding expects only an immediate"); + int Res = static_cast(MO.getImm()); + return Res; +} + + #include "MipsGenMCCodeEmitter.inc" diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h index d10e8194f2e5e..cea7b49ebb953 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h @@ -72,6 +72,13 @@ class MipsMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + // getBranchJumpOpValueNM - Return binary encoding of the microMIPS jump + // target operand. If the machine operand requires relocation, + // record the relocation and return zero. 
+ unsigned getJumpTargetOpValueNM(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + // getUImm5Lsl2Encoding - Return binary encoding of the microMIPS jump // target operand. unsigned getUImm5Lsl2Encoding(const MCInst &MI, unsigned OpNo, @@ -86,6 +93,10 @@ class MipsMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + unsigned getSImm20Lsl12Encoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + // getSImm9AddiuspValue - Return binary encoding of the microMIPS addiusp // instruction immediate operand. unsigned getSImm9AddiuspValue(const MCInst &MI, unsigned OpNo, @@ -224,6 +235,39 @@ class MipsMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + template + unsigned getBranchTargetOpValueNM(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getMemEncodingNMImm9(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMImm12(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMImm6S2(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMImm3S1(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMImm2(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMImm4S2(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMGP(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMSP(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const 
MCSubtargetInfo &STI) const; + unsigned getMemEncodingNMRX(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + /// Subtract Offset then encode as a N-bit unsigned integer. template unsigned getUImmWithOffsetEncoding(const MCInst &MI, unsigned OpNo, @@ -245,6 +289,10 @@ class MipsMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + unsigned getUImm4MaskEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getMovePRegPairOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; @@ -256,6 +304,18 @@ class MipsMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + unsigned getSymPCRel(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getSymGPRel(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getSymAbs(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getExprOpValue(const MCExpr *Expr, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; @@ -267,6 +327,32 @@ class MipsMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + unsigned getGPRNM4x4ZeroReg(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getGPRNM4x4Reg(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getUImm3ShiftEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getNMRegListEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + unsigned getNMRegList16Encoding(const MCInst &MI, unsigned OpNo, + 
SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getNegImm12Encoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + unsigned getSImm32Encoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + private: void LowerCompactBranch(MCInst& Inst) const; }; diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp index c9560968edee8..ded4235df580e 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp @@ -87,6 +87,7 @@ void MipsMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { OS << "%gp_rel"; break; case MEK_HI: + case MEK_HI20: OS << "%hi"; break; case MEK_HIGHER: @@ -96,6 +97,7 @@ void MipsMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { OS << "%highest"; break; case MEK_LO: + case MEK_LO12: OS << "%lo"; break; case MEK_NEG: @@ -161,8 +163,6 @@ MipsMCExpr::evaluateAsRelocatableImpl(MCValue &Res, if (Res.isAbsolute() && Fixup == nullptr) { int64_t AbsVal = Res.getConstant(); switch (Kind) { - case MEK_PCREL_HI: - llvm_unreachable("nanoMIPS: NYI"); case MEK_None: case MEK_Special: llvm_unreachable("MEK_None and MEK_Special are invalid"); @@ -187,6 +187,7 @@ MipsMCExpr::evaluateAsRelocatableImpl(MCValue &Res, case MEK_TLSLDM: case MEK_TPREL_HI: case MEK_TPREL_LO: + case MEK_PCREL_HI: return false; case MEK_LO: case MEK_CALL_LO16: @@ -205,6 +206,12 @@ MipsMCExpr::evaluateAsRelocatableImpl(MCValue &Res, case MEK_NEG: AbsVal = -AbsVal; break; + case MEK_HI20: + AbsVal = SignExtend64(AbsVal >> 12, 20); + break; + case MEK_LO12: + AbsVal = AbsVal & 0xfff; + break; } Res = MCValue::get(AbsVal); return true; @@ -254,9 +261,6 @@ static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) { void MipsMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { switch (getKind()) { - case 
MEK_PCREL_HI: - llvm_unreachable("nanoMIPS: NYI"); - break; case MEK_None: case MEK_Special: llvm_unreachable("MEK_None and MEK_Special are invalid"); @@ -278,6 +282,7 @@ void MipsMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { case MEK_NEG: case MEK_PCREL_HI16: case MEK_PCREL_LO16: + case MEK_PCREL_HI: // If we do have nested target-specific expressions, they will be in // a consecutive chain. if (const MipsMCExpr *E = dyn_cast(getSubExpr())) diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h index 60cda32558134..12fe984971914 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h @@ -46,6 +46,8 @@ class MipsMCExpr : public MCTargetExpr { MEK_TPREL_LO, MEK_Special, MEK_PCREL_HI, + MEK_HI20, + MEK_LO12, }; private: diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp index 97fedee15f0b1..e90998a40cd5d 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp @@ -144,7 +144,20 @@ class MipsMCInstrAnalysis : public MCInstrAnalysis { unsigned NumOps = Inst.getNumOperands(); if (NumOps == 0) return false; - switch (Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType) { + + // FIXME: Can't figure out why OperandType is UNKNOWN for + // NanoMips register lists so work-around it for now + unsigned Opcode = Inst.getOpcode(); + switch (Opcode) { + case Mips::SAVE_NM: + case Mips::RESTORE_NM: + case Mips::RESTOREJRC_NM: + case Mips::SAVE16_NM: + case Mips::RESTOREJRC16_NM: + return false; + } + + switch (Info->get(Opcode).OpInfo[NumOps - 1].OperandType) { case MCOI::OPERAND_UNKNOWN: case MCOI::OPERAND_IMMEDIATE: { // j, jal, jalx, jals @@ -210,6 +223,6 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeMipsTargetMC() { for (Target *T : {&getTheMipsTarget(), &getTheMips64Target()}) 
TargetRegistry::RegisterMCCodeEmitter(*T, createMipsMCCodeEmitterEB); - for (Target *T : {&getTheMipselTarget(), &getTheMips64elTarget()}) + for (Target *T : {&getTheMipselTarget(), &getTheMips64elTarget(), &getTheNanoMipsTarget()}) TargetRegistry::RegisterMCCodeEmitter(*T, createMipsMCCodeEmitterEL); } diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp index f8758f6376646..c523bc9e5d771 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp @@ -868,6 +868,12 @@ MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S, if (Features[Mips::FeatureNaN2008]) EFlags |= ELF::EF_MIPS_NAN2008; + if (STI.getTargetTriple().getArch() == Triple::ArchType::nanomips) { + Pic = false; + PCRel = false; + Pid = false; + } + MCA.setELFHeaderEFlags(EFlags); } @@ -930,6 +936,8 @@ void MipsTargetELFStreamer::finish() { EFlags |= ELF::EF_MIPS_ABI_O32; else if (getABI().IsN32()) EFlags |= ELF::EF_MIPS_ABI2; + else if (getABI().IsP32()) + EFlags |= ELF::E_NANOMIPS_ABI_P32; if (Features[Mips::FeatureGP64Bit]) { if (getABI().IsO32()) @@ -937,13 +945,25 @@ void MipsTargetELFStreamer::finish() { } else if (Features[Mips::FeatureMips64r2] || Features[Mips::FeatureMips64]) EFlags |= ELF::EF_MIPS_32BITMODE; - // -mplt is not implemented but we should act as if it was - // given. - if (!Features[Mips::FeatureNoABICalls]) - EFlags |= ELF::EF_MIPS_CPIC; + if (STI.getTargetTriple().getArch() != Triple::ArchType::nanomips) { + // -mplt is not implemented but we should act as if it was + // given. 
+ if (!Features[Mips::FeatureNoABICalls]) + EFlags |= ELF::EF_MIPS_CPIC; - if (Pic) - EFlags |= ELF::EF_MIPS_PIC | ELF::EF_MIPS_CPIC; + if (Pic) + EFlags |= ELF::EF_MIPS_PIC | ELF::EF_MIPS_CPIC; + } + else { + if (Pic) + EFlags |= ELF::EF_NANOMIPS_PIC; + if (Pid) + EFlags |= ELF::EF_NANOMIPS_PID; + if (PCRel) + EFlags |= ELF::EF_NANOMIPS_PCREL; + if (Features[Mips::FeatureRelax]) + EFlags |= ELF::EF_NANOMIPS_LINKRELAX; + } MCA.setELFHeaderEFlags(EFlags); @@ -1105,6 +1125,20 @@ void MipsTargetELFStreamer::emitDirectiveOptionPic2() { MCA.setELFHeaderEFlags(Flags); } +void MipsTargetELFStreamer::emitDirectiveLinkRelax() { + MCAssembler &MCA = getStreamer().getAssembler(); + unsigned Flags = MCA.getELFHeaderEFlags(); + Flags |= ELF::EF_NANOMIPS_LINKRELAX; + MCA.setELFHeaderEFlags(Flags); +} + +void MipsTargetELFStreamer::emitDirectiveModulePcRel() { + MCAssembler &MCA = getStreamer().getAssembler(); + unsigned Flags = MCA.getELFHeaderEFlags(); + Flags |= ELF::EF_NANOMIPS_PCREL; + MCA.setELFHeaderEFlags(Flags); +} + void MipsTargetELFStreamer::emitDirectiveInsn() { MipsTargetStreamer::emitDirectiveInsn(); MipsELFStreamer &MEF = static_cast(Streamer); diff --git a/llvm/lib/Target/Mips/MicroMipsInstrFormats.td b/llvm/lib/Target/Mips/MicroMipsInstrFormats.td index 101d080f95674..12a1b157a88bf 100644 --- a/llvm/lib/Target/Mips/MicroMipsInstrFormats.td +++ b/llvm/lib/Target/Mips/MicroMipsInstrFormats.td @@ -31,6 +31,7 @@ class MicroMipsInstBase pattern, let Pattern = pattern; let Itinerary = itin; + let AsmVariantName = "mips"; let EncodingPredicates = [InMicroMips]; Format Form = f; diff --git a/llvm/lib/Target/Mips/Mips.td b/llvm/lib/Target/Mips/Mips.td index 3cae4ea1974dd..2e8bf6262d9e7 100644 --- a/llvm/lib/Target/Mips/Mips.td +++ b/llvm/lib/Target/Mips/Mips.td @@ -148,6 +148,11 @@ def FeatureMips64r6 : SubtargetFeature<"mips64r6", "MipsArchVersion", "Mips64r6 ISA Support [experimental]", [FeatureMips32r6, FeatureMips64r5, FeatureNaN2008, FeatureAbs2008]>; + +def 
FeatureRelax : SubtargetFeature<"relax", "UseLinkerRelax", + "true", + "Enable linker relaxation">; + def FeatureNanoMips : SubtargetFeature<"nanomips", "MipsArchVersion", "NanoMips", "NanoMips ISA Support [experimental]">; @@ -274,18 +279,26 @@ def MipsAsmParser : AsmParser { def MipsAsmParserVariant : AsmParserVariant { int Variant = 0; - + string Name = "mips"; // Recognize hard coded registers. string RegisterPrefix = "$"; } +def NanoMipsAsmParserVariant : AsmParserVariant { + int Variant = 1; + string Name = "nanomips"; + string TokenizingCharacters = "*!"; + string RegisterPrefix = "$"; +} + def Mips : Target { let InstructionSet = MipsInstrInfo; let AssemblyParsers = [MipsAsmParser]; - let AssemblyParserVariants = [MipsAsmParserVariant]; + let AssemblyParserVariants = [MipsAsmParserVariant, NanoMipsAsmParserVariant]; let AllowRegisterRenaming = 1; } + //===----------------------------------------------------------------------===// // Pfm Counters //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Mips/Mips16InstrFormats.td b/llvm/lib/Target/Mips/Mips16InstrFormats.td index f4ac160c2ba57..bc5642433418f 100644 --- a/llvm/lib/Target/Mips/Mips16InstrFormats.td +++ b/llvm/lib/Target/Mips/Mips16InstrFormats.td @@ -45,6 +45,7 @@ class MipsInst16_Base pattern, let Pattern = pattern; let Itinerary = itin; + let AsmVariantName = "mips"; let Predicates = [InMips16Mode]; } diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp index 306a6fcc1a36c..1597efbca8802 100644 --- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp @@ -820,7 +820,7 @@ void MipsAsmPrinter::emitStartOfAsmFile(Module &M) { } if (IsNanoMips) - TS.emitDirectiveLinkRelax(); + TS.emitDirectiveLinkRelax(); if (!IsNanoMips) // NaN: At the moment we only support: diff --git a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp index 
233068f83be0b..95e71d4a32056 100644 --- a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -85,6 +85,16 @@ bool MipsDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base, return false; } +bool MipsDAGToDAGISel::selectAddrSym(SDValue Addr, SDValue &Base) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectAddrSymGPRel(SDValue Addr, SDValue &Base) const { + llvm_unreachable("Unimplemented function."); + return false; +} + bool MipsDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base, SDValue &Offset) const { llvm_unreachable("Unimplemented function."); @@ -171,6 +181,60 @@ bool MipsDAGToDAGISel::selectIntAddrUImm12(SDValue Addr, SDValue &Base, return false; } +bool MipsDAGToDAGISel::selectIntAddrUImm6s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectIntAddrUImm3s1(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectIntAddrUImm2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectIntAddrUImm4s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectIntAddrUImm19s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectIntAddrUImm18(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectIntAddrUImm17s1(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool 
MipsDAGToDAGISel::selectIntAddrUImm7s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectIntAddrUImm5s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + bool MipsDAGToDAGISel::selectIntAddrIndexed(SDValue Addr, SDValue &Base, SDValue &Offset) const { @@ -376,3 +440,20 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, } return true; } + +bool MipsDAGToDAGISel::selectOffsetGP18(SDValue Addr, SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectOffsetGP19s2(SDValue Addr, SDValue &Offset) const { + llvm_unreachable("Unimplemented function."); + return false; +} + +bool MipsDAGToDAGISel::selectOffsetGP(SDValue Addr, SDValue &Offset, + unsigned OffsetBits, + unsigned ShiftAmount) const { + llvm_unreachable("Unimplemented function."); + return false; +} diff --git a/llvm/lib/Target/Mips/MipsISelDAGToDAG.h b/llvm/lib/Target/Mips/MipsISelDAGToDAG.h index 16eea26f2feb3..c4b55f5d21497 100644 --- a/llvm/lib/Target/Mips/MipsISelDAGToDAG.h +++ b/llvm/lib/Target/Mips/MipsISelDAGToDAG.h @@ -62,6 +62,10 @@ class MipsDAGToDAGISel : public SelectionDAGISel { virtual bool selectAddrDefault(SDValue Addr, SDValue &Base, SDValue &Offset) const; + virtual bool selectAddrSym(SDValue Addr, SDValue &Base) const; + + virtual bool selectAddrSymGPRel(SDValue Addr, SDValue &Base) const; + /// Match integer address pattern. 
virtual bool selectIntAddr(SDValue Addr, SDValue &Base, SDValue &Offset) const; @@ -104,6 +108,33 @@ class MipsDAGToDAGISel : public SelectionDAGISel { virtual bool selectIntAddrUImm12(SDValue Addr, SDValue &Base, SDValue &Offset) const; + virtual bool selectIntAddrUImm6s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm3s1(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm2(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm4s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm19s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm18(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm17s1(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm7s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + + virtual bool selectIntAddrUImm5s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const; + virtual bool selectIntAddrIndexed(SDValue Addr, SDValue &Base, SDValue &Offset) const; virtual bool selectIntAddrIndexedLsl2(SDValue Addr, SDValue &Base, SDValue &Offset) const; @@ -160,6 +191,14 @@ class MipsDAGToDAGISel : public SelectionDAGISel { bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector &OutOps) override; + + // Select a GP-relative offset expressions + virtual bool selectOffsetGP(SDValue Addr, SDValue &Offset, + unsigned OffsetBits, + unsigned ShiftAmount) const; + virtual bool selectOffsetGP18(SDValue Addr, SDValue &Offset) const; + virtual bool selectOffsetGP19s2(SDValue Addr, SDValue &Offset) const; + }; } diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index c92e4577eed97..f566f714c6de1 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -4607,12 +4607,12 
@@ MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, if (Subtarget.inMips16Mode()) return std::make_pair(0U, &Mips::CPU16RegsRegClass); if (Subtarget.hasNanoMips()) - return std::make_pair(0U, &Mips::GPR32NMRegClass); + return std::make_pair(0U, &Mips::GPRNM32RegClass); return std::make_pair(0U, &Mips::GPR32RegClass); } if (VT == MVT::i64 && !Subtarget.isGP64bit()) return std::make_pair(0U, Subtarget.hasNanoMips() - ? &Mips::GPR32NMRegClass + ? &Mips::GPRNM32RegClass : &Mips::GPR32RegClass); if (VT == MVT::i64 && Subtarget.isGP64bit()) return std::make_pair(0U, &Mips::GPR64RegClass); diff --git a/llvm/lib/Target/Mips/MipsInstrFormats.td b/llvm/lib/Target/Mips/MipsInstrFormats.td index 10529c7d9e192..c841aa4d9b5fc 100644 --- a/llvm/lib/Target/Mips/MipsInstrFormats.td +++ b/llvm/lib/Target/Mips/MipsInstrFormats.td @@ -106,6 +106,7 @@ class MipsInst pattern, let TSFlags{5} = hasForbiddenSlot; let TSFlags{6} = hasFCCRegOperand; + let AsmVariantName = "mips"; let DecoderNamespace = "Mips"; field bits<32> SoftFail = 0; diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp index 3184bb86ae632..b5668449c753d 100644 --- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp +++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp @@ -1198,7 +1198,7 @@ void llvm::MipsInstrInfo::buildOutlinedFrame( Et = std::prev(MBB.end()); } - MachineInstr *SaveRAtoStack = BuildMI(MF, DebugLoc(), get(Mips::SAVE_NM)) + MachineInstr *SaveRAtoStack = BuildMI(MF, DebugLoc(), get(Mips::SAVE16_NM)) .addImm(16) .addReg(Mips::RA_NM); @@ -1293,11 +1293,11 @@ MachineBasicBlock::iterator MipsInstrInfo::insertOutlinedCall( assert(Reg != 0 && "No callee-saved register available?"); // save RA + restore RA from Reg (available register) - SaveRA = BuildMI(MF, DebugLoc(), get(Mips::MOVE_NM)) + SaveRA = BuildMI(MF, DebugLoc(), get(Mips::MOVE16_NM)) .addReg(Reg, RegState::Define) .addReg(Mips::RA_NM); - RestoreRA = BuildMI(MF, DebugLoc(), get(Mips::MOVE_NM)) + 
RestoreRA = BuildMI(MF, DebugLoc(), get(Mips::MOVE16_NM)) .addReg(Mips::RA_NM, RegState::Define) .addReg(Reg); @@ -1306,7 +1306,7 @@ MachineBasicBlock::iterator MipsInstrInfo::insertOutlinedCall( // Default case. Save and Restore from stack pointer : else { - SaveRA = BuildMI(MF, DebugLoc(), get(Mips::SAVE_NM)) + SaveRA = BuildMI(MF, DebugLoc(), get(Mips::SAVE16_NM)) .addImm(16) .addReg(Mips::RA_NM); @@ -1335,7 +1335,7 @@ MipsInstrInfo::findRegisterToSaveRA(const outliner::Candidate &C) const { const MipsRegisterInfo *MRI = static_cast( MF->getSubtarget().getRegisterInfo()); - for (unsigned Reg : Mips::GPR32NMRegClass) { + for (unsigned Reg : Mips::GPRNM32RegClass) { if (!MRI->isReservedReg(*MF, Reg) && Reg != Mips::RA_NM && C.LRU.available(Reg) && C.UsedInSequence.available(Reg) && diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.td b/llvm/lib/Target/Mips/MipsInstrInfo.td index 0cfddd41194a7..b8d574c716786 100644 --- a/llvm/lib/Target/Mips/MipsInstrInfo.td +++ b/llvm/lib/Target/Mips/MipsInstrInfo.td @@ -306,6 +306,10 @@ class ISA_MIPS1_NOT_32R6_64R6 { list InsnPredicates = [NotMips32r6, NotMips64r6]; list EncodingPredicates = [HasStdEnc]; } +class ISA_MIPS1_NOT_32R6_64R6_NMIPS { + list InsnPredicates = [NotMips32r6, NotMips64r6, NotNanoMips]; + list EncodingPredicates = [HasStdEnc]; +} class ISA_MIPS2 { list InsnPredicates = [HasMips2]; list EncodingPredicates = [HasStdEnc]; @@ -1112,6 +1116,16 @@ def MipsMemSimmPtrAsmOperand : AsmOperandClass { let DiagnosticType = "MemSImmPtr"; } +class MipsMemUimmAsmOperand : AsmOperandClass { + let Name = "MemOffsetUimm" # Width # "_" # Shift; + let SuperClasses = [MipsMemAsmOperand]; + let RenderMethod = "addMemOperands"; + let ParserMethod = "parseMemOperand"; + let PredicateMethod = "isMemWithUimmOffset<" # Width # ", " # Shift # ">"; + let DiagnosticType = !if(!eq(Shift, 0), "MemUImm" # Width, + "MemUImm" # Width # "Lsl" # Shift); +} + def MipsInvertedImmoperand : AsmOperandClass { let Name = "InvNum"; let RenderMethod = 
"addImmOperands"; @@ -3022,7 +3036,7 @@ def SDivIMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), def UDivMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rs, GPR32Opnd:$rt), "divu\t$rd, $rs, $rt">, - ISA_MIPS1_NOT_32R6_64R6; + ISA_MIPS1_NOT_32R6_64R6_NMIPS; def UDivIMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rs, simm32:$imm), "divu\t$rd, $rs, $imm">, diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp index f75fe3740c7e7..be34ffe91605b 100644 --- a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp @@ -53,7 +53,7 @@ MipsRegisterInfo::getPointerRegClass(const MachineFunction &MF, case MipsPtrClass::Default: return ABI.ArePtrs64bit() ? &Mips::GPR64RegClass - : ABI.IsP32() ? &Mips::GPR32NMRegClass : &Mips::GPR32RegClass; + : ABI.IsP32() ? &Mips::GPRNM32RegClass : &Mips::GPR32RegClass; case MipsPtrClass::GPR16MM: return &Mips::GPRMM16RegClass; case MipsPtrClass::StackPointer: @@ -73,7 +73,8 @@ MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, return 0; case Mips::GPR32RegClassID: case Mips::GPR64RegClassID: - case Mips::DSPRRegClassID: { + case Mips::DSPRRegClassID: + case Mips::GPRNM32RegClassID: { const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); return 28 - TFI->hasFP(MF); } @@ -169,7 +170,7 @@ getReservedRegs(const MachineFunction &MF) const { Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64 }; - static const MCPhysReg ReservedGPR32NM[] = { + static const MCPhysReg ReservedGPRNM32[] = { Mips::ZERO_NM, Mips::K0_NM, Mips::K1_NM, Mips::SP_NM, Mips::AT_NM }; @@ -189,8 +190,8 @@ getReservedRegs(const MachineFunction &MF) const { for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I) Reserved.set(ReservedGPR64[I]); - for (unsigned I = 0; I < array_lengthof(ReservedGPR32NM); ++I) - Reserved.set(ReservedGPR32NM[I]); + for (unsigned I = 0; I < array_lengthof(ReservedGPRNM32); ++I) + 
Reserved.set(ReservedGPRNM32[I]); // For mno-abicalls, GP is a program invariant! if (!Subtarget.isABICalls()) { @@ -267,6 +268,11 @@ MipsRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const { return true; } +bool MipsRegisterInfo::isNeededForReturn(MCRegister PhysReg, const MachineFunction &MF) const { + return PhysReg == Mips::RA || PhysReg == Mips::RA_64 || PhysReg == Mips::RA_NM; +} + + // FrameIndex represent objects inside a abstract stack. // We must replace FrameIndex with an stack/frame pointer // direct reference. diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.h b/llvm/lib/Target/Mips/MipsRegisterInfo.h index b2a7dbb361484..feeba17303869 100644 --- a/llvm/lib/Target/Mips/MipsRegisterInfo.h +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.h @@ -73,6 +73,8 @@ class MipsRegisterInfo : public MipsGenRegisterInfo { /// Return GPR register class. virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0; + virtual bool isNeededForReturn(MCRegister PhysReg, const MachineFunction &MF) const override; + private: virtual void eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, uint64_t StackSize, diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.td b/llvm/lib/Target/Mips/MipsRegisterInfo.td index 45329b2a52eb1..0735ab7aef77f 100644 --- a/llvm/lib/Target/Mips/MipsRegisterInfo.td +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.td @@ -340,12 +340,53 @@ class GPR32ClassNM regTypes> : let AltOrderSelect = [{ return 1; }]; } -def GPR32NM : GPR32ClassNM<[i32]>; +def GPRNM32 : GPR32ClassNM<[i32]>; // nanoMIPS "gpr3" register encoding type. -def GPR3 : +def GPRNM3 : RegisterClass<"Mips", [i32], 32, - (add A0_NM, A1_NM, A2_NM, A3_NM, S0_NM, S1_NM, S2_NM, S3_NM)>; + (add S0_NM, S1_NM, S2_NM, S3_NM, A0_NM, A1_NM, A2_NM, A3_NM)>; + +// nanoMIPS "gpr3" register encoding type. 
+def GPRNM3Z : + RegisterClass<"Mips", [i32], 32, + (add ZERO_NM, S1_NM, S2_NM, S3_NM, A0_NM, A1_NM, A2_NM, A3_NM)>; + +// nanoMIPS "gpr4x4" register encoding type. +def GPRNM4 : + RegisterClass<"Mips", [i32], 32, + (add A4_NM, A5_NM, A6_NM, A7_NM, A0_NM, A1_NM, A2_NM, A3_NM, + S0_NM, S1_NM, S2_NM, S3_NM, S4_NM, S5_NM, S6_NM, S7_NM)>; + +// nanoMIPS "gpr4x4.zero" register encoding type. +def GPRNM4Z : + RegisterClass<"Mips", [i32], 32, + (add A4_NM, A5_NM, A6_NM, ZERO_NM, A0_NM, A1_NM, A2_NM, A3_NM, + S0_NM, S1_NM, S2_NM, S3_NM, S4_NM, S5_NM, S6_NM, S7_NM)>; + +// nanoMIPS non-zero gpr register encoding type. +def GPRNM32NZ : + RegisterClass<"Mips", [i32], 32, + (add AT_NM, T4_NM, T5_NM, A0_NM, A1_NM, A2_NM, A3_NM, + A4_NM, A5_NM, A6_NM, A7_NM, T0_NM, T1_NM, T2_NM, T3_NM, + S0_NM, S1_NM, S2_NM, S3_NM, S4_NM, S5_NM, S6_NM, S7_NM, + T8_NM, T9_NM, K0_NM, K1_NM, GP_NM, SP_NM, FP_NM, RA_NM)>; + +def GPRNMSP : RegisterClass<"Mips", [i32], 32, (add SP_NM)>; + +def GPRNMGP : RegisterClass<"Mips", [i32], 32, (add GP_NM)>; + +// nanoMIPS "gpr2.reg1" & "gpr2.reg2" register encoding types. 
+def GPRNM2R1 : + RegisterClass<"Mips", [i32], 32, + (add A0_NM, A1_NM, A2_NM, A3_NM)>; +def GPRNM2R2 : + RegisterClass<"Mips", [i32], 32, + (add A1_NM, A2_NM, A3_NM, A4_NM)>; + +def GPRNM1R1 : + RegisterClass<"Mips", [i32], 32, + (add A0_NM, A1_NM)>; class GPR32Class regTypes> : RegisterClass<"Mips", regTypes, 32, (add @@ -593,16 +634,19 @@ def GPR32AsmOperand : MipsAsmRegOperand { let PredicateMethod = "isGPRAsmReg"; } -def GPR32NMAsmOperand : MipsAsmRegOperand { - let Name = "GPR32NMAsmReg"; - let PredicateMethod = "isGPRAsmReg"; -} - def GPRMM16AsmOperand : MipsAsmRegOperand { let Name = "GPRMM16AsmReg"; let PredicateMethod = "isMM16AsmReg"; } +class GPRNMAsmOperandClass Supers = []> : MipsAsmRegOperand { + let Name = "GPRNM" # Cname; + let PredicateMethod = "isGPRNMAsmReg"; + let RenderMethod = "addGPRNM32AsmRegOperands"; + let SuperClasses = Supers; +} + def GPRMM16AsmOperandZero : MipsAsmRegOperand { let Name = "GPRMM16AsmRegZero"; let PredicateMethod = "isMM16AsmRegZero"; @@ -696,18 +740,81 @@ def GPR32Opnd : RegisterOperand { let ParserMatchClass = GPR32AsmOperand; } -def GPR32NMOpnd : RegisterOperand { - let ParserMatchClass = GPR32AsmOperand; +def GPRNM48Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"48Reg5", "GPRNM32RegClassID", + []>; } -def GPR3Opnd : RegisterOperand { - let ParserMatchClass = GPR32AsmOperand; +def GPRNM32NZOpnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"32Reg5NZ", "GPRNM32NZRegClassID", + [GPRNM48Opnd.ParserMatchClass]>; +} + +def GPRNM32Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"32Reg5", "GPRNM32RegClassID", + [GPRNM32NZOpnd.ParserMatchClass]>; +} + +def GPRNM4Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"16Reg4x4", "GPRNM4RegClassID", + [GPRNM32Opnd.ParserMatchClass]>; + let EncoderMethod = "getGPRNM4x4Reg"; +} + +def GPRNM4ZOpnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"16Reg4x4Z", "GPRNM4ZRegClassID", + 
[GPRNM32Opnd.ParserMatchClass]>; + let EncoderMethod = "getGPRNM4x4ZeroReg"; +} + +def GPRNM16R5Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"16Reg5", "GPRNM32RegClassID", + [GPRNM4Opnd.ParserMatchClass]>; +} + +def GPRNM16R5NZOpnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"16Reg5NZ", "GPRNM32NZRegClassID", + [GPRNM4Opnd.ParserMatchClass]>; +} + +def GPRNM16R3Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"16Reg3", "GPRNM3RegClassID", + [GPRNM16R5Opnd.ParserMatchClass]>; +} + +def GPRNM16R3ZOpnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"16Reg3Z", "GPRNM3ZRegClassID", + [GPRNM16R5Opnd.ParserMatchClass]>; +} + +def GPRNMSPOpnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"RegSP", "GPRNMSPRegClassID", + [GPRNM16R3Opnd.ParserMatchClass]>; +} + +def GPRNMGPOpnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"RegGP", "GPRNMGPRegClassID", + [GPRNM16R3Opnd.ParserMatchClass]>; +} + +def GPRNM2R1Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"2Reg1", "GPRNM2R1RegClassID", + [GPRNM4Opnd.ParserMatchClass]>; +} + +def GPRNM2R2Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"2Reg2", "GPRNM2R2RegClassID", + [GPRNM4Opnd.ParserMatchClass]>; +} + +def GPRNM1R1Opnd : RegisterOperand { + let ParserMatchClass = GPRNMAsmOperandClass<"1Reg1", "GPRNM1R1RegClassID", + [GPRNM4Opnd.ParserMatchClass]>; } def GPRMM16Opnd : RegisterOperand { let ParserMatchClass = GPRMM16AsmOperand; } - def GPRMM16OpndZero : RegisterOperand { let ParserMatchClass = GPRMM16AsmOperandZero; } diff --git a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp index 56e27f7e71e94..369e94509a951 100644 --- a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp @@ -421,7 +421,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF, const 
TargetRegisterClass *RC = ABI.ArePtrs64bit() ? &Mips::GPR64RegClass - : ABI.IsP32() ? &Mips::GPR32NMRegClass : &Mips::GPR32RegClass; + : ABI.IsP32() ? &Mips::GPRNM32RegClass : &Mips::GPR32RegClass; // First, compute final stack size. uint64_t StackSize = MFI.getStackSize(); @@ -546,7 +546,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF, int64_t MaxAlign = -(int64_t)MFI.getMaxAlign().value(); if (ABI.IsP32()) - BuildMI(MBB, MBBI, dl, TII.get(Mips::Li_NM), VR).addImm(MaxAlign); + BuildMI(MBB, MBBI, dl, TII.get(Mips::LI48_NM), VR).addImm(MaxAlign); else BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR) .addReg(ZERO) @@ -724,7 +724,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF, const TargetRegisterClass *RC = ABI.ArePtrs64bit() ? &Mips::GPR64RegClass - : ABI.IsP32() ? &Mips::GPR32NMRegClass : &Mips::GPR32RegClass; + : ABI.IsP32() ? &Mips::GPRNM32RegClass : &Mips::GPR32RegClass; // Find first instruction that restores a callee-saved register. MachineBasicBlock::iterator I = MBBI; @@ -923,7 +923,7 @@ void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF, const TargetRegisterClass &RC = ABI.ArePtrs64bit() ? Mips::GPR64RegClass - : ABI.IsP32() ? Mips::GPR32NMRegClass : Mips::GPR32RegClass; + : ABI.IsP32() ? 
Mips::GPRNM32RegClass : Mips::GPR32RegClass; int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC), TRI->getSpillAlign(RC), false); RS->addScavengingFrameIndex(FI); diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index eac9bec5f5f3b..46957de0ed0e5 100644 --- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -366,6 +366,30 @@ bool MipsSEDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base, return true; } +bool MipsSEDAGToDAGISel::selectAddrSym(SDValue Addr, SDValue &Base) const { + SDValue Opnd0 = Addr; + if (isa(Opnd0) || isa(Opnd0) || + isa(Opnd0) || isa(Opnd0) || + isa(Opnd0) || + isa(Opnd0)) { + Base = Addr; + return true; + } + else + return false; +} + +bool MipsSEDAGToDAGISel::selectAddrSymGPRel(SDValue Addr, SDValue &Base) const { + SDValue Opnd0 = Addr; + if (isa(Opnd0) || + isa(Opnd0)) { + Base = Addr; + return true; + } + else + return false; +} + bool MipsSEDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base, SDValue &Offset) const { return selectAddrRegImm(Addr, Base, Offset) || @@ -544,6 +568,72 @@ bool MipsSEDAGToDAGISel::selectIntAddrUImm12(SDValue Addr, SDValue &Base, return selectAddrFrameIndexUOffset(Addr, Base, Offset, 12, 0); } + +bool MipsSEDAGToDAGISel::selectIntAddrUImm6s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + return selectAddrFrameIndexUOffset(Addr, Base, Offset, 4, 2); +} + +bool MipsSEDAGToDAGISel::selectIntAddrUImm3s1(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + return selectAddrFrameIndexUOffset(Addr, Base, Offset, 2, 1); +} + +bool MipsSEDAGToDAGISel::selectIntAddrUImm2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + return selectAddrFrameIndexUOffset(Addr, Base, Offset, 2); +} + +bool MipsSEDAGToDAGISel::selectIntAddrUImm4s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + return selectAddrFrameIndexUOffset(Addr, Base, Offset, 2, 2); +} + +bool 
MipsSEDAGToDAGISel::selectIntAddrUImm19s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + bool Retval = selectAddrFrameIndexUOffset(Addr, Base, Offset, 19, 2); + if (Base == CurDAG->getRegister(Mips::GP_NM, MVT::i32)) + return Retval; + else + return false; +} + +bool MipsSEDAGToDAGISel::selectIntAddrUImm17s1(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + bool Retval = selectAddrFrameIndexUOffset(Addr, Base, Offset, 17, 1); + if (Base == CurDAG->getRegister(Mips::GP_NM, MVT::i32)) + return Retval; + else + return false; +} + +bool MipsSEDAGToDAGISel::selectIntAddrUImm18(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + bool Retval = selectAddrFrameIndexUOffset(Addr, Base, Offset, 19); + if (Base == CurDAG->getRegister(Mips::GP_NM, MVT::i32)) + return Retval; + else + return false; +} + +bool MipsSEDAGToDAGISel::selectIntAddrUImm7s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + bool Retval = selectAddrFrameIndexUOffset(Addr, Base, Offset, 7, 2); + if (Base == CurDAG->getRegister(Mips::GP_NM, MVT::i32)) + return Retval; + else + return false; +} + +bool MipsSEDAGToDAGISel::selectIntAddrUImm5s2(SDValue Addr, SDValue &Base, + SDValue &Offset) const { + bool Retval = selectAddrFrameIndexUOffset(Addr, Base, Offset, 5, 2); + if (Base == CurDAG->getRegister(Mips::SP_NM, MVT::i32)) + return Retval; + else + return false; +} + // A load/store 'x' indexed (reg + reg) bool MipsSEDAGToDAGISel::selectIntAddrIndexed(SDValue Addr, SDValue &Base, SDValue &Offset) const { @@ -1519,6 +1609,33 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, return true; } +bool MipsSEDAGToDAGISel::selectOffsetGP( + SDValue Addr, SDValue &Offset, unsigned OffsetBits, + unsigned ShiftAmount = 0) const { + if (Addr.getOpcode() == MipsISD::GPRel || + isa(Addr) || + isa(Addr)) { + ConstantSDNode *CN = dyn_cast(Addr.getOperand(0)); + if (isUIntN(OffsetBits + ShiftAmount, CN->getZExtValue())) { + EVT ValTy = Addr.getValueType(); + Offset = + 
CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr), ValTy); + return true; + } + } + return false; +} + +bool MipsSEDAGToDAGISel::selectOffsetGP18(SDValue Addr, + SDValue &Offset) const { + return selectOffsetGP(Addr, Offset, 18); +} + +bool MipsSEDAGToDAGISel::selectOffsetGP19s2(SDValue Addr, + SDValue &Offset) const { + return selectOffsetGP(Addr, Offset, 19, 2); +} + FunctionPass *llvm::createMipsSEISelDag(MipsTargetMachine &TM, CodeGenOpt::Level OptLevel) { return new MipsSEDAGToDAGISel(TM, OptLevel); diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h index e0af0436002bc..1da7756549ef7 100644 --- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h +++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h @@ -53,6 +53,10 @@ class MipsSEDAGToDAGISel : public MipsDAGToDAGISel { bool selectAddrDefault(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + bool selectAddrSym(SDValue Addr, SDValue &Base) const override; + + bool selectAddrSymGPRel(SDValue Addr, SDValue &Base) const override; + bool selectIntAddr(SDValue Addr, SDValue &Base, SDValue &Offset) const override; @@ -108,6 +112,24 @@ class MipsSEDAGToDAGISel : public MipsDAGToDAGISel { bool selectIntAddrIndexedLsl2(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + bool selectIntAddrUImm6s2(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + + bool selectIntAddrUImm3s1(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + + bool selectIntAddrUImm2(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + + bool selectIntAddrUImm4s2(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + + bool selectIntAddrUImm19s2(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + + bool selectIntAddrUImm18(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + + bool selectIntAddrUImm17s1(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + + bool selectIntAddrUImm7s2(SDValue Addr, SDValue &Base, 
SDValue &Offset) const override; + + bool selectIntAddrUImm5s2(SDValue Addr, SDValue &Base, SDValue &Offset) const override; + /// Select constant vector splats. bool selectVSplat(SDNode *N, APInt &Imm, unsigned MinSizeInBits) const override; @@ -153,6 +175,15 @@ class MipsSEDAGToDAGISel : public MipsDAGToDAGISel { bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector &OutOps) override; + + // Select a GP-relative offset expressions + bool selectOffsetGP(SDValue Addr, SDValue &Offset, + unsigned OffsetBits, unsigned ShiftAmount) const; + + bool selectOffsetGP18(SDValue Addr, SDValue &Offset) const override; + + bool selectOffsetGP19s2(SDValue Addr, SDValue &Offset) const override; + }; FunctionPass *createMipsSEISelDag(MipsTargetMachine &TM, diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp index d409046446775..ecd44487bee5a 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -69,7 +69,7 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM, // Set up the register classes if (STI.isABI_P32()) - addRegisterClass(MVT::i32, &Mips::GPR32NMRegClass); + addRegisterClass(MVT::i32, &Mips::GPRNM32RegClass); else addRegisterClass(MVT::i32, &Mips::GPR32RegClass); diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp index 0c2da0fab787c..1961837dbd610 100644 --- a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -170,9 +170,9 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, if (Mips::MSA128BRegClass.contains(SrcReg)) Opc = Mips::MOVE_V; } - else if (Mips::GPR32NMRegClass.contains(SrcReg)) { - if (Mips::GPR32NMRegClass.contains(DestReg)) - Opc = Mips::MOVE_NM; + else if (Mips::GPRNM32RegClass.contains(SrcReg)) { + if (Mips::GPRNM32NZRegClass.contains(DestReg)) + Opc = Mips::MOVE16_NM; } assert(Opc && "Cannot copy 
registers"); @@ -296,7 +296,7 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Opc = Mips::SD; else if (Mips::DSPRRegClass.hasSubClassEq(RC)) Opc = Mips::SWDSP; - else if (Mips::GPR32NMRegClass.hasSubClassEq(RC)) + else if (Mips::GPRNM32RegClass.hasSubClassEq(RC)) Opc = Mips::SW_NM; // Hi, Lo are normally caller save but they are callee save @@ -376,7 +376,7 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Opc = Mips::LD; else if (Mips::DSPRRegClass.hasSubClassEq(RC)) Opc = Mips::LWDSP; - else if (Mips::GPR32NMRegClass.hasSubClassEq(RC)) + else if (Mips::GPRNM32RegClass.hasSubClassEq(RC)) Opc = Mips::LW_NM; assert(Opc && "Register class not handled!"); @@ -625,6 +625,7 @@ void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock::iterator I) const { MipsABIInfo ABI = Subtarget.getABI(); DebugLoc DL; + if (I != MBB.end()) DL = I->getDebugLoc(); unsigned ADDiu = ABI.GetPtrAddiuOp(); if (Amount == 0) @@ -656,9 +657,9 @@ unsigned MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB, if (Subtarget.hasNanoMips()) { assert(Imm == (int32_t)Imm); MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); - const TargetRegisterClass *RC = &Mips::GPR32NMRegClass; + const TargetRegisterClass *RC = &Mips::GPRNM32RegClass; Register Reg = RegInfo.createVirtualRegister(RC); - BuildMI(MBB, II, DL, get(Mips::Li_NM), Reg).addImm((int32_t)Imm); + BuildMI(MBB, II, DL, get(Mips::LI48_NM), Reg).addImm((int32_t)Imm); return Reg; } diff --git a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp index b8ea753c3ed8e..331b6308797b4 100644 --- a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -115,8 +115,8 @@ static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode, case Mips::SWs9_NM: case Mips::SHs9_NM: case Mips::SBs9_NM: - case Mips::UALW_NM: - case Mips::UASW_NM: + case Mips::UALWM_NM: + case 
Mips::UASWM_NM: case Mips::UALH_NM: case Mips::UASH_NM: return 9; @@ -226,12 +226,12 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, LLVM_DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n"); - if (MI.getOpcode() == Mips::LEA_ADDiu_NM && Offset == 0) { + if (MI.getOpcode() == Mips::LEA_ADDIU_NM && Offset == 0) { auto &MBB = *MI.getParent(); const MipsSEInstrInfo &TII = *static_cast( MBB.getParent()->getSubtarget().getInstrInfo()); DebugLoc DL = MI.getDebugLoc(); - BuildMI(MBB, II, DL, TII.get(Mips::MOVE_NM), MI.getOperand(0).getReg()) + BuildMI(MBB, II, DL, TII.get(Mips::MOVE16_NM), MI.getOperand(0).getReg()) .addReg(FrameReg); MI.eraseFromParent(); return; @@ -255,7 +255,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, const TargetRegisterClass *PtrRC = ABI.ArePtrs64bit() ? &Mips::GPR64RegClass - : ABI.IsP32() ? &Mips::GPR32NMRegClass : &Mips::GPR32RegClass; + : ABI.IsP32() ? &Mips::GPRNM32RegClass : &Mips::GPR32RegClass; MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); Register Reg = RegInfo.createVirtualRegister(PtrRC); const MipsSEInstrInfo &TII = diff --git a/llvm/lib/Target/Mips/MipsScheduleGeneric.td b/llvm/lib/Target/Mips/MipsScheduleGeneric.td index 784f0202ba125..bb1c212e128e1 100644 --- a/llvm/lib/Target/Mips/MipsScheduleGeneric.td +++ b/llvm/lib/Target/Mips/MipsScheduleGeneric.td @@ -111,23 +111,28 @@ def : InstRW<[GenericWriteALU], (instrs ADDIUPC_MMR6, ADDIU_MMR6, ADDU16_MMR6, SELEQZ_MMR6, SELNEZ_MMR6, SLL16_MMR6, SLL_MMR6, SRL16_MMR6, SSNOP_MMR6, SUBU16_MMR6, SUBU_MMR6, SUB_MMR6, WSBH_MMR6, XOR16_MMR6, - XORI_MMR6, XOR_MMR6)>; + XORI_MMR6, XOR_MMR6)>; // nanoMIPS // ============= -def : InstRW<[GenericWriteALU], (instrs ADD_NM, ADDu_NM, ADDiu_NM, ADDIU48_NM, - ALUIPC_NM, AND_NM, AND16_NM, ANDI_NM, +def : InstRW<[GenericWriteALU], (instrs ADD_NM, ADDu16_NM, ADDu4x4_NM, ADDu_NM, + ADDIU_NM, ADDIUR2_NM, ADDIURS5_NM, ADDIU48_NM, + ADDIUGPB_NM, ADDIUGPW_NM, ADDIUR1SP_NM, + 
ADDIUPC48_NM, ADDIUNEG_NM, ADDIUGP48_NM, + ALUIPC_NM, AND_NM, AND16_NM, ANDI16_NM, ANDI_NM, BITREVW_NM, BYTEREVW_NM, CLO_NM, CLZ_NM, - DIV_NM, DIVU_NM, EXT_NM, INS_NM, LA_NM, Li_NM, - LAGPB_NM, - LSA_NM, MOVEP_NM, MOVE_NM, MOD_NM, MODU_NM, - MUH_NM, MUHU_NM, MUL_NM, MULU_NM, NOT_NM, - NOR_NM, OR_NM, OR16_NM, ORI_NM, ROTRV_NM, - ROTR_NM, SEB_NM, SEH_NM, SEQI_NM, SLL_NM, + DIV_NM, DIVU_NM, EXT_NM, EXTW_NM, INS_NM, + LI48_NM, LUI_NM, LI16_NM, + LSA_NM, MOVEP_NM, MOVE16_NM, MOD_NM, MODU_NM, + MOVEPREV_NM, + MUH_NM, MUHU_NM, MUL_NM, MULU_NM, MUL4x4_NM, + NOT16_NM, NOR_NM, OR_NM, OR16_NM, ORI_NM, ROTRV_NM, + ROTR_NM, ROTX_NM, SEB_NM, SEH_NM, SEQI_NM, SLL_NM, SLLV_NM, SLT_NM, SLTI_NM, SLTIU_NM, SLTU_NM, - SRAV_NM, SRA_NM, SRLV_NM, SRL_NM, SUB_NM, - SUBu_NM, XOR_NM, XOR16_NM, XORI_NM)>; + SOV_NM, SRAV_NM, SRA_NM, SRLV_NM, SRL_NM, SUB_NM, + SUBu_NM, SUBu16_NM, XOR_NM, XOR16_NM, XORI_NM, + SLL16_NM, SRL16_NM)>; // MIPS64 // ====== @@ -288,7 +293,7 @@ def : InstRW<[GenericWriteDIVU], (instrs DDIVU, DMODU)>; // ======== def : InstRW<[GenericWriteALU], (instrs MOVN_NM, MOVZ_NM)>; -def : InstRW<[GenericWriteMove], (instrs RDHWR_NM)>; +def : InstRW<[GenericWriteMove], (instrs RDHWR_NM, WRPGPR_NM, RDPGPR_NM)>; // CTISTD Pipeline // --------------- @@ -428,6 +433,10 @@ def : InstRW<[GenericWriteTrap], (instrs BREAK16_MMR6, BREAK_MMR6, SDBBP_MMR6, def : InstRW<[GenericWriteTrap], (instrs TEQ_NM, TNE_NM)>; +def : InstRW<[GenericWriteJump], (instrs ERET_NM, ERETNC_NM, DERET_NM, + SIGRIE_NM, BREAK_NM, SDBBP_NM, SYSCALL_NM, + BREAK16_NM, SDBBP16_NM, SYSCALL16_NM)>; + // MIPS64 // ====== @@ -458,14 +467,20 @@ def : InstRW<[GenericWriteJump], (instrs JR_HB64_R6, TAILCALL64R6REG, // nanoMIPS // ======== -def : InstRW<[GenericWriteJump], (instrs JRC_NM, BC_NM)>; +def : InstRW<[GenericWriteJump], (instrs JRC_NM, BC_NM, BEQC_NM, BGEC_NM, BGEUC_NM, + BNEC_NM, BLTC_NM, BLTUC_NM, BEQZC_NM, + BNEZC_NM, BEQIC_NM, BGEIC_NM, BLTIC_NM, + BNEIC_NM, BGEIUC_NM, BLTIUC_NM, BBNEZC_NM, + BBEQZC_NM, 
BC16_NM, BALC16_NM, + BRSC_NM, BEQC16_NM, BNEC16_NM)>; def : InstRW<[GenericWriteJump], (instrs PseudoReturnNM, PseudoIndirectBranchNM, TAILCALL_NM, TAILCALLREG_NM)>; def : InstRW<[GenericWriteJumpAndLink], (instrs BALC_NM, MOVEBALC_NM, JALRC_NM, - JALRCPseudo)>; + JALRCHB_NM, JALRC16_NM, + JALRCPseudo, BALRSC_NM)>; // COP0 Pipeline // ============= @@ -547,6 +562,11 @@ def : InstRW<[GenericWriteCOPOther], (instrs DMFC2, DMTC2)>; def : InstRW<[GenericWriteMove], (instrs CFC2_MM, CTC2_MM)>; +// nanoMIPS +// ============= + +def : InstRW<[GenericWriteCOP0], (instrs DI_NM, EI_NM)>; + // MIPS MT ASE - hasMT // ==================== @@ -709,15 +729,19 @@ def : InstRW<[GenericWriteLoad], (instrs LW_NM, LWGP_NM, LWs9_NM, LBU_NM, LBUs9_NM, LB_NM, LBs9_NM, LWX_NM, LWXS_NM, LHUX_NM, LHUXS_NM, LHX_NM, LHXS_NM, LBUX_NM, LBX_NM, - RESTOREJRC_NM, RESTORE_NM, SAVE_NM, - UALH_NM, UALW_NM, LWM_NM, UALWM_NM, - LWPC_NM)>; - -def: InstRW<[GenericWriteStore], (instrs SW_NM, SWs9_NM, - SH_NM, SHs9_NM, - SB_NM, SBs9_NM, + RESTOREJRC16_NM, RESTOREJRC_NM, + RESTORE_NM, SAVE_NM, SAVE16_NM, + UALH_NM, LWM_NM, UALWM_NM, UALW_NM, + LWPC_NM, LW16_NM, LH16_NM, LHU16_NM, + LB16_NM, LBU16_NM, LBGP_NM, LBUGP_NM, + LHGP_NM, LHUGP_NM, LW4x4_NM)>; + +def: InstRW<[GenericWriteStore], (instrs SW_NM, SWs9_NM, SW16_NM, SH16_NM, + SH_NM, SHs9_NM, SWGP_NM, SBGP_NM, SHGP_NM, + SB_NM, SBs9_NM, SB16_NM, LWGP16_NM, SWGP16_NM, + LWSP16_NM, SWSP16_NM, SW4x4_NM, SWX_NM, SWXS_NM, SHX_NM, SHXS_NM, SBX_NM, - UASH_NM, UASW_NM, SWM_NM, UASWM_NM, SWPC_NM)>; + UASH_NM, SWM_NM, UASWM_NM, UASW_NM, SWPC_NM)>; // microMIPS32r6 // ============= diff --git a/llvm/lib/Target/Mips/MipsSubtarget.cpp b/llvm/lib/Target/Mips/MipsSubtarget.cpp index 0717b36fe1e98..2b097a6ae68b8 100644 --- a/llvm/lib/Target/Mips/MipsSubtarget.cpp +++ b/llvm/lib/Target/Mips/MipsSubtarget.cpp @@ -156,16 +156,17 @@ MipsSubtarget::MipsSubtarget(const Triple &TT, StringRef CPU, StringRef FS, if (hasNanoMips()) NoABICalls = true; - - if (!hasNanoMips() && 
UnalignedLS) - errs() << "warning: '-mload-store-unaligned' is supported only for nanoMIPS" - << "\n"; + else { + if (UnalignedLS) + errs() << "warning: '-mload-store-unaligned' is supported only for nanoMIPS" + << "\n"; - if (NoABICalls && TM.isPositionIndependent() && !hasNanoMips()) - report_fatal_error("position-independent code requires '-mabicalls'"); + if (NoABICalls && TM.isPositionIndependent()) + report_fatal_error("position-independent code requires '-mabicalls'"); - if (isABI_N64() && !TM.isPositionIndependent() && !hasSym32()) - NoABICalls = true; + if (isABI_N64() && !TM.isPositionIndependent() && !hasSym32()) + NoABICalls = true; + } // Set UseSmallSection. UseSmallSection = GPOpt; diff --git a/llvm/lib/Target/Mips/MipsSubtarget.h b/llvm/lib/Target/Mips/MipsSubtarget.h index 7fcc7d5b46c72..ef6c02de2d118 100644 --- a/llvm/lib/Target/Mips/MipsSubtarget.h +++ b/llvm/lib/Target/Mips/MipsSubtarget.h @@ -202,6 +202,9 @@ class MipsSubtarget : public MipsGenSubtargetInfo { // Use unaliged loads and stores (nanoMIPS only). bool UseUnalignedLoadStore = false; + // Use linker relaxations + bool UseLinkerRelax = true; + /// The minimum alignment known to hold of the stack frame on /// entry to the function and which must be maintained by every function. 
Align stackAlignment; @@ -288,6 +291,7 @@ class MipsSubtarget : public MipsGenSubtargetInfo { bool isABICalls() const { return !NoABICalls; } bool isFPXX() const { return IsFPXX; } bool isFP64bit() const { return IsFP64bit; } + bool useLinkerRelax() const { return UseLinkerRelax; } bool useOddSPReg() const { return UseOddSPReg; } bool noOddSPReg() const { return !UseOddSPReg; } bool isNaN2008() const { return IsNaN2008bit; } diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.h b/llvm/lib/Target/Mips/MipsTargetMachine.h index 4881b96c5f2e1..34311d28bb91e 100644 --- a/llvm/lib/Target/Mips/MipsTargetMachine.h +++ b/llvm/lib/Target/Mips/MipsTargetMachine.h @@ -110,6 +110,10 @@ class NanoMipsTargetMachine : public MipsTargetMachine { Optional RM, Optional CM, CodeGenOpt::Level OL, bool JIT); + + bool useIPRA() const { + return true; + } }; } // end namespace llvm diff --git a/llvm/lib/Target/Mips/MipsTargetStreamer.h b/llvm/lib/Target/Mips/MipsTargetStreamer.h index b5556a699baec..ac11d50e67248 100644 --- a/llvm/lib/Target/Mips/MipsTargetStreamer.h +++ b/llvm/lib/Target/Mips/MipsTargetStreamer.h @@ -318,6 +318,8 @@ class MipsTargetELFStreamer : public MipsTargetStreamer { bool MicroMipsEnabled; const MCSubtargetInfo &STI; bool Pic; + bool PCRel; + bool Pid; public: bool isMicroMipsEnabled() const { return MicroMipsEnabled; } @@ -345,6 +347,8 @@ class MipsTargetELFStreamer : public MipsTargetStreamer { void emitDirectiveOptionPic0() override; void emitDirectiveOptionPic2() override; void emitDirectiveInsn() override; + void emitDirectiveLinkRelax() override; + void emitDirectiveModulePcRel() override; void emitFrame(unsigned StackReg, unsigned StackSize, unsigned ReturnReg) override; void emitMask(unsigned CPUBitmask, int CPUTopSavedRegOff) override; diff --git a/llvm/lib/Target/Mips/NanoMipsInstrFormats.td b/llvm/lib/Target/Mips/NanoMipsInstrFormats.td index 06a4f04f6a345..f5a2dfedbcb09 100644 --- a/llvm/lib/Target/Mips/NanoMipsInstrFormats.td +++ 
b/llvm/lib/Target/Mips/NanoMipsInstrFormats.td @@ -1,14 +1,14 @@ class InstNM pattern, - InstrItinClass itin = IIPseudo>: + InstrItinClass itin = NoItinerary>: Instruction, PredicateControl, ISA_NANOMIPS { let Namespace = "Mips"; let DecoderNamespace = "NanoMips"; let EncodingPredicates = [HasNanoMips]; string Arch = "nanomips"; - let OutOperandList = outs; let InOperandList = ins; + let AsmVariantName = "nanomips"; let AsmString = asmstr; let Pattern = pattern; @@ -21,21 +21,25 @@ class InstNM pattern, class InstSize16 { field bits<16> Inst; int Size = 2; + field bits<16> SoftFail = 0; } class InstSize32 { field bits<32> Inst; int Size = 4; + field bits<32> SoftFail = 0; } class InstSize48 { field bits<48> Inst; int Size = 6; + field bits<48> SoftFail = 0; } // nanoMIPS Pseudo Instructions Format -class PseudoInstNM pattern> : - InstNM { +class PseudoInstNM pattern, + InstrItinClass itin = IIPseudo> : + InstNM { let isCodeGenOnly = 1; let isPseudo = 1; } diff --git a/llvm/lib/Target/Mips/NanoMipsInstrInfo.td b/llvm/lib/Target/Mips/NanoMipsInstrInfo.td index eda4057fca1e9..791f1472fe38f 100644 --- a/llvm/lib/Target/Mips/NanoMipsInstrInfo.td +++ b/llvm/lib/Target/Mips/NanoMipsInstrInfo.td @@ -30,41 +30,444 @@ def imm32_NM : IntImmLeaf; def immFitsAddiu32 : IntImmLeaf= -4095) || (N > 4095 && N <= 65535); + return (N > 4095 && N <= 65535); +}]>; +def immFitsAddiuNeg : IntImmLeaf= -4095); }]>; def imm32ZExt12 : IntImmLeaf(Imm.getZExtValue()); }]>; def imm32ZExt12ANDI : IntImmLeaf(Imm.getZExtValue()) || Imm.getZExtValue() == 65535; }]>; def imm32ZExt5 : IntImmLeaf(Imm.getZExtValue()); }]>; +def imm32ZExt3 : IntImmLeaf 0 && + Imm.getZExtValue() <= 8;}]>; def imm32SExt12 : IntImmLeaf(Imm.getSExtValue()); }]>; +def imm32Neg12 : IntImmLeaf(Imm.getSExtValue()) && + Imm.getSExtValue() < 0 && + Imm.getSExtValue() >= -4095; }]>; // True if (N + 1) fits in 12-bit field. 
def immZExt12Plus1 : PatLeaf<(imm), [{ return isUInt<13>(N->getZExtValue()) && isUInt<12>(N->getZExtValue() + 1); }]>; def immZExt7Plus1 : PatLeaf<(imm), [{ - return isUInt<7>(N->getZExtValue()) && isUInt<12>(N->getZExtValue() + 1); + return isUInt<8>(N->getZExtValue()) && isUInt<7>(N->getZExtValue() + 1); +}]>; +// Immediate range for signed 20-bit upper part +def imm32SExt20s12 : IntImmLeaf(Imm.getSExtValue()) + && (Imm.getZExtValue() % 4096 == 0); }]>; + +// Immediate range for 16-bit load immediate +def imm7M1To126 : IntImmLeaf= -1 && Imm.getSExtValue() < 127); +}]>; + +// Immediate range for unsigned 18-bit +def imm32ZExt18 : IntImmLeaf(Imm.getZExtValue());}]>; + +// Immediate range for unsigned 18-bit +def imm32ZExt4 : IntImmLeaf(Imm.getZExtValue());}]>; + +// Immediate range for unsigned 21-bit word-aligned +def imm32ZExt21s2 : IntImmLeaf(Imm.getZExtValue()) && + (Imm.getZExtValue() % 4 == 0); }]>; + +// Immediate range for unsigned 21-bit word-aligned +def imm32ZExt8s2 : IntImmLeaf(Imm.getZExtValue()) && + (Imm.getZExtValue() % 4 == 0); }]>; + +// Immediate range for 16-bit AND immediate +def imm4Mask : IntImmLeaf= 0 && Imm.getZExtValue() < 11) || + (Imm.getZExtValue() == 0xff) || (Imm.getZExtValue() == 0xffff) || + (Imm.getZExtValue() == 0xe) || (Imm.getZExtValue() == 0xf)); }]>; -// Immediate range covering both signed 12-bit and unsigned 16-bit immediates. 
-def imm32SExt12OrZExt16 : IntImmLeaf(Imm.getSExtValue()) - || isUInt<16>(Imm.getZExtValue()); }]>; + +class ConstantSImmRangeAsmOperandClass Supers = []> + : AsmOperandClass { + let Name = "ConstantNegImmRange" # Bottom # "_" # Top; + let RenderMethod = "addSImmOperands<" # 12 # ">"; + let PredicateMethod = "isConstantNegImmRange<" # Bottom # ", " # Top # ">"; + let SuperClasses = Supers; + let DiagnosticType = "SImmRange" # Bottom # "_" # Top; +} + +class NegImmAsmOperandClass Supers = []> + : AsmOperandClass { + let Name = "NegImm" # Bits; + let RenderMethod = "addSImmOperands<" # 32 # ">"; + let PredicateMethod = "isNegImm<" # Bits # ">"; + let SuperClasses = Supers; +} + +class Imm7AsmOperandClass : AsmOperandClass { + let Name = "UImm7N1"; + let RenderMethod = "addSImmOperands<8>"; + let PredicateMethod = "isConstantSImmRange<-1,126>"; +} + +class UImm4AsmOperandClass : AsmOperandClass { + let Name = "UImm4Mask"; + let RenderMethod = "addUImmOperands<16>"; + let PredicateMethod = "isConstantUImmMask"; +} + +class UImm3ShiftAsmOperandClass : AsmOperandClass { + let Name = "UImm3Shift"; + let RenderMethod = "addUImmOperands<4>"; + let PredicateMethod = "isConstantUImm3Shift"; +} def UImm12AsmOperandClass : UImmAsmOperandClass<12, []>; def UImm5AsmOperandClass : UImmAsmOperandClass<5, [UImm12AsmOperandClass]>; -def UImm3Plus1AsmOperandClass : - ConstantUImmAsmOperandClass<3, [UImm5AsmOperandClass], 1>; +def Imm7AsmOperand : Imm7AsmOperandClass; +def UImm4AsmOperand : UImm4AsmOperandClass; +def UImm3ShiftAsmOperand : UImm3ShiftAsmOperandClass; + +def SImm12AsmOperandClass : SImmAsmOperandClass<12, []>; +def NImm12AsmOperandClass : NegImmAsmOperandClass<12, []>; +def NaImm12AsmOperandClass : ConstantSImmRangeAsmOperandClass<4095, 1, []>; + +class SImmScaledAsmOperandClass Supers = []> + : AsmOperandClass{ + let Name = "SImm" # Bits # "s" # Align; + let RenderMethod = "addSImmOperands<32>"; + let PredicateMethod = "isSImm<" # Bits # ">"; + let SuperClasses = 
Supers; + let DiagnosticType = "SImm" # Bits # "s" # Align; +} + +class SymAsmOperandClass + : AsmOperandClass{ + let Name = "Sym32" # Type ; + let DiagnosticType = "Sym32" # Type; + let PredicateMethod = "isSym32" # Type; +} + +def SymAsmOperandAbs : SymAsmOperandClass; +def SymAsmOperandPCRel : SymAsmOperandClass<"PCRel">; +def SymAsmOperandGPRel : SymAsmOperandClass<"GPRel">; + +def SImm32S12AsmOperandClass : SImmScaledAsmOperandClass<20, 12, []>; + +class NMMemUimmAsmOperand Supers = [MipsMemAsmOperand]> : AsmOperandClass { + let Name = "NMMemOffsetUimm" # Width # "_" # Shift; + let SuperClasses = Supers; + let RenderMethod = "addNMMemOperands"; + let ParserMethod = "parseMemOperand"; + let PredicateMethod = "isMemWithBaseUimmOffset<" # Width # ", " # Shift # ", " # RegClass # ">"; +} + +class NMMemSimmAsmOperand Supers = [MipsMemAsmOperand]> : AsmOperandClass { + let Name = "NMMemOffsetSimm" # Width # "_" # Shift; + let SuperClasses = Supers; + let RenderMethod = "addNMMemOperands"; + let ParserMethod = "parseMemOperand"; + let PredicateMethod = "isMemWithBaseSimmOffset<" # Width # ", " # Shift # ", " # RegClass # ">"; +} + +def NMJumpTargetAsmOperand : AsmOperandClass { + let Name = "JumpTargetNM"; + let ParserMethod = "parseJumpTarget"; + let PredicateMethod = "isImm"; + let RenderMethod = "addImmOperands"; +} + +class NMUImmAsmOperandClass Supers = [], + int Offset = 0> : + AsmOperandClass { + let Name = "NMUImm" # Bits # "_" # Offset; + let RenderMethod = "addConstantUImmOperands<" # Bits # "," # Offset # ">"; + let PredicateMethod = "isConstantUImm<" # Bits # "," # Offset # ">"; + let SuperClasses = Supers; +} + +class NMSImmAsmOperandClass Supers = [], + int Offset = 0> : + AsmOperandClass { + let Name = "NMSImm" # Bits # "_" # Offset; + let RenderMethod = "addSImmOperands<" # Bits # ">"; + let PredicateMethod = "isConstantSImm<" # Bits # "," # Offset # ">"; + let SuperClasses = Supers; +} + +class NMUImmScaledAsmOperandClass Supers = []> : + 
AsmOperandClass { + let Name = "NMUImm" # Bits # "_s" # Shift; + let RenderMethod = "addConstantUImmOperands<32>"; + let PredicateMethod = "isScaledUImm<" # Bits # "," # Shift # ">"; + let SuperClasses = Supers; +} def uimm12_nm : Operand { let PrintMethod = "printUImm<12>"; - let ParserMatchClass = !cast("UImm12AsmOperandClass"); + let ParserMatchClass = NMUImmAsmOperandClass<12>; +} + +def uimm12s3_nm : Operand { + let PrintMethod = "printUImm<12>"; + let ParserMatchClass = NMUImmScaledAsmOperandClass<9, 3, []>; +} +def uimm8s4_nm : Operand { + let PrintMethod = "printUImm<8>"; + let ParserMatchClass = NMUImmScaledAsmOperandClass<4, 4, [uimm12s3_nm.ParserMatchClass]>; } def uimm5_nm : Operand { let PrintMethod = "printUImm<5>"; - let ParserMatchClass = !cast("UImm5AsmOperandClass"); + let ParserMatchClass = NMUImmAsmOperandClass<5>; +} +def simm4_nm : Operand { + let ParserMatchClass = NMSImmAsmOperandClass<4>; +} +def uimm1_nm : Operand { + let PrintMethod = "printUImm<1>"; + let ParserMatchClass = NMUImmAsmOperandClass<1>; } def uimm3plus1_nm : Operand { - let PrintMethod = "printUImm<3, 1>"; - let ParserMatchClass = !cast("UImm3Plus1AsmOperandClass"); + let ParserMatchClass = NMUImmAsmOperandClass<3, [], 1>; +} +def nimm12_nm : Operand { + let ParserMatchClass = !cast("NImm12AsmOperandClass"); + let EncoderMethod = "getNegImm12Encoding"; + let DecoderMethod = "DecodeNegImm12"; +} +def uimm16_nm : Operand { + let PrintMethod = "printUImm<16>"; + let ParserMatchClass = !cast("UImm16AsmOperandClass"); +} +def uimm5s1_nm : Operand { + let PrintMethod = "printUImm<5>"; + let ParserMatchClass = NMUImmScaledAsmOperandClass<4, 1>; +} +def uimm5s2_nm : Operand { + let PrintMethod = "printUImm<5>"; + let ParserMatchClass = NMUImmScaledAsmOperandClass<3, 2, [uimm16_nm.ParserMatchClass]>; +} +def uimm21s2_nm : Operand { + let PrintMethod = "printUImm<21>"; + let ParserMatchClass = NMUImmScaledAsmOperandClass<19, 2, [uimm16_nm.ParserMatchClass]>; +} +def uimm8s2_nm : 
Operand { + let PrintMethod = "printUImm<8>"; + let ParserMatchClass = NMUImmScaledAsmOperandClass<6, 2, [uimm16_nm.ParserMatchClass]>; +} +def nimm7_nm : Operand { + let ParserMatchClass = !cast("Imm7AsmOperand"); + let DecoderMethod = "DecodeImmM1To126"; +} +def uimm4mask_nm : Operand { + let EncoderMethod = "getUImm4MaskEncoding"; + let PrintMethod = "printUImm<16>"; + let ParserMatchClass = !cast("UImm4AsmOperand"); + let DecoderMethod = "DecodeUImm4Mask"; +} +def uimm3shift_nm : Operand { + let EncoderMethod = "getUImm3ShiftEncoding"; + let PrintMethod = "printUImm<4>"; + let ParserMatchClass = !cast("UImm3ShiftAsmOperand"); + let DecoderMethod = "DecodeUImm3Shift"; +} + +def UImm19AsmOperand : NMUImmAsmOperandClass<19>; +def UImm18AsmOperand : NMUImmAsmOperandClass<18>; +def UImm3AsmOperand : NMUImmAsmOperandClass<3, [UImm19AsmOperand]>; +def UImm2AsmOperand : NMUImmAsmOperandClass<2, [UImm18AsmOperand]>; + +def uimm19_nm : Operand { + let ParserMatchClass = UImm19AsmOperand; +} + +def uimm18_nm : Operand { + let ParserMatchClass = UImm18AsmOperand; +} + +def uimm3_nm : Operand { + let ParserMatchClass = UImm3AsmOperand; +} + +def uimm2_nm : Operand { + let ParserMatchClass = UImm2AsmOperand; +} + +def MemNMGP19S2Parser : NMMemUimmAsmOperand<19,2, + "Mips::GPRNMGPRegClassID">; + +def mem_nm_gp19s2 : mem_generic { + let EncoderMethod = "getMemEncodingNMGP"; + let OperandType = "OPERAND_NM_GPREL21"; + let OperandNamespace = "NanoMips"; + let DecoderMethod = "DecodeMemNM<21,false,Mips::GPRNMGPRegClassID>"; + let ParserMatchClass = MemNMGP19S2Parser; +} + +def mem_nm_gp18 : mem_generic { + let EncoderMethod = "getMemEncodingNMGP"; + let DecoderMethod = "DecodeMemNM<18,false,Mips::GPRNMGPRegClassID>"; + let OperandType = "OPERAND_NM_GPREL18"; + let OperandNamespace = "NanoMips"; + let ParserMatchClass = NMMemUimmAsmOperand<18,0,"Mips::GPRNMGPRegClassID">; +} + +def mem_nm_gp17s1 : mem_generic { + let EncoderMethod = "getMemEncodingNMGP"; + let DecoderMethod = 
"DecodeMemNM<18,false,Mips::GPRNMGPRegClassID>"; + let OperandType = "OPERAND_NM_GPREL18"; + let OperandNamespace = "NanoMips"; + let ParserMatchClass = NMMemUimmAsmOperand<17,1,"Mips::GPRNMGPRegClassID">; +} + +def MemNMU12Parser : NMMemUimmAsmOperand<12,0, + "Mips::GPRNM32RegClassID",[MemNMGP19S2Parser]>; + +def mem_nmu12 : mem_generic { + let EncoderMethod = "getMemEncodingNMImm12"; + let DecoderMethod = "DecodeMemNM<12,false,Mips::GPRNM32RegClassID>"; + let ParserMatchClass = MemNMU12Parser; +} + +def MemNMS9Parser : NMMemSimmAsmOperand<9,0, + "Mips::GPRNM32RegClassID",[MemNMGP19S2Parser]>; + +def mem_nms9 : mem_generic { + let EncoderMethod = "getMemEncodingNMImm9"; + let DecoderMethod = "DecodeMemNM<9,true,Mips::GPRNM32RegClassID>"; + let ParserMatchClass = MemNMS9Parser; +} + +def MemNMGP7S2Parser : NMMemUimmAsmOperand<7,2,"Mips::GPRNMGPRegClassID",[MemNMS9Parser]>; + +def mem_nm_gp7s2 : mem_generic { + let EncoderMethod = "getMemEncodingNMGP"; + let DecoderMethod = "DecodeMemNM<9,false,Mips::GPRNMGPRegClassID>"; + let OperandType = "OPERAND_NM_GPREL9"; + let OperandNamespace = "NanoMips"; + let ParserMatchClass = MemNMGP7S2Parser; +} + +def MemNMSP5S2Parser : + NMMemUimmAsmOperand<5,2,"Mips::GPRNMSPRegClassID",[MemNMGP7S2Parser]>; + +def mem_nm_sp5s2 : mem_generic { + let EncoderMethod = "getMemEncodingNMSP"; + let DecoderMethod = "DecodeMemNM<7,false,Mips::GPRNMSPRegClassID>"; + let OperandType = "OPERAND_NM_SPREL7"; + let OperandNamespace = "NanoMips"; + let ParserMatchClass = MemNMSP5S2Parser; +} + +def MemNM6S2Parser : + NMMemUimmAsmOperand<4,2,"Mips::GPRNM3RegClassID",[MemNMSP5S2Parser]>; +def MemNM3S1Parser : + NMMemUimmAsmOperand<3,1,"Mips::GPRNM3RegClassID",[MemNMSP5S2Parser]>; +def MemNM2Parser : + NMMemUimmAsmOperand<2,0,"Mips::GPRNM3RegClassID",[MemNMSP5S2Parser]>; +def MemNM4S2Parser : + NMMemUimmAsmOperand<2,2,"Mips::GPRNM4RegClassID",[MemNMSP5S2Parser]>; + +def mem_nm6s2 : mem_generic { + let EncoderMethod = "getMemEncodingNMImm6S2"; + let 
DecoderMethod = "DecodeMemNM<6,false,Mips::GPRNM3RegClassID>"; + let ParserMatchClass = MemNM6S2Parser; +} + +def mem_nm3s1 : mem_generic { + let EncoderMethod = "getMemEncodingNMImm3S1"; + let DecoderMethod = "DecodeMemNM<3,false,Mips::GPRNM3RegClassID>"; + let ParserMatchClass = MemNM3S1Parser; +} + +def mem_nm2 : mem_generic { + let EncoderMethod = "getMemEncodingNMImm2"; + let DecoderMethod = "DecodeMemNM<2,false,Mips::GPRNM3RegClassID>"; + let ParserMatchClass = MemNM2Parser; +} + +def mem_nm4s2 : mem_generic { + let EncoderMethod = "getMemEncodingNMImm4S2"; + let ParserMatchClass = MemNM4S2Parser; + let DecoderMethod = "DecodeMemNM4x4"; +} + +def mem_nmpcrel : mem_generic { + let EncoderMethod = "getMemEncodingNMImm9"; + let ParserMatchClass = MipsMemSimmAsmOperand<9>; +} + +def NMMemRXAsmOperand : AsmOperandClass { + let Name = "NMMemRX"; + let SuperClasses = [MipsMemAsmOperand]; + let RenderMethod = "addNMMemOperands"; + let ParserMethod = "parseMemNMRX"; + let PredicateMethod = "isMemNMRX"; +} + +def mem_nmrx : mem_generic { + let EncoderMethod = "getMemEncodingNMRX"; + let DecoderMethod = "DecodeMemNMRX"; + let ParserMatchClass = NMMemRXAsmOperand; +} + +foreach I = {4, 7, 10, 11, 14, 25} in + def brtarget # I # _nm : Operand { + let EncoderMethod = "getBranchTargetOpValueNM<" # I # ">"; + let OperandType = "OPERAND_PCREL"; + let DecoderMethod = "DecodeBranchTargetNM<" # I # ">"; + let ParserMatchClass = NMJumpTargetAsmOperand; + } + +def brtarget4s1_nm : Operand { + let EncoderMethod = "getBranchTargetOpValueNM<4>"; + let OperandType = "OPERAND_PCREL"; + let DecoderMethod = "DecodeBranchTargetNM<4>"; + let ParserMatchClass = NMJumpTargetAsmOperand; +} + +foreach I = {10, 21, 25} in + def calltarget # I # _nm : Operand { + let EncoderMethod = "getBranchTargetOpValueNM<" # I # ">"; + let ParserMatchClass = NMJumpTargetAsmOperand; + } + +def jmptarget_nm : Operand { + let EncoderMethod = "getBranchTargetOpValueNM<25>"; + let ParserMatchClass = 
NMJumpTargetAsmOperand; +} + +def simm32s12_nm : Operand { +// let EncoderMethod = "getSImm20Lsl12Encoding"; + let PrintMethod = "printUImm<20>"; + let ParserMatchClass = SImmAsmOperandClass<20, []>; +} + +def sym32_pc_nm : Operand { + let EncoderMethod = "getSymPCRel"; + let ParserMatchClass = SymAsmOperandPCRel; +} + +def sym32_gp_nm : Operand { + let EncoderMethod = "getSymGPRel"; + let ParserMatchClass = SymAsmOperandGPRel; +} + +def sym32_abs_nm : Operand { + let EncoderMethod = "getSymAbs"; + let ParserMatchClass = SymAsmOperandAbs; +} + +def simm32_nm : Operand { + let ParserMatchClass = NMSImmAsmOperandClass<32>; + let DecoderMethod = "DecodeSimm32"; + let EncoderMethod = "getSImm32Encoding"; } def simm32power2 : IntImmLeaf; @@ -76,77 +479,519 @@ def addruimm12 : ComplexPattern; def addrindexed : ComplexPattern; def addrindexedlsl1 : ComplexPattern; def addrindexedlsl2 : ComplexPattern; +def addruimm6s2 : ComplexPattern; +def addruimm3s1 : ComplexPattern; +def addruimm2 : ComplexPattern; +def addruimm4s2 : ComplexPattern; +def addrgp19s2 : ComplexPattern; +def addrgp18 : ComplexPattern; +def addrgp17s1 : ComplexPattern; +def addrgp7s2 : ComplexPattern; +def addrsp5s2 : ComplexPattern; +def addrpcrel : ComplexPattern; +def addrgprel : ComplexPattern; +def offsetgp19s2 : ComplexPattern; +def offsetgp18 : ComplexPattern; def Log2XForm : SDNodeXFormgetTargetConstant(Log2_32(N->getZExtValue()), SDLoc(N), MVT::i32); }]>; +class RegConstraint { + string Constraints = C; +} +class NoEncode { + string DisableEncoding = E; +} + //===----------------------------------------------------------------------===// // // Instruction Descriptions // //===----------------------------------------------------------------------===// + +class _Pool_P32 psel> : InstSize32 { + let Inst{31-26} = {psel{4...2}, 0b0, psel{1...0}}; +} + +class _Pool_P_ADDIU psel> : _Pool_P32<0b00000> { + let Inst{25...21} = psel; +} + +class _Pool_P32A psel> : _Pool_P32<0b00100> { + let Inst{2...0} = 
psel; +} + +class _Pool_P_GP_W psel> : _Pool_P32<0b01000> { + let Inst{1...0} = psel; +} + +class _Pool_P_U12 psel> : _Pool_P32<0b10000> { + let Inst{15-12} = psel; +} + +class _Pool_P_LUI psel> : _Pool_P32<0b11100> { + let Inst{1} = psel; +} + +class _Pool_P48I psel> : InstSize48 { + let Inst{47...42} = 0b011000; + let Inst{36...32} = psel; +} + +class _Pool_P_GP_BH psel> : _Pool_P32<0b01001> { + let Inst{20...18} = psel; +} + +class _Pool_P_LS_U12 psel> : _Pool_P32<0b10001> { + let Inst{15...12} = psel; +} + +class _Pool_P_LS_S9 psel> : _Pool_P32<0b10101> { + let Inst{10...8} = psel; +} + +// TODO MOVE.BALC + +class _Pool_P_BAL psel> : _Pool_P32<0b00110> { + let Inst{25} = psel; +} + +class _Pool_P_J psel> : _Pool_P32<0b01010> { + let Inst{15...12} = psel; +} + +class _Pool_P_BR1 psel> : _Pool_P32<0b10010> { + let Inst{15...14} = psel; +} + +class _Pool_P_BR2 psel> : _Pool_P32<0b10110> { + let Inst{15...14} = psel; +} + +class _Pool_P_BR12 psel> : _Pool_P32<0b10010> { + let Inst{29} = psel{2}; + let Inst{15...14} = psel{1...0}; +} + +class _Pool_P_BRI psel> : _Pool_P32<0b11010> { + let Inst{20...18} = psel; +} + +class _Pool_P_BALRSC psel> : _Pool_P_J<0b1000> { + let Inst{25...21} = psel; +} + +class _Pool_P_SHIFT psel> : _Pool_P_U12<0b1100> { + let Inst{8...5} = psel; + bits<5> shift; + bits<5> rs; + bits<5> rt; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{4...0} = shift; +} + +class _Pool_P_SLL rt, bits<5> shift> : _Pool_P_SHIFT<0b0000> { + let Inst{25...21} = rt; + let Inst{4...0} = shift; +} + +class _Pool_P_ROTX psel> : _Pool_P_U12<0b1101> { + let Inst{11} = psel{1}; + let Inst{5} = psel{0}; +} + +class _Pool_P_EXT_INS psel> : _Pool_P_U12 { + let Inst{11} = 0; + let Inst{5} = 0; +} + +class _Pool_P_PREF_U12 psel> : _Pool_P_LS_U12<0b0011> { + let Inst{25...21} = psel; +} + +class _Pool_P_LS_S0 psel>: _Pool_P_LS_S9<0b000> { + let Inst{14...11} = psel; +} + +class _Pool_P_PREF_S9 psel>: _Pool_P_LS_S0<0b0011> { + let Inst{25...21} = psel; +} + 
+class _Pool_P_LS_S1 psel>: _Pool_P_LS_S9<0b001> { + let Inst{14...11} = psel; +} + +class _Pool_P_LL psel>: _Pool_P_LS_S1<0b1010> { + let Inst{1...0} = psel; +} + +class _Pool_P_SC psel>: _Pool_P_LS_S1<0b1011> { + let Inst{1...0} = psel; +} + +class _Pool_P_LS_E0 psel>: _Pool_P_LS_S9<0b010> { + let Inst{14...11} = psel; +} + +class _Pool_P_LLE psel>: _Pool_P_LS_E0<0b1010> { + let Inst{1...0} = psel; +} + +class _Pool_P_PREFE psel>: _Pool_P_LS_E0<0b0011> { + let Inst{25...21} = psel; +} + +class _Pool_P_SCE psel>: _Pool_P_LS_E0<0b1011> { + let Inst{1...0} = psel; +} + +class _Pool_P_LS_WM psel>: _Pool_P_LS_S9<0b100> { + let Inst{11} = psel; +} + +class _Pool_P_LS_UAWM psel>: _Pool_P_LS_S9<0b101> { + let Inst{11} = psel; +} + +class _Pool_P16 psel> : InstSize16 { + let Inst{15-10} = {psel{4...2}, 0b1, psel{1...0}}; +} + +class _Pool_P16_MV : _Pool_P16<0b00000>; + +class _Pool_P16_SR psel> : _Pool_P16<0b00011> { + let Inst{8} = psel; +} + +class _Pool_P16_SHIFT isel> : _Pool_P16<0b00100> { + let Inst{3} = isel; +} + +class _Pool_P16_4X4 psel> : _Pool_P16<0b00111> { + let Inst{8} = psel{1}; + let Inst{3} = psel{0}; +} + +class _Pool_P16C psel> : _Pool_P16<0b01000> { + let Inst{0} = psel; +} + +class _Pool_POOL16C_00 psel> : _Pool_P16<0b01000> { + let Inst{3...2} = psel; + let Inst{1...0} = 0b00; +} + +class _Pool_P16_LB psel> : _Pool_P16<0b01011> { + let Inst{3...2} = psel; +} + +class _Pool_P16_A1 psel> : _Pool_P16<0b01100> { + let Inst{6} = psel; +} + +class _Pool_P16_LH psel> : _Pool_P16<0b01111> { + let Inst{3} = psel{1}; + let Inst{0} = psel{0}; +} + +class _Pool_P16_A2 psel> : _Pool_P16<0b10000> { + let Inst{3} = psel; +} + +class _Pool_P16_ADDU psel> : _Pool_P16<0b10100> { + let Inst{0} = psel; +} + +class _Pool_P16_BR psel> : _Pool_P16<0b11010> { + let Inst{3...0} = psel; +} + +class _Pool_P16_RI psel> : _Pool_P16_MV { + let Inst{4...3} = psel; + let Inst{9...5} = 0b00000; +} + +class _Pool_P16_SYSCALL psel> : _Pool_P16_RI<0b01> { + let Inst{2} = psel; +} + 
+class _Pool_P_RI psel> : _Pool_P_ADDIU<0b00000> { + let Inst{20...19} = psel; +} + +class _Pool_POOL32A0 psel> : _Pool_P32A<0b000> { + let Inst{5} = psel; +} + +class _Pool_POOL32A0_0 psel> : _Pool_POOL32A0<0b0> { + let Inst{9...6} = psel{5...2}; + let Inst{4...3} = psel{1...0}; +} + +class _Pool_P_TRAP isel> : _Pool_POOL32A0_0<0b000000> +{ + let Inst{10} = isel; +} + +class _Pool_P_CMOVE isel> : _Pool_POOL32A0_0<0b100010> +{ + let Inst{10} = isel; +} + +class _Pool_POOL32A7 psel> : _Pool_P32A<0b111> { + let Inst{5...3} = psel; +} + +class _Pool_POOL32Axf_4 isel> : _Pool_POOL32A7<0b111> { + let Inst{15...9} = isel; + let Inst{8...6} = 0b100; +} + +class _Pool_POOL32Axf_5 psel> : _Pool_POOL32A7<0b111> { + let Inst{8...6} = 0b101; + let Inst{15...14} = psel; +} + +class _Pool_POOL32Axf_5_group1 isel> : _Pool_POOL32Axf_5<0b01> { + let Inst{13...9} = isel; +} + +class _Pool_POOL32Axf_5_group3 isel> : _Pool_POOL32Axf_5<0b11> { + let Inst{13...9} = isel; +} + +class _Pool_PP_LSX isel> : _Pool_POOL32A7<0b000> { + let Inst{6} = scaled; + let Inst{10...7} = isel; +} + +// 16-bit arithmetic and logical instructions with 4x4 register operands, P16_4X4 +class Arith4x4_Desc: + InstNM<(outs RO:$dst), (ins RO:$rt, RO:$rs), + !strconcat(opstr, "\t$dst, $rt, $rs"), + [(set RO:$dst, (OpNode RO:$rt, RO:$rs))]> { + Format Form = FrmR; +} + +class Arith4x4_Enc isel>: _Pool_P16_4X4 { + bits<5> dst; + bits<5> rs; + bits<5> rt; + + let Inst{9} = dst{4}; + let Inst{7...5} = dst{2...0}; + let Inst{4} = rs{4}; + let Inst{2...0} = rs{2...0}; +} + // 32-bit arithmetic and logical instructions with 3 register operands. 
-class ArithLoginR32: + +class ArithLogicR32_Desc: InstNM<(outs RO:$rd), (ins RO:$rs, RO:$rt), - !strconcat(opstr, "\t$rd, $rs, $rt"), - [(set RO:$rd, (OpNode RO:$rs, RO:$rt))]>, InstSize32; + !strconcat(opstr, "\t$rd, $rs, $rt"), + [(set RO:$rd, (OpNode RO:$rs, RO:$rt))]>; + +class ArithLogicR32_Enc psel>: _Pool_POOL32A0_0 { + bits<5> rd; + bits<5> rs; + bits<5> rt; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{15...11} = rd; +} -// 16-bit arithmetic and logical instructions with 2 register operands. -class ArithLogicR16: - InstNM<(outs RO:$rt), (ins RO:$rs), !strconcat(opstr, "\t$rt, $rs"), - [(set RO:$rt, (OpNode RO:$rs))]>, InstSize16; +// 16-bit arithmetic and logical instructions with 2 register operands, POOL16C_0 +class ArithLogicR16_Desc: + InstNM<(outs RO:$dst), (ins RO:$rs, RO:$rt), + !strconcat(opstr, "\t$dst, $rs, $rt"), + [(set RO:$dst, (OpNode RO:$rs, RO:$rt))]>; + +class ArithLogicR16U_Desc: + InstNM<(outs RO:$rt), (ins RO:$rs), + !strconcat(opstr, "\t$rt, $rs"), + [(set RO:$rt, (OpNode RO:$rs))]> { +} -// Arithmetic and logical instructions with 2 register operands and immediate. 
-class ArithLogicINM : - InstNM<(outs RO:$rt), (ins RO:$rs, Od:$imm), - !strconcat(opstr, "\t$rt, $rs, $imm"), - [(set RO:$rt, (OpNode RO:$rs, imm_type:$imm))]> { +class ArithLogicR16_Enc isel>: _Pool_POOL16C_00 { + bits<3> rs; + bits<3> rt; + let Inst{9...7} = rt; + let Inst{6...4} = rs; +} + +class CLZO_Enc isel>: _Pool_POOL32Axf_4 { + bits<5> rs; + bits<5> rt; + let Inst{25...21} = rt; + let Inst{20...16} = rs; +} + +class LogicU32_Enc isel>: _Pool_POOL32A0_0 { + bits<5> rs; + bits<5> rt; + let Inst{25...21} = rt; + let Inst{20...16} = rs; +} + +// 16-bit arithmetic instructions with 2 register operands, POOL16C_0 +class AddSubR16_Desc: + InstNM<(outs RO:$rd), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$rd, $rs, $rt"), + [(set RO:$rd, (OpNode RO:$rs, RO:$rt))]>; + +class AddSubR16_Enc isel>: _Pool_P16_ADDU { + bits<3> rd; + bits<3> rs; + bits<3> rt; + let Inst{15-10} = 0b101100; + let Inst{9...7} = rt; + let Inst{6...4} = rs; + let Inst{3...1} = rd; +} + +class ArithLogicINM2Reg : + InstNM<(outs DRO:$rt), (ins SRO:$rs, Od:$imm), + !strconcat(opstr, "\t$rt, $rs, $imm"), + [(set DRO:$rt, (OpNode SRO:$rs, imm_type:$imm))]> { let isReMaterializable = 1; } +class ArithLogicINM1Reg : + InstNM<(outs), (ins DRO:$rt, Od:$imm), + !strconcat(opstr, "\t$rt, $imm"), + [(set DRO:$rt, (OpNode DRO:$rt, imm_type:$imm))]> { + let isReMaterializable = 1; +} + + +// Arithmetic and logical instructions with 2 register operands and immediate. 
+class ArithLogicINM : + ArithLogicINM2Reg; + class ArithLogicI32 : - ArithLogicINM, InstSize32; + SDPatternOperator imm_type = null_frag, + SDPatternOperator OpNode = null_frag> : + ArithLogicINM; + +class AddImmediate : + ArithLogicINM2Reg; + +class LoadUpperI32 : + ArithLogicINM, _Pool_P32<0b00000> { + bits<5> rt; + bits<5> rs; + bits<16> imm; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{15...0} = imm; +} class DivMod : - ArithLoginR32 { + ArithLogicR32_Desc { // Need to insert TEQ, because DIV(U)/MOD(U) don't trap on division by zero. bit usesCustomInserter = 1; } -class ExtBaseNM : +class CondLogicI32_Desc : + ArithLogicINM; + +class CondLogicI32_Enc psel> : _Pool_P_U12 { + bits<5> rt; + bits<5> rs; + bits<12> imm; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{11...0} = imm; +} + +class ShiftI_Desc : + ArithLogicINM; + +class ShiftI32_Enc psel> : _Pool_P_SHIFT { + bits<5> rt; + bits<5> rs; + bits<5> imm; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{4...0} = imm; +} + +class ShiftI16_Enc isel> : _Pool_P16_SHIFT { + bits<3> rt; + bits<3> rs; + bits<3> imm; + let Inst{9...7} = rt; + let Inst{6...4} = rs; + let Inst{2...0} = imm; +} + +class ExtBaseNM : InstNM<(outs RO:$rt), (ins RO:$rs, uimm5:$pos, uimm5_plus1:$size), "ext\t$rt, $rs, $pos, $size", [(set RO:$rt, (MipsExt RO:$rs, immZExt5:$pos, immZExt5Plus1:$size))], - II_EXT>, InstSize32 { - let isCodeGenOnly = 1; -} + II_EXT>; -class InsBaseNM : +class InsBaseNM : InstNM<(outs RO:$rt), (ins RO:$rs, uimm5:$pos, uimm5_inssize_plus1:$size, RO:$src), "ins \t$rt, $rs, $pos, $size", [(set RO:$rt, (MipsIns RO:$rs, immZExt5:$pos, immZExt5Plus1:$size, RO:$src))], - II_INS>, InstSize32 { + II_INS>{ let Constraints = "$src = $rt"; - let isCodeGenOnly = 1; } -class Trap : - InstNM<(outs), (ins GPR32NMOpnd:$rs, GPR32NMOpnd:$rt, uimm5_nm:$imm), - !strconcat(opstr, "\t$rt, $rs, $imm"), []>, InstSize32 { +class ExtInsBase_Enc isel> : _Pool_P_EXT_INS { + bits<5> rt; + bits<5> 
rs; + bits<5> pos; + bits<5> size; + + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{4...0} = pos; + let Inst{10...6} = size; +} + +class Trap_Desc : + InstNM<(outs), (ins RO:$rs, RO:$rt, ImmOp:$imm), + !strconcat(opstr, "\t$rs, $rt, $imm"), []> { let hasSideEffects = 1; let isCTI = 1; } +class Trap_Enc isel> : _Pool_P_TRAP { + bits<5> rt; + bits<5> rs; + bits<5> imm; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{15...11} = imm; +} + // Instructions with 1 register (dest) and 1 immediate operand. class RegImmNM; -class LoadMemoryNM : + InstNM<(outs RO:$rt), (ins Od:$imm), !strconcat(opstr, "\t$rt, $imm"), + [(set RO:$rt, (OpNode imm_type:$imm))]>; + +class RegImm48_Enc isel> : _Pool_P48I { + bits<5> rt; + bits<32> imm; + let Inst{41...37} = rt; + let Inst{31...16} = imm{15...0}; + let Inst{15...0} = imm{31...16}; +} + +class LSMem_S9_Enc psel, bits<4> isel> : _Pool_P_LS_S9 +{ + bits<5> rt; + bits<14> addr; + bits<5> base = addr{13...9}; + bits<9> offset = addr{8...0}; + let Inst{25...21} = rt; + let Inst{20...16} = base; + let Inst{14...11} = isel; + let Inst{15} = offset{8}; + let Inst{7...0} = offset{7...0}; +} + +class LSMem_S9_Mult_Enc isel> : _Pool_P_LS_S9 +{ + bits<5> rt; + bits<14> addr; + bits<5> base = addr{13...9}; + bits<9> offset = addr{8...0}; + bits<3> count; + let Inst{25...21} = rt; + let Inst{20...16} = base; + let Inst{11} = isel{3}; + let Inst{15} = offset{8}; + let Inst{7...0} = offset{7...0}; + let Inst{14...12} = count; +} + +class LSMem_U12_Enc isel> : _Pool_P_LS_U12 +{ + bits<5> rt; + bits<17> addr; + bits<5> base = addr{16...12}; + bits<12> offset = addr{11...0}; + let Inst{25...21} = rt; + let Inst{20...16} = base; + let Inst{11...0} = offset; +} + +class LSMemX_Enc isel> : _Pool_PP_LSX +{ + bits<5> rd; + bits<10> addr; + let Inst{25...21} = addr{9...5}; + let Inst{20...16} = addr{4...0}; + let Inst{15...11} = rd; +} + +class LSMem16_Enc isel> : _Pool_P16 +{ + bits<3> rt; + bits<9> addr; + bits<3> base = 
addr{8...6}; + bits<4> offset = addr{5...2}; + let Inst{9...7} = rt; + let Inst{6...4} = base; + let Inst{3...0} = offset; +} + +class LSGPRegMem16_Enc isel> : _Pool_P16 +{ + bits<3> rt; + bits<9> addr; + bits<7> offset = addr{8...2}; + let Inst{9...7} = rt; + let Inst{6...0} = offset; +} + +class LSSPRegMem16_Enc isel> : _Pool_P16 +{ + bits<5> rt; + bits<7> addr; + bits<5> offset = addr{6...2}; + let Inst{9...5} = rt; + let Inst{4...0} = offset; +} + +class LHMem16_Enc isel> : _Pool_P16_LH +{ + bits<3> rt; + bits<6> addr; + bits<3> base = addr{5...3}; + bits<3> offset = addr{2...0}; + let Inst{9...7} = rt; + let Inst{6...4} = base; + let Inst{2...1} = offset{2...1}; +} + +class LBMem16_Enc isel> : _Pool_P16_LB +{ + bits<3> rt; + bits<5> addr; + bits<3> base = addr{4...2}; + bits<2> offset = addr{1...0}; + let Inst{9...7} = rt; + let Inst{6...4} = base; + let Inst{1...0} = offset{1...0}; +} + +class LSMemGP19S2_Enc isel> : _Pool_P_GP_W +{ + bits<5> rt; + bits<21> addr; + let Inst{25...21} = rt; + let Inst{20...2} = addr{20...2}; +} + +class LSMemGP18_Enc isel> : _Pool_P_GP_BH +{ + bits<5> rt; + bits<18> addr; + let Inst{25...21} = rt; + let Inst{17...0} = addr; +} + +class LSMemGP17S1_Enc isel> : _Pool_P_GP_BH +{ + bits<5> rt; + bits<18> addr; + let Inst{25...21} = rt; + let Inst{17...1} = addr{17...1}; + let Inst{0} = isel{0}; +} + +class LSMem4x4_Enc isel> : _Pool_P16 +{ + bits<5> rt; + bits<9> addr; + bits<5> rs = addr{8...4}; + bits<4> offset = addr{3...0}; + let Inst{9} = rt{4}; + let Inst{7...5} = rt{2...0}; + let Inst{4} = rs{4}; + let Inst{2...0} = rs{2...0}; + let Inst{3} = offset{3}; + let Inst{8} = offset{2}; +} + +class LI16_Enc isel> : _Pool_P16 +{ + bits<3> rt; + bits<7> eu; + let Inst{9...7} = rt; + let Inst{6...0} = eu; +} + +class IndirectBranch32_Enc : _Pool_P_J<0b1000> +{ + bits<5> rs; + let Inst{25...21} = 0b00000; + let Inst{20...16} = rs; +} + +class IndirectBranch16_Enc isel> : _Pool_P16_BR<0b0000> +{ + bits<5> rs; + let Inst{9...5} = rs; + 
let Inst{4} = isel; +} + +class IndirectCallNM_Enc isel> : _Pool_P_J +{ + bits<5> rt; + bits<5> rs; + let Inst{25...21} = rt; + let Inst{20...16} = rs; +} + +class LoadMemoryNM : + InstNM<(outs GPRNM32Opnd:$rt), (ins MO:$addr), !strconcat(opstr, "\t$rt, $addr"), + [(set GPRNM32Opnd:$rt, (OpNode Addr:$addr))], + itin> { + let canFoldAsLoad = 1; + let mayLoad = 1; + string BaseOpcode = opstr; +} + +class LoadMemoryX : + InstNM<(outs GPRNM32Opnd:$rd), (ins MO:$addr), + !strconcat(opstr, "\t$rd, $addr"), + [(set GPRNM32Opnd:$rd, (OpNode Addr:$addr))], + itin> { + let canFoldAsLoad = 1; + let mayLoad = 1; + string BaseOpcode = opstr; +} + +class StoreMemoryX : + InstNM<(outs), (ins GPRNM32Opnd:$rd, MO:$addr), + !strconcat(opstr, "\t$rd, ${addr}"), + [(OpNode GPRNM32Opnd:$rd, Addr:$addr)], + itin> { + let mayStore = 1; + string BaseOpcode = opstr; +} + +class LoadMemoryNM16 : InstNM<(outs RO:$rt), (ins MO:$addr), !strconcat(opstr, "\t$rt, $addr"), [(set RO:$rt, (OpNode Addr:$addr))], itin> { - let DecoderMethod = "DecodeMem"; let canFoldAsLoad = 1; let mayLoad = 1; string BaseOpcode = opstr; } -class StoreMemoryNM : + InstNM<(outs), (ins GPRNM32Opnd:$rt, MO:$addr), !strconcat(opstr, "\t$rt, $addr"), + [(OpNode GPRNM32Opnd:$rt, Addr:$addr)], + itin> { + let mayStore = 1; + string BaseOpcode = opstr; +} + +class StoreMemoryNM16 : InstNM<(outs), (ins RO:$rt, MO:$addr), !strconcat(opstr, "\t$rt, $addr"), [(OpNode RO:$rt, Addr:$addr)], itin> { - let DecoderMethod = "DecodeMem"; let mayStore = 1; string BaseOpcode = opstr; } -class LoadPCBase : - InstNM<(outs RO:$rt), (ins MO:$addr), "lwpc\t$rt, $addr", [], II_LW>, - InstSize48 { +class StoreMemoryNM4x4 : + InstNM<(outs), (ins GPRNM4ZOpnd:$rt, MO:$addr), !strconcat(opstr, "\t$rt, $addr"), + [(OpNode GPRNM4ZOpnd:$rt, Addr:$addr)], + itin> { + let mayStore = 1; + string BaseOpcode = opstr; +} + +class Op48_Addr_Enc isel> : _Pool_P48I { + bits<5> rt; + bits<32> addr; + let Inst{41...37} = rt; + let Inst{31...16} = addr{15...0}; + 
let Inst{15...0} = addr{31...16}; +} + +class Op48_Imm_Enc isel> : _Pool_P48I { + bits<5> rt; + bits<32> imm; + let Inst{41...37} = rt; + let Inst{31...16} = imm{15...0}; + let Inst{15...0} = imm{31...16}; +} + +class LoadPCBase : + InstNM<(outs RO:$rt), + (ins MO:$addr), "lwpc\t$rt, $addr", + [(set RO:$rt, (load Addr:$addr))], + II_LW> { let mayLoad = 1; } -class StorePCBase : - InstNM<(outs), (ins RO:$rt, MO:$addr), "swpc\t$rt, $addr", [], II_SW>, - InstSize48 { +class StorePCBase : + InstNM<(outs), + (ins RO:$rt, MO:$addr), "swpc\t$rt, $addr", + [(store RO:$rt, Addr:$addr)], + II_SW> { let mayStore = 1; } class CallNM : + DAGOperand calltarget> : InstNM<(outs), (ins calltarget:$addr), !strconcat(opstr, "\t$addr"), - [(OpNode imm_type:$addr)]> { + [(OpNode tglobaladdr:$addr)]> { let DecoderMethod = "DecodeJumpTarget"; let isCall = 1; let isCTI = 1; let Defs = [RA_NM]; } -class MoveBalcBase : InstNM<(outs GPR32NMOpnd:$rd), - (ins GPR32NMOpnd:$rt, calltarget:$addr), +class IndirectCallNM : + InstNM<(outs RA:$rt), (ins RO:$rs), !strconcat(opstr, "\t$rt, $rs"), + []> { + let isCall = 1; + let isCTI = 1; +} + +class MoveBalcBase : InstNM<(outs GPRNM32Opnd:$rd), + (ins GPRNM32Opnd:$rt, calltarget25_nm:$addr), "move.balc\t$rd, $rt, $addr", []>, InstSize32 { let isCall = 1; let isCTI = 1; @@ -214,7 +1326,7 @@ class MoveBalcBase : InstNM<(outs GPR32NMOpnd:$rd), class IndirectBranchNM: InstNM<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"), - [(operator RO:$rs)]>, InstSize16 { + [(operator RO:$rs)]> { let isTerminator = 1; let isBarrier = 1; let isCTI = 1; @@ -231,33 +1343,97 @@ class AdjustStack : let hasNoSchedulingInfo = 1; } -def nmreglist : Operand { +class NMRegListAsmOperandClass Supers = []> + : AsmOperandClass { + let Name = "NMRegList32"; + let RenderMethod = "addRegListOperands"; + let PredicateMethod = "isRegList"; + let SuperClasses = Supers; + let ParserMethod = "parseNMRegisterList"; +} + +class NMRegList16AsmOperandClass Supers = []> + : AsmOperandClass { 
+ let Name = "NMRegList16"; + let PredicateMethod = "isNMRegList16"; + let RenderMethod = "addRegListOperands"; + let SuperClasses = Supers; + let ParserMethod = "parseNMRegisterList"; +} + +def nmreglist16 : Operand { + let PrintMethod = "printNanoMipsRegisterList"; + let ParserMatchClass = NMRegList16AsmOperandClass<>; + let DecoderMethod = "DecodeNMRegList16Operand"; + let EncoderMethod = "getNMRegList16Encoding"; +} + +def nmreglist : Operand { let PrintMethod = "printNanoMipsRegisterList"; + let ParserMatchClass = NMRegListAsmOperandClass<>; + let DecoderMethod = "DecodeNMRegListOperand"; + let EncoderMethod = "getNMRegListEncoding"; } -class SaveRestore : +class SaveRestore : InstNM<(outs), - (ins uimm12_nm:$adj, nmreglist:$regs, variable_ops), + (ins Od:$adj, RL:$regs, variable_ops), !strconcat(opstr, "\t$adj$regs"), []> { let Defs = [SP_NM]; let Uses = [SP_NM]; let hasSideEffects = 1; - let isCodeGenOnly = 1; } -def SAVE_NM : SaveRestore<"save">, InstSize32; -def RESTORE_NM : SaveRestore<"restore">, InstSize32; -def RESTOREJRC_NM : SaveRestore<"restore.jrc">, InstSize32; + +class _Pool_PP_SR isel> : _Pool_P_U12<0b0011> { + let Inst{20} = 0; + let Inst{1...0} = isel; +} + +class SaveRestore32_Enc isel> : _Pool_PP_SR { + bits<12> adj; + bits<10> regs; + let Inst{11...3} = adj{11...3}; + let Inst{25...21} = regs{9...5}; + let Inst{19...16} = regs{4...1}; + let Inst{2} = regs{0}; +} + + +class SaveRestore16_Enc isel> : _Pool_P16_SR { + bits<8> adj; + bits<5> regs; + let Inst{7...4} = adj{7...4}; + let Inst{9} = regs{4}; + let Inst{3...0} = regs{3...0}; +} + +def SAVE16_NM : SaveRestore<"save", uimm8s4_nm, nmreglist16>, SaveRestore16_Enc<0b0>; +def RESTOREJRC16_NM : SaveRestore<"restore.jrc", uimm8s4_nm, nmreglist16>, SaveRestore16_Enc<0b1>; +def SAVE16_NOREG_NM : MipsInstAlias<"save $adj", + (SAVE16_NM uimm8s4_nm:$adj, 0)>; +def RESTOREJRC16_NOREG_NM : MipsInstAlias<"restore.jrc $adj", + (RESTOREJRC16_NM uimm8s4_nm:$adj, 0)>; + +def SAVE_NM : 
SaveRestore<"save", uimm12s3_nm, nmreglist>, SaveRestore32_Enc<0b00>; +def RESTORE_NM : SaveRestore<"restore", uimm12s3_nm, nmreglist>, SaveRestore32_Enc<0b10>; +def RESTOREJRC_NM : SaveRestore<"restore.jrc", uimm12s3_nm, nmreglist>, SaveRestore32_Enc<0b11>; + +def SAVE_NOREG_NM : MipsInstAlias<"save $adj", + (SAVE_NM uimm12s3_nm:$adj, 0)>; +def RESTORE_NOREG_NM : MipsInstAlias<"restore $adj", + (RESTORE_NM uimm12s3_nm:$adj, 0)>; +def RESTOREJRC_NOREG_NM : MipsInstAlias<"restore.jrc $adj", + (RESTOREJRC_NM uimm12s3_nm:$adj, 0)>; class LoadMultipleNM : - InstNM<(outs GPR32NMOpnd:$rt), (ins mem:$addr, uimm3plus1_nm:$count), - !strconcat(opstr, "\t$rt, $addr, $count"), []>, InstSize32 { - let DecoderMethod = "DecodeMem"; + InstNM<(outs GPRNM32Opnd:$rt), + (ins mem_nms9:$addr, uimm3plus1_nm:$count), + !strconcat(opstr, "\t$rt, $addr, $count"), []> { let mayLoad = 1; } class StoreMultipleNM : - InstNM<(outs), (ins GPR32NMOpnd:$rt, mem:$addr, uimm3plus1_nm:$count), - !strconcat(opstr, "\t$rt, $addr, $count"), []>, InstSize32 { - let DecoderMethod = "DecodeMem"; + InstNM<(outs), (ins GPRNM32Opnd:$rt, mem_nms9:$addr, uimm3plus1_nm:$count), + !strconcat(opstr, "\t$rt, $addr, $count"), []> { let mayStore = 1; } @@ -271,10 +1447,10 @@ class EffectiveAddressNM : } class UnalignedLoad : - InstNM<(outs RO:$rt), (ins mem_simm9:$addr, RO:$src), + RegisterOperand RO = GPRNM32Opnd> : + InstNM<(outs RO:$rt), (ins mem_nms9:$addr, RO:$src), !strconcat(opstr, "\t$rt, $addr"), - [(set RO:$rt, (OpNode addrsimm9:$addr, RO:$src))]>, InstSize32 { + [(set RO:$rt, (OpNode addrsimm9:$addr, RO:$src))]> { let DecoderMethod = "DecodeMem"; string Constraints = "$src = $rt"; let mayLoad = 1; @@ -282,143 +1458,395 @@ class UnalignedLoad : - InstNM<(outs), (ins RO:$rt, mem_simm9:$addr), + RegisterOperand RO = GPRNM32Opnd> : + InstNM<(outs), (ins RO:$rt, mem_nms9:$addr), !strconcat(opstr, "\t$rt, $addr"), - [(OpNode RO:$rt, addrsimm9:$addr)]>, InstSize32 { + [(OpNode RO:$rt, addrsimm9:$addr)]> { let 
DecoderMethod = "DecodeMem"; let mayStore = 1; } class ReadHardwareNM : InstNM<(outs CPURegOperand:$rt), (ins RO:$hs, uimm5:$sel), - "rdhwr\t$rt, $hs, $sel", []>, InstSize32 { - let isCodeGenOnly = 1; -} + "rdhwr\t$rt, $hs, $sel", []>; class SignExtInRegNM : - InstNM<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"), - [(set RO:$rd, (sext_inreg RO:$rt, vt))]>, InstSize32; + InstNM<(outs RO:$rt), (ins RO:$rs), !strconcat(opstr, "\t$rt, $rs"), + [(set RO:$rt, (sext_inreg RO:$rs, vt))]>; class LoadScaledAddressNM : InstNM<(outs RO:$rd), (ins RO:$rs, RO:$rt, uimm2:$shift), !strconcat(opstr, "\t$rd, $rs, $rt, $shift"), - [(set RO:$rd, (add (shl RO:$rs, immZExt2:$shift), RO:$rt))]>, - InstSize32; + [(set RO:$rd, (add (shl RO:$rs, immZExt2:$shift), RO:$rt))]>; + +class LoadScaledAddress_Enc: _Pool_POOL32A7<0b001> { + bits<5> rd; + bits<5> rs; + bits<5> rt; + bits<2> shift; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{15...11} = rd; + let Inst{10...9} = shift; +} class CountLeading0NM: - InstNM<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"), - [(set RO:$rd, (ctlz RO:$rs))]>, InstSize32; + InstNM<(outs RO:$rt), (ins RO:$rs), !strconcat(opstr, "\t$rt, $rs"), + [(set RO:$rt, (ctlz RO:$rs))]>; class CountLeading1NM: - InstNM<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"), - [(set RO:$rd, (ctlz (not RO:$rs)))]>, InstSize32; + InstNM<(outs RO:$rt), (ins RO:$rs), !strconcat(opstr, "\t$rt, $rs"), + [(set RO:$rt, (ctlz (not RO:$rs)))]>; class ReverseNM: InstNM<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"), [(set RO:$rd, (OpNode RO:$rs))]>, InstSize32; +class LoadAddressPCRelNM : + InstNM<(outs RO:$rt), (ins MO:$addr), + !strconcat(opstr, "\t$rt, $addr"), + [(set RO:$rt, Addr:$addr)]>; + +class LoadAddressGPRelNM : + InstNM<(outs RO:$rt), (ins RO:$rs, MO:$addr), + !strconcat(opstr, "\t$rt, $rs, $addr"), + [(set RO:$rt, (add RO:$rs, Addr:$addr))]>; + +class LoadAddressAbsNM : + InstNM<(outs RO:$rt), (ins 
MO:$addr), + !strconcat(opstr, "\t$rt, $addr"), + [(set RO:$rt, Addr:$addr)]>; + +class LoadImmediateNM : + InstNM<(outs RO:$rt), (ins Od:$eu), + !strconcat(opstr, "\t$rt, $eu"), + [(set RO:$rt, imm_type:$eu)]>; + //===----------------------------------------------------------------------===// // // Instruction Definitions // //===----------------------------------------------------------------------===// + +// 16-bit register operations +let FastISelShouldIgnore = 1 in { +def NOT16_NM : ArithLogicR16U_Desc<"not", GPRNM16R3Opnd, not>, + ArithLogicR16_Enc<0b00>; +let isCommutable=1 in { +def XOR16_NM : ArithLogicR16_Desc<"xor", GPRNM16R3Opnd, xor>, + ArithLogicR16_Enc<0b01>; +def AND16_NM : ArithLogicR16_Desc<"and", GPRNM16R3Opnd, and>, + ArithLogicR16_Enc<0b10>; +def OR16_NM : ArithLogicR16_Desc<"or ", GPRNM16R3Opnd, or>, + ArithLogicR16_Enc<0b11>; +} +} + +class ANDI16_Enc isel> : _Pool_P16 +{ + bits<3> rt; + bits<3> rs; + bits<4> imm; + let Inst{9...7} = rt; + let Inst{6...4} = rs; + let Inst{3...0} = imm; +} + +def ANDI16_NM : ArithLogicINM<"andi", uimm4mask_nm, GPRNM16R3Opnd, + imm4Mask, and>, ANDI16_Enc<0b11100>; + // _POOL32A0_0 pool of instructions. 
-def SLLV_NM : ArithLoginR32<"sllv", GPR32NMOpnd, shl>; -def SRLV_NM : ArithLoginR32<"srlv", GPR32NMOpnd, srl>; -def SRAV_NM : ArithLoginR32<"srav", GPR32NMOpnd, sra>; -def ROTRV_NM : ArithLoginR32<"rotrv", GPR32NMOpnd, rotr>; -def ADD_NM : ArithLoginR32<"add", GPR32NMOpnd>; -def ADDu_NM : ArithLoginR32<"addu", GPR32NMOpnd, add>; -def SUB_NM : ArithLoginR32<"sub", GPR32NMOpnd>; -def SUBu_NM : ArithLoginR32<"subu", GPR32NMOpnd, sub>; -def AND_NM : ArithLoginR32<"and", GPR32NMOpnd, and>; -def OR_NM : ArithLoginR32<"or", GPR32NMOpnd, or>; -def NOR_NM : ArithLoginR32<"nor", GPR32NMOpnd>; -def XOR_NM : ArithLoginR32<"xor", GPR32NMOpnd, xor>; -def SLT_NM : ArithLoginR32<"slt", GPR32NMOpnd, setlt>; -def SLTU_NM : ArithLoginR32<"sltu", GPR32NMOpnd, setult>; -def MUL_NM : ArithLoginR32<"mul", GPR32NMOpnd, mul>; -def MUH_NM : ArithLoginR32<"muh", GPR32NMOpnd, mulhs>; -def MULU_NM : ArithLoginR32<"mulu", GPR32NMOpnd>; -def MUHU_NM : ArithLoginR32<"muhu", GPR32NMOpnd, mulhu>; -def DIV_NM : DivMod<"div", sdiv>; -def MOD_NM : DivMod<"mod", srem>; -def DIVU_NM : DivMod<"divu", udiv>; -def MODU_NM : DivMod<"modu", urem>; +def SLLV_NM : ArithLogicR32_Desc<"sllv", GPRNM32Opnd, shl>, + ArithLogicR32_Enc<0b000010>; +def SRLV_NM : ArithLogicR32_Desc<"srlv", GPRNM32Opnd, srl>, + ArithLogicR32_Enc<0b000110>; +def SRAV_NM : ArithLogicR32_Desc<"srav", GPRNM32Opnd, sra>, + ArithLogicR32_Enc<0b001010>; +def ROTRV_NM : ArithLogicR32_Desc<"rotrv", GPRNM32Opnd, rotr>, + ArithLogicR32_Enc<0b001110>; +def ADD_NM : ArithLogicR32_Desc<"add", GPRNM32Opnd>, + ArithLogicR32_Enc<0b010010>; +def ADDu_NM : ArithLogicR32_Desc<"addu", GPRNM32Opnd, add>, + ArithLogicR32_Enc<0b010110>; +def SUB_NM : ArithLogicR32_Desc<"sub", GPRNM32Opnd>, + ArithLogicR32_Enc<0b011010>; +def SUBu_NM : ArithLogicR32_Desc<"subu", GPRNM32Opnd, sub>, + ArithLogicR32_Enc<0b011110>; +def AND_NM : ArithLogicR32_Desc<"and", GPRNM32Opnd, and>, + ArithLogicR32_Enc<0b100110>; +def OR_NM : ArithLogicR32_Desc<"or", GPRNM32Opnd, or>, + 
ArithLogicR32_Enc<0b101010>; +def NOR_NM : ArithLogicR32_Desc<"nor", GPRNM32Opnd>, + ArithLogicR32_Enc<0b101110>; +def XOR_NM : ArithLogicR32_Desc<"xor", GPRNM32Opnd, xor>, + ArithLogicR32_Enc<0b110010>; +def SLT_NM : ArithLogicR32_Desc<"slt", GPRNM32Opnd, setlt>, + ArithLogicR32_Enc<0b110110>; +def SLTU_NM : ArithLogicR32_Desc<"sltu", GPRNM32Opnd, setult>, + ArithLogicR32_Enc<0b111010>; +def MUL_NM : ArithLogicR32_Desc<"mul", GPRNM32Opnd, mul>, + ArithLogicR32_Enc<0b000011>; +def MUH_NM : ArithLogicR32_Desc<"muh", GPRNM32Opnd, mulhs>, + ArithLogicR32_Enc<0b000111>; +def MULU_NM : ArithLogicR32_Desc<"mulu", GPRNM32Opnd>, + ArithLogicR32_Enc<0b001011>; +def MUHU_NM : ArithLogicR32_Desc<"muhu", GPRNM32Opnd, mulhu>, + ArithLogicR32_Enc<0b001111>; +def DIV_NM : DivMod<"div", sdiv>, + ArithLogicR32_Enc<0b010011>; +def MOD_NM : DivMod<"mod", srem>, + ArithLogicR32_Enc<0b010111>; +def DIVU_NM : DivMod<"divu", udiv>, + ArithLogicR32_Enc<0b011011>; +def MODU_NM : DivMod<"modu", urem>, + ArithLogicR32_Enc<0b011111>; +def SOV_NM : ArithLogicR32_Desc<"sov", GPRNM32Opnd>, + ArithLogicR32_Enc<0b111110>; + +def NOT_NM : MipsInstAlias<"not $rt, $rs", + (NOR_NM GPRNM32Opnd:$rt, GPRNM32Opnd:$rs, ZERO_NM)>; + +// 16-bit register operations +let FastISelShouldIgnore = 1, isCommutable=1 in { +def SUBu16_NM : AddSubR16_Desc<"subu", GPRNM16R3Opnd, sub>, + AddSubR16_Enc<0b1>; +def ADDu16_NM : AddSubR16_Desc<"addu", GPRNM16R3Opnd, add>, + AddSubR16_Enc<0b0>; +def ADDu4x4_NM : Arith4x4_Desc<"addu", GPRNM4Opnd, add>, + Arith4x4_Enc<0b00>, RegConstraint<"$rt = $dst">, + NoEncode<"$rt">; +def MUL4x4_NM : Arith4x4_Desc<"mul", GPRNM4Opnd, mul>, + Arith4x4_Enc<0b01>, RegConstraint<"$rt = $dst">, + NoEncode<"$rt">; +} // P.U12 pool of instructions -def ORI_NM : ArithLogicI32<"ori", uimm12_nm, GPR32NMOpnd, imm32ZExt12, or>; -def XORI_NM : ArithLogicI32<"xori", uimm12_nm, GPR32NMOpnd, imm32ZExt12, xor>; -def ANDI_NM : ArithLogicI32<"andi", uimm16, GPR32NMOpnd, imm32ZExt12ANDI, and>; -def SLTI_NM : 
ArithLogicI32<"slti", uimm12_nm, GPR32NMOpnd, imm32ZExt12, setlt>; -def SLTIU_NM : ArithLogicI32<"sltiu", uimm12_nm, GPR32NMOpnd, imm32ZExt12, setult>; -def SEQI_NM : ArithLogicI32<"seqi", uimm12_nm, GPR32NMOpnd, imm32ZExt12, seteq>; +def ORI_NM : CondLogicI32_Desc<"ori", uimm12_nm, GPRNM32Opnd, imm32ZExt12, or>, + CondLogicI32_Enc<0b0000>; +def XORI_NM : CondLogicI32_Desc<"xori", uimm12_nm, GPRNM32Opnd, imm32ZExt12, xor>, + CondLogicI32_Enc<0b0001>; +def ANDI_NM : CondLogicI32_Desc<"andi", uimm12_nm, GPRNM32Opnd, imm32ZExt12ANDI, and>, + CondLogicI32_Enc<0b0010>; +def SLTI_NM : CondLogicI32_Desc<"slti", uimm12_nm, GPRNM32Opnd, imm32ZExt12, setlt>, + CondLogicI32_Enc<0b0100>; +def SLTIU_NM : CondLogicI32_Desc<"sltiu", uimm12_nm, GPRNM32Opnd, imm32ZExt12, setult>, + CondLogicI32_Enc<0b0101>; +def SEQI_NM : CondLogicI32_Desc<"seqi", uimm12_nm, GPRNM32Opnd, imm32ZExt12, seteq>, + CondLogicI32_Enc<0b0110>; // P.SHIFT pool of instructions -def SLL_NM : ArithLogicI32<"sll", uimm5_nm, GPR32NMOpnd, imm32ZExt5, shl>; -def SRL_NM : ArithLogicI32<"srl", uimm5_nm, GPR32NMOpnd, imm32ZExt5, srl>; -def SRA_NM : ArithLogicI32<"sra", uimm5_nm, GPR32NMOpnd, imm32ZExt5, sra>; -def ROTR_NM : ArithLogicI32<"rotr", uimm5_nm, GPR32NMOpnd, imm32ZExt5, rotr>; +def SLL16_NM : ShiftI_Desc<"sll", uimm3shift_nm, GPRNM16R3Opnd, imm32ZExt3, shl>, + ShiftI16_Enc<0b0>; +def SLL_NM : ShiftI_Desc<"sll", uimm5_nm, GPRNM32Opnd, imm32ZExt5, shl>, + ShiftI32_Enc<0b0000>; +def SRL16_NM : ShiftI_Desc<"srl", uimm3shift_nm, GPRNM16R3Opnd, imm32ZExt3, srl>, + ShiftI16_Enc<0b1>; +def SRL_NM : ShiftI_Desc<"srl", uimm5_nm, GPRNM32Opnd, imm32ZExt5, srl>, + ShiftI32_Enc<0b0010>; +def SRA_NM : ShiftI_Desc<"sra", uimm5_nm, GPRNM32Opnd, imm32ZExt5, sra>, + ShiftI32_Enc<0b0100>; +def ROTR_NM : ShiftI_Desc<"rotr", uimm5_nm, GPRNM32Opnd, imm32ZExt5, rotr>, + ShiftI32_Enc<0b0110>; + +def EXT_NM : ExtBaseNM, ExtInsBase_Enc<0b1111>; +def INS_NM : InsBaseNM, ExtInsBase_Enc<0b1110>; + +class AddImmediateGP : + InstNM<(outs 
DRO:$rt), (ins SRO:$rs, Od:$imm), + !strconcat(opstr, "\t$rt, $rs, $imm"), + [(set DRO:$rt, (OpNode SRO:$rs, imm_type:$imm))]> { + let isReMaterializable = 1; +} + +def ADDIUGPB_NM : AddImmediateGP<"addiu", uimm18_nm, GPRNM32Opnd, + GPRNM32Opnd, offsetgp18, add>, + _Pool_P_GP_BH<0b011>, NoEncode<"$rs"> { + bits<5> rt; + bits<18> imm; + let Inst{25...21} = rt; + let Inst{17...0} = imm; + let DisableEncoding="$rs"; +} -def EXT_NM : ExtBaseNM; -def INS_NM : InsBaseNM; +def ADDIUGPW_NM : AddImmediateGP<"addiu", uimm21s2_nm, GPRNM32Opnd, + GPRNM32Opnd, offsetgp19s2, add>, + _Pool_P_GP_W<0b00>, NoEncode<"$rs"> { + bits<5> rt; + bits<21> imm; + let Inst{25...21} = rt; + let Inst{20...2} = imm{20...2}; + let DisableEncoding="$rs"; +} + +def ADDIU_NM : AddImmediate<"addiu", uimm16_nm, GPRNM32NZOpnd, GPRNM32Opnd, + imm32ZExt16, add>, _Pool_P32<0b00000> { + bits<5> rt; + bits<5> rs; + bits<16> imm; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{15...0} = imm; +} + +def LI_NM : MipsInstAlias<"li $rt, $imm", + (ADDIU_NM GPRNM32NZOpnd:$rt, ZERO_NM, uimm16_nm:$imm)>; -def ADDiu_NM : ArithLogicI32<"addiu", uimm16_simm12, GPR32NMOpnd, - imm32SExt12OrZExt16, add>; -let Constraints = "$rt = $rs" in -def ADDIU48_NM : ArithLogicINM<"addiu[48]", simm32_relaxed, GPR32NMOpnd, - imm32_NM, add>, InstSize48; +def ADDIUNEG_NM : AddImmediate<"addiu", nimm12_nm, GPRNM32Opnd, GPRNM32Opnd, + imm32Neg12, add>, CondLogicI32_Enc<0b1000>; -def LSA_NM : LoadScaledAddressNM<"lsa", GPR32NMOpnd>; +def LINEG_NM : MipsInstAlias<"li $rt, $imm", + (ADDIUNEG_NM GPRNM32Opnd:$rt, ZERO_NM, nimm12_nm:$imm)>; -def SEB_NM : SignExtInRegNM<"seb", i8, GPR32NMOpnd>; -def SEH_NM : SignExtInRegNM<"seh", i16, GPR32NMOpnd>; +def ADDIUR2_NM : AddImmediate<"addiu", uimm5s2_nm, GPRNM16R3Opnd, GPRNM16R3Opnd, + imm32ZExt5, add>, _Pool_P16_A2<0b0> { + bits<3> rt; + bits<3> rs; + bits<5> imm; + let Inst{9...7} = rt; + let Inst{6...4} = rs; + let Inst{2...0} = imm{4...2}; +} + +def ADDIURS5_NM : 
AddImmediate<"addiu", simm4_nm, GPRNM16R5NZOpnd, GPRNM16R5NZOpnd, + imm32ZExt4, add>, _Pool_P16_A2<0b1>, + RegConstraint<"$rs = $rt">, NoEncode<"$rs"> { + bits<5> rt; + bits<4> imm; + let Inst{9...5} = rt; + let Inst{4} = imm{3}; + let Inst{2...0} = imm{2...0}; + let Constraints = "$rs = $rt"; +} -def CLZ_NM : CountLeading0NM<"clz", GPR32NMOpnd>; -def CLO_NM : CountLeading1NM<"clo", GPR32NMOpnd>; +def ADDIUR1SP_NM : AddImmediate<"addiu", uimm8s2_nm, GPRNM16R3Opnd, + GPRNM32Opnd, imm32ZExt8s2, add>, + _Pool_P16<0b01100>, + NoEncode<"$rs"> { + bits<3> rt; + bits<8> imm; + let Inst{9...7} = rt; + let Inst{6} = 0b1; + let Inst{5...0} = imm{7...2}; +} -def BITREVW_NM : ReverseNM<"bitrevw", GPR32NMOpnd, bitreverse>; -def BYTEREVW_NM : ReverseNM<"byterevw", GPR32NMOpnd, bswap>; +def ADDIU48_NM : ArithLogicINM<"addiu", simm32_nm, GPRNM48Opnd, + imm32_NM, add>, Op48_Imm_Enc<0b00001>, + RegConstraint<"$rs = $rt">; + +def ADDIUPC48_NM : LoadAddressPCRelNM<"lapc", GPRNM48Opnd, sym32_pc_nm, + addrpcrel>, Op48_Addr_Enc<0b00011>; + +def LAPC_NM : MipsInstAlias<"lapc $rt, $addr", + (ADDIUPC48_NM GPRNM32Opnd: $rt, sym32_pc_nm:$addr)>; + +def LSA_NM : LoadScaledAddressNM<"lsa", GPRNM32Opnd>, LoadScaledAddress_Enc; + +def SEB_NM : SignExtInRegNM<"seb", i8, GPRNM32Opnd>, LogicU32_Enc<0b000001>; +def SEH_NM : SignExtInRegNM<"seh", i16, GPRNM32Opnd>, LogicU32_Enc<0b000101>; + +def CLZ_NM : CountLeading0NM<"clz", GPRNM32Opnd>, CLZO_Enc<0b0101101>; +def CLO_NM : CountLeading1NM<"clo", GPRNM32Opnd>, CLZO_Enc<0b0100101>; + +def BITREVW_NM : ReverseNM<"bitrevw", GPRNM32Opnd, bitreverse>; +def BYTEREVW_NM : ReverseNM<"byterevw", GPRNM32Opnd, bswap>; // P.TRAP pool of instructions. 
-def TEQ_NM : Trap<"teq">; -def TNE_NM : Trap<"tne">; +def TEQ_NM : Trap_Desc<"teq", GPRNM32Opnd, uimm5_nm>, Trap_Enc<0b0>; +def TNE_NM : Trap_Desc<"tne", GPRNM32Opnd, uimm5_nm>, Trap_Enc<0b1>; -def NOT_NM : ArithLogicR16<"not", GPR3Opnd, not>; -def XOR16_NM : ArithLogicR16<"xor16", GPR3Opnd>; -def AND16_NM : ArithLogicR16<"and16", GPR3Opnd>; -def OR16_NM : ArithLogicR16<"or16", GPR3Opnd>; +def JRC_NM : IndirectBranchNM<"jrc", GPRNM32Opnd>, IndirectBranch16_Enc<0>; + +def MOVEBALC_NM : InstNM<(outs GPRNM1R1Opnd:$rd), + (ins GPRNM4ZOpnd:$rt, calltarget21_nm:$addr), + "move.balc\t$rd, $rt, $addr", []>, + _Pool_P32<0b00010> { + let isCall = 1; + let isCTI = 1; + let Defs = [RA_NM]; + bits<5> rt; + bits<1> rd; + bits<22> addr; + let Inst{25} = rt{4}; + let Inst{24} = rd; + let Inst{23...21} = rt{2...0}; + let Inst{20...1} = addr{20...1}; + let Inst{0} = addr{21}; +} -def JRC_NM : IndirectBranchNM<"jrc", GPR32NMOpnd>; +let isReMaterializable = 1 in { +def LI16_NM : LoadImmediateNM<"li", GPRNM16R3Opnd, nimm7_nm, imm7M1To126>, + LI16_Enc<0b11000>; +} +def : NMPat<(i32 imm32ZExt16:$imm), (ADDIU_NM ZERO_NM, imm:$imm)>; -def BALC_NM : CallNM<"balc", MipsJmpLink, tglobaladdr>, InstSize32; -def MOVEBALC_NM : MoveBalcBase; +def : NMPat<(i32 imm32Neg12:$imm), (ADDIUNEG_NM ZERO_NM, imm:$imm)>; let isReMaterializable = 1 in { -def Li_NM : RegImmNM<"li", simm32_relaxed, GPR32NMOpnd>, InstSize48; -def LA_NM : RegImmNM<"la", simm32_relaxed, GPR32NMOpnd>, InstSize48; +def LI48_NM : LoadAddressAbsNM<"li", GPRNM48Opnd, sym32_abs_nm, + addrpcrel>, Op48_Imm_Enc<0b00000>; +def ADDIUGP48_NM : LoadAddressGPRelNM<"addiu.b32", GPRNM48Opnd, sym32_gp_nm, + addrgprel>, Op48_Addr_Enc<0b00010>; +} // isReMaterializable = 1 -def LAGPB_NM : RegImmNM<"addiu.b", simm32_relaxed, GPR32NMOpnd>, InstSize48 { - let AsmString = "addiu.b $rt, $$gp, %gprel( $imm )"; - let isReMaterializable = 1; +def ADDIUB32_NM : MipsInstAlias<"addiu.b32 $rt, $rs, $addr", + (ADDIUGP48_NM GPRNM48Opnd:$rt, + GPRNM32Opnd:$rs, 
sym32_gp_nm:$addr)>; +def LA_NM : MipsInstAlias<"la $rt, $addr", + (ADDIUPC48_NM GPRNM48Opnd:$rt, sym32_pc_nm:$addr)>; + +// Arithmetic and logical instructions with 2 register operands and immediate. +class LoadUpperINM : + InstNM<(outs RO:$rt), (ins Od:$imm), + !strconcat(opstr, "\t$rt, $imm"), + [(set RO:$rt, (OpNode RO:$rs, imm_type:$imm))]> { + let isReMaterializable = 1; } -} // isReMaterializable = 1 +class LUI_Enc isel> : _Pool_P_LUI { + bits<5> rt; + bits<20> imm; + let Inst{25...21} = rt; + let Inst{20...12} = imm{8...0}; + let Inst{11...2} = imm{18...9}; + let Inst{0} = imm{19}; +} + +def LUI_NM : RegImmNM<"lui", simm32s12_nm, GPRNM32Opnd, imm32SExt20s12>, LUI_Enc<0b0>; -def ALUIPC_NM : RegImmNM<"aluipc", simm32_relaxed, GPR32NMOpnd>, InstSize32; +def ALUIPC_NM : RegImmNM<"aluipc", simm32s12_nm, GPRNM32Opnd, imm32SExt20s12>, LUI_Enc<0b1>; -def LEA_ADDiu_NM : EffectiveAddressNM<"addiu", GPR32NMOpnd>, InstSize32; +def LEA_ADDIU_NM : EffectiveAddressNM<"addiu", GPRNM32Opnd>, _Pool_P32<0b00000> { + bits<5> rt; + bits<16> addr; + let Inst{25...21} = rt; + let Inst{20...16} = 0b00000; + let Inst{15...0} = addr; +} + +def RDHWR_NM : ReadHardwareNM, + _Pool_POOL32A0_0<0b011100> { + bits<5> rt; + bits<5> hs; + bits<5> sel; + let Inst{25...21} = rt; + let Inst{20...16} = hs; + let Inst{15...11} = sel; +} -def RDHWR_NM : ReadHardwareNM; def ADJCALLSTACKDOWN_NM : AdjustStack; def ADJCALLSTACKUP_NM : AdjustStack; // Return instruction is matched as RetRA, then expanded into PseudoReturnNM // after register allocation. Finally, MipsAsmPrinter expands this into JRC_NM. -def PseudoReturnNM : PseudoInstNM<(outs), (ins GPR32NMOpnd:$rs), []> { +def PseudoReturnNM : PseudoInstNM<(outs), (ins GPRNM32Opnd:$rs), []> { let isTerminator = 1; let isBarrier = 1; let isReturn = 1; @@ -430,8 +1858,8 @@ def PseudoReturnNM : PseudoInstNM<(outs), (ins GPR32NMOpnd:$rs), []> { // Indirect branch is matched as PseudoIndirectBranchNM and expanded to JRC_NM. 
def PseudoIndirectBranchNM : - PseudoInstNM<(outs), (ins GPR32NMOpnd:$rs), [(brind GPR32NMOpnd:$rs)]>, - PseudoInstExpansion<(JRC_NM GPR32NMOpnd:$rs)> { + PseudoInstNM<(outs), (ins GPRNM32Opnd:$rs), [(brind GPRNM32Opnd:$rs)]>, + PseudoInstExpansion<(JRC_NM GPRNM32Opnd:$rs)> { let isTerminator = 1; let isBarrier = 1; let isBranch = 1; @@ -439,46 +1867,48 @@ def PseudoIndirectBranchNM : bit isCTI = 1; } -let hasPostISelHook = 1, isCall = 1, isCTI = 1, Defs = [RA_NM] in { - def JALRC_NM : InstNM<(outs GPR32NMOpnd:$rd), (ins GPR32NMOpnd:$rs), - "jalrc\t$rd, $rs", []>, InstSize32; - def JALRCPseudo : PseudoInstNM<(outs), (ins GPR32NMOpnd:$rs), - [(MipsJmpLink GPR32NMOpnd:$rs)]>, - PseudoInstExpansion<(JALRC_NM RA_NM, GPR32NMOpnd:$rs)>; +let hasPostISelHook = 1, isCall = 1, isCTI = 1, Defs = [RA_NM] in { + def JALRC16_NM : IndirectBranchNM<"jalrc", GPRNM32Opnd>, IndirectBranch16_Enc<1>; + + def JALRC_NM : IndirectCallNM<"jalrc", GPRNM32Opnd, GPRNM32Opnd>, + IndirectCallNM_Enc<0b0000>; + def JALRCHB_NM : IndirectCallNM<"jalrc.hb", GPRNM32Opnd, GPRNM32Opnd>, + IndirectCallNM_Enc<0b0001>; + def JALRCPseudo : PseudoInstNM<(outs), (ins GPRNM32Opnd:$rs), + [(MipsJmpLink GPRNM32Opnd:$rs)]>, + PseudoInstExpansion<(JALRC_NM RA_NM, GPRNM32Opnd:$rs)>; } -def : NMPat<(i32 imm32_NM:$imm), (Li_NM imm:$imm)>; - -def : NMPat<(MipsJmpLink (i32 texternalsym:$dst)), (BALC_NM texternalsym:$dst)>; +def : NMPat<(i32 imm32_NM:$imm), (LI48_NM imm:$imm)>; -def : NMPat<(MipsFullAddr tglobaltlsaddr:$in), (LA_NM tglobaltlsaddr:$in)>; -def : NMPat<(MipsFullAddr tblockaddress:$in), (LA_NM tblockaddress:$in)>; -def : NMPat<(MipsFullAddr tjumptable:$in), (LA_NM tjumptable:$in)>; +def : NMPat<(MipsFullAddr tglobaltlsaddr:$in), (ADDIUPC48_NM tglobaltlsaddr:$in)>; +def : NMPat<(MipsFullAddr tblockaddress:$in), (ADDIUPC48_NM tblockaddress:$in)>; +def : NMPat<(MipsFullAddr tjumptable:$in), (ADDIUPC48_NM tjumptable:$in)>; -def : NMPat<(not (or GPR32NM:$rs, GPR32NM:$rt)), - (NOR_NM GPR32NM:$rs, GPR32NM:$rt)>; 
+def : NMPat<(not (or GPRNM32:$rs, GPRNM32:$rt)), + (NOR_NM GPRNM32:$rs, GPRNM32:$rt)>; def : NMPat<(trap), (TEQ_NM ZERO_NM, ZERO_NM, 0)>; // SetCC patterns. -def : NMPat<(setne GPR32NM:$lhs, 0), (SLTU_NM ZERO_NM, GPR32NM:$lhs)>; -def : NMPat<(seteq GPR32NM:$lhs, GPR32NM:$rhs), - (SLTIU_NM (XOR_NM GPR32NM:$lhs, GPR32NM:$rhs), 1)>; -def : NMPat<(setne GPR32NM:$lhs, GPR32NM:$rhs), - (SLTU_NM ZERO_NM, (XOR_NM GPR32NM:$lhs, GPR32NM:$rhs))>; -def : NMPat<(setle GPR32NM:$lhs, GPR32NM:$rhs), - (XORI_NM (SLT_NM GPR32NM:$rhs, GPR32NM:$lhs), 1)>; -def : NMPat<(setule GPR32NM:$lhs, GPR32NM:$rhs), - (XORI_NM (SLTU_NM GPR32NM:$rhs, GPR32NM:$lhs), 1)>; -def : NMPat<(setgt GPR32NM:$lhs, GPR32NM:$rhs), - (SLT_NM GPR32NM:$rhs, GPR32NM:$lhs)>; -def : NMPat<(setugt GPR32NM:$lhs, GPR32NM:$rhs), - (SLTU_NM GPR32NM:$rhs, GPR32NM:$lhs)>; -def : NMPat<(setge GPR32NM:$lhs, GPR32NM:$rhs), - (XORI_NM (SLT_NM GPR32NM:$lhs, GPR32NM:$rhs), 1)>; -def : NMPat<(setuge GPR32NM:$lhs, GPR32NM:$rhs), - (XORI_NM (SLTU_NM GPR32NM:$lhs, GPR32NM:$rhs), 1)>; +def : NMPat<(setne GPRNM32:$lhs, 0), (SLTU_NM ZERO_NM, GPRNM32:$lhs)>; +def : NMPat<(seteq GPRNM32:$lhs, GPRNM32:$rhs), + (SLTIU_NM (XOR_NM GPRNM32:$lhs, GPRNM32:$rhs), 1)>; +def : NMPat<(setne GPRNM32:$lhs, GPRNM32:$rhs), + (SLTU_NM ZERO_NM, (XOR_NM GPRNM32:$lhs, GPRNM32:$rhs))>; +def : NMPat<(setle GPRNM32:$lhs, GPRNM32:$rhs), + (XORI_NM (SLT_NM GPRNM32:$rhs, GPRNM32:$lhs), 1)>; +def : NMPat<(setule GPRNM32:$lhs, GPRNM32:$rhs), + (XORI_NM (SLTU_NM GPRNM32:$rhs, GPRNM32:$lhs), 1)>; +def : NMPat<(setgt GPRNM32:$lhs, GPRNM32:$rhs), + (SLT_NM GPRNM32:$rhs, GPRNM32:$lhs)>; +def : NMPat<(setugt GPRNM32:$lhs, GPRNM32:$rhs), + (SLTU_NM GPRNM32:$rhs, GPRNM32:$lhs)>; +def : NMPat<(setge GPRNM32:$lhs, GPRNM32:$rhs), + (XORI_NM (SLT_NM GPRNM32:$lhs, GPRNM32:$rhs), 1)>; +def : NMPat<(setuge GPRNM32:$lhs, GPRNM32:$rhs), + (XORI_NM (SLTU_NM GPRNM32:$lhs, GPRNM32:$rhs), 1)>; //===----------------------------------------------------------------------===// // @@ 
-508,8 +1938,7 @@ class CBranchNM - : InstSize32, - InstNM<(outs), (ins RO:$rt, imm_type:$u, opnd:$offset), + : InstNM<(outs), (ins RO:$rt, imm_type:$u, opnd:$offset), !strconcat(opstr, "\t$rt, $u, $offset"), [(brcond (i32 (cond_op RO:$rt, ImmOp:$u)), bb:$offset)]> { let isBranch = 1; @@ -520,8 +1949,7 @@ class CBranchImmNM - : InstSize16, - InstNM<(outs), (ins RO:$rs, opnd:$offset), + : InstNM<(outs), (ins RO:$rs, opnd:$offset), !strconcat(opstr, "\t$rs, $offset"), [(brcond (i32 (cond_op RO:$rs, 0)), bb:$offset)]> { let isBranch = 1; @@ -532,8 +1960,7 @@ class CBranchZeroNM - : InstSize32, - InstNM<(outs), (ins RO:$rt, imm_type:$u, opnd:$offset), + : InstNM<(outs), (ins RO:$rt, imm_type:$u, opnd:$offset), !strconcat(opstr, "\t$rt, $u, $offset"), [(brcond (i32 (cond_op (and RO:$rt, (shl 1, ImmOp:$u)), 0)), bb:$offset)]> { let isBranch = 1; @@ -542,46 +1969,175 @@ class CBranchBitNM + : InstNM<(outs), (ins brtarget:$addr), "bc\t$addr", + [(br bb:$addr)], II_J> { + let isTerminator = 1; + let isBarrier = 1; + let isBranch = 1; + let isCTI = 1; +} + +class BC16_Enc isel> : _Pool_P16 +{ + bits<11> addr; + let Inst{9...1} = addr{9...1}; + let Inst{0} = addr{10}; +} -let isCodeGenOnly = 1, hasNoSchedulingInfo = 1, - hasDelaySlot = 0 in { +class BC32_Enc isel> : _Pool_P_BAL +{ + bits<26> addr; + let Inst{24...0} = addr{25...1}; +} -// TODO: BBEQZ/BBNEZ bit-test conditional branch instructions -def BEQC_NM : CBranchNM<"beqc", brtarget, seteq, GPR32NMOpnd>, InstSize32; -def BEQIC_NM : CBranchImmNM<"beqic", brtarget, seteq, GPR32NMOpnd, uimm7, immZExt7>; -def BEQZC_NM : CBranchZeroNM<"beqzc", brtarget, seteq, GPR32NMOpnd>; +class B14_Enc isel> : _Pool_P_BR12 +{ + bits<5> rt; + bits<5> rs; + bits<15> offset; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{13...1} = offset{13...1}; + let Inst{0} = offset{14}; +} -def BGEC_NM : CBranchNM<"bgec", brtarget, setge, GPR32NMOpnd>, InstSize32; -def BGEIC_NM : CBranchImmNM<"bgeic", brtarget, setge, GPR32NMOpnd, uimm7, 
immZExt7>; -def BGEIUC_NM : CBranchImmNM<"bgeiuc", brtarget, setuge, GPR32NMOpnd, uimm7, immZExt7>; -def BGEUC_NM : CBranchNM<"bgeuc", brtarget, setuge, GPR32NMOpnd>, InstSize32; +class BZ16_Enc isel> : _Pool_P16 +{ + bits<3> rt3; + bits<5> rs; + bits<8> offset; + let Inst{9...7} = rt3; + let Inst{6...1} = offset{6...1}; + let Inst{0} = offset{7}; +} -def BLTC_NM : CBranchNM<"bltc", brtarget, setlt, GPR32NMOpnd>, InstSize32; -def BLTIC_NM : CBranchImmNM<"bltic", brtarget, setlt, GPR32NMOpnd, uimm7, immZExt7>; -def BLTIUC_NM : CBranchImmNM<"bltiuc", brtarget, setult, GPR32NMOpnd, uimm7, immZExt7>; -def BLTUC_NM : CBranchNM<"bltuc", brtarget, setult, GPR32NMOpnd>, InstSize32; +class BRI_Enc isel> : _Pool_P_BRI +{ + bits<5> rt; + bits<7> u; + bits<12> offset; + let Inst{25...21} = rt; + let Inst{17...11} = u; + let Inst{10...1} = offset{10...1}; + let Inst{0} = offset{11}; +} -def BNEC_NM : CBranchNM<"bnec", brtarget, setne, GPR32NMOpnd>, InstSize32; -def BNEIC_NM : CBranchImmNM<"bneic", brtarget, setne, GPR32NMOpnd, uimm7, immZExt7>; -def BNEZC_NM : CBranchZeroNM<"bnezc", brtarget, setne, GPR32NMOpnd>; +class BBit_Enc isel> : _Pool_P_BRI +{ + bits<5> rt; + bits<6> u; + bits<12> offset; + let Inst{25...21} = rt; + let Inst{16...11} = u; + let Inst{10...1} = offset{10...1}; + let Inst{0} = offset{11}; +} -// Bit-test branches -def BBNEZC_NM : CBranchBitNM<"bbnezc", brtarget, setne, GPR32NMOpnd, simm32, simm32power2>; -def BBEQZC_NM : CBranchBitNM<"bbeqzc", brtarget, seteq, GPR32NMOpnd, simm32, simm32power2>; +class MOVE_Enc : _Pool_P16_MV +{ + bits<5> rt; + bits<5> rs; + let Inst{9...5} = rt; + let Inst{4...0} = rs; +} +class MOVEP_Enc isel> : _Pool_P16 +{ + bits<2> dst1; + bits<5> src1; + bits<5> src2; + + let Inst{9} = src2{4}; + let Inst{7...5} = src2{2...0}; + let Inst{4} = src1{4}; + let Inst{2...0} = src1{2...0}; + let Inst{8} = dst1{0}; + let Inst{3} = dst1{1}; } -class BCBase : InstNM<(outs), (ins jmptarget:$target), "bc\t$target", - [(br bb:$target)], II_J>, 
InstSize32 { - let isTerminator = 1; - let isBarrier = 1; - let isBranch = 1; - let isCTI = 1; +class MOVEPREV_Enc isel> : _Pool_P16 +{ + bits<2> src1; + bits<5> dst1; + bits<5> dst2; + + let Inst{9} = dst2{4}; + let Inst{7...5} = dst2{2...0}; + let Inst{4} = dst1{4}; + let Inst{2...0} = dst1{2...0}; + let Inst{8} = src1{0}; + let Inst{3} = src1{1}; +} + +def BC_NM : BCBase, BC32_Enc<0b0>; +def BALC_NM : CallNM<"balc", MipsJmpLink, calltarget25_nm>, BC32_Enc<0b1>; +def BC16_NM : BCBase, BC16_Enc<0b00010>; +def BALC16_NM : CallNM<"balc", MipsJmpLink, calltarget10_nm>, BC16_Enc<0b00110>; + +def : NMPat<(MipsJmpLink (i32 texternalsym:$dst)), (BALC_NM texternalsym:$dst)>; + +class B4_Enc : _Pool_P16<0b11010> { + bits<3> rs; + bits<3> rt; + bits<4> offset; + let Inst{9...7} = rt; + let Inst{6...4} = rs; + let Inst{3...0} = offset; +} + +def BEQC_NM : CBranchNM<"beqc", brtarget14_nm, seteq, GPRNM32Opnd>, + B14_Enc<0b000>; +def BEQC16_NM : CBranchNM<"beqc16", brtarget4s1_nm, seteq, GPRNM16R3Opnd>, + B4_Enc; +def BGEC_NM : CBranchNM<"bgec", brtarget14_nm, setge, GPRNM32Opnd>, + B14_Enc<0b010>; +def BGEUC_NM : CBranchNM<"bgeuc", brtarget14_nm, setuge, GPRNM32Opnd>, + B14_Enc<0b011>; + +def BNEC_NM : CBranchNM<"bnec", brtarget14_nm, setne, GPRNM32Opnd>, + B14_Enc<0b100>; +def BNEC16_NM : CBranchNM<"bnec16", brtarget4s1_nm, setne, GPRNM16R3Opnd>, + B4_Enc; +def BLTC_NM : CBranchNM<"bltc", brtarget14_nm, setlt, GPRNM32Opnd>, + B14_Enc<0b110>; +def BLTUC_NM : CBranchNM<"bltuc", brtarget14_nm, setult, GPRNM32Opnd>, + B14_Enc<0b111>; + +def BEQZC_NM : CBranchZeroNM<"beqzc", brtarget7_nm, seteq, GPRNM32Opnd>, + BZ16_Enc<0b10010>; +def BNEZC_NM : CBranchZeroNM<"bnezc", brtarget7_nm, setne, GPRNM32Opnd>, + BZ16_Enc<0b10110>; + +def BEQIC_NM : CBranchImmNM<"beqic", brtarget11_nm, seteq, GPRNM32Opnd, uimm7, immZExt7>, + BRI_Enc<0b000>; +def BGEIC_NM : CBranchImmNM<"bgeic", brtarget11_nm, setge, GPRNM32Opnd, uimm7, immZExt7>, + BRI_Enc<0b010>; +def BLTIC_NM : CBranchImmNM<"bltic", 
brtarget11_nm, setlt, GPRNM32Opnd, uimm7, immZExt7>, + BRI_Enc<0b110>; +def BNEIC_NM : CBranchImmNM<"bneic", brtarget11_nm, setne, GPRNM32Opnd, uimm7, immZExt7>, + BRI_Enc<0b100>; +def BGEIUC_NM : CBranchImmNM<"bgeiuc", brtarget11_nm, setuge, GPRNM32Opnd, uimm7, immZExt7>, + BRI_Enc<0b011>; +def BLTIUC_NM : CBranchImmNM<"bltiuc", brtarget11_nm, setult, GPRNM32Opnd, uimm7, immZExt7>, + BRI_Enc<0b111>; + + + +def BRSC_NM : IndirectBranchNM<"brsc", GPRNM32Opnd>, IndirectBranch32_Enc; +def BALRSC_NM : IndirectCallNM<"balrsc", GPRNM32NZOpnd, GPRNM32Opnd>, IndirectCallNM_Enc<0b1000>; + +// Bit-test branches +let hasNoSchedulingInfo = 1, + hasDelaySlot = 0 in { +def BBNEZC_NM : CBranchBitNM<"bbnezc", brtarget11_nm, setne, GPRNM32Opnd, simm32, simm32power2>, + BBit_Enc<0b001>; +def BBEQZC_NM : CBranchBitNM<"bbeqzc", brtarget11_nm, seteq, GPRNM32Opnd, simm32, simm32power2>, + BBit_Enc<0b101>; } -def BC_NM : BCBase; -class TailCallBase : PseudoInstNM<(outs), (ins calltarget:$target), []>, - PseudoInstExpansion<(BC_NM jmptarget:$target)> { +class TailCallBase : PseudoInstNM<(outs), (ins calltarget25_nm:$target), []>, + PseudoInstExpansion<(BC_NM brtarget25_nm:$target)> { let isCall = 1; let isTerminator = 1; let isReturn = 1; @@ -590,9 +2146,9 @@ class TailCallBase : PseudoInstNM<(outs), (ins calltarget:$target), []>, } def TAILCALL_NM : TailCallBase; -class TailCallRegBase : PseudoInstNM<(outs), (ins GPR32NMOpnd:$rs), - [(MipsTailCall GPR32NMOpnd:$rs)]>, - PseudoInstExpansion<(JRC_NM GPR32NMOpnd:$rs)> { +class TailCallRegBase : PseudoInstNM<(outs), (ins GPRNM32Opnd:$rs), + [(MipsTailCall GPRNM32Opnd:$rs)]>, + PseudoInstExpansion<(JRC_NM GPRNM32Opnd:$rs)> { let isCall = 1; let isTerminator = 1; let isReturn = 1; @@ -608,34 +2164,34 @@ def : NMPat<(MipsTailCall (i32 texternalsym:$dst)), (TAILCALL_NM texternalsym:$dst)>; // '>-1' can be represented as '>=0'. 
-def : NMPat<(brcond (i32 (setgt GPR32NM:$lhs, -1)), bb:$dst), - (BGEC_NM GPR32NM:$lhs, ZERO_NM, bb:$dst)>; +def : NMPat<(brcond (i32 (setgt GPRNM32:$lhs, -1)), bb:$dst), + (BGEC_NM GPRNM32:$lhs, ZERO_NM, bb:$dst)>; // Reversed branch conditions: -def : NMPat<(brcond (i32 (setugt GPR32NM:$lhs, GPR32NM:$rhs)), bb:$dst), - (BLTUC_NM GPR32NM:$rhs, GPR32NM:$lhs, bb:$dst)>; -def : NMPat<(brcond (i32 (setgt GPR32NM:$lhs, GPR32NM:$rhs)), bb:$dst), - (BLTC_NM GPR32NM:$rhs, GPR32NM:$lhs, bb:$dst)>; -def : NMPat<(brcond (i32 (setule GPR32NM:$lhs, GPR32NM:$rhs)), bb:$dst), - (BGEUC_NM GPR32NM:$rhs, GPR32NM:$lhs, bb:$dst)>; -def : NMPat<(brcond (i32 (setle GPR32NM:$lhs, GPR32NM:$rhs)), bb:$dst), - (BGEC_NM GPR32NM:$rhs, GPR32NM:$lhs, bb:$dst)>; +def : NMPat<(brcond (i32 (setugt GPRNM32:$lhs, GPRNM32:$rhs)), bb:$dst), + (BLTUC_NM GPRNM32:$rhs, GPRNM32:$lhs, bb:$dst)>; +def : NMPat<(brcond (i32 (setgt GPRNM32:$lhs, GPRNM32:$rhs)), bb:$dst), + (BLTC_NM GPRNM32:$rhs, GPRNM32:$lhs, bb:$dst)>; +def : NMPat<(brcond (i32 (setule GPRNM32:$lhs, GPRNM32:$rhs)), bb:$dst), + (BGEUC_NM GPRNM32:$rhs, GPRNM32:$lhs, bb:$dst)>; +def : NMPat<(brcond (i32 (setle GPRNM32:$lhs, GPRNM32:$rhs)), bb:$dst), + (BGEC_NM GPRNM32:$rhs, GPRNM32:$lhs, bb:$dst)>; // Immediate comparison branches with offset -def : NMPat<(brcond (i32 (setgt GPR32NM:$lhs, immZExt7Plus1:$imm)), bb:$dst), - (BGEIC_NM GPR32NM:$lhs, (Plus1 $imm), bb:$dst)>; -def : NMPat<(brcond (i32 (setugt GPR32NM:$lhs, immZExt7Plus1:$imm)), bb:$dst), - (BGEIUC_NM GPR32NM:$lhs, (Plus1 $imm), bb:$dst)>; +def : NMPat<(brcond (i32 (setgt GPRNM32:$lhs, immZExt7Plus1:$imm)), bb:$dst), + (BGEIC_NM GPRNM32:$lhs, (Plus1 $imm), bb:$dst)>; +def : NMPat<(brcond (i32 (setugt GPRNM32:$lhs, immZExt7Plus1:$imm)), bb:$dst), + (BGEIUC_NM GPRNM32:$lhs, (Plus1 $imm), bb:$dst)>; // Branch on integer value -def : NMPat<(brcond (i32 GPR32NM:$lhs), bb:$dst), - (BNEC_NM GPR32NM:$lhs, ZERO_NM, bb:$dst)>; +def : NMPat<(brcond (i32 GPRNM32:$lhs), bb:$dst), + (BNEC_NM 
GPRNM32:$lhs, ZERO_NM, bb:$dst)>; // Bit-test branch -def : NMPat<(brcond (i32 (setne (and GPR32NM:$rt, simm32power2:$imm), 0)), bb:$offset), - (BBNEZC_NM GPR32NM:$rt, (Log2XForm $imm), bb:$offset)>; -def : NMPat<(brcond (i32 (seteq (and GPR32NM:$rt, simm32power2:$imm), 0)), bb:$offset), - (BBEQZC_NM GPR32NM:$rt, (Log2XForm $imm), bb:$offset)>; +def : NMPat<(brcond (i32 (setne (and GPRNM32:$rt, simm32power2:$imm), 0)), bb:$offset), + (BBNEZC_NM GPRNM32:$rt, (Log2XForm $imm), bb:$offset)>; +def : NMPat<(brcond (i32 (seteq (and GPRNM32:$rt, simm32power2:$imm), 0)), bb:$offset), + (BBEQZC_NM GPRNM32:$rt, (Log2XForm $imm), bb:$offset)>; //===----------------------------------------------------------------------===// @@ -643,67 +2199,151 @@ def : NMPat<(brcond (i32 (seteq (and GPR32NM:$rt, simm32power2:$imm), 0)), bb:$o // Load / store instructions // //===----------------------------------------------------------------------===// - -let isCodeGenOnly = 1 in { +def LWSP16_NM : LoadMemoryNM16<"lw", GPRNM32Opnd, mem_nm_sp5s2, load, addrsp5s2, II_LW>, + LSSPRegMem16_Enc<0b00101>; // Broken +def SWSP16_NM : StoreMemoryNM16<"sw", GPRNM32Opnd, mem_nm_sp5s2, store, addrsp5s2, II_SW>, + LSSPRegMem16_Enc<0b10101>; // Broken +def LWGP16_NM : LoadMemoryNM16<"lw", GPRNM16R3Opnd, mem_nm_gp7s2, load, addrgp7s2, II_LW>, + LSGPRegMem16_Enc<0b01001>; +def SWGP16_NM : StoreMemoryNM16<"sw", GPRNM16R3ZOpnd, mem_nm_gp7s2, store, addrgp7s2, II_SW>, + LSGPRegMem16_Enc<0b11001>; + +def LW16_NM : LoadMemoryNM16<"lw", GPRNM16R3Opnd, mem_nm6s2, load, addruimm6s2, II_LW>, + LSMem16_Enc<0b00001>; +def SW16_NM : StoreMemoryNM16<"sw", GPRNM16R3ZOpnd, mem_nm6s2, store, addruimm6s2, II_SW>, + LSMem16_Enc<0b10001>; +def LH16_NM : LoadMemoryNM16<"lh", GPRNM16R3Opnd, mem_nm3s1, sextloadi16, addruimm3s1, II_LH>, + LHMem16_Enc<0b00>; +def LHU16_NM : LoadMemoryNM16<"lhu", GPRNM16R3Opnd, mem_nm3s1, zextloadi16, addruimm3s1, II_LH>, + LHMem16_Enc<0b10>; +def SH16_NM : StoreMemoryNM16<"sh", GPRNM16R3ZOpnd, 
mem_nm3s1, truncstorei16, addruimm3s1, II_SH>, + LHMem16_Enc<0b01>; + +def LB16_NM : LoadMemoryNM16<"lb", GPRNM16R3Opnd, mem_nm2, sextloadi8, addruimm2, II_LB>, + LBMem16_Enc<0b00>; +def LBU16_NM : LoadMemoryNM16<"lbu", GPRNM16R3Opnd, mem_nm2, zextloadi8, addruimm2, II_LB>, + LBMem16_Enc<0b10>; +def SB16_NM : StoreMemoryNM16<"sb", GPRNM16R3ZOpnd, mem_nm2, truncstorei8, addruimm2, II_SB>, + LBMem16_Enc<0b01>; + +def LWGP_NM : LoadMemoryNM<"lw", mem_nm_gp19s2, load, addrgp19s2, II_LW>, + LSMemGP19S2_Enc<0b10>; +def SWGP_NM : StoreMemoryNM<"sw", mem_nm_gp19s2, store, addrgp19s2, II_SW>, + LSMemGP19S2_Enc<0b11>; + +def LBGP_NM : LoadMemoryNM<"lb", mem_nm_gp18, sextloadi8, addrgp18, II_LB>, + LSMemGP18_Enc<0b000>; +def SBGP_NM : StoreMemoryNM<"sb", mem_nm_gp18, truncstorei8, addrgp18, II_SB>, + LSMemGP18_Enc<0b001>; +def LBUGP_NM : LoadMemoryNM<"lbu", mem_nm_gp18, zextloadi8, addrgp18, II_LB>, + LSMemGP18_Enc<0b010>; + +def LHGP_NM : LoadMemoryNM<"lh", mem_nm_gp17s1, sextloadi16, addrgp17s1, II_LH>, + LSMemGP17S1_Enc<0b0000>; +def SHGP_NM : StoreMemoryNM<"sh", mem_nm_gp17s1, truncstorei16, addrgp17s1, II_SH>, + LSMemGP17S1_Enc<0b1010>; +def LHUGP_NM : LoadMemoryNM<"lhu", mem_nm_gp17s1, zextloadi16, addrgp17s1, II_LH>, + LSMemGP17S1_Enc<0b0001>; // TODO: addruimm12 and addrsimm9 need to be looked into again. Currently // addruimm12 needs to come before addrsimm9, because addrsimm9 handles // loads/store that do not fit the offset. If we put the same handling in // addruimm12, this will cause addrsimm9 to never get selected. 
-def LWs9_NM : LoadMemoryNM<"lw", GPR32NMOpnd, mem, load, addrsimm9, II_LW>, InstSize32; -def LW_NM : LoadMemoryNM<"lw", GPR32NMOpnd, mem, load, addruimm12, II_LW>, InstSize32; -def LWXS_NM : LoadMemoryNM<"lwxs", GPR32NMOpnd, mem, load, addrindexedlsl2, II_LW>, InstSize32; -def LWX_NM : LoadMemoryNM<"lwx", GPR32NMOpnd, mem, load, addrindexed, II_LW>, InstSize32; -def SW_NM : StoreMemoryNM<"sw", GPR32NMOpnd, mem, store, addruimm12, II_SW>, InstSize32; -def SWs9_NM : StoreMemoryNM<"sw", GPR32NMOpnd, mem, store, addrsimm9, II_SW>, InstSize32; -def SWXS_NM : StoreMemoryNM<"swxs", GPR32NMOpnd, mem, store, addrindexedlsl2, II_SW>, InstSize32; -def SWX_NM : StoreMemoryNM<"swx", GPR32NMOpnd, mem, store, addrindexed, II_SW>, InstSize32; - -def LHU_NM : LoadMemoryNM<"lhu", GPR32NMOpnd, mem, zextloadi16, addruimm12, II_LHU>, InstSize32; -def LHUs9_NM : LoadMemoryNM<"lhu", GPR32NMOpnd, mem, zextloadi16, addrsimm9, II_LHU>, InstSize32; -def LHUXS_NM : LoadMemoryNM<"lhuxs", GPR32NMOpnd, mem, zextloadi16, addrindexedlsl1, II_LHU>, InstSize32; -def LHUX_NM : LoadMemoryNM<"lhux", GPR32NMOpnd, mem, zextloadi16, addrindexed, II_LHU>, InstSize32; -def LH_NM : LoadMemoryNM<"lh", GPR32NMOpnd, mem, sextloadi16, addruimm12, II_LH>, InstSize32; -def LHs9_NM : LoadMemoryNM<"lh", GPR32NMOpnd, mem, sextloadi16, addrsimm9, II_LH>, InstSize32; -def LHXS_NM : LoadMemoryNM<"lhxs", GPR32NMOpnd, mem, sextloadi16, addrindexedlsl1, II_LH>, InstSize32; -def LHX_NM : LoadMemoryNM<"lhx", GPR32NMOpnd, mem, sextloadi16, addrindexed, II_LH>, InstSize32; -def SH_NM : StoreMemoryNM<"sh", GPR32NMOpnd, mem, truncstorei16, addruimm12, II_SH>, InstSize32; -def SHs9_NM : StoreMemoryNM<"sh", GPR32NMOpnd, mem, truncstorei16, addrsimm9, II_SH>, InstSize32; -def SHXS_NM : StoreMemoryNM<"shxs", GPR32NMOpnd, mem, truncstorei16, addrindexedlsl1, II_SH>, InstSize32; -def SHX_NM : StoreMemoryNM<"shx", GPR32NMOpnd, mem, truncstorei16, addrindexed, II_SH>, InstSize32; - -def LBU_NM : LoadMemoryNM<"lbu", GPR32NMOpnd, mem, 
zextloadi8, addruimm12, II_LBU>, InstSize32; -def LBUs9_NM : LoadMemoryNM<"lbu", GPR32NMOpnd, mem, zextloadi8, addrsimm9, II_LBU>, InstSize32; -def LBUX_NM : LoadMemoryNM<"lbux", GPR32NMOpnd, mem, zextloadi8, addrindexed, II_LBU>, InstSize32; -def LB_NM : LoadMemoryNM<"lb", GPR32NMOpnd, mem, sextloadi8, addruimm12, II_LB>, InstSize32; -def LBs9_NM : LoadMemoryNM<"lb", GPR32NMOpnd, mem, sextloadi8, addrsimm9, II_LB>, InstSize32; -def LBX_NM : LoadMemoryNM<"lbx", GPR32NMOpnd, mem, sextloadi8, addrindexed, II_LB>, InstSize32; -def SB_NM : StoreMemoryNM<"sb", GPR32NMOpnd, mem, truncstorei8, addruimm12, II_SB>, InstSize32; -def SBs9_NM : StoreMemoryNM<"sb", GPR32NMOpnd, mem, truncstorei8, addrsimm9, II_SB>, InstSize32; -def SBX_NM : StoreMemoryNM<"sbx", GPR32NMOpnd, mem, truncstorei8, addrindexed, II_SB>, InstSize32; - -// TODO: Halfword load/store is never selected, this needs to be looked into. -def UALH_NM : UnalignedLoad<"ualh", NMUnalignedLH>; -def UALW_NM : UnalignedLoad<"ualw", NMUnalignedLW>; -def UASH_NM : UnalignedStore<"uash", NMUnalignedSH>; -def UASW_NM : UnalignedStore<"uasw", NMUnalignedSW>; - -def LWGP_NM : LoadMemoryNM<"lw", GPR32NMOpnd, mem>, InstSize32; - -def SWPC_NM : StorePCBase; -def LWPC_NM : LoadPCBase; - -def LWM_NM : LoadMultipleNM<"lwm">; -def UALWM_NM : LoadMultipleNM<"ualwm">; -def SWM_NM : StoreMultipleNM<"swm">; -def UASWM_NM : StoreMultipleNM<"uaswm">; - -def MOVE_NM : InstNM<(outs GPR32NMOpnd:$rt), (ins GPR32NMOpnd:$rs), - "move\t$rt, $rs", []>, InstSize16; -def MOVEP_NM : InstNM<(outs GPR32NMOpnd:$dst1, GPR32NMOpnd:$dst2), - (ins GPR32NMOpnd:$src1, GPR32NMOpnd:$src2), - "movep\t$dst1, $dst2, $src1, $src2", []>, InstSize16; - +def LB_NM : LoadMemoryNM<"lb", mem_nmu12, sextloadi8, addruimm12, II_LB>, + LSMem_U12_Enc<0b0000>; +def SB_NM : StoreMemoryNM<"sb", mem_nmu12, truncstorei8, addruimm12, II_SB>, + LSMem_U12_Enc<0b0001>; +def LH_NM : LoadMemoryNM<"lh", mem_nmu12, sextloadi16, addruimm12, II_LH>, + LSMem_U12_Enc<0b0100>; +def SH_NM : 
StoreMemoryNM<"sh", mem_nmu12, truncstorei16, addruimm12, II_SH>, + LSMem_U12_Enc<0b0101>; +def LW_NM : LoadMemoryNM<"lw", mem_nmu12, load, addruimm12, II_LW>, + LSMem_U12_Enc<0b1000>; +def SW_NM : StoreMemoryNM<"sw", mem_nmu12, store, addruimm12, II_SW>, + LSMem_U12_Enc<0b1001>; +def LBU_NM : LoadMemoryNM<"lbu", mem_nmu12, zextloadi8, addruimm12, II_LBU>, + LSMem_U12_Enc<0b0010>; +def LHU_NM : LoadMemoryNM<"lhu", mem_nmu12, zextloadi16, addruimm12, II_LHU>, + LSMem_U12_Enc<0b0110>; + +def LBs9_NM : LoadMemoryNM<"lb", mem_nms9, sextloadi8, addrsimm9, II_LB>, + LSMem_S9_Enc<0b000, 0b0000>; +def SBs9_NM : StoreMemoryNM<"sb", mem_nms9, truncstorei8, addrsimm9, II_SB>, + LSMem_S9_Enc<0b000, 0b0001>; +def LHs9_NM : LoadMemoryNM<"lh", mem_nms9, sextloadi16, addrsimm9, II_LH>, + LSMem_S9_Enc<0b000, 0b0100>; +def SHs9_NM : StoreMemoryNM<"sh", mem_nms9, truncstorei16, addrsimm9, II_SH>, + LSMem_S9_Enc<0b000, 0b0101>; +def LWs9_NM : LoadMemoryNM<"lw", mem_nms9, load, addrsimm9, II_LW>, + LSMem_S9_Enc<0b000, 0b1000>; +def SWs9_NM : StoreMemoryNM<"sw", mem_nms9, store, addrsimm9, II_SW>, + LSMem_S9_Enc<0b000, 0b1001>; +def LBUs9_NM : LoadMemoryNM<"lbu", mem_nms9, zextloadi8, addrsimm9, II_LBU>, + LSMem_S9_Enc<0b000, 0b0010>; +def LHUs9_NM : LoadMemoryNM<"lhu", mem_nms9, zextloadi16, addrsimm9, II_LHU>, + LSMem_S9_Enc<0b000, 0b0110>; + +def LBX_NM : LoadMemoryX<"lbx", mem_nmrx, sextloadi8, addrindexed, II_LB>, + LSMemX_Enc<0, 0b0000>; +def SBX_NM : StoreMemoryX<"sbx", mem_nmrx, truncstorei8, addrindexed, II_SB>, + LSMemX_Enc<0, 0b0001>; +def LHX_NM : LoadMemoryX<"lhx", mem_nmrx, sextloadi16, addrindexed, II_LH>, + LSMemX_Enc<0, 0b0100>; +def SHX_NM : StoreMemoryX<"shx", mem_nmrx, truncstorei16, addrindexed, II_SH>, + LSMemX_Enc<0, 0b0101>; +def LWX_NM : LoadMemoryX<"lwx", mem_nmrx, load, addrindexed, II_LW>, + LSMemX_Enc<0, 0b1000>; +def SWX_NM : StoreMemoryX<"swx", mem_nmrx, store, addrindexed, II_SW>, + LSMemX_Enc<0, 0b1001>; +def LBUX_NM : LoadMemoryX<"lbux", mem_nmrx, 
zextloadi8, addrindexed, II_LBU>, + LSMemX_Enc<0, 0b0010>; +def LHUX_NM : LoadMemoryX<"lhux", mem_nmrx, zextloadi16, addrindexed, II_LHU>, + LSMemX_Enc<0, 0b0110>; + +def LHXS_NM : LoadMemoryX<"lhxs", mem_nmrx, sextloadi16, addrindexedlsl1, II_LH>, + LSMemX_Enc<0b1, 0b0100>; +def SHXS_NM : StoreMemoryX<"shxs", mem_nmrx, truncstorei16, addrindexedlsl1, II_SH>, + LSMemX_Enc<0b1, 0b0101>; +def LHUXS_NM : LoadMemoryX<"lhuxs", mem_nmrx, zextloadi16, addrindexedlsl1, II_LHU>, + LSMemX_Enc<0b1, 0b0110>; +def LWXS_NM : LoadMemoryX<"lwxs", mem_nmrx, load, addrindexedlsl2, II_LW>, + LSMemX_Enc<0b1, 0b1000>; +def SWXS_NM : StoreMemoryX<"swxs", mem_nmrx, store, addrindexedlsl2, II_SW>, + LSMemX_Enc<0b1, 0b1001>; + +def UALH_NM : UnalignedLoad<"ualh", NMUnalignedLH>, LSMem_S9_Enc<0b001, 0b0100>; +def UASH_NM : UnalignedStore<"uash", NMUnalignedSH>, LSMem_S9_Enc<0b001, 0b0101>; +def LWM_NM : LoadMultipleNM<"lwm">, LSMem_S9_Mult_Enc<0b0100>; +def UALWM_NM : LoadMultipleNM<"ualwm">, LSMem_S9_Mult_Enc<0b0101>; +def SWM_NM : StoreMultipleNM<"swm">, LSMem_S9_Mult_Enc<0b1100>; +def UASWM_NM : StoreMultipleNM<"uaswm">, LSMem_S9_Mult_Enc<0b1101>; + +def UALW_NM : UnalignedLoad<"ualw", NMUnalignedLW>, LSMem_S9_Enc<0b101, 0b0010>; +def UASW_NM : UnalignedStore<"uasw", NMUnalignedSW>, LSMem_S9_Enc<0b101, 0b0011>; + +//def CACHE_NM : LSMem_S9_Enc<0b001, 0b0111>; + +def LW4x4_NM : LoadMemoryNM16<"lw", GPRNM4Opnd, mem_nm4s2, load, addruimm4s2, II_LW>, + LSMem4x4_Enc<0b01101>; +def SW4x4_NM : StoreMemoryNM4x4<"sw", GPRNM4Opnd, mem_nm4s2, store, addruimm4s2, II_SW>, + LSMem4x4_Enc<0b11101>; + +def SWPC_NM : StorePCBase, Op48_Addr_Enc<0b01111>; +def LWPC_NM : LoadPCBase, Op48_Addr_Enc<0b01011>; + +let isMoveReg = 1 in { +def MOVE16_NM : InstNM<(outs GPRNM32NZOpnd:$rt), (ins GPRNM32Opnd:$rs), + "move\t$rt, $rs", []>, MOVE_Enc; + +def MOVEP_NM : InstNM<(outs GPRNM2R1Opnd:$dst1, GPRNM2R2Opnd:$dst2), + (ins GPRNM4ZOpnd:$src1, GPRNM4ZOpnd:$src2), + "movep\t$dst1, $dst2, $src1, $src2", []>, + 
MOVEP_Enc<0b10111>; + +def MOVEPREV_NM : InstNM<(outs GPRNM4Opnd:$dst1, GPRNM4Opnd:$dst2), + (ins GPRNM2R1Opnd:$src1, GPRNM2R2Opnd:$src2), + "movep\t$dst1, $dst2, $src1, $src2", []>, + MOVEPREV_Enc<0b11111>; } // These patterns help generate better loads/stores when the immediate doesn't @@ -719,42 +2359,42 @@ def MOVEP_NM : InstNM<(outs GPR32NMOpnd:$dst1, GPR32NMOpnd:$dst2), // instructions. // let AddedComplexity = 3 in { -def : NMPat<(i32 (extloadi8 (add GPR32NM:$rs, immFitsAddiu32:$imm))), - (LBU_NM (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(i32 (zextloadi8 (add GPR32NM:$rs, immFitsAddiu32:$imm))), - (LBU_NM (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(i32 (sextloadi8 (add GPR32NM:$rs, immFitsAddiu32:$imm))), - (LB_NM (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(i32 (extloadi16 (add GPR32NM:$rs, immFitsAddiu32:$imm))), - (LHU_NM (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(i32 (zextloadi16 (add GPR32NM:$rs, immFitsAddiu32:$imm))), - (LHU_NM (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(i32 (sextloadi16 (add GPR32NM:$rs, immFitsAddiu32:$imm))), - (LH_NM (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(i32 (load (add GPR32NM:$rs, immFitsAddiu32:$imm))), - (LW_NM (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(truncstorei8 GPR32NM:$rt, (add GPR32NM:$rs, immFitsAddiu32:$imm)), - (SB_NM GPR32NM:$rt, (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(truncstorei16 GPR32NM:$rt, (add GPR32NM:$rs, immFitsAddiu32:$imm)), - (SH_NM GPR32NM:$rt, (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; -def : NMPat<(store GPR32NM:$rt, (add GPR32NM:$rs, immFitsAddiu32:$imm)), - (SW_NM GPR32NM:$rt, (ADDiu_NM GPR32NM:$rs, imm:$imm), 0)>; +def : NMPat<(i32 (extloadi8 (add GPRNM32:$rs, immFitsAddiu32:$imm))), + (LBU_NM (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(i32 (zextloadi8 (add GPRNM32:$rs, immFitsAddiu32:$imm))), + (LBU_NM (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(i32 (sextloadi8 (add GPRNM32:$rs, immFitsAddiu32:$imm))), 
+ (LB_NM (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(i32 (extloadi16 (add GPRNM32:$rs, immFitsAddiu32:$imm))), + (LHU_NM (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(i32 (zextloadi16 (add GPRNM32:$rs, immFitsAddiu32:$imm))), + (LHU_NM (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(i32 (sextloadi16 (add GPRNM32:$rs, immFitsAddiu32:$imm))), + (LH_NM (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(i32 (load (add GPRNM32:$rs, immFitsAddiu32:$imm))), + (LW_NM (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(truncstorei8 GPRNM32:$rt, (add GPRNM32:$rs, immFitsAddiu32:$imm)), + (SB_NM GPRNM32:$rt, (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(truncstorei16 GPRNM32:$rt, (add GPRNM32:$rs, immFitsAddiu32:$imm)), + (SH_NM GPRNM32:$rt, (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; +def : NMPat<(store GPRNM32:$rt, (add GPRNM32:$rs, immFitsAddiu32:$imm)), + (SW_NM GPRNM32:$rt, (ADDIU_NM GPRNM32:$rs, imm:$imm), 0)>; } // AddedComplexity = 3 // Catching out-of-range immediate loads/stores. 
-def : NMPat<(i32 (extloadi8 GPR32NM:$rs)), (LBU_NM GPR32NM:$rs, 0)>; -def : NMPat<(i32 (zextloadi8 GPR32NM:$rs)), (LBU_NM GPR32NM:$rs, 0)>; -def : NMPat<(i32 (sextloadi8 GPR32NM:$rs)), (LB_NM GPR32NM:$rs, 0)>; -def : NMPat<(i32 (extloadi16 GPR32NM:$rs)), (LHU_NM GPR32NM:$rs, 0)>; -def : NMPat<(i32 (zextloadi16 GPR32NM:$rs)), (LHU_NM GPR32NM:$rs, 0)>; -def : NMPat<(i32 (sextloadi16 GPR32NM:$rs)), (LH_NM GPR32NM:$rs, 0)>; -def : NMPat<(i32 (load GPR32NM:$rs)), (LW_NM GPR32NM:$rs, 0)>; -def : NMPat<(truncstorei8 GPR32NM:$rt, GPR32NM:$rs), - (SB_NM GPR32NM:$rt, GPR32NM:$rs, 0)>; -def : NMPat<(truncstorei16 GPR32NM:$rt, GPR32NM:$rs), - (SH_NM GPR32NM:$rt, GPR32NM:$rs, 0)>; -def : NMPat<(store GPR32NM:$rt, GPR32NM:$rs), - (SW_NM GPR32NM:$rt, GPR32NM:$rs, 0)>; +def : NMPat<(i32 (extloadi8 GPRNM32:$rs)), (LBU_NM GPRNM32:$rs, 0)>; +def : NMPat<(i32 (zextloadi8 GPRNM32:$rs)), (LBU_NM GPRNM32:$rs, 0)>; +def : NMPat<(i32 (sextloadi8 GPRNM32:$rs)), (LB_NM GPRNM32:$rs, 0)>; +def : NMPat<(i32 (extloadi16 GPRNM32:$rs)), (LHU_NM GPRNM32:$rs, 0)>; +def : NMPat<(i32 (zextloadi16 GPRNM32:$rs)), (LHU_NM GPRNM32:$rs, 0)>; +def : NMPat<(i32 (sextloadi16 GPRNM32:$rs)), (LH_NM GPRNM32:$rs, 0)>; +def : NMPat<(i32 (load GPRNM32:$rs)), (LW_NM GPRNM32:$rs, 0)>; +def : NMPat<(truncstorei8 GPRNM32:$rt, GPRNM32:$rs), + (SB_NM GPRNM32:$rt, GPRNM32:$rs, 0)>; +def : NMPat<(truncstorei16 GPRNM32:$rt, GPRNM32:$rs), + (SH_NM GPRNM32:$rt, GPRNM32:$rs, 0)>; +def : NMPat<(store GPRNM32:$rt, GPRNM32:$rs), + (SW_NM GPRNM32:$rt, GPRNM32:$rs, 0)>; // Any-extending loads def : NMPat<(i32 (extloadi8 addruimm12:$addr)), (LBU_NM addruimm12:$addr)>; @@ -767,20 +2407,20 @@ def : NMPat<(i32 (extloadi16 addrindexedlsl1:$addr)), def : NMPat<(i32 (extloadi16 addrindexed:$addr)), (LHUX_NM addrindexed:$addr)>; // Catching out-of-range immediate unaligned loads/stores. 
-def : NMPat<(i32 (NMUnalignedLW GPR32NM:$rs, GPR32NM:$src)), - (UALW_NM GPR32NM:$rs, 0, GPR32NM:$src)>; -def : NMPat<(NMUnalignedSW GPR32NM:$rt, GPR32NM:$rs), - (UASW_NM GPR32NM:$rt, GPR32NM:$rs, 0)>; -def : NMPat<(i32 (NMUnalignedLH GPR32NM:$rs, GPR32NM:$src)), - (UALH_NM GPR32NM:$rs, 0, GPR32NM:$src)>; -def : NMPat<(NMUnalignedSH GPR32NM:$rt, GPR32NM:$rs), - (UASH_NM GPR32NM:$rt, GPR32NM:$rs, 0)>; +def : NMPat<(i32 (NMUnalignedLW GPRNM32:$rs, GPRNM32:$src)), + (UALW_NM GPRNM32:$rs, 0, GPRNM32:$src)>; +def : NMPat<(NMUnalignedSW GPRNM32:$rt, GPRNM32:$rs), + (UASW_NM GPRNM32:$rt, GPRNM32:$rs, 0)>; +def : NMPat<(i32 (NMUnalignedLH GPRNM32:$rs, GPRNM32:$src)), + (UALH_NM GPRNM32:$rs, 0, GPRNM32:$src)>; +def : NMPat<(NMUnalignedSH GPRNM32:$rt, GPRNM32:$rs), + (UASH_NM GPRNM32:$rt, GPRNM32:$rs, 0)>; // GP-relative load. def : NMPat<(i32 (load (add GP_NM, (MipsGPRel tglobaladdr:$in)))), (LWGP_NM GP_NM, tglobaladdr:$in)>; def : NMPat<(add GP_NM, (MipsGPRel tglobaladdr:$in)), - (ADDiu_NM GP_NM, tglobaladdr:$in)>; + (ADDIU_NM GP_NM, tglobaladdr:$in)>; // GP-relative address @@ -792,11 +2432,10 @@ def alignedglobaladdr : PatFrag<(ops), }]>; def : NMPat<(MipsFullAddr alignedglobaladdr:$in), - (LA_NM alignedglobaladdr:$in)>; + (ADDIUPC48_NM alignedglobaladdr:$in)>; def : NMPat<(MipsFullAddr tglobaladdr:$in), - (LAGPB_NM tglobaladdr:$in)>; - + (ADDIUPC48_NM tglobaladdr:$in)>; // Atomic load patterns. 
def : NMPat<(atomic_load_8 addr:$a), (LB_NM addr:$a)>; @@ -808,18 +2447,28 @@ def : NMPat<(atomic_store_8 addr:$a, GPR32:$v), (SB_NM GPR32:$v, addr:$a)>; def : NMPat<(atomic_store_16 addr:$a, GPR32:$v), (SH_NM GPR32:$v, addr:$a)>; def : NMPat<(atomic_store_32 addr:$a, GPR32:$v), (SW_NM GPR32:$v, addr:$a)>; +class CondMove_Enc isel> : _Pool_P_CMOVE +{ + bits<5> rd; + bits<5> rs; + bits<5> rt; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{15...11} = rd; +} + // Conditional Moves -def MOVZ_NM : InstNM<(outs GPR32NMOpnd:$rd), - (ins GPR32NMOpnd:$rs, GPR32NMOpnd:$rt, GPR32NMOpnd:$F), +def MOVZ_NM : InstNM<(outs GPRNM32Opnd:$rd), + (ins GPRNM32Opnd:$rs, GPRNM32Opnd:$rt, GPRNM32Opnd:$F), "movz\t$rd, $rs, $rt", - []>, InstSize32 { + []>, CondMove_Enc<0b0> { let Constraints = "$F = $rd"; } -def MOVN_NM : InstNM<(outs GPR32NMOpnd:$rd), - (ins GPR32NMOpnd:$rs, GPR32NMOpnd:$rt, GPR32NMOpnd:$F), +def MOVN_NM : InstNM<(outs GPRNM32Opnd:$rd), + (ins GPRNM32Opnd:$rs, GPRNM32Opnd:$rt, GPRNM32Opnd:$F), "movn\t$rd, $rs, $rt", - []>, InstSize32 { + []>, CondMove_Enc<0b1> { let Constraints = "$F = $rd"; } @@ -856,8 +2505,124 @@ multiclass MovzPats0NM; -defm : MovzPats1, ISA_NANOMIPS; -defm : MovzPats2NM; -defm : MovnPats, ISA_NANOMIPS; +defm : MovzPats1, ISA_NANOMIPS; +defm : MovzPats2NM; +defm : MovnPats, ISA_NANOMIPS; + +class ToggleInterrupts_Enc isel> : _Pool_POOL32Axf_5_group1 { + bits<5> rt; + let Inst{25...21} = rt; +} + +def DI_NM : InstNM<(outs GPRNM32Opnd:$rt), (ins), + "di\t$rt", []>, ToggleInterrupts_Enc<0b00011>; + +def EI_NM : InstNM<(outs GPRNM32Opnd:$rt), (ins), + "ei\t$rt", []>, ToggleInterrupts_Enc<0b01011>; + +// Instruction with no operands +class MnemonicOnly: + InstNM<(outs), (ins), opstr, []>; + +def ERET_NM : MnemonicOnly<"eret">, _Pool_POOL32Axf_5_group3<0b11001> { + let Inst{16} = 0b0; +} + +def ERETNC_NM : MnemonicOnly<"eretnc">, _Pool_POOL32Axf_5_group3<0b11001> { + let Inst{16} = 0b1; +} + +def DERET_NM : MnemonicOnly<"deret">, 
_Pool_POOL32Axf_5_group3<0b10001>; + +def NOP_NM : MipsInstAlias<"nop", (SLL_NM ZERO_NM, ZERO_NM, 0)>; + +def EHB_NM : MipsInstAlias<"ehb", (SLL_NM ZERO_NM, ZERO_NM, 3)>; + +def PAUSE_NM : MipsInstAlias<"pause", (SLL_NM ZERO_NM, ZERO_NM, 5)>; + +def SYNC_NM : MipsInstAlias<"sync", (SLL_NM ZERO_NM, ZERO_NM, 6)>; + +class IntExcpFlow_Enc isel> : _Pool_P_RI +{ + bits<19> imm; + let Inst{18...0} = imm; +} + +class IntExcpFlow16_Enc isel> : _Pool_P16_RI +{ + bits<3> imm; + let Inst{2...0} = imm; +} + +def SIGRIE_NM : InstNM<(outs), (ins uimm19_nm:$imm), + "sigrie\t$imm", []>, IntExcpFlow_Enc<0b00>; + +def BREAK_NM : InstNM<(outs), (ins uimm19_nm:$imm), + "break\t$imm", []>, IntExcpFlow_Enc<0b10>; + +def SDBBP_NM : InstNM<(outs), (ins uimm19_nm:$imm), + "sdbbp\t$imm", []>, IntExcpFlow_Enc<0b11>; + +def SYSCALL_NM : InstNM<(outs), (ins uimm18_nm:$imm), + "syscall\t$imm", []>, IntExcpFlow_Enc<0b01> { + let Inst{18} = 0b0; +} + +def BREAK16_NM : InstNM<(outs), (ins uimm3_nm:$imm), + "break\t$imm", []>, IntExcpFlow16_Enc<0b10>; + +def SDBBP16_NM : InstNM<(outs), (ins uimm3_nm:$imm), + "sdbbp\t$imm", []>, IntExcpFlow16_Enc<0b11>; + +def SYSCALL16_NM : InstNM<(outs), (ins uimm2_nm:$imm), + "syscall\t$imm", []>, IntExcpFlow16_Enc<0b01> { + let Inst{2} = 0b0; +} + +class ShadowRW_Enc isel>: _Pool_POOL32Axf_5_group3 { + bits<5> rs; + bits<5> rt; + let Inst{25...21} = rt; + let Inst{20...16} = rs; +} + +def WRPGPR_NM : InstNM<(outs GPRNM32Opnd:$rt), (ins GPRNM32Opnd:$rs), + "wrpgpr\t$rt, $rs", []>, ShadowRW_Enc<0b11000>; + +def RDPGPR_NM : InstNM<(outs GPRNM32Opnd:$rt), (ins GPRNM32Opnd:$rs), + "rdpgpr\t$rt, $rs", []>, ShadowRW_Enc<0b10000>; + +def EXTW_NM : InstNM<(outs GPRNM32Opnd:$rd), + (ins GPRNM32Opnd:$rs, GPRNM32Opnd:$rt, uimm5_nm:$shift), + "extw\t$rd, $rs, $rt, $shift", []>, + _Pool_POOL32A7<0b011> { + bits<5> rd; + bits<5> rs; + bits<5> rt; + bits<5> shift; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{15...11} = rd; + let Inst{10...6} = shift; +} + +def 
ROTX_NM : InstNM<(outs GPRNM32Opnd:$rt), + (ins GPRNM32Opnd:$rs, uimm5s1_nm:$shiftx, + uimm5_nm:$shift, uimm1_nm:$stripe), + "rotx\t$rt, $rs, $shift, $shiftx, $stripe", []>, + _Pool_P_U12<0b1101> { + bits<5> rt; + bits<5> rs; + bits<5> shift; + bits<5> shiftx; + bits<1> stripe; + let Inst{25...21} = rt; + let Inst{20...16} = rs; + let Inst{10...7} = shiftx{4...1}; + let Inst{4...0} = shift; + let Inst{6} = stripe; + let Inst{11} = 0; + let Inst{5} = 0; +} diff --git a/llvm/lib/Target/Mips/NanoMipsLoadStoreOptimizer.cpp b/llvm/lib/Target/Mips/NanoMipsLoadStoreOptimizer.cpp index e644f9f638dbe..d3bc47429fa7e 100644 --- a/llvm/lib/Target/Mips/NanoMipsLoadStoreOptimizer.cpp +++ b/llvm/lib/Target/Mips/NanoMipsLoadStoreOptimizer.cpp @@ -45,7 +45,7 @@ struct NMLoadStoreOpt : public MachineFunctionPass { {Mips::S1_NM, 4}, {Mips::S2_NM, 5}, {Mips::S3_NM, 6}, {Mips::S4_NM, 7}, {Mips::S5_NM, 8}, {Mips::S6_NM, 9}, {Mips::S7_NM, 10}, }; - MCRegisterClass RC = MipsMCRegisterClasses[Mips::GPR32NMRegClassID]; + MCRegisterClass RC = MipsMCRegisterClasses[Mips::GPRNM32RegClassID]; NMLoadStoreOpt() : MachineFunctionPass(ID) {} StringRef getPassName() const override { return NM_LOAD_STORE_OPT_NAME; } @@ -86,7 +86,7 @@ bool NMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) { bool NMLoadStoreOpt::isStackPointerAdjustment(MachineInstr &MI, bool IsRestore) { - if (MI.getOpcode() != Mips::ADDiu_NM) + if (MI.getOpcode() != Mips::ADDIU_NM) return false; Register DstReg = MI.getOperand(0).getReg(); Register SrcReg = MI.getOperand(1).getReg(); @@ -332,9 +332,9 @@ bool NMLoadStoreOpt::generateSaveOrRestore(MachineBasicBlock &MBB, // because it needs to be the last instruction in the basic block. If // possible, it will be generated with NewStackOffset. unsigned Opcode = IsRestore - ? ((Return && !NewStackOffset) ? Mips::RESTOREJRC_NM + ? ((Return && !NewStackOffset) ? 
Mips::RESTOREJRC16_NM : Mips::RESTORE_NM) - : Mips::SAVE_NM; + : Mips::SAVE16_NM; auto InsertBefore = std::next(MBBIter((Return && !NewStackOffset) ? Return : AdjustStack)); auto DL = Return ? Return->getDebugLoc() : AdjustStack->getDebugLoc(); @@ -371,7 +371,7 @@ bool NMLoadStoreOpt::generateSaveOrRestore(MachineBasicBlock &MBB, // In case return is also consumed, we should put restore.jrc after // return, to make sure it is very last instruction. InsertBefore = std::next(MBBIter(Return)); - BuildMI(MBB, InsertBefore, DL, TII->get(Mips::RESTOREJRC_NM)) + BuildMI(MBB, InsertBefore, DL, TII->get(Mips::RESTOREJRC16_NM)) .addImm(NewStackOffset); MBB.erase(Return); } else { @@ -383,7 +383,7 @@ bool NMLoadStoreOpt::generateSaveOrRestore(MachineBasicBlock &MBB, // In case of save, the offset is subtracted from SP. if (!IsRestore) NewStackOffset = -NewStackOffset; - BuildMI(MBB, InsertBefore, DL, TII->get(Mips::ADDiu_NM), Mips::SP_NM) + BuildMI(MBB, InsertBefore, DL, TII->get(Mips::ADDIUNEG_NM), Mips::SP_NM) .addReg(Mips::SP_NM) .addImm(NewStackOffset); } @@ -407,7 +407,8 @@ bool NMLoadStoreOpt::isValidLoadStore(MachineInstr &MI, bool IsLoad) { unsigned Opcode = MI.getOpcode(); if (IsLoad) { // TODO: Handle unaligned loads and stores. - if (Opcode == Mips::LW_NM || Opcode == Mips::LWs9_NM) { + if (Opcode == Mips::LW_NM || Opcode == Mips::LWs9_NM + || Opcode == Mips::LW16_NM || Opcode == Mips::LW4x4_NM) { // TODO: Rt and Rs can be equal, but only if that is the last load of // the sequence. 
Register Rt = MI.getOperand(0).getReg(); @@ -416,7 +417,8 @@ bool NMLoadStoreOpt::isValidLoadStore(MachineInstr &MI, bool IsLoad) { return true; } } else { - if (Opcode == Mips::SW_NM || Opcode == Mips::SWs9_NM) + if (Opcode == Mips::SW_NM || Opcode == Mips::SWs9_NM + || Opcode == Mips::SW16_NM || Opcode == Mips::SW4x4_NM) return true; } return false; @@ -574,7 +576,14 @@ static bool isValidUse(MachineInstr *MI, Register Reg) { case Mips::SWs9_NM: case Mips::LW_NM: case Mips::LWs9_NM: - case Mips::ADDiu_NM: + case Mips::ADDIU_NM: + case Mips::ADDIUNEG_NM: + case Mips::LH16_NM: + case Mips::LHU16_NM: + case Mips::SH16_NM: + case Mips::LB16_NM: + case Mips::LBU16_NM: + case Mips::SB16_NM: return MI->getOperand(1).getReg() == Reg; default: return false; @@ -589,6 +598,18 @@ static bool isLoadStoreShortChar(MachineInstr *MI) { case Mips::SH_NM: case Mips::LH_NM: case Mips::LHU_NM: + case Mips::LH16_NM: + case Mips::LHU16_NM: + case Mips::SH16_NM: + case Mips::LHGP_NM: + case Mips::LHUGP_NM: + case Mips::SHGP_NM: + case Mips::LB16_NM: + case Mips::LBU16_NM: + case Mips::SB16_NM: + case Mips::LBGP_NM: + case Mips::LBUGP_NM: + case Mips::SBGP_NM: return true; default: return false; @@ -620,7 +641,8 @@ static bool isLoadStoreShortChar(MachineInstr *MI) { bool NMLoadStoreOpt::generatePCRelative(MachineBasicBlock &MBB) { SmallVector> Candidates; for (auto &MI : MBB) { - if (MI.getOpcode() == Mips::LA_NM || MI.getOpcode() == Mips::LAGPB_NM) { + if (MI.getOpcode() == Mips::ADDIUPC48_NM || + MI.getOpcode() == Mips::ADDIUGP48_NM) { bool IsRedefined = false; bool IsUsedByMultipleMIs = false; MachineInstr *FirstUse = nullptr; @@ -683,13 +705,13 @@ bool NMLoadStoreOpt::generatePCRelative(MachineBasicBlock &MBB) { for (auto Pair : Candidates) { auto *LA = Pair.first; auto *Use = Pair.second; - auto &Address = LA->getOperand(1); + auto &Address = LA->getOperand((LA->getOpcode() == Mips::ADDIUPC48_NM)? 
1 : 2); auto Dst = Use->getOperand(0).getReg(); int64_t Offset = Use->getOperand(2).getImm() + Address.getOffset(); assert(Address.isGlobal()); - if (Use->getOpcode() == Mips::ADDiu_NM) { + if (Use->getOpcode() == Mips::ADDIU_NM) { // Move LA to its use to avoid extending the lifetime of Dst MBB.insert(MBBIter(Use), MBB.remove(LA)); diff --git a/llvm/lib/Target/Mips/NanoMipsMoveOptimizer.cpp b/llvm/lib/Target/Mips/NanoMipsMoveOptimizer.cpp index 49fcad87a2ae0..d6b676f94c88b 100644 --- a/llvm/lib/Target/Mips/NanoMipsMoveOptimizer.cpp +++ b/llvm/lib/Target/Mips/NanoMipsMoveOptimizer.cpp @@ -202,7 +202,7 @@ bool NMMoveOpt::generateMoveP(MachineBasicBlock &MBB) { }; for (auto &MI : MBB) { - if (MI.getOpcode() == Mips::MOVE_NM) { + if (MI.getOpcode() == Mips::MOVE16_NM) { if (PrevMove) { bool Swap; if (areMovePRevCompatibleMoves(PrevMove, &MI, Swap) || @@ -240,7 +240,13 @@ bool NMMoveOpt::generateMoveP(MachineBasicBlock &MBB) { bool Swap = std::get<2>(Tuple); if (Swap) std::swap(Move1, Move2); - BuildMI(MBB, InsertBefore, DL, TII->get(Mips::MOVEP_NM)) + bool Rev= (isInSet(GPR4, Move1->getOperand(0).getReg()) && + isInSet(GPR2REG1, Move1->getOperand(1).getReg()) && + isInSet(GPR4, Move2->getOperand(0).getReg()) && + isInSet(GPR2REG2, Move2->getOperand(1).getReg())); + + BuildMI(MBB, InsertBefore, DL, + TII->get(Rev ? Mips::MOVEPREV_NM : Mips::MOVEP_NM)) .addReg(Move1->getOperand(0).getReg(), RegState::Define) .addReg(Move2->getOperand(0).getReg(), RegState::Define) .addReg(Move1->getOperand(1).getReg()) @@ -303,7 +309,7 @@ bool NMMoveOpt::generateMoveBalc(MachineBasicBlock &MBB) { if (MI2.isCFIInstruction() || MI2.isDebugInstr()) continue; - if (MI2.getOpcode() == Mips::MOVE_NM && + if (MI2.getOpcode() == Mips::MOVE16_NM && // Make sure $rt is used only by BALC. CandidateDstRegs.contains(MI2.getOperand(0).getReg()) && // Make sure $rs is not redefined between MOVE and BALC. 
diff --git a/llvm/test/CodeGen/Mips/nanomips/addiu.ll b/llvm/test/CodeGen/Mips/nanomips/addiu.ll index 1e57099200181..0c1780cbff66b 100644 --- a/llvm/test/CodeGen/Mips/nanomips/addiu.ll +++ b/llvm/test/CodeGen/Mips/nanomips/addiu.ll @@ -1,56 +1,56 @@ ; RUN: llc -mtriple=nanomips -asm-show-inst -verify-machineinstrs < %s | FileCheck %s define i32 @test_addiu0(i32 %a) { -; CHECK: addiu $a0, $a0, 1 +; CHECK: addiu $a0, $a0, 1 # rt +# appropriately for each branch instruction +# +# RUN: llvm-mc %s -triple=nanomips-elf -show-encoding -show-inst 2> %t0 | FileCheck %s + # CHECK: .text + .set noat +# .linkrelax + # reg3-reg3 arithmetic, 16-bit + addu $a1, $s2, $a3 # CHECK: addu $a1, $s2, $a3 # encoding: [0xaa,0xb3] + # CHECK-NEXT: #