diff --git a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index f892f7b95178..f1fcf2f036c0 100644
--- a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -770,6 +770,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
   bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
   bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth, unsigned *DwordRegIndex);
   void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
+  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands, bool IsGdsHardcoded);
 
 public:
   enum AMDGPUMatchResultTy {
@@ -888,7 +889,8 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
   OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
 
   void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
-  void cvtDS(MCInst &Inst, const OperandVector &Operands);
+  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
+  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
   void cvtExp(MCInst &Inst, const OperandVector &Operands);
 
   bool parseCnt(int64_t &IntVal);
@@ -2350,9 +2352,8 @@ void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
 }
 
-void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
+void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands, bool IsGdsHardcoded) {
   std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
-  bool GDSOnly = false;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -2364,7 +2365,7 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
     }
 
     if (Op.isToken() && Op.getToken() == "gds") {
-      GDSOnly = true;
+      IsGdsHardcoded = true;
       continue;
     }
 
@@ -2373,7 +2374,7 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
   }
 
   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
-  if (!GDSOnly) {
+  if (!IsGdsHardcoded) {
     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
   }
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
diff --git a/lib/Target/AMDGPU/DSInstructions.td b/lib/Target/AMDGPU/DSInstructions.td
index a077001df6bd..7f7aa18c6719 100644
--- a/lib/Target/AMDGPU/DSInstructions.td
+++ b/lib/Target/AMDGPU/DSInstructions.td
@@ -174,6 +174,7 @@ class DS_1A_RET_GDS <string opName> : DS_Pseudo<opName,
   let has_data1 = 0;
   let has_gds = 0;
   let gdsValue = 1;
+  let AsmMatchConverter = "cvtDSGds";
 }
 
 class DS_0A_RET <string opName> : DS_Pseudo<opName,
diff --git a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
--- a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -335,14 +335,14 @@
 }
 
 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
-  assert(OpNo <= Desc.NumOperands);
+  assert(OpNo < Desc.NumOperands);
   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
   return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
          OpType <= AMDGPU::OPERAND_SRC_LAST;
 }
 
 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
-  assert(OpNo <= Desc.NumOperands);
+  assert(OpNo < Desc.NumOperands);
   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
   switch (OpType) {
   case AMDGPU::OPERAND_REG_IMM_FP32:
@@ -358,7 +358,7 @@ bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
 }
 
 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
-  assert(OpNo <= Desc.NumOperands);
+  assert(OpNo < Desc.NumOperands);
   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
   return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
          OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
@@ -402,7 +402,7 @@ unsigned getRegBitWidth(const MCRegisterClass &RC) {
 
 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                            unsigned OpNo) {
-  assert(OpNo <= Desc.NumOperands);
+  assert(OpNo < Desc.NumOperands);
   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
   return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
 }
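
Note on the parser change: cvtDS and cvtDSGds become thin inline wrappers over one shared converter, the same wrapper-plus-impl shape the file already uses for cvtMubufImpl. The sketch below is a standalone, simplified model of that shape; every type and helper here is an illustrative stand-in, not LLVM's actual MCInst/OperandVector API. The point it demonstrates is that an explicit "gds" token in the source and a GDS-hardcoded opcode both suppress the separate gds immediate operand.

// Standalone sketch of the converter pattern in the patch above.
// All names and types are simplified stand-ins for illustration only.
#include <cstdint>
#include <map>
#include <string>
#include <vector>

enum class ImmTy { Offset, GDS };

struct ParsedOperand {
  bool IsToken = false;
  std::string Token;        // valid when IsToken is true
  ImmTy Ty = ImmTy::Offset; // which optional immediate this is
  int64_t Imm = 0;
};

using Inst = std::vector<int64_t>; // stand-in for an MCInst operand list

// Mirrors the role of addOptionalImmOperand: emit the user's value if the
// operand was written, otherwise emit the default 0.
static void addOptionalImm(Inst &I, const std::vector<ParsedOperand> &Ops,
                           const std::map<ImmTy, std::size_t> &Idx, ImmTy Ty) {
  auto It = Idx.find(Ty);
  I.push_back(It == Idx.end() ? 0 : Ops[It->second].Imm);
}

static void cvtDSImpl(Inst &I, const std::vector<ParsedOperand> &Ops,
                      bool IsGdsHardcoded) {
  std::map<ImmTy, std::size_t> OptionalIdx;
  for (std::size_t i = 0; i != Ops.size(); ++i) {
    const ParsedOperand &Op = Ops[i];
    // A literal "gds" token behaves like a hardcoded-gds opcode: note it
    // and drop it rather than recording it as an optional immediate.
    if (Op.IsToken && Op.Token == "gds") {
      IsGdsHardcoded = true;
      continue;
    }
    OptionalIdx[Op.Ty] = i;
  }
  addOptionalImm(I, Ops, OptionalIdx, ImmTy::Offset);
  // Opcodes that imply GDS in their encoding get no separate gds operand.
  if (!IsGdsHardcoded)
    addOptionalImm(I, Ops, OptionalIdx, ImmTy::GDS);
}

// The two entry points only choose the default, as cvtDS/cvtDSGds do; the
// .td change wires GDS-only opcodes to the second one via AsmMatchConverter.
static void cvtDS(Inst &I, const std::vector<ParsedOperand> &Ops) {
  cvtDSImpl(I, Ops, false);
}
static void cvtDSGds(Inst &I, const std::vector<ParsedOperand> &Ops) {
  cvtDSImpl(I, Ops, true);
}

The AMDGPUBaseInfo.cpp hunks are an independent bound fix riding along: OpNo indexes the Desc.OpInfo array, which holds exactly NumOperands entries, so the old assert(OpNo <= Desc.NumOperands) let OpNo == NumOperands through and the following Desc.OpInfo[OpNo] read one element past the end of the array.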