Skip to content

Commit

Permalink
[Hexagon] New HVX target features.
Browse files Browse the repository at this point in the history
This patch lets the llvm tools handle the new HVX target features that
are added by frontend (clang). The target-features are of the form
"hvx-length64b" for 64 Byte HVX mode, "hvx-length128b" for 128 Byte mode HVX.
"hvx-double" is an alias to "hvx-length128b" and is soon will be deprecated.
The HVX version target feature is updated from "+hvx" to "+hvxv{version_number}".
E.g.: "+hvxv62"

For the correct HVX code generation, the user must use the following
target features.
For 64B mode: "+hvxv62" "+hvx-length64b"
For 128B mode: "+hvxv62" "+hvx-length128b"

Clang picks a default length if none is specified. If for some reason,
no hvx-length is specified to llvm, the compilation will bail out.
There is a corresponding clang patch.

Differential Revision: https://reviews.llvm.org/D38851


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@316101 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
Sumanth Gundapaneni committed Oct 18, 2017
1 parent a05cf7b commit 7a600d3
Show file tree
Hide file tree
Showing 110 changed files with 243 additions and 217 deletions.
56 changes: 42 additions & 14 deletions lib/Target/Hexagon/Hexagon.td
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,31 @@ include "llvm/Target/Target.td"
include "HexagonDepArch.td"

// Hexagon ISA Extensions
def ExtensionHVX: SubtargetFeature<"hvx", "UseHVXOps", "true",
"Hexagon HVX instructions">;
def ExtensionHVXDbl: SubtargetFeature<"hvx-double", "UseHVXDblOps", "true",
"Hexagon HVX Double instructions">;
def ExtensionHVXV60: SubtargetFeature<"hvxv60", "HexagonHVXVersion",
"Hexagon::ArchEnum::V60", "Hexagon HVX instructions">;
def ExtensionHVXV62: SubtargetFeature<"hvxv62", "HexagonHVXVersion",
"Hexagon::ArchEnum::V62", "Hexagon HVX instructions",
[ExtensionHVXV60]>;
def ExtensionHVX: SubtargetFeature<"hvx", "HexagonHVXVersion",
"Hexagon::ArchEnum::V62", "Hexagon HVX instructions",
[ExtensionHVXV60,
ExtensionHVXV62]>;
def ExtensionHVX64B
: SubtargetFeature<"hvx-length64b", "UseHVX64BOps", "true",
"Hexagon HVX 64B instructions",
[ExtensionHVXV60, ExtensionHVXV62]>;
def ExtensionHVX128B
: SubtargetFeature<"hvx-length128b", "UseHVX128BOps", "true",
"Hexagon HVX 128B instructions",
[ExtensionHVXV60, ExtensionHVXV62]>;

// This is an alias to ExtensionHVX128B to accept the hvx-double as
// an acceptable subtarget feature.
def ExtensionHVXDbl
: SubtargetFeature<"hvx-double", "UseHVX128BOps", "true",
"Hexagon HVX 128B instructions",
[ExtensionHVXV60, ExtensionHVXV62]>;

def FeatureLongCalls: SubtargetFeature<"long-calls", "UseLongCalls", "true",
"Use constant-extended calls">;

Expand All @@ -38,14 +59,21 @@ def FeatureLongCalls: SubtargetFeature<"long-calls", "UseLongCalls", "true",

def UseMEMOP : Predicate<"HST->useMemOps()">;
def IEEERndNearV5T : Predicate<"HST->modeIEEERndNear()">;
def UseHVXDbl : Predicate<"HST->useHVXDblOps()">,
AssemblerPredicate<"ExtensionHVXDbl">;
def UseHVXSgl : Predicate<"HST->useHVXSglOps()">;
def UseHVX : Predicate<"HST->useHVXSglOps() ||HST->useHVXDblOps()">,
AssemblerPredicate<"ExtensionHVX">;

def Hvx64 : HwMode<"+hvx,-hvx-double">;
def Hvx128 : HwMode<"+hvx,+hvx-double">;
def UseHVX64B : Predicate<"HST->useHVX64BOps()">,
AssemblerPredicate<"ExtensionHVX64B">;
def UseHVX128B : Predicate<"HST->useHVX128BOps()">,
AssemblerPredicate<"ExtensionHVX128B">;
def UseHVX : Predicate<"HST->useHVXOps()">,
AssemblerPredicate<"ExtensionHVXV60">;
def UseHVXV60 : Predicate<"HST->useHVXOps()">,
AssemblerPredicate<"ExtensionHVXV60">;
def UseHVXV62 : Predicate<"HST->useHVXOps()">,
AssemblerPredicate<"ExtensionHVXV62">;

def Hvx64 : HwMode<"+hvx-length64b">;
def Hvx64old : HwMode<"-hvx-double">;
def Hvx128 : HwMode<"+hvx-length128b">;
def Hvx128old : HwMode<"+hvx-double">;

//===----------------------------------------------------------------------===//
// Classes used for relation maps.
Expand Down Expand Up @@ -274,9 +302,9 @@ def : Proc<"hexagonv5", HexagonModelV4,
def : Proc<"hexagonv55", HexagonModelV55,
[ArchV4, ArchV5, ArchV55]>;
def : Proc<"hexagonv60", HexagonModelV60,
[ArchV4, ArchV5, ArchV55, ArchV60, ExtensionHVX]>;
[ArchV4, ArchV5, ArchV55, ArchV60]>;
def : Proc<"hexagonv62", HexagonModelV62,
[ArchV4, ArchV5, ArchV55, ArchV60, ArchV62, ExtensionHVX]>;
[ArchV4, ArchV5, ArchV55, ArchV60, ArchV62]>;

//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
Expand Down
66 changes: 33 additions & 33 deletions lib/Target/Hexagon/HexagonISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,7 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
auto &MF = State.getMachineFunction();
auto &HST = MF.getSubtarget<HexagonSubtarget>();

if (HST.useHVXSglOps() &&
if (HST.useHVX64BOps() &&
(LocVT == MVT::v8i64 || LocVT == MVT::v16i32 || LocVT == MVT::v32i16 ||
LocVT == MVT::v64i8 || LocVT == MVT::v512i1)) {
if (unsigned Reg = State.AllocateReg(VecLstS)) {
Expand All @@ -368,7 +368,7 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return false;
}
if (HST.useHVXSglOps() && (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 ||
if (HST.useHVX64BOps() && (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 ||
LocVT == MVT::v64i16 || LocVT == MVT::v128i8)) {
if (unsigned Reg = State.AllocateReg(VecLstD)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
Expand All @@ -379,8 +379,8 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
return false;
}
// 128B Mode
if (HST.useHVXDblOps() && (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 ||
LocVT == MVT::v128i16 || LocVT == MVT::v256i8)) {
if (HST.useHVX128BOps() && (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 ||
LocVT == MVT::v128i16 || LocVT == MVT::v256i8)) {
if (unsigned Reg = State.AllocateReg(VecLstD)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
Expand All @@ -389,7 +389,7 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return false;
}
if (HST.useHVXDblOps() &&
if (HST.useHVX128BOps() &&
(LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||
LocVT == MVT::v128i8 || LocVT == MVT::v1024i1)) {
if (unsigned Reg = State.AllocateReg(VecLstS)) {
Expand Down Expand Up @@ -437,7 +437,7 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
LocInfo = CCValAssign::Full;
} else if (LocVT == MVT::v128i8 || LocVT == MVT::v64i16 ||
LocVT == MVT::v32i32 || LocVT == MVT::v16i64 ||
(LocVT == MVT::v1024i1 && HST.useHVXDblOps())) {
(LocVT == MVT::v1024i1 && HST.useHVX128BOps())) {
LocVT = MVT::v32i32;
ValVT = MVT::v32i32;
LocInfo = CCValAssign::Full;
Expand Down Expand Up @@ -507,7 +507,7 @@ static bool RetCC_HexagonVector(unsigned ValNo, MVT ValVT,
return false;
}
} else if (LocVT == MVT::v32i32) {
unsigned Req = HST.useHVXDblOps() ? Hexagon::V0 : Hexagon::W0;
unsigned Req = HST.useHVX128BOps() ? Hexagon::V0 : Hexagon::W0;
if (unsigned Reg = State.AllocateReg(Req)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
Expand Down Expand Up @@ -827,9 +827,9 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
// V6 vectors passed by value have 64 or 128 byte alignment depending
// on whether we are 64 byte vector mode or 128 byte.
bool UseHVXDbl = Subtarget.useHVXDblOps();
bool UseHVX128B = Subtarget.useHVX128BOps();
assert(Subtarget.useHVXOps());
const unsigned ObjAlign = UseHVXDbl ? 128 : 64;
const unsigned ObjAlign = UseHVX128B ? 128 : 64;
LargestAlignSeen = std::max(LargestAlignSeen, ObjAlign);
MFI.ensureMaxAlignment(LargestAlignSeen);
}
Expand Down Expand Up @@ -940,15 +940,15 @@ static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,

auto &HST = static_cast<const HexagonSubtarget&>(DAG.getSubtarget());

bool ValidHVXDblType =
HST.useHVXDblOps() && (VT == MVT::v32i32 || VT == MVT::v16i64 ||
VT == MVT::v64i16 || VT == MVT::v128i8);
bool ValidHVX128BType =
HST.useHVX128BOps() && (VT == MVT::v32i32 || VT == MVT::v16i64 ||
VT == MVT::v64i16 || VT == MVT::v128i8);
bool ValidHVXType =
HST.useHVXSglOps() && (VT == MVT::v16i32 || VT == MVT::v8i64 ||
HST.useHVX64BOps() && (VT == MVT::v16i32 || VT == MVT::v8i64 ||
VT == MVT::v32i16 || VT == MVT::v64i8);

if (ValidHVXDblType || ValidHVXType ||
VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
if (ValidHVX128BType || ValidHVXType || VT == MVT::i64 || VT == MVT::i32 ||
VT == MVT::i16 || VT == MVT::i8) {
IsInc = (Ptr->getOpcode() == ISD::ADD);
Base = Ptr->getOperand(0);
Offset = Ptr->getOperand(1);
Expand Down Expand Up @@ -1182,7 +1182,7 @@ SDValue HexagonTargetLowering::LowerFormalArguments(
RegInfo.createVirtualRegister(&Hexagon::HvxVRRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
} else if (Subtarget.useHVXDblOps() &&
} else if (Subtarget.useHVX128BOps() &&
((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||
RegVT == MVT::v64i16 || RegVT == MVT::v128i8))) {
unsigned VReg =
Expand All @@ -1197,7 +1197,7 @@ SDValue HexagonTargetLowering::LowerFormalArguments(
RegInfo.createVirtualRegister(&Hexagon::HvxWRRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
} else if (Subtarget.useHVXDblOps() &&
} else if (Subtarget.useHVX128BOps() &&
((RegVT == MVT::v32i64 || RegVT == MVT::v64i32 ||
RegVT == MVT::v128i16 || RegVT == MVT::v256i8))) {
unsigned VReg =
Expand Down Expand Up @@ -1743,7 +1743,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
}

if (Subtarget.hasV60TOps()) {
if (Subtarget.useHVXSglOps()) {
if (Subtarget.useHVX64BOps()) {
addRegisterClass(MVT::v64i8, &Hexagon::HvxVRRegClass);
addRegisterClass(MVT::v32i16, &Hexagon::HvxVRRegClass);
addRegisterClass(MVT::v16i32, &Hexagon::HvxVRRegClass);
Expand All @@ -1753,7 +1753,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::v32i32, &Hexagon::HvxWRRegClass);
addRegisterClass(MVT::v16i64, &Hexagon::HvxWRRegClass);
addRegisterClass(MVT::v512i1, &Hexagon::HvxQRRegClass);
} else if (Subtarget.useHVXDblOps()) {
} else if (Subtarget.useHVX128BOps()) {
addRegisterClass(MVT::v128i8, &Hexagon::HvxVRRegClass);
addRegisterClass(MVT::v64i16, &Hexagon::HvxVRRegClass);
addRegisterClass(MVT::v32i32, &Hexagon::HvxVRRegClass);
Expand Down Expand Up @@ -1992,7 +1992,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);

if (Subtarget.useHVXOps()) {
if (Subtarget.useHVXSglOps()) {
if (Subtarget.useHVX64BOps()) {
setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i8, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i16, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i32, Custom);
Expand All @@ -2004,7 +2004,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64i8, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i16, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
} else if (Subtarget.useHVXDblOps()) {
} else if (Subtarget.useHVX128BOps()) {
setOperationAction(ISD::CONCAT_VECTORS, MVT::v256i8, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i16, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i32, Custom);
Expand Down Expand Up @@ -2082,13 +2082,13 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setIndexedStoreAction(ISD::POST_INC, VT, Legal);
}

if (Subtarget.useHVXSglOps()) {
if (Subtarget.useHVX64BOps()) {
for (MVT VT : {MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
MVT::v128i8, MVT::v64i16, MVT::v32i32, MVT::v16i64}) {
setIndexedLoadAction(ISD::POST_INC, VT, Legal);
setIndexedStoreAction(ISD::POST_INC, VT, Legal);
}
} else if (Subtarget.useHVXDblOps()) {
} else if (Subtarget.useHVX128BOps()) {
for (MVT VT : {MVT::v128i8, MVT::v64i16, MVT::v32i32, MVT::v16i64,
MVT::v256i8, MVT::v128i16, MVT::v64i32, MVT::v32i64}) {
setIndexedLoadAction(ISD::POST_INC, VT, Legal);
Expand Down Expand Up @@ -2353,8 +2353,8 @@ HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
size_t MaskLen = Mask.size();
unsigned SizeInBits = VT.getScalarSizeInBits() * MaskLen;

if ((Subtarget.useHVXSglOps() && SizeInBits == 64 * 8) ||
(Subtarget.useHVXDblOps() && SizeInBits == 128 * 8)) {
if ((Subtarget.useHVX64BOps() && SizeInBits == 64 * 8) ||
(Subtarget.useHVX128BOps() && SizeInBits == 128 * 8)) {
StridedLoadKind Pattern = isStridedLoad(Mask);
if (Pattern == StridedLoadKind::NoPattern)
return SDValue();
Expand Down Expand Up @@ -2617,11 +2617,11 @@ HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
return DAG.getNode(HexagonISD::COMBINE, dl, VT, Op.getOperand(1), Vec0);

if (UseHVX) {
assert((Width == 64*8 && Subtarget.useHVXSglOps()) ||
(Width == 128*8 && Subtarget.useHVXDblOps()));
assert((Width == 64 * 8 && Subtarget.useHVX64BOps()) ||
(Width == 128 * 8 && Subtarget.useHVX128BOps()));
SDValue Vec1 = Op.getOperand(1);
MVT OpTy = Subtarget.useHVXSglOps() ? MVT::v16i32 : MVT::v32i32;
MVT ReTy = Subtarget.useHVXSglOps() ? MVT::v32i32 : MVT::v64i32;
MVT OpTy = Subtarget.useHVX64BOps() ? MVT::v16i32 : MVT::v32i32;
MVT ReTy = Subtarget.useHVX64BOps() ? MVT::v32i32 : MVT::v64i32;
SDValue B0 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec0);
SDValue B1 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec1);
SDValue VC = DAG.getNode(HexagonISD::VCOMBINE, dl, ReTy, B1, B0);
Expand Down Expand Up @@ -2667,15 +2667,15 @@ HexagonTargetLowering::LowerEXTRACT_SUBVECTOR_HVX(SDValue Op,
EVT VT = Op.getOperand(0).getValueType();
SDLoc dl(Op);
bool UseHVX = Subtarget.useHVXOps();
bool UseHVXSgl = Subtarget.useHVXSglOps();
bool UseHVX64B = Subtarget.useHVX64BOps();
// Just in case...

if (!VT.isVector() || !UseHVX)
return SDValue();

EVT ResVT = Op.getValueType();
unsigned ResSize = ResVT.getSizeInBits();
unsigned VectorSizeInBits = UseHVXSgl ? (64 * 8) : (128 * 8);
unsigned VectorSizeInBits = UseHVX64B ? (64 * 8) : (128 * 8);
unsigned OpSize = VT.getSizeInBits();

// We deal only with cases where the result is the vector size
Expand Down Expand Up @@ -3001,7 +3001,7 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(
case 512:
return std::make_pair(0U, &Hexagon::HvxVRRegClass);
case 1024:
if (Subtarget.hasV60TOps() && Subtarget.useHVXDblOps())
if (Subtarget.hasV60TOps() && Subtarget.useHVX128BOps())
return std::make_pair(0U, &Hexagon::HvxVRRegClass);
return std::make_pair(0U, &Hexagon::HvxWRRegClass);
case 2048:
Expand Down Expand Up @@ -3204,7 +3204,7 @@ HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
case MVT::v32i32:
case MVT::v16i64:
if (Subtarget.hasV60TOps() && Subtarget.useHVXOps() &&
Subtarget.useHVXDblOps())
Subtarget.useHVX128BOps())
RRC = &Hexagon::HvxVRRegClass;
else
RRC = &Hexagon::HvxWRRegClass;
Expand Down
2 changes: 1 addition & 1 deletion lib/Target/Hexagon/HexagonPseudo.td
Original file line number Diff line number Diff line change
Expand Up @@ -427,7 +427,7 @@ class LDrivv_template<RegisterClass RC, InstHexagon rootInst>
def PS_vloadrw_ai: LDrivv_template<HvxWR, V6_vL32b_ai>,
Requires<[HasV60T,UseHVX]>;
def PS_vloadrw_nt_ai: LDrivv_template<HvxWR, V6_vL32b_nt_ai>,
Requires<[HasV60T,UseHVXSgl]>;
Requires<[HasV60T,UseHVX]>;
def PS_vloadrwu_ai: LDrivv_template<HvxWR, V6_vL32Ub_ai>,
Requires<[HasV60T,UseHVX]>;

Expand Down
46 changes: 27 additions & 19 deletions lib/Target/Hexagon/HexagonRegisterInfo.td
Original file line number Diff line number Diff line change
Expand Up @@ -216,25 +216,33 @@ let Namespace = "Hexagon" in {

// HVX types

def VecI1 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v512i1, v1024i1, v512i1]>;
def VecI8 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v64i8, v128i8, v64i8]>;
def VecI16 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v32i16, v64i16, v32i16]>;
def VecI32 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v16i32, v32i32, v16i32]>;
def VecI64 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v8i64, v16i64, v8i64]>;
def VecPI8 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v128i8, v256i8, v128i8]>;
def VecPI16 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v64i16, v128i16, v64i16]>;
def VecPI32 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v32i32, v64i32, v32i32]>;
def VecPI64 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v16i64, v32i64, v16i64]>;

def VecI1
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v512i1, v512i1, v1024i1, v1024i1, v512i1]>;
def VecI8
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v64i8, v64i8, v128i8, v128i8, v64i8]>;
def VecI16
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v32i16, v32i16, v64i16, v64i16, v32i16]>;
def VecI32
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v16i32, v16i32, v32i32, v32i32, v16i32]>;
def VecI64
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v8i64, v8i64, v16i64, v16i64, v8i64]>;
def VecPI8
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v128i8, v128i8, v256i8, v256i8, v128i8]>;
def VecPI16
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v64i16, v64i16, v128i16, v128i16, v64i16]>;
def VecPI32
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v32i32, v32i32, v64i32, v64i32, v32i32]>;
def VecPI64
: ValueTypeByHwMode<[Hvx64, Hvx64old, Hvx128, Hvx128old, DefaultMode],
[v16i64, v16i64, v32i64, v32i64, v16i64]>;

// Register classes.
//
Expand Down
16 changes: 2 additions & 14 deletions lib/Target/Hexagon/HexagonSubtarget.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,14 +53,6 @@ static cl::opt<bool> EnableIEEERndNear("enable-hexagon-ieee-rnd-near",
static cl::opt<bool> EnableBSBSched("enable-bsb-sched",
cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<bool> EnableHexagonHVXDouble("enable-hexagon-hvx-double",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Enable Hexagon Double Vector eXtensions"));

static cl::opt<bool> EnableHexagonHVX("enable-hexagon-hvx",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Enable Hexagon Vector eXtensions"));

static cl::opt<bool> EnableTCLatencySched("enable-tc-latency-sched",
cl::Hidden, cl::ZeroOrMore, cl::init(false));

Expand Down Expand Up @@ -126,8 +118,8 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
else
llvm_unreachable("Unrecognized Hexagon processor version");

UseHVXOps = false;
UseHVXDblOps = false;
UseHVX128BOps = false;
UseHVX64BOps = false;
UseLongCalls = false;

UseMemOps = DisableMemOps ? false : EnableMemOps;
Expand All @@ -136,10 +128,6 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {

ParseSubtargetFeatures(CPUString, FS);

if (EnableHexagonHVX.getPosition())
UseHVXOps = EnableHexagonHVX;
if (EnableHexagonHVXDouble.getPosition())
UseHVXDblOps = EnableHexagonHVXDouble;
if (OverrideLongCalls.getPosition())
UseLongCalls = OverrideLongCalls;

Expand Down
Loading

0 comments on commit 7a600d3

Please sign in to comment.