Generic Bypass Slow Div

- CodeGenPrepare pass for identifying div/rem ops
- Backend specifies the type mapping using addBypassSlowDivType
- Enabled only for Intel Atom at -O2 and above (32-bit -> 8-bit)
- Replace IDIV with instructions that test the operand values and use DIVB when
they are positive and less than 256 (see the sketch after this list).
- When both the quotient and the remainder of a divide are used, a DIV
and a REM instruction are present in the IR. In the non-Atom case
they are both lowered to IDIVs and CSE removes the redundant IDIV,
taking the quotient and remainder from the first one. With this
optimization, however, CSE cannot eliminate the redundant IDIV
because the two divides end up in different basic blocks.
This is overcome by calculating both the quotient (DIV) and remainder (REM)
in each basic block inserted by the optimization and reusing those results
when a subsequent DIV or REM instruction uses the same operands.
- Test cases check for the presence of the optimization when calculating
either the quotient, the remainder, or both.
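
A minimal C++ sketch of the runtime check the transformation inserts (illustrative only: the pass operates on LLVM IR, and the helper name and exact mask here are assumptions, not taken from the patch):

```cpp
#include <cstdint>

// Sketch: bypass a slow 32-bit divide with an unsigned 8-bit divide when both
// operands are positive and less than 256. Computing both the quotient and the
// remainder in each arm mirrors how the inserted blocks keep both values
// available for reuse by a later DIV/REM on the same operands.
static void divRem32(int32_t a, int32_t b, int32_t &quot, int32_t &rem) {
  if (((uint32_t)(a | b) & 0xFFFFFF00u) == 0) {
    // Fast path: both operands fit in 8 bits, so an 8-bit unsigned divide
    // (DIVB on x86) produces the same results.
    quot = (uint8_t)a / (uint8_t)b;
    rem  = (uint8_t)a % (uint8_t)b;
  } else {
    // Slow path: the original full-width signed divide (IDIV).
    quot = a / b;
    rem  = a % b;
  }
}
```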

Patch by Tyler Nowicki!



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@163150 91177308-0d34-0410-b5e6-96231b3b80d8
pgurd committed Sep 4, 2012
1 parent e20cf3d commit 2e2efd9
Showing 11 changed files with 473 additions and 3 deletions.
22 changes: 22 additions & 0 deletions include/llvm/Target/TargetLowering.h
@@ -25,6 +25,7 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -154,6 +155,16 @@ class TargetLowering {
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }

/// isSlowDivBypassed - Returns true if target has indicated at least one
/// type should be bypassed.
bool isSlowDivBypassed() const { return !BypassSlowDivTypes.empty(); }

/// getBypassSlowDivTypes - Returns map of slow types for division or
/// remainder with corresponding fast types
const DenseMap<Type *, Type *> &getBypassSlowDivTypes() const {
return BypassSlowDivTypes;
}

/// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
/// srl/add/sra.
bool isPow2DivCheap() const { return Pow2DivIsCheap; }
@@ -1055,6 +1066,11 @@ class TargetLowering {
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

/// addBypassSlowDivType - Tells the code generator which types to bypass.
void addBypassSlowDivType(Type *slow_type, Type *fast_type) {
BypassSlowDivTypes[slow_type] = fast_type;
}

/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
@@ -1772,6 +1788,12 @@ class TargetLowering {
/// set to true unconditionally.
bool IntDivIsCheap;

/// BypassSlowDivTypes - Tells the code generator to bypass slow divide or
/// remainder instructions. For example, SlowDivBypass[i32,u8] tells the code
/// generator to bypass 32-bit signed integer div/rem with an 8-bit unsigned
/// integer div/rem when the operands are positive and less than 256.
DenseMap <Type *, Type *> BypassSlowDivTypes;

/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
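
For illustration, a hedged sketch of how a pass might consume the new hooks; the helper function below is hypothetical, and only isSlowDivBypassed, getBypassSlowDivTypes, and addBypassSlowDivType come from this patch:

```cpp
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Walk the slow-type -> fast-type pairs a target registered via
// addBypassSlowDivType (e.g. i32 -> i8 on Atom).
static void inspectBypassTypes(const TargetLowering &TLI) {
  if (!TLI.isSlowDivBypassed())
    return; // the target registered no bypass types
  const DenseMap<Type *, Type *> &Map = TLI.getBypassSlowDivTypes();
  for (DenseMap<Type *, Type *>::const_iterator I = Map.begin(),
                                                E = Map.end(); I != E; ++I) {
    Type *SlowTy = I->first;  // type to bypass, e.g. i32
    Type *FastTy = I->second; // shorter type to divide with, e.g. i8
    (void)SlowTy; (void)FastTy;
  }
}
```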
58 changes: 58 additions & 0 deletions include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -0,0 +1,58 @@
//===- llvm/Transforms/Utils/BypassSlowDivision.h --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains an optimization for div and rem on architectures that
// execute short instructions significantly faster than longer instructions.
// For example, on Intel Atom 32-bit divides are slow enough that during
// runtime it is profitable to check the value of the operands, and if they are
// positive and less than 256 use an unsigned 8-bit divide.
//
//===----------------------------------------------------------------------===//

#ifndef TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
#define TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H

#include "llvm/Function.h"

/// This optimization identifies DIV instructions that can be
/// profitably bypassed and carried out with a shorter, faster divide.
bool bypassSlowDivision(llvm::Function &F,
llvm::Function::iterator &I,
const llvm::DenseMap<llvm::Type *, llvm::Type *> &BypassTypeMap);

#endif
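
A hedged sketch of the expected call pattern for this entry point, mirroring the CodeGenPrepare change further below (the wrapper function is illustrative, not part of the patch):

```cpp
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/ADT/DenseMap.h"
using namespace llvm;

// Run the bypass over every basic block of F. The iterator is taken by
// reference, presumably so the utility can account for blocks it inserts.
static bool bypassAllBlocks(Function &F,
                            const DenseMap<Type *, Type *> &BypassTypeMap) {
  bool Changed = false;
  for (Function::iterator I = F.begin(); I != F.end(); ++I)
    Changed |= bypassSlowDivision(F, I, BypassTypeMap);
  return Changed;
}
```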
1 change: 0 additions & 1 deletion lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -898,7 +898,6 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
return NULL;
}


EVT TargetLowering::getSetCCResultType(EVT VT) const {
assert(!VT.isVector() && "No default SetCC type for vectors!");
return PointerTy.SimpleTy;
6 changes: 5 additions & 1 deletion lib/Target/X86/X86.td
@@ -120,6 +120,9 @@ def FeatureBMI2 : SubtargetFeature<"bmi2", "HasBMI2", "true",
"Support BMI2 instructions">;
def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
"Use LEA for adjusting the stack pointer">;
def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb",
"HasSlowDivide", "true",
"Use small divide for positive values less than 256">;

//===----------------------------------------------------------------------===//
// X86 processors supported.
@@ -160,7 +163,8 @@ def : Proc<"core2", [FeatureSSSE3, FeatureCMPXCHG16B,
def : Proc<"penryn", [FeatureSSE41, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
def : AtomProc<"atom", [ProcIntelAtom, FeatureSSE3, FeatureCMPXCHG16B,
FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP]>;
FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP,
FeatureSlowDivide]>;
// "Arrandale" along with corei3 and corei5
def : Proc<"corei7", [FeatureSSE42, FeatureCMPXCHG16B,
FeatureSlowBTMem, FeatureFastUAMem,
4 changes: 4 additions & 0 deletions lib/Target/X86/X86ISelLowering.cpp
@@ -182,6 +182,10 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(X86StackPtr);

// Bypass i32 with i8 on Atom when compiling with O2
if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default)
addBypassSlowDivType(Type::getInt32Ty(getGlobalContext()), Type::getInt8Ty(getGlobalContext()));

if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
// Setup Windows compiler runtime calls.
setLibcallName(RTLIB::SDIV_I64, "_alldiv");
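
As the guard above shows, the bypass is registered only when the subtarget reports HasSlowDivide (currently only Atom sets it, via FeatureSlowDivide) and the optimization level is at least the default (-O2); at -O0/-O1 no runtime check is inserted. The effect should therefore be observable with something like llc -O2 -mcpu=atom on a 32-bit x86 triple, or by forcing the feature with -mattr=+idiv-to-divb; the exact invocation is an illustration, not taken from the patch's tests.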
1 change: 1 addition & 0 deletions lib/Target/X86/X86Subtarget.cpp
@@ -346,6 +346,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
, HasVectorUAMem(false)
, HasCmpxchg16b(false)
, UseLeaForSP(false)
, HasSlowDivide(false)
, PostRAScheduler(false)
, stackAlignment(4)
// FIXME: this is a known good value for Yonah. How about others?
5 changes: 5 additions & 0 deletions lib/Target/X86/X86Subtarget.h
@@ -136,6 +136,10 @@ class X86Subtarget : public X86GenSubtargetInfo {
/// the stack pointer. This is an optimization for Intel Atom processors.
bool UseLeaForSP;

/// HasSlowDivide - True if smaller divides are significantly faster than
/// full divides and should be used when possible.
bool HasSlowDivide;

/// PostRAScheduler - True if using post-register-allocation scheduler.
bool PostRAScheduler;

@@ -221,6 +225,7 @@ class X86Subtarget : public X86GenSubtargetInfo {
bool hasVectorUAMem() const { return HasVectorUAMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
bool hasSlowDivide() const { return HasSlowDivide; }

bool isAtom() const { return X86ProcFamily == IntelAtom; }

15 changes: 14 additions & 1 deletion lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -43,6 +43,7 @@
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -148,7 +149,19 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
PFI = getAnalysisIfAvailable<ProfileInfo>();
OptSize = F.hasFnAttr(Attribute::OptimizeForSize);

// First pass, eliminate blocks that contain only PHI nodes and an
/// This optimization identifies DIV instructions that can be
/// profitably bypassed and carried out with a shorter, faster divide.
if (TLI && TLI->isSlowDivBypassed()) {
const DenseMap<Type *, Type *> &BypassTypeMap = TLI->getBypassSlowDivTypes();

for (Function::iterator I = F.begin(); I != F.end(); I++) {
EverMadeChange |= bypassSlowDivision(F,
I,
BypassTypeMap);
}
}

// Eliminate blocks that contain only PHI nodes and an
// unconditional branch.
EverMadeChange |= EliminateMostlyEmptyBlocks(F);
