Skip to content

Commit

Permalink
Move GenericScheduler and PostGenericScheduler into a header.
Browse files Browse the repository at this point in the history
These were not exposed previously because I didn't want out-of-tree
targets to be too dependent on their internals. They can be reused for
a very wide variety of processors with casual scheduling needs without
exposing the classes by instead using hooks defined in
MachineSchedPolicy (we can add more if needed). When targets are more
aggressively tuned or want to provide custom heuristics, they can
define their own MachineSchedStrategy. I tend to think this is better
once you start customizing heuristics because you can copy over only
what you need. I don't think that layering heuristics generally works
well.

However, AArch64 targets now want to reuse the Generic scheduling logic
but also provide extensions. I don't see much harm in exposing the
Generic scheduling classes with a major caveat: these scheduling
strategies may change in the future without validating performance on
less mainstream processors. If you want to be immune from changes,
just define your own MachineSchedStrategy.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210166 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
atrick committed Jun 4, 2014
1 parent e5d8481 commit 0c83424
Show file tree
Hide file tree
Showing 2 changed files with 226 additions and 229 deletions.
211 changes: 211 additions & 0 deletions include/llvm/CodeGen/MachineScheduler.h
Original file line number Diff line number Diff line change
Expand Up @@ -739,6 +739,217 @@ class SchedBoundary {
#endif
};

/// Base class for GenericScheduler. This class maintains information about
/// scheduling candidates based on TargetSchedModel making it easy to implement
/// heuristics for either preRA or postRA scheduling.
class GenericSchedulerBase : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, RegMax,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};

#ifndef NDEBUG
  /// Return a human-readable name for \p Reason (debug builds only).
  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;      // Prefer candidates that reduce schedule latency.
    unsigned ReduceResIdx;   // Processor resource whose pressure to reduce.
    unsigned DemandResIdx;   // Processor resource to demand more of.

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by GenericScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Set of reasons that apply to multiple candidates.
    uint32_t RepeatReasonSet;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
      : Policy(policy), SU(nullptr), Reason(NoCand), RepeatReasonSet(0) {}

    // A candidate is valid once a best SUnit has been recorded.
    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    // Track reasons already used for some candidate in this pickNode() pass
    // so repeated ties can be broken by lower-priority heuristics.
    bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
    void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

protected:
  const MachineSchedContext *Context;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // Remaining critical-path/resource state for the current region.
  SchedRemainder Rem;

  GenericSchedulerBase(const MachineSchedContext *C):
    Context(C), SchedModel(nullptr), TRI(nullptr) {}

  /// Set \p Policy for scheduling the next instruction in \p CurrZone,
  /// optionally comparing pressure against the opposite \p OtherZone when
  /// scheduling bidirectionally.
  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
                 SchedBoundary *OtherZone);

#ifndef NDEBUG
  /// Dump \p Cand's reason and deltas (debug builds only).
  void traceCandidate(const SchedCandidate &Cand);
#endif
};

/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
// The DAG for the current region; presumably set by initialize() — body not
// visible here.
ScheduleDAGMILive *DAG;

// State of the top and bottom scheduled instruction boundaries.
SchedBoundary Top;
SchedBoundary Bot;

// Per-region policy; queried via shouldTrackPressure() below.
MachineSchedPolicy RegionPolicy;
public:
GenericScheduler(const MachineSchedContext *C):
GenericSchedulerBase(C), DAG(nullptr), Top(SchedBoundary::TopQID, "TopQ"),
Bot(SchedBoundary::BotQID, "BotQ") {}

// Establish RegionPolicy for the region [Begin, End) before scheduling it.
void initPolicy(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
unsigned NumRegionInstrs) override;

// Register pressure tracking is enabled or disabled per region by the policy
// computed in initPolicy().
bool shouldTrackPressure() const override {
return RegionPolicy.ShouldTrackPressure;
}

void initialize(ScheduleDAGMI *dag) override;

// Pick the next instruction to schedule; IsTopNode reports which zone it
// came from.
SUnit *pickNode(bool &IsTopNode) override;

void schedNode(SUnit *SU, bool IsTopNode) override;

// Forward newly-released nodes to the corresponding zone's ready queue.
void releaseTopNode(SUnit *SU) override {
Top.releaseTopNode(SU);
}

void releaseBottomNode(SUnit *SU) override {
Bot.releaseBottomNode(SU);
}

void registerRoots() override;

protected:
void checkAcyclicLatency();

// Compare TryCand against Cand and record the winner in Cand (heuristic
// order follows the CandReason priorities declared in the base class).
void tryCandidate(SchedCandidate &Cand,
SchedCandidate &TryCand,
SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
RegPressureTracker &TempTracker);

SUnit *pickNodeBidirectional(bool &IsTopNode);

void pickNodeFromQueue(SchedBoundary &Zone,
const RegPressureTracker &RPTracker,
SchedCandidate &Candidate);

void reschedulePhysRegCopies(SUnit *SU, bool isTop);
};

/// PostGenericScheduler - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Callbacks from ScheduleDAGMI:
/// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
class PostGenericScheduler : public GenericSchedulerBase {
  // The DAG for the current region; presumably set by initialize() — body not
  // visible here.
  ScheduleDAGMI *DAG;

  // PostRA scheduling is top-down only, so a single boundary suffices.
  SchedBoundary Top;

  // Bottom roots collected via releaseBottomNode(); used by registerRoots().
  SmallVector<SUnit*, 8> BotRoots;

public:
  PostGenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}

  virtual ~PostGenericScheduler() {}

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override {
    /* no configurable policy */
  }

  /// PostRA scheduling does not track pressure.
  bool shouldTrackPressure() const override { return false; }

  void initialize(ScheduleDAGMI *Dag) override;

  void registerRoots() override;

  SUnit *pickNode(bool &IsTopNode) override;

  void scheduleTree(unsigned SubtreeID) override {
    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
  }

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    Top.releaseTopNode(SU);
  }

  // Only called for roots.
  void releaseBottomNode(SUnit *SU) override {
    BotRoots.push_back(SU);
  }

protected:
  // Compare TryCand against Cand and record the winner in Cand.
  void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);

  void pickNodeFromQueue(SchedCandidate &Cand);
};

} // namespace llvm

#endif
Loading

0 comments on commit 0c83424

Please sign in to comment.