[Beignet] [PATCH 3/3] Backend: Add hole reuse in reg allocation
Xiuli Pan
xiuli.pan at intel.com
Fri Sep 30 07:32:29 UTC 2016
From: Pan Xiuli <xiuli.pan at intel.com>
We first find regs that have holes in a simple linear scan, and save them
in HoleRegPool; when allocating regs we first try to search for a fitting
candidate in the pool and choose the best-fitting one to reuse.
DEBUG version: You can export DUMPHOLE for easy review.
Signed-off-by: Pan Xiuli <xiuli.pan at intel.com>
---
backend/src/backend/gen_insn_selection.cpp | 31 +++++
backend/src/backend/gen_insn_selection.hpp | 3 +
backend/src/backend/gen_reg_allocation.cpp | 175 +++++++++++++++++++++++++++--
backend/src/backend/gen_reg_allocation.hpp | 6 +
4 files changed, 205 insertions(+), 10 deletions(-)
diff --git a/backend/src/backend/gen_insn_selection.cpp b/backend/src/backend/gen_insn_selection.cpp
index 97ee17a..ba78858 100644
--- a/backend/src/backend/gen_insn_selection.cpp
+++ b/backend/src/backend/gen_insn_selection.cpp
@@ -225,6 +225,37 @@ namespace gbe
return this->opcode == SEL_OP_LABEL;
}
+ bool SelectionInstruction::isNative(void) const {
+ return this->opcode == SEL_OP_NOT || /* ALU1 */
+ this->opcode == SEL_OP_LZD ||
+ this->opcode == SEL_OP_RNDZ ||
+ this->opcode == SEL_OP_RNDE ||
+ this->opcode == SEL_OP_RNDD ||
+ this->opcode == SEL_OP_RNDU ||
+ this->opcode == SEL_OP_FRC ||
+ this->opcode == SEL_OP_F16TO32 ||
+ this->opcode == SEL_OP_F32TO16 ||
+ this->opcode == SEL_OP_CBIT ||
+ this->opcode == SEL_OP_SEL || /* ALU2 */
+ this->opcode == SEL_OP_AND ||
+ this->opcode == SEL_OP_OR ||
+ this->opcode == SEL_OP_XOR ||
+ this->opcode == SEL_OP_SHR ||
+ this->opcode == SEL_OP_SHL ||
+ this->opcode == SEL_OP_RSR ||
+ this->opcode == SEL_OP_RSL ||
+ this->opcode == SEL_OP_ASR ||
+ this->opcode == SEL_OP_SEL ||
+ this->opcode == SEL_OP_ADD ||
+ this->opcode == SEL_OP_MUL ||
+ this->opcode == SEL_OP_FBH ||
+ this->opcode == SEL_OP_FBL ||
+ this->opcode == SEL_OP_MACH ||
+ this->opcode == SEL_OP_MATH ||
+ this->opcode == SEL_OP_LRP || /* ALU3 */
+ this->opcode == SEL_OP_MAD;
+ }
+
///////////////////////////////////////////////////////////////////////////
// SelectionVector
///////////////////////////////////////////////////////////////////////////
diff --git a/backend/src/backend/gen_insn_selection.hpp b/backend/src/backend/gen_insn_selection.hpp
index 814b449..2719858 100644
--- a/backend/src/backend/gen_insn_selection.hpp
+++ b/backend/src/backend/gen_insn_selection.hpp
@@ -82,6 +82,8 @@ namespace gbe
bool isBranch(void) const;
/*! Is it a label instruction (i.e. change the implicit mask) */
bool isLabel(void) const;
+ /*! Is it a simple native instruction (i.e. will be one simple ISA) */
+ bool isNative(void) const;
/*! Get the destination register */
GenRegister &dst(uint32_t dstID) { return regs[dstID]; }
/*! Get the source register */
@@ -259,6 +261,7 @@ namespace gbe
bool hasBarrier;
bool hasBranch;
bool removeSimpleIfEndif;
+ uint32_t ID;
};
enum SEL_IR_OPT_FEATURE {
diff --git a/backend/src/backend/gen_reg_allocation.cpp b/backend/src/backend/gen_reg_allocation.cpp
index 4451efb..6e6eeff 100644
--- a/backend/src/backend/gen_reg_allocation.cpp
+++ b/backend/src/backend/gen_reg_allocation.cpp
@@ -50,12 +50,14 @@ namespace gbe
struct GenRegInterval {
INLINE GenRegInterval(ir::Register reg) :
reg(reg), minID(INT_MAX), maxID(-INT_MAX), accessCount(0),
- conflictReg(0), b3OpAlign(0) {}
+ conflictReg(0), b3OpAlign(0) , isreused(0), ishole(0),
+ allocblockID(-1), lastblockID(-1), allocID(-1), useID(-1){}
ir::Register reg; //!< (virtual) register of the interval
int32_t minID, maxID; //!< Starting and ending points
int32_t accessCount;
ir::Register conflictReg; // < has banck conflict with this register
- bool b3OpAlign;
+ bool b3OpAlign, isreused, ishole;
+ int32_t allocblockID, lastblockID, allocID, useID;
};
struct SpillInterval {
@@ -123,7 +125,7 @@ namespace gbe
/*! Allocate the vectors detected in the instruction selection pass */
void allocateVector(Selection &selection);
/*! Allocate the given interval. Return true if success */
- bool createGenReg(const Selection &selection, const GenRegInterval &interval);
+ bool createGenReg(const Selection &selection, GenRegInterval &interval);
/*! Indicate if the registers are already allocated in vectors */
bool isAllocated(const SelectionVector *vector) const;
/*! Reallocate registers if needed to make the registers in the vector
@@ -164,6 +166,8 @@ namespace gbe
uint32_t reservedReg;
/*! Current vector to expire */
uint32_t expiringID;
+ /*! Hole regs that can be reused */
+ map<uint32_t, vector<HoleRegTag>> HoleRegPool;
INLINE void insertNewReg(const Selection &selection, ir::Register reg, uint32_t grfOffset, bool isVector = false);
INLINE bool expireReg(ir::Register reg);
INLINE bool spillAtInterval(GenRegInterval interval, int size, uint32_t alignment);
@@ -278,9 +282,70 @@ namespace gbe
}
}
- bool GenRegAllocator::Opaque::createGenReg(const Selection &selection, const GenRegInterval &interval) {
+ INLINE float IDFitess(int a, int b) {
+ if (a == b) return 1.0f;
+ else if (a > b) return 1.0f/(a - b -1);
+ else return 2.0f;
+ }
+ INLINE float getPhiRegFitness(const GenRegInterval &interval, HoleRegTag &holeRegTag) {
+ int regstID = interval.allocID;
+ int regendID = interval.useID;
+ int holeregstID = holeRegTag.startID;
+ int holeregendID = holeRegTag.endID;
+ return IDFitess(regstID, holeregstID) + IDFitess(holeregendID, regendID);
+ }
+
+
+ bool GenRegAllocator::Opaque::createGenReg(const Selection &selection, GenRegInterval &interval) {
using namespace ir;
const ir::Register reg = interval.reg;
+ // Reg reuse need very strict condition check, otherwise may have errors
+ if (!interval.ishole && interval.allocblockID <= interval.lastblockID &&
+ interval.allocID < interval.useID && interval.allocID == interval.minID &&
+ interval.useID == interval.maxID)
+ {
+ uint32_t blockID = interval.allocblockID;
+ uint32_t lastblockID = interval.lastblockID;
+ uint32_t useID = interval.useID;
+ auto holepool = this->HoleRegPool.find(blockID);
+ // Use block ID as index to get candidate hole reg to reuse
+ if (holepool != this->HoleRegPool.end()) {
+ auto &holepoolvec = holepool->second;
+ HoleRegTag* holeregbest = NULL;
+ float lastfitness = 0;
+ auto holepoolitr = holepoolvec.end();
+ for (auto itr = holepoolvec.begin() ; itr != holepoolvec.end(); ++itr) {
+ float fitness = getPhiRegFitness(interval, *itr);
+ // reg out of range of holepool reg
+ if (fitness > 2) continue;
+ if (fitness > lastfitness) {
+ lastfitness = fitness;
+ holeregbest = &*itr;
+ holepoolitr = itr;
+ }
+ // reg has one perfect fit, use this
+ if (fitness > 1 ) break;
+ }
+ // Reuse the hole and update the holeregpool
+ if (holeregbest) {
+ holeregbest->startID = useID + 1;
+ auto holereg = holeregbest->reg;
+ if (getenv("DUMPHOLE"))
+ printf("%d (%d:%d->%d:%d)has %zu candidate best fitness is %d with %f\n", (int)reg, blockID, interval.allocID, lastblockID , useID, holepoolvec.size(), (int)holereg, lastfitness);
+ if (RA.contains(holereg)) {
+ uint32_t grfOffset = RA.find(holereg)->second;
+ RA.insert(std::make_pair(reg, grfOffset));
+ offsetReg.insert(std::make_pair(grfOffset, reg));
+ interval.isreused = true;
+ if (blockID != lastblockID) {
+ this->HoleRegPool[lastblockID].push_back(*holeregbest);
+ holepoolvec.erase(holepoolitr);
+ }
+ }
+ }
+ }
+
+ }
if (RA.contains(reg) == true)
return true; // already allocated
uint32_t regSize;
@@ -712,7 +777,7 @@ namespace gbe
ctx.errCode = REGISTER_ALLOCATION_FAIL;
const uint32_t regNum = ctx.sel->getRegNum();
for (uint32_t startID = 0; startID < regNum; ++startID) {
- const GenRegInterval &interval = *this->starting[startID];
+ GenRegInterval &interval = *this->starting[startID];
const ir::Register reg = interval.reg;
if (interval.maxID == -INT_MAX)
@@ -830,6 +895,8 @@ namespace gbe
INLINE bool GenRegAllocator::Opaque::expireReg(ir::Register reg)
{
+ if (this->intervals[reg].isreused)
+ return true;
auto it = RA.find(reg);
if (flagBooleans.contains(reg))
return false;
@@ -1209,9 +1276,12 @@ namespace gbe
// Compute the intervals
int32_t insnID = 0;
+ int32_t blockID = 0;
for (auto &block : *selection.blockList) {
+ blockID++;
int32_t lastID = insnID;
- int32_t firstID = insnID;
+ int32_t firstID = insnID ? insnID + 2 : insnID;
+ block.ID = blockID;
// Update the intervals of each used register. Note that we do not
// register allocate R0, so we skip all sub-registers in r0
RegIntervalMap *boolsMap = new RegIntervalMap;
@@ -1219,7 +1289,7 @@ namespace gbe
flag0ReservedBlocks.insert(&block);
for (auto &insn : block.insnList) {
const uint32_t srcNum = insn.srcNum, dstNum = insn.dstNum;
- insn.ID = insnID;
+ insnID = (int)insn.ID;
bool is3SrcOp = insn.opcode == SEL_OP_MAD;
for (uint32_t srcID = 0; srcID < srcNum; ++srcID) {
const GenRegister &selReg = insn.src(srcID);
@@ -1244,8 +1314,14 @@ namespace gbe
if (this->intervals[reg].conflictReg == 0 ||
this->intervals[reg].conflictReg > conflictReg)
this->intervals[reg].conflictReg = conflictReg;
- this->intervals[reg].minID = std::min(this->intervals[reg].minID, insnID);
- this->intervals[reg].maxID = std::max(this->intervals[reg].maxID, insnID);
+ int insnsrcID = insnID;
+ // If the instruction is simple, src and dst can be reused and they will have different IDs
+ if (insn.isNative())
+ insnsrcID -= 1;
+ this->intervals[reg].minID = std::min(this->intervals[reg].minID, insnsrcID);
+ this->intervals[reg].maxID = std::max(this->intervals[reg].maxID, insnsrcID);
+ this->intervals[reg].useID = insnsrcID;
+ this->intervals[reg].lastblockID = blockID;
}
for (uint32_t dstID = 0; dstID < dstNum; ++dstID) {
const GenRegister &selReg = insn.dst(dstID);
@@ -1261,6 +1337,36 @@ namespace gbe
}
this->intervals[reg].minID = std::min(this->intervals[reg].minID, insnID);
this->intervals[reg].maxID = std::max(this->intervals[reg].maxID, insnID);
+ int lastblockID = this->intervals[reg].lastblockID;
+ if ( lastblockID >= 0 && lastblockID <= blockID) {
+ int useID = this->intervals[reg].useID;
+ int allocID = this->intervals[reg].allocID;
+ // Only find hole regs that are not in ocl registers
+ // Check if the reg will leave a hole and add it to HoleRegPool
+ if (useID >= 0 && useID < insnID - 1 && reg >= ir::ocl::regNum) {
+ int lastID = allocID > useID ? allocID : useID;
+ if (blockID - lastblockID > 1) continue;
+ if (getenv("DUMPHOLE"))
+ printf("reg %d is hole in %d: %d => %d: %d)\n",(int)reg, lastblockID, lastID, blockID,insnID);
+ HoleRegTag holeregtag;
+ holeregtag.reg = reg;
+ holeregtag.startID = lastID + 1;
+ holeregtag.endID = insnID - 1;
+ this->HoleRegPool[lastblockID].push_back(holeregtag);
+ this->intervals[reg].ishole = true;
+ }
+ this->intervals[reg].allocID = insnID;
+ if (this->intervals[reg].allocblockID != -1)
+ this->intervals[reg].ishole = true;
+ } else {
+ if (this->intervals[reg].allocblockID == -1)
+ this->intervals[reg].allocblockID = blockID;
+ else if (this->intervals[reg].allocblockID != -1)
+ this->intervals[reg].ishole = true;
+ else
+ this->intervals[reg].lastblockID= blockID;
+ this->intervals[reg].allocID = insnID;
+ }
}
// OK, a flag is used as a predicate or a conditional modifier
@@ -1303,7 +1409,6 @@ namespace gbe
(block.removeSimpleIfEndif && insn.state.flag == 0 && insn.state.subFlag == 0) ));
}
lastID = insnID;
- insnID++;
}
// All registers alive at the begining of the block must update their intervals.
@@ -1422,6 +1527,7 @@ do { \
INLINE void GenRegAllocator::Opaque::outputAllocation(void) {
using namespace std;
cout << "## register allocation ##" << endl;
+ int allmaxID = 0;
for(auto &i : RA) {
ir::Register vReg = (ir::Register)i.first;
ir::RegisterFamily family;
@@ -1437,6 +1543,9 @@ do { \
<< "[ " << setw(8) << this->intervals[(uint)vReg].minID
<< " -> " << setw(8) << this->intervals[(uint)vReg].maxID
<< "]" << setw(8) << "use count: " << this->intervals[(uint)vReg].accessCount << endl;
+ int thisMaxID = this->intervals[(uint)vReg].maxID;
+ if( thisMaxID > 0)
+ allmaxID = thisMaxID > allmaxID ? thisMaxID : allmaxID;
}
if (!spilledRegs.empty())
cout << "## spilled registers: " << spilledRegs.size() << endl;
@@ -1454,6 +1563,52 @@ do { \
<< "]" << setw(8) << "use count: " << this->intervals[(uint)vReg].accessCount << endl;
}
cout << endl;
+#ifndef NDEBUG
+ int *lregsize = new int[allmaxID + 4];
+ for(int i = 0; i < allmaxID + 4; i++)
+ lregsize[i] = 0;
+ for(auto &i : RA) {
+ ir::Register vReg = (ir::Register)i.first;
+ if (this->intervals[vReg].isreused)
+ continue;
+ ir::RegisterFamily family;
+ uint32_t regSize;
+ getRegAttrib(vReg, regSize, &family);
+ int regminID = this->intervals[(uint)vReg].minID;
+ int regmaxID = this->intervals[(uint)vReg].maxID;
+ regminID = (regminID > allmaxID ? 0 : regminID);
+ regmaxID = (regmaxID < 0 ? allmaxID - 1: regmaxID);
+ for(int j = regminID; j <= regmaxID; ++j) {
+ lregsize[j] += regSize;
+ }
+ }
+ for(auto it = spilledRegs.begin(); it != spilledRegs.end(); it++) {
+ ir::Register vReg = it->first;
+ ir::RegisterFamily family;
+ uint32_t regSize;
+ getRegAttrib(vReg, regSize, &family);
+ int regminID = this->intervals[(uint)vReg].minID;
+ int regmaxID = this->intervals[(uint)vReg].maxID;
+ regminID = (regminID > allmaxID ? 0 : regminID);
+ regmaxID = (regmaxID < 0 ? allmaxID - 1: regmaxID);
+ for(int j = regminID; j <= regmaxID; ++j) {
+ lregsize[j] += regSize;
+ }
+ }
+ cout<<"We have " << allmaxID << " lines" << endl;
+
+ int themaxID = 0, themaxSize = 0;
+ for (int i = 0; i < allmaxID; i+=2) {
+ if (themaxSize < lregsize[i]) {
+ themaxID = i;
+ themaxSize = lregsize[i];
+ }
+ //cout << "[" << i << "]" << "with " << lregsize[i] << "B (" << lregsize[i]/32 <<") regs!" << endl;
+ }
+
+ cout << "[" << themaxID << "]" << "with " << themaxSize << "B (" << themaxSize/32 <<") regs!" << endl;
+ delete [] lregsize;
+#endif
}
INLINE GenRegister setGenReg(const GenRegister &src, uint32_t grfOffset) {
diff --git a/backend/src/backend/gen_reg_allocation.hpp b/backend/src/backend/gen_reg_allocation.hpp
index 8d5e797..972ec1f 100644
--- a/backend/src/backend/gen_reg_allocation.hpp
+++ b/backend/src/backend/gen_reg_allocation.hpp
@@ -40,6 +40,12 @@ namespace gbe
int32_t addr;
} SpillRegTag;
+ typedef struct HoleRegTag {
+ uint32_t startID;
+ uint32_t endID;
+ ir::Register reg;
+ }HoleRegTag;
+
typedef map<ir::Register, SpillRegTag> SpilledRegs;
/*! Register allocate (i.e. virtual to physical register mapping) */
--
2.7.4
More information about the Beignet
mailing list