[Beignet] [PATCH 3/5] GBE: complete constant expression processing.
Zhigang Gong
zhigang.gong at intel.com
Wed Jul 23 22:48:59 PDT 2014
The goal is to handle arbitrarily nested constant expressions of the following form:
const = type0 OP0 (const0)
const0 = type1 OP1 (const1, const2)
const1 = ...
The supported OPs are (see the sketch after this list for how they compose):
TRUNC,
BITCAST,
ADD,
SUB,
MUL,
DIV,
REM,
SHL,
ASHR,
LSHR,
AND,
OR,
XOR
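Conceptually the folding is bottom-up: every operand of a ConstantExpr is first resolved to an ir::Immediate, and the OP is then applied to the already-folded operands through the new ir::Context::processImm() helpers. A rough sketch of that recursion (illustrative only; fold(), leafImmediate() and mapOpcode() are made-up names here, the real logic is GenWriter::processConstantImmIndex() in llvm_gen_backend.cpp below):

  // Illustrative sketch, not part of the patch. leafImmediate() stands in for the
  // existing scalar/vector constant handling, mapOpcode() for the opcode switch.
  ir::ImmediateIndex fold(ir::Context &ctx, llvm::Constant *c) {
    llvm::ConstantExpr *ce = llvm::dyn_cast<llvm::ConstantExpr>(c);
    if (ce == NULL)
      return leafImmediate(ctx, c);                  // plain scalar or vector constant
    const ir::Type dstType = getType(ctx, ce->getType());
    if (ce->getNumOperands() == 1) {                 // TRUNC / BITCAST
      const ir::ImmediateIndex src = fold(ctx, ce->getOperand(0));
      return ctx.processImm(mapOpcode(ce->getOpcode()), src, dstType);
    }
    // Binary OPs (ADD, SUB, ..., XOR): fold both operands first, then combine them.
    const ir::ImmediateIndex lhs = fold(ctx, ce->getOperand(0));
    const ir::ImmediateIndex rhs = fold(ctx, ce->getOperand(1));
    return ctx.processImm(mapOpcode(ce->getOpcode()), lhs, rhs, dstType);
  }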
We also add support for array/vector immediates. Some examples:
float bitcast (i32 trunc (i128 bitcast (<4 x i32> <i32 1064178811, i32 1064346583, i32 1062836634, i32 undef> to i128) to i32) to float)
float bitcast (i32 trunc (i128 lshr (i128 bitcast (<4 x i32> <i32 1064178811, i32 1064346583, i32 1062836634, i32 undef> to i128), i128 32) to i32) to float)
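To make the second example concrete, here is roughly what the folding computes, written by hand against the Immediate arithmetic added in immediate.hpp below (a sketch only, assuming the existing uint32_t scalar and array constructors; not code from the patch):

  using namespace gbe::ir;
  uint32_t lanes[4] = {1064178811, 1064346583, 1062836634, 0 /* undef lane */};
  Immediate vec(lanes, 4);                                 // the <4 x i32> vector immediate
  Immediate shift((uint32_t)32);                           // the shift amount
  Immediate shifted(IMM_LSHR, vec, shift, TYPE_LARGE_INT); // drop the low 32 bits (one lane)
  Immediate narrowed(IMM_TRUNC, shifted, TYPE_U32);        // i128 -> i32: keep the new low lane
  Immediate asFloat(IMM_BITCAST, narrowed, TYPE_FLOAT);    // reinterpret the 32 bits as float
  // asFloat.getFloatValue() is the float whose bit pattern is 1064346583 (lane 1).

The undef lane simply folds to zero, matching how processConstantImmIndexImpl() now handles UndefValue.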
Signed-off-by: Zhigang Gong <zhigang.gong at intel.com>
---
backend/src/ir/context.hpp | 30 +++-
backend/src/ir/function.cpp | 2 +
backend/src/ir/immediate.hpp | 307 ++++++++++++++++++++++++++++++++--
backend/src/ir/lowering.cpp | 2 +
backend/src/ir/type.cpp | 2 +
backend/src/ir/type.hpp | 6 +-
backend/src/llvm/llvm_gen_backend.cpp | 173 +++++++++++++++----
7 files changed, 471 insertions(+), 51 deletions(-)
diff --git a/backend/src/ir/context.hpp b/backend/src/ir/context.hpp
index 27ff4e9..b434427 100644
--- a/backend/src/ir/context.hpp
+++ b/backend/src/ir/context.hpp
@@ -69,6 +69,18 @@ namespace ir {
const Immediate imm(value);
return fn->newImmediate(imm);
}
+ template <typename T> INLINE ImmediateIndex newImmediate(T value, uint32_t num) {
+ const Immediate imm(value, num);
+ return fn->newImmediate(imm);
+ }
+ /*! Create a new immediate value */
+ INLINE ImmediateIndex newImmediate(vector<ImmediateIndex>indexVector) {
+ vector<const Immediate*> immVector;
+ for( uint32_t i = 0; i < indexVector.size(); i++)
+ immVector.push_back(&fn->getImmediate(indexVector[i]));
+ const Immediate imm(immVector);
+ return fn->newImmediate(imm);
+ }
/*! Create an integer immediate value */
INLINE ImmediateIndex newIntegerImmediate(int64_t x, Type type) {
switch (type) {
@@ -91,6 +103,20 @@ namespace ir {
return this->newImmediate(x);
}
+ INLINE ImmediateIndex processImm(ImmOpCode op, ImmediateIndex src, Type type) {
+ const Immediate &imm = fn->getImmediate(src);
+ const Immediate &dstImm = Immediate(op, imm, type);
+ return fn->newImmediate(dstImm);
+ }
+
+ INLINE ImmediateIndex processImm(ImmOpCode op, ImmediateIndex src0,
+ ImmediateIndex src1, Type type) {
+ const Immediate &imm0 = fn->getImmediate(src0);
+ const Immediate &imm1 = fn->getImmediate(src1);
+ const Immediate &dstImm = Immediate(op, imm0, imm1, type);
+ return fn->newImmediate(dstImm);
+ }
+
/*! Set an immediate value */
template <typename T> INLINE void setImmediate(ImmediateIndex index, T value) {
const Immediate imm(value);
@@ -101,9 +127,9 @@ namespace ir {
GBE_ASSERTM(fn != NULL, "No function currently defined");
const Immediate imm(value);
const ImmediateIndex index = fn->newImmediate(imm);
- const RegisterFamily family = getFamily(imm.type);
+ const RegisterFamily family = getFamily(imm.getType());
const Register reg = this->reg(family);
- this->LOADI(imm.type, reg, index);
+ this->LOADI(imm.getType(), reg, index);
return reg;
}
/*! Create a new label for the current function */
diff --git a/backend/src/ir/function.cpp b/backend/src/ir/function.cpp
index fa69ad2..78dad46 100644
--- a/backend/src/ir/function.cpp
+++ b/backend/src/ir/function.cpp
@@ -159,6 +159,8 @@ namespace ir {
case TYPE_HALF: out << "half(" << imm.getIntegerValue() << ")"; break;
case TYPE_FLOAT: out << imm.getFloatValue(); break;
case TYPE_DOUBLE: out << imm.getDoubleValue(); break;
+ default:
+ GBE_ASSERT(0 && "unsupported imm type.\n");
}
}
diff --git a/backend/src/ir/immediate.hpp b/backend/src/ir/immediate.hpp
index 1902fde..acdc11f 100644
--- a/backend/src/ir/immediate.hpp
+++ b/backend/src/ir/immediate.hpp
@@ -32,6 +32,37 @@
namespace gbe {
namespace ir {
+ typedef enum {
+ IMM_TRUNC = 0,
+ IMM_BITCAST,
+ IMM_ADD,
+ IMM_SUB,
+ IMM_MUL,
+ IMM_DIV,
+ IMM_REM,
+ IMM_SHL,
+ IMM_ASHR,
+ IMM_LSHR,
+ IMM_AND,
+ IMM_OR,
+ IMM_XOR
+ } ImmOpCode;
+
+ typedef enum {
+ IMM_TYPE_BOOL = TYPE_BOOL,
+ IMM_TYPE_S8 = TYPE_S8,
+ IMM_TYPE_U8 = TYPE_U8,
+ IMM_TYPE_S16 = TYPE_S16,
+ IMM_TYPE_U16 = TYPE_U16,
+ IMM_TYPE_S32 = TYPE_S32,
+ IMM_TYPE_U32 = TYPE_U32,
+ IMM_TYPE_S64 = TYPE_S64,
+ IMM_TYPE_U64 = TYPE_U64,
+ IMM_TYPE_FLOAT = TYPE_FLOAT,
+ IMM_TYPE_DOUBLE = TYPE_DOUBLE,
+ IMM_TYPE_COMP
+ } ImmType;
+
/*! The value as stored in the instruction */
class Immediate
{
@@ -39,7 +70,15 @@ namespace ir {
INLINE Immediate(void) {}
Type getType(void) const {
- return type;
+ return (Type)type;
+ }
+
+ bool isCompType(void) const {
+ return type == IMM_TYPE_COMP;
+ }
+
+ uint32_t getElemNum(void) const {
+ return elemNum;
}
uint32_t getTypeSize(void) const {
@@ -57,12 +96,13 @@ namespace ir {
case TYPE_DOUBLE:
case TYPE_S64:
case TYPE_U64: return 8;
+ case IMM_TYPE_COMP: return sizeof(Immediate*);
}
}
#define DECL_CONSTRUCTOR(TYPE, FIELD, IR_TYPE) \
Immediate(TYPE FIELD) { \
- this->type = IR_TYPE; \
+ this->type = (ImmType)IR_TYPE; \
this->elemNum = 1; \
this->data.p = &defaultData; \
defaultData = 0ull; \
@@ -84,7 +124,7 @@ namespace ir {
#define DECL_CONSTRUCTOR(TYPE, FIELD, IR_TYPE, ELEMNUM) \
Immediate(TYPE *FIELD, uint32_t ELEMNUM) { \
- this->type = IR_TYPE; \
+ this->type = (ImmType)IR_TYPE; \
this->elemNum = ELEMNUM; \
if (elemNum * ELEMNUM > 8) \
this->data.p = malloc(ELEMNUM * getTypeSize()); \
@@ -107,6 +147,34 @@ namespace ir {
DECL_CONSTRUCTOR(double, f64, TYPE_DOUBLE, elemNum)
#undef DECL_CONSTRUCTOR
+ Immediate(const vector<const Immediate*> immVec) {
+ if (immVec.size() == 1) {
+ *this = *immVec[0];
+ } else if (!(immVec[0]->isCompType()) && immVec[0]->elemNum == 1) {
+ this->type = immVec[0]->type;
+ this->elemNum = immVec.size();
+ if (immVec[0]->getTypeSize() * immVec.size() < 8)
+ this->data.p = &this->defaultData;
+ else
+ this->data.p = malloc(immVec[0]->getTypeSize() * immVec.size());
+ uint8_t *p = (uint8_t*)this->data.p;
+ for(uint32_t i = 0; i < immVec.size(); i++) {
+ GBE_ASSERT(immVec[i]->type == immVec[0]->type && immVec[i]->elemNum == 1);
+ memcpy(p, immVec[i]->data.p, immVec[i]->getTypeSize());
+ p += immVec[i]->getTypeSize();
+ }
+ } else {
+ this->type = IMM_TYPE_COMP;
+ if (immVec.size() * sizeof(Immediate*) < 8)
+ this->data.p = &this->defaultData;
+ else
+ this->data.p = malloc(immVec.size() * sizeof(Immediate*));
+ this->elemNum = immVec.size();
+ for(uint32_t i = 0; i < immVec.size(); i++)
+ this->data.immVec[i] = immVec[i];
+ }
+ }
+
int64_t getIntegerValue(void) const {
switch (type) {
default:
@@ -124,12 +192,12 @@ namespace ir {
}
float getFloatValue(void) const {
- GBE_ASSERT(type == TYPE_FLOAT);
+ GBE_ASSERT(type == IMM_TYPE_FLOAT);
return *data.f32;
}
float asFloatValue(void) const {
- GBE_ASSERT(type == TYPE_FLOAT || type == TYPE_U32 || type == TYPE_S32);
+ GBE_ASSERT(type == IMM_TYPE_FLOAT || type == IMM_TYPE_U32 || type == IMM_TYPE_S32);
return *data.f32;
}
@@ -139,28 +207,204 @@ namespace ir {
}
double getDoubleValue(void) const {
- GBE_ASSERT(type == TYPE_DOUBLE);
+ GBE_ASSERT(type == IMM_TYPE_DOUBLE);
return *data.f64;
}
-
+
Immediate(const Immediate & other) {
+ *this = other;
+ }
+
+ Immediate(ImmOpCode op, const Immediate &other, Type dstType) {
+ if (op == IMM_TRUNC) {
+ copy(other, 0, 1);
+ } else if (op == IMM_BITCAST) {
+ *this = other;
+ type = (ImmType)dstType;
+ }
+ }
+
+ Immediate & operator= (const Immediate & other) {
if (this != &other) {
- this->type = other.type;
- this->elemNum = other.elemNum;
+ type = other.type;
+ elemNum = other.elemNum;
if (other.data.p != &other.defaultData) {
- this->data.p = malloc(other.elemNum * other.getTypeSize());
- memcpy(this->data.p, other.data.p, other.elemNum * other.getTypeSize());
+ data.p = malloc(other.elemNum * other.getTypeSize());
+ memcpy(data.p, other.data.p, other.elemNum * other.getTypeSize());
}
else {
- this->defaultData = other.defaultData;
- this->data.p = &this->defaultData;
+ defaultData = other.defaultData;
+ data.p = &defaultData;
}
}
+ return *this;
}
+#define SCALAR_SAME_TYPE_ASSERT() \
+ GBE_ASSERT(this->getType() == right.getType() && \
+ this->getElemNum() == right.getElemNum() && \
+ this->getElemNum() == 1 && \
+ this->getType() != TYPE_BOOL);
- Immediate & operator= (const Immediate & other) {
- *this = Immediate(other);
- return *this;
+#define DECLAR_BINARY_ALL_TYPE_OP(OP) \
+ Immediate operator OP (const Immediate &right) const { \
+ SCALAR_SAME_TYPE_ASSERT(); \
+ switch (this->getType()) { \
+ default: \
+ GBE_ASSERT(0); \
+ case TYPE_S8: return Immediate(*this->data.s8 OP *right.data.s8); \
+ case TYPE_U8: return Immediate(*this->data.u8 OP *right.data.u8); \
+ case TYPE_S16: return Immediate(*this->data.s16 OP *right.data.s16); \
+ case TYPE_U16: return Immediate(*this->data.u16 OP *right.data.u16); \
+ case TYPE_S32: return Immediate(*this->data.s32 OP *right.data.s32); \
+ case TYPE_U32: return Immediate(*this->data.u32 OP *right.data.u32); \
+ case TYPE_S64: return Immediate(*this->data.s64 OP *right.data.s64); \
+ case TYPE_U64: return Immediate(*this->data.u64 OP *right.data.u64); \
+ case TYPE_FLOAT: return Immediate(*this->data.f32 OP *right.data.f32); \
+ case TYPE_DOUBLE: return Immediate(*this->data.f64 OP *right.data.f64); \
+ }\
+ return *this;\
+ }
+
+ DECLAR_BINARY_ALL_TYPE_OP(+)
+ DECLAR_BINARY_ALL_TYPE_OP(-)
+ DECLAR_BINARY_ALL_TYPE_OP(*)
+ DECLAR_BINARY_ALL_TYPE_OP(/)
+
+#undef DECLAR_BINARY_ALL_TYPE_OP
+
+#define DECLAR_BINARY_INT_TYPE_OP(OP) \
+ Immediate operator OP (const Immediate &right) const { \
+ SCALAR_SAME_TYPE_ASSERT(); \
+ switch (this->getType()) { \
+ default: \
+ GBE_ASSERT(0); \
+ case TYPE_S8: return Immediate(*this->data.s8 OP *right.data.s8); \
+ case TYPE_U8: return Immediate(*this->data.u8 OP *right.data.u8); \
+ case TYPE_S16: return Immediate(*this->data.s16 OP *right.data.s16); \
+ case TYPE_U16: return Immediate(*this->data.u16 OP *right.data.u16); \
+ case TYPE_S32: return Immediate(*this->data.s32 OP *right.data.s32); \
+ case TYPE_U32: return Immediate(*this->data.u32 OP *right.data.u32); \
+ case TYPE_S64: return Immediate(*this->data.s64 OP *right.data.s64); \
+ case TYPE_U64: return Immediate(*this->data.u64 OP *right.data.u64); \
+ }\
+ return *this;\
+ }
+ DECLAR_BINARY_INT_TYPE_OP(%)
+ DECLAR_BINARY_INT_TYPE_OP(&)
+ DECLAR_BINARY_INT_TYPE_OP(|)
+ DECLAR_BINARY_INT_TYPE_OP(^)
+#undef DECLAR_BINARY_INT_TYPE_OP
+
+
+#define DECLAR_BINARY_ASHIFT_OP(OP) \
+ Immediate operator OP (const Immediate &right) const { \
+ GBE_ASSERT(this->getType() > TYPE_BOOL && this->getType() <= TYPE_U64); \
+ int32_t shift = right.getIntegerValue(); \
+ if (shift == 0) \
+ return *this; \
+ else \
+ switch (this->getType()) { \
+ default: \
+ GBE_ASSERT(0); \
+ case TYPE_S8: return Immediate((*this->data.s8 OP shift)); \
+ case TYPE_U8: return Immediate((*this->data.u8 OP shift)); \
+ case TYPE_S16: return Immediate((*this->data.s16 OP shift)); \
+ case TYPE_U16: return Immediate((*this->data.u16 OP shift)); \
+ case TYPE_S32: return Immediate((*this->data.s32 OP shift)); \
+ case TYPE_U32: return Immediate((*this->data.u32 OP shift)); \
+ case TYPE_S64: return Immediate((*this->data.s64 OP shift)); \
+ case TYPE_U64: return Immediate((*this->data.u64 OP shift)); \
+ } \
+ }
+
+ DECLAR_BINARY_ASHIFT_OP(>>)
+ DECLAR_BINARY_ASHIFT_OP(<<)
+
+#undef DECLAR_BINARY_ASHIFT_OP
+
+ static Immediate lshr (const Immediate &left, const Immediate &right) {
+ GBE_ASSERT(left.getType() > TYPE_BOOL && left.getType() <= TYPE_U64);
+ int32_t shift = right.getIntegerValue();
+ if (shift == 0)
+ return left;
+ else
+ switch (left.getType()) {
+ default:
+ GBE_ASSERT(0);
+ case TYPE_S8:
+ case TYPE_U8: return Immediate((*left.data.u8 >> shift));
+ case TYPE_S16:
+ case TYPE_U16: return Immediate((*left.data.u16 >> shift));
+ case TYPE_S32:
+ case TYPE_U32: return Immediate((*left.data.u32 >> shift));
+ case TYPE_S64:
+ case TYPE_U64: return Immediate((*left.data.u64 >> shift));
+ }
+ }
+
+
+ Immediate(ImmOpCode op, const Immediate &left, const Immediate &right, Type dstType) {
+ switch (op) {
+ default:
+ GBE_ASSERT(0 && "unsupported imm op\n");
+ case IMM_ADD: *this = left + right; break;
+ case IMM_SUB: *this = left - right; break;
+ case IMM_MUL: *this = left * right; break;
+ case IMM_DIV: *this = left / right; break;
+ case IMM_AND: *this = left & right; break;
+ case IMM_OR: *this = left | right; break;
+ case IMM_XOR: *this = left ^ right; break;
+ case IMM_REM:
+ {
+ if (left.getType() > TYPE_BOOL && left.getType() <= TYPE_U64)
+ *this = left % right;
+ else if (left.getType() == TYPE_FLOAT && right.getType() == TYPE_FLOAT) {
+ *this = Immediate(left);
+ *this->data.f32 = fmodf(left.getFloatValue(), right.getFloatValue());
+ }
+ else if (left.getType() == TYPE_DOUBLE && right.getType() == TYPE_DOUBLE) {
+ *this = Immediate(left);
+ *this->data.f64 = fmod(left.getDoubleValue(), right.getDoubleValue());
+ }
+ else
+ GBE_ASSERT(0);
+ break;
+ }
+ case IMM_LSHR:
+ {
+ if (left.getElemNum() == 1)
+ *this = lshr(left, right);
+ else {
+ GBE_ASSERT(right.getIntegerValue() <= (left.getElemNum() * left.getTypeSize() * 8));
+ GBE_ASSERT(right.getIntegerValue() % (left.getTypeSize() * 8) == 0);
+ copy(left, right.getIntegerValue() / (left.getTypeSize() * 8), left.getElemNum());
+ }
+ break;
+ }
+ case IMM_ASHR:
+ {
+ if (left.getElemNum() == 1)
+ *this = left >> right;
+ else {
+ GBE_ASSERT(0 && "Doesn't support ashr on array constant.");
+ copy(left, right.getIntegerValue() / (left.getTypeSize() * 8), left.getElemNum());
+ }
+ break;
+ }
+ case IMM_SHL:
+ {
+ if (left.getElemNum() == 1)
+ *this = left << right;
+ else {
+ GBE_ASSERT(right.getIntegerValue() <= (left.getElemNum() * left.getTypeSize() * 8));
+ GBE_ASSERT(right.getIntegerValue() % (left.getTypeSize() * 8) == 0);
+ copy(left, -right.getIntegerValue() / (left.getTypeSize() * 8), left.getElemNum());
+ }
+ break;
+ }
+ }
+ // If the dst type is large int, we will not change the imm type to large int.
+ GBE_ASSERT(type == (ImmType)dstType || dstType == TYPE_LARGE_INT);
}
~Immediate() {
@@ -183,12 +427,41 @@ namespace ir {
uint64_t *u64;
float *f32;
double *f64;
+ const Immediate *immVec[];
void *p;
} data; //!< Value to store
- Type type; //!< Type of the value
+ ImmType type; //!< Type of the value
uint32_t elemNum; //!< vector imm data type
uint64_t defaultData;
+ void copy(const Immediate &other, int32_t offset, uint32_t num) {
+ if (this != &other) {
+ if (other.type == IMM_TYPE_COMP && num == 1) {
+ GBE_ASSERT(offset >= 0 && offset <= (int32_t)other.elemNum);
+ *this = *other.data.immVec[offset];
+ return;
+ }
+ type = other.type;
+ elemNum = num;
+ if (num * other.getTypeSize() < 8)
+ data.p = &defaultData;
+ else
+ data.p = malloc(num * other.getTypeSize());
+ uint8_t* datap = (uint8_t*)data.p;
+ memset(datap, 0, num * other.getTypeSize());
+ if (offset < 0) {
+ datap += (-offset) * other.getTypeSize();
+ num -= num < (uint32_t)(-offset) ? num : (-offset);
+ offset = 0;
+ } else if (offset > 0 && num > 1) {
+ GBE_ASSERT((int32_t)num > offset);
+ num -= offset;
+ }
+ memcpy(datap, (uint8_t*)other.data.p + offset * other.getTypeSize(),
+ num * other.getTypeSize());
+ }
+ }
+
GBE_CLASS(Immediate);
};
diff --git a/backend/src/ir/lowering.cpp b/backend/src/ir/lowering.cpp
index 739e944..f71fd72 100644
--- a/backend/src/ir/lowering.cpp
+++ b/backend/src/ir/lowering.cpp
@@ -135,6 +135,8 @@ namespace ir {
case TYPE_S8: return imm.getIntegerValue();
case TYPE_BOOL:
case TYPE_HALF: NOT_SUPPORTED; return 0;
+ default:
+ GBE_ASSERT(0 && "Unsupported imm type.\n");
}
return 0;
}
diff --git a/backend/src/ir/type.cpp b/backend/src/ir/type.cpp
index a6a2e44..56f5c12 100644
--- a/backend/src/ir/type.cpp
+++ b/backend/src/ir/type.cpp
@@ -40,6 +40,8 @@ namespace ir {
case TYPE_HALF: return out << "half";
case TYPE_FLOAT: return out << "float";
case TYPE_DOUBLE: return out << "double";
+ default :
+ GBE_ASSERT(0 && "Unsupported type\n");
};
return out;
}
diff --git a/backend/src/ir/type.hpp b/backend/src/ir/type.hpp
index 1e24906..8bfbdc8 100644
--- a/backend/src/ir/type.hpp
+++ b/backend/src/ir/type.hpp
@@ -46,7 +46,8 @@ namespace ir {
TYPE_U64, //!< unsigned 64 bits integer
TYPE_HALF, //!< 16 bits floating point value
TYPE_FLOAT, //!< 32 bits floating point value
- TYPE_DOUBLE //!< 64 bits floating point value
+ TYPE_DOUBLE, //!< 64 bits floating point value
+ TYPE_LARGE_INT //!< integer larger than 64 bits.
};
/*! Output a string for the type in the given stream */
@@ -72,8 +73,9 @@ namespace ir {
case TYPE_U64:
case TYPE_DOUBLE:
return FAMILY_QWORD;
+ default:
+ return FAMILY_DWORD;
};
- return FAMILY_DWORD;
}
/*! Return a type for each register family */
diff --git a/backend/src/llvm/llvm_gen_backend.cpp b/backend/src/llvm/llvm_gen_backend.cpp
index b3d32b8..ab31806 100644
--- a/backend/src/llvm/llvm_gen_backend.cpp
+++ b/backend/src/llvm/llvm_gen_backend.cpp
@@ -210,8 +210,7 @@ namespace gbe
return ir::TYPE_S32;
if (type == Type::getInt64Ty(type->getContext()))
return ir::TYPE_S64;
- ctx.getUnit().setValid(false);
- return ir::TYPE_S64;
+ return ir::TYPE_LARGE_INT;
}
/*! LLVM IR Type to Gen IR unsigned type translation */
@@ -461,6 +460,12 @@ namespace gbe
PASS_EMIT_INSTRUCTIONS = 1
} pass;
+ typedef enum {
+ CONST_INT,
+ CONST_FLOAT,
+ CONST_DOUBLE
+ } ConstTypeId;
+
LoopInfo *LI;
const Module *TheModule;
@@ -489,8 +494,8 @@ namespace gbe
/*! helper function for parsing global constant data */
void getConstantData(const Constant * c, void* mem, uint32_t& offset) const;
void collectGlobalConstant(void) const;
- ir::ImmediateIndex processConstantImmIndex(Constant *CPV, uint32_t index = 0u);
- const ir::Immediate &processConstantImm(Constant *CPV, uint32_t index = 0u);
+ ir::ImmediateIndex processConstantImmIndex(Constant *CPV, int32_t index = 0u);
+ const ir::Immediate &processConstantImm(Constant *CPV, int32_t index = 0u);
bool runOnFunction(Function &F) {
// Do not codegen any 'available_externally' functions at all, they have
@@ -601,6 +606,12 @@ namespace gbe
Value *llvmValue, const ir::Register ptr,
const ir::AddressSpace addrSpace, Type * elemType, bool isLoad);
void visitInstruction(Instruction &I) {NOT_SUPPORTED;}
+ private:
+ ir::ImmediateIndex processConstantImmIndexImpl(Constant *CPV, int32_t index = 0u);
+ template <typename T, typename P = T>
+ ir::ImmediateIndex processSeqConstant(ConstantDataSequential *seq,
+ int index, ConstTypeId tid);
+ ir::ImmediateIndex processConstantVector(ConstantVector *cv, int index);
};
char GenWriter::ID = 0;
@@ -736,13 +747,42 @@ namespace gbe
return false;
}
- ir::ImmediateIndex GenWriter::processConstantImmIndex(Constant *CPV, uint32_t index)
+ #define GET_EFFECT_DATA(_seq, _index, _tid) \
+ ((_tid == CONST_INT) ? _seq->getElementAsInteger(_index) : \
+ ((_tid == CONST_FLOAT) ? _seq->getElementAsFloat(_index) : \
+ _seq->getElementAsDouble(_index)))
+
+ // typename P is for bool only: C++ specializes vector<bool>, so &vec[0] does not
+ // yield a plain bool pointer. We have to use uint8_t for bool vectors.
+ template <typename T, typename P>
+ ir::ImmediateIndex GenWriter::processSeqConstant(ConstantDataSequential *seq,
+ int index, ConstTypeId tid) {
+ if (index >= 0) {
+ const T data = GET_EFFECT_DATA(seq, index, tid);
+ return ctx.newImmediate(data);
+ } else {
+ vector<P> array;
+ for(int i = 0; i < seq->getNumElements(); i++)
+ array.push_back(GET_EFFECT_DATA(seq, i, tid));
+ return ctx.newImmediate((T*)&array[0], array.size());
+ }
+ }
+
+ ir::ImmediateIndex GenWriter::processConstantVector(ConstantVector *cv, int index) {
+ if (index >= 0) {
+ Constant *c = cv->getOperand(index);
+ return processConstantImmIndex(c, -1);
+ } else {
+ vector<ir::ImmediateIndex> immVector;
+ for (uint32_t i = 0; i < cv->getNumOperands(); i++)
+ immVector.push_back(processConstantImmIndex(cv->getOperand(i)));
+ return ctx.newImmediate(immVector);
+ }
+ }
+
+ ir::ImmediateIndex GenWriter::processConstantImmIndexImpl(Constant *CPV, int32_t index)
{
-#if GBE_DEBUG
- GBE_ASSERTM(dyn_cast<ConstantExpr>(CPV) == NULL, "Unsupported constant expression");
- if (isa<UndefValue>(CPV) && CPV->getType()->isSingleValueType())
- GBE_ASSERTM(false, "Unsupported constant expression");
-#endif /* GBE_DEBUG */
+ GBE_ASSERT(dyn_cast<ConstantExpr>(CPV) == NULL);
#if LLVM_VERSION_MINOR > 0
ConstantDataSequential *seq = dyn_cast<ConstantDataSequential>(CPV);
@@ -750,26 +790,19 @@ namespace gbe
if (seq) {
Type *Ty = seq->getElementType();
if (Ty == Type::getInt1Ty(CPV->getContext())) {
- const uint64_t u64 = seq->getElementAsInteger(index);
- return ctx.newImmediate(bool(u64));
+ return processSeqConstant<bool, uint8_t>(seq, index, CONST_INT);
} else if (Ty == Type::getInt8Ty(CPV->getContext())) {
- const uint64_t u64 = seq->getElementAsInteger(index);
- return ctx.newImmediate(uint8_t(u64));
+ return processSeqConstant<uint8_t>(seq, index, CONST_INT);
} else if (Ty == Type::getInt16Ty(CPV->getContext())) {
- const uint64_t u64 = seq->getElementAsInteger(index);
- return ctx.newImmediate(uint16_t(u64));
+ return processSeqConstant<uint16_t>(seq, index, CONST_INT);
} else if (Ty == Type::getInt32Ty(CPV->getContext())) {
- const uint64_t u64 = seq->getElementAsInteger(index);
- return ctx.newImmediate(uint32_t(u64));
+ return processSeqConstant<uint32_t>(seq, index, CONST_INT);
} else if (Ty == Type::getInt64Ty(CPV->getContext())) {
- const uint64_t u64 = seq->getElementAsInteger(index);
- return ctx.newImmediate(u64);
+ return processSeqConstant<uint64_t>(seq, index, CONST_INT);
} else if (Ty == Type::getFloatTy(CPV->getContext())) {
- const float f32 = seq->getElementAsFloat(index);
- return ctx.newImmediate(f32);
+ return processSeqConstant<float>(seq, index, CONST_FLOAT);
} else if (Ty == Type::getDoubleTy(CPV->getContext())) {
- const double f64 = seq->getElementAsDouble(index);
- return ctx.newImmediate(f64);
+ return processSeqConstant<double>(seq, index, CONST_DOUBLE);
}
} else
#endif /* LLVM_VERSION_MINOR > 0 */
@@ -805,7 +838,7 @@ namespace gbe
}
} else {
if (dyn_cast<ConstantVector>(CPV))
- CPV = extractConstantElem(CPV, index);
+ return processConstantVector(dyn_cast<ConstantVector>(CPV), index);
GBE_ASSERTM(dyn_cast<ConstantExpr>(CPV) == NULL, "Unsupported constant expression");
// Integers
@@ -827,8 +860,7 @@ namespace gbe
const uint64_t u64 = CI->getZExtValue();
return ctx.newImmediate(u64);
} else {
- GBE_ASSERTM(false, "Unsupported integer size");
- return ctx.newImmediate(uint64_t(0));
+ return ctx.newImmediate(uint64_t(CI->getZExtValue()));
}
}
@@ -837,8 +869,20 @@ namespace gbe
return ctx.newImmediate(uint32_t(0));
}
- // Floats and doubles
const Type::TypeID typeID = CPV->getType()->getTypeID();
+ if (isa<UndefValue>(CPV)) {
+ Type* Ty = CPV->getType();
+ if (Ty == Type::getInt1Ty(CPV->getContext())) return ctx.newImmediate(false);
+ if (Ty == Type::getInt8Ty(CPV->getContext())) return ctx.newImmediate((uint8_t)0);
+ if (Ty == Type::getInt16Ty(CPV->getContext())) return ctx.newImmediate((uint16_t)0);
+ if (Ty == Type::getInt32Ty(CPV->getContext())) return ctx.newImmediate((uint32_t)0);
+ if (Ty == Type::getInt64Ty(CPV->getContext())) return ctx.newImmediate((uint64_t)0);
+ if (Ty == Type::getFloatTy(CPV->getContext())) return ctx.newImmediate((float)0);
+ if (Ty == Type::getDoubleTy(CPV->getContext())) return ctx.newImmediate((double)0);
+ GBE_ASSERT(0 && "Unsupported undef value type.\n");
+ }
+
+ // Floats and doubles
switch (typeID) {
case Type::FloatTyID:
case Type::DoubleTyID:
@@ -865,7 +909,77 @@ namespace gbe
return ctx.newImmediate(uint64_t(0));
}
- const ir::Immediate &GenWriter::processConstantImm(Constant *CPV, uint32_t index) {
+ ir::ImmediateIndex GenWriter::processConstantImmIndex(Constant *CPV, int32_t index) {
+ if (dyn_cast<ConstantExpr>(CPV) == NULL)
+ return processConstantImmIndexImpl(CPV, index);
+
+ if (dyn_cast<ConstantExpr>(CPV)) {
+ ConstantExpr *ce = dyn_cast<ConstantExpr>(CPV);
+ ir::Type type = getType(ctx, ce->getType());
+ switch (ce->getOpcode()) {
+ default:
+ //ce->dump();
+ GBE_ASSERT(0 && "unsupported ce opcode.\n");
+ case Instruction::Trunc:
+ {
+ const ir::ImmediateIndex immIndex = processConstantImmIndex(ce->getOperand(0), -1);
+ return ctx.processImm(ir::IMM_TRUNC, immIndex, type);
+ }
+ case Instruction::BitCast:
+ {
+ const ir::ImmediateIndex immIndex = processConstantImmIndex(ce->getOperand(0), -1);
+ if (type == ir::TYPE_LARGE_INT)
+ return immIndex;
+ return ctx.processImm(ir::IMM_BITCAST, immIndex, type);
+ }
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::SDiv:
+ case Instruction::SRem:
+ case Instruction::Shl:
+ case Instruction::AShr:
+ case Instruction::LShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ const ir::ImmediateIndex lhs = processConstantImmIndex(ce->getOperand(0), -1);
+ const ir::ImmediateIndex rhs = processConstantImmIndex(ce->getOperand(1), -1);
+ switch (ce->getOpcode()) {
+ default:
+ //ce->dump();
+ GBE_ASSERTM(0, "Unsupported constant expression.\n");
+ case Instruction::Add:
+ return ctx.processImm(ir::IMM_ADD, lhs, rhs, type);
+ case Instruction::Sub:
+ return ctx.processImm(ir::IMM_SUB, lhs, rhs, type);
+ case Instruction::Mul:
+ return ctx.processImm(ir::IMM_MUL, lhs, rhs, type);
+ case Instruction::SDiv:
+ return ctx.processImm(ir::IMM_DIV, lhs, rhs, type);
+ case Instruction::SRem:
+ return ctx.processImm(ir::IMM_REM, lhs, rhs, type);
+ case Instruction::Shl:
+ return ctx.processImm(ir::IMM_SHL, lhs, rhs, type);
+ case Instruction::AShr:
+ return ctx.processImm(ir::IMM_ASHR, lhs, rhs, type);
+ case Instruction::LShr:
+ return ctx.processImm(ir::IMM_LSHR, lhs, rhs, type);
+ case Instruction::And:
+ return ctx.processImm(ir::IMM_AND, lhs, rhs, type);
+ case Instruction::Or:
+ return ctx.processImm(ir::IMM_OR, lhs, rhs, type);
+ case Instruction::Xor:
+ return ctx.processImm(ir::IMM_XOR, lhs, rhs, type);
+ }
+ }
+ }
+ }
+ GBE_ASSERT(0 && "unsupported constant.\n");
+ return ctx.newImmediate((uint32_t)0);
+ }
+
+ const ir::Immediate &GenWriter::processConstantImm(Constant *CPV, int32_t index) {
ir::ImmediateIndex immIndex = processConstantImmIndex(CPV, index);
return ctx.getFunction().getImmediate(immIndex);
}
@@ -898,7 +1012,6 @@ namespace gbe
ir::Register GenWriter::getConstantRegister(Constant *c, uint32_t elemID) {
GBE_ASSERT(c != NULL);
-
if(isa<GlobalValue>(c)) {
return regTranslator.getScalar(c, elemID);
}
--
1.8.3.2