[Mesa-dev] R600 Patches: Add support for the local address space

Tom Stellard tom at stellard.net
Wed Jun 12 17:42:41 PDT 2013


Hi,

The attached patches add support for the local address space on
Evergreen / Northern Islands GPUs.

Please review.

-Tom
-------------- next part --------------
From 13dc637eb8b224a2f8a0fabeab08aba37a5f06be Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Thu, 30 May 2013 10:10:50 -0700
Subject: [PATCH 1/5] TableGen: Generate a function for getting operand
 indices based on their defined names v2

This patch modifies TableGen to generate a function in
${TARGET}GenInstrInfo.inc called getNamedOperandIdx(), which can be used
to look up indices for operands based on their names.

For example, if you have an instruction like:

def ADD : TargetInstr <(outs GPR:$dst), (ins GPR:$src0, GPR:$src1)>;

You can look up the operand indices using the new function, like this:

Target::getNamedOperandIdx(Target::ADD, Target::OpName::DST)  => 0
Target::getNamedOperandIdx(Target::ADD, Target::OpName::SRC0) => 1
Target::getNamedOperandIdx(Target::ADD, Target::OpName::SRC1) => 2

The operand names are case insensitive, so $dst is equivalent to $DST.

This change is useful for R600, which has instructions with a large number
of operands, many of which model single-bit instruction configuration
values.  These configuration bits are common across most instructions,
but may sit at a different operand index depending on the instruction type.
A convenient way to look up operand indices makes it possible to set these
bits generically on any instruction.
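
As a rough sketch of the intended use (the helper below is made up for
illustration and is not part of this patch), target code can set such a
bit without knowing the instruction's operand layout:

// Hypothetical helper: set the clamp bit on any instruction that has a
// clamp operand; getNamedOperandIdx() returns -1 if the operand is absent.
static void setClampBit(MachineInstr *MI) {
  int Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::clamp);
  if (Idx != -1)
    MI->getOperand(Idx).setImm(1);
}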

v2:
  - Don't uppercase enum values
  - Use table compression to reduce the function size (see the sketch below)
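
To illustrate the compression, here is a minimal, self-contained sketch
(with made-up opcodes and operand names) of the shape of the generated
code: opcodes that share the same name-to-index layout share a single row
of the table, so the table grows with the number of distinct layouts
rather than the number of instructions.

#include <cstdint>

// Made-up operand names and opcodes, for illustration only.
enum : uint16_t { dst, src0, src1, OPERAND_LAST };
enum : uint16_t { ADD, SUB, MOV };

// One row per distinct operand layout; -1 marks a named operand that the
// layout does not have.
static const int16_t OperandMap[][OPERAND_LAST] = {
  { 0, 1,  2 },  // layout shared by ADD and SUB
  { 0, 1, -1 },  // layout used by MOV, which has no src1
};

int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {
  switch (Opcode) {
  case ADD:
  case SUB: return OperandMap[0][NamedIdx];
  case MOV: return OperandMap[1][NamedIdx];
  default:  return -1;
  }
}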
---
 utils/TableGen/InstrInfoEmitter.cpp | 87 +++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)

diff --git a/utils/TableGen/InstrInfoEmitter.cpp b/utils/TableGen/InstrInfoEmitter.cpp
index d6020a8..edb66c2 100644
--- a/utils/TableGen/InstrInfoEmitter.cpp
+++ b/utils/TableGen/InstrInfoEmitter.cpp
@@ -45,6 +45,8 @@ private:
   void emitEnums(raw_ostream &OS);
 
   typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
+  typedef std::map<std::map<unsigned, unsigned>,
+                   std::vector<std::string> > OpNameMapTy;
   void emitRecord(const CodeGenInstruction &Inst, unsigned Num,
                   Record *InstrInfo,
                   std::map<std::vector<Record*>, unsigned> &EL,
@@ -293,6 +295,91 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
   OS << "} // End llvm namespace \n";
 
   OS << "#endif // GET_INSTRINFO_CTOR\n\n";
+
+  // Operand name -> index mapping
+
+  std::string Namespace = Target.getInstNamespace();
+  std::string OpNameNS = "OpName";
+  std::map<std::string, unsigned> Operands;
+  OpNameMapTy OperandMap;
+  for (unsigned i = 0, e = NumberedInstructions.size(), NumOperands = 0;
+                                                                  i != e; ++i) {
+    const CodeGenInstruction *Inst = NumberedInstructions[i];
+    std::map<unsigned, unsigned> OpList;
+    for (unsigned j = 0, je = Inst->Operands.size(); j != je; ++j) {
+      CGIOperandList::OperandInfo Info = Inst->Operands[j];
+      std::string Name =  Info.Name;
+      if (Operands.count(Name) == 0) {
+        Operands[Name] = NumOperands++;
+      }
+      unsigned OperandId = Operands[Name];
+      OpList[OperandId] = Info.MIOperandNo;
+    }
+    OperandMap[OpList].push_back(Namespace + "::" + Inst->TheDef->getName());
+  }
+
+  OS << "#ifdef GET_INSTRINFO_OPERAND_ENUM\n";
+  OS << "#undef GET_INSTRINFO_OPERAND_ENUM\n";
+  OS << "namespace llvm {";
+  OS << "namespace " << Namespace << " {\n";
+  OS << "namespace " << OpNameNS << " { \n";
+  OS << "enum {\n";
+  for (std::map<std::string, unsigned>::iterator i = Operands.begin(),
+                                             e = Operands.end();
+                                             i != e; ++i) {
+    OS << "  " << i->first << " = " << i->second << ",\n";
+  }
+  OS << "OPERAND_LAST";
+  OS << "\n};\n";
+  OS << "} // End namespace OpName\n";
+  OS << "} // End namespace " << Namespace << "\n";
+  OS << "} // End namespace llvm\n";
+  OS << "#endif //GET_INSTRINFO_OPERAND_ENUM\n";
+
+  OS << "#ifdef GET_INSTRINFO_NAMED_OPS\n";
+  OS << "#undef GET_INSTRINFO_NAMED_OPS\n";
+  OS << "namespace llvm {";
+  OS << "namespace " << Namespace << " {\n";
+  OS << "int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {\n";
+  OS << "  static const int16_t OperandMap []["<< Operands.size() << "] = {\n";
+  for (OpNameMapTy::iterator i = OperandMap.begin(), e = OperandMap.end();
+                                                     i != e; ++i) {
+    std::map<unsigned, unsigned> OpList = i->first;
+    OS << "{";
+    for (unsigned Idx = 0; Idx < Operands.size(); ++Idx) {
+      if (OpList.count(Idx) == 0) {
+        OS << "-1";
+      } else {
+        OS << OpList[Idx];
+      }
+      OS << ", ";
+    }
+    OS << "},\n";
+  }
+  OS << "};\n";
+
+  OS << "  switch(Opcode) {\n";
+  unsigned TableIndex = 0;
+  for (OpNameMapTy::iterator i = OperandMap.begin(), e = OperandMap.end();
+                                                     i != e; ++i, ++TableIndex) {
+    std::map<unsigned, unsigned> OpList = i->first;
+    std::vector<std::string> OpcodeList = i->second;
+
+    for (std::vector<std::string>::iterator ii = OpcodeList.begin(),
+                                            ie = OpcodeList.end();
+                                            ii != ie; ++ii) {
+      std::string OpName = *ii;
+      OS << "  case " << OpName << ":\n";
+    }
+
+    OS << "    return OperandMap[" << TableIndex << "][NamedIdx];\n";
+  }
+  OS << "    default: return -1;\n";
+  OS << "  }\n";
+  OS << "}\n";
+  OS << "} // End namespace " << Namespace << "\n";
+  OS << "} // End namespace llvm\n";
+  OS << "#endif //GET_INSTRINFO_NAMED_OPS\n";
 }
 
 void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
-- 
1.7.11.4

-------------- next part --------------
From fc26395c1e11f7504854aa7e32257b739aa66ddd Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Thu, 30 May 2013 13:03:20 -0700
Subject: [PATCH 2/5] R600: Use new getNamedOperandIdx function generated by
 TableGen v2

v2:
  - Use the correct case for operand names
---
 lib/Target/R600/AMDGPUInstrInfo.cpp         |   1 +
 lib/Target/R600/AMDGPUInstrInfo.h           |   5 +
 lib/Target/R600/AMDILISelDAGToDAG.cpp       |  91 ++++++-------
 lib/Target/R600/R600Defines.h               |  41 +-----
 lib/Target/R600/R600ExpandSpecialInstrs.cpp |  16 +--
 lib/Target/R600/R600ISelLowering.cpp        |   2 +-
 lib/Target/R600/R600InstrInfo.cpp           | 190 +++++++++++-----------------
 lib/Target/R600/R600InstrInfo.h             |   8 +-
 lib/Target/R600/R600Packetizer.cpp          |  18 +--
 9 files changed, 148 insertions(+), 224 deletions(-)

diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp
index 31b3002..61437e9 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -21,6 +21,7 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 
 #define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_NAMED_OPS
 #define GET_INSTRMAP_INFO
 #include "AMDGPUGenInstrInfo.inc"
 
diff --git a/lib/Target/R600/AMDGPUInstrInfo.h b/lib/Target/R600/AMDGPUInstrInfo.h
index 3909e4e..306f467 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.h
+++ b/lib/Target/R600/AMDGPUInstrInfo.h
@@ -23,6 +23,7 @@
 
 #define GET_INSTRINFO_HEADER
 #define GET_INSTRINFO_ENUM
+#define GET_INSTRINFO_OPERAND_ENUM
 #include "AMDGPUGenInstrInfo.inc"
 
 #define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
@@ -198,6 +199,10 @@ public:
 
 };
 
+namespace AMDGPU {
+  int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
+}  // End namespace AMDGPU
+
 } // End llvm namespace
 
 #define AMDGPU_FLAG_REGISTER_LOAD  (UINT64_C(1) << 63)
diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
index 93432a2..a01879e 100644
--- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
@@ -281,7 +281,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
           continue;
         }
 
-        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM);
+        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
+                                        AMDGPU::OpName::literal);
         assert(ImmIdx != -1);
 
         // subtract one from ImmIdx, because the DST operand is usually index
@@ -358,7 +359,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
         if (PotentialClamp->isMachineOpcode() &&
             PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
           unsigned ClampIdx =
-            TII->getOperandIdx(Result->getMachineOpcode(), R600Operands::CLAMP);
+            TII->getOperandIdx(Result->getMachineOpcode(), AMDGPU::OpName::clamp);
           std::vector<SDValue> Ops;
           unsigned NumOp = Result->getNumOperands();
           for (unsigned i = 0; i < NumOp; ++i) {
@@ -416,23 +417,23 @@ bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
 bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
     const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
   int OperandIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1),
-    TII->getOperandIdx(Opcode, R600Operands::SRC2)
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
   };
   int SelIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL),
-    TII->getOperandIdx(Opcode, R600Operands::SRC2_SEL)
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
   };
   int NegIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG),
-    TII->getOperandIdx(Opcode, R600Operands::SRC2_NEG)
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
   };
   int AbsIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
     -1
   };
 
@@ -467,44 +468,44 @@ bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
 bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
     const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
   int OperandIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_W),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_W)
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
   };
   int SelIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_W),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_W)
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
   };
   int NegIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_W),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_W)
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
   };
   int AbsIdx[] = {
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_W),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_X),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Y),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Z),
-    TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_W)
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
   };
 
   // Gather constants values
diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h
index aebe581..e30ea27 100644
--- a/lib/Target/R600/R600Defines.h
+++ b/lib/Target/R600/R600Defines.h
@@ -57,46 +57,7 @@ namespace R600_InstFlag {
 #define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST)
 #define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST)
 
-namespace R600Operands {
-  enum Ops {
-    DST,
-    UPDATE_EXEC_MASK,
-    UPDATE_PREDICATE,
-    WRITE,
-    OMOD,
-    DST_REL,
-    CLAMP,
-    SRC0,
-    SRC0_NEG,
-    SRC0_REL,
-    SRC0_ABS,
-    SRC0_SEL,
-    SRC1,
-    SRC1_NEG,
-    SRC1_REL,
-    SRC1_ABS,
-    SRC1_SEL,
-    SRC2,
-    SRC2_NEG,
-    SRC2_REL,
-    SRC2_SEL,
-    LAST,
-    PRED_SEL,
-    IMM,
-    BANK_SWIZZLE,
-    COUNT
- };
-
-  const static int ALUOpTable[3][R600Operands::COUNT] = {
-//            W        C     S  S  S  S     S  S  S  S     S  S  S
-//            R  O  D  L  S  R  R  R  R  S  R  R  R  R  S  R  R  R  L  P
-//   D  U     I  M  R  A  R  C  C  C  C  R  C  C  C  C  R  C  C  C  A  R  I
-//   S  E  U  T  O  E  M  C  0  0  0  0  C  1  1  1  1  C  2  2  2  S  E  M  B
-//   T  M  P  E  D  L  P  0  N  R  A  S  1  N  R  A  S  2  N  R  S  T  D  M  S
-    {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,11,12,13},
-    {0, 1, 2, 3, 4 ,5 ,6 ,7, 8, 9,10,11,12,13,14,15,16,-1,-1,-1,-1,17,18,19,20},
-    {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17,18}
-  };
+namespace OpName {
 
   enum VecOps {
     UPDATE_EXEC_MASK_X,
diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
index 40c058f..efc9523 100644
--- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp
+++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
@@ -82,9 +82,9 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
                                             AMDGPU::ZERO);             // src1
         TII->addFlag(PredSet, 0, MO_FLAG_MASK);
         if (Flags & MO_FLAG_PUSH) {
-          TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
+          TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
         } else {
-          TII->setImmOperand(PredSet, R600Operands::UPDATE_PREDICATE, 1);
+          TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
         }
         MI.eraseFromParent();
         continue;
@@ -96,7 +96,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
                                           AMDGPU::ZERO,
                                           AMDGPU::ZERO);
         TII->addFlag(PredSet, 0, MO_FLAG_MASK);
-        TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
+        TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
 
         BuildMI(MBB, I, MBB.findDebugLoc(I),
                 TII->get(AMDGPU::PREDICATED_BREAK))
@@ -208,10 +208,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
           // While not strictly necessary from hw point of view, we force
           // all src operands of a dot4 inst to belong to the same slot.
           unsigned Src0 = BMI->getOperand(
-              TII->getOperandIdx(Opcode, R600Operands::SRC0))
+              TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
               .getReg();
           unsigned Src1 = BMI->getOperand(
-              TII->getOperandIdx(Opcode, R600Operands::SRC1))
+              TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
               .getReg();
           (void) Src0;
           (void) Src1;
@@ -258,14 +258,14 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
       // T0_W = CUBE T1_Y, T1_Z
       for (unsigned Chan = 0; Chan < 4; Chan++) {
         unsigned DstReg = MI.getOperand(
-                            TII->getOperandIdx(MI, R600Operands::DST)).getReg();
+                            TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
         unsigned Src0 = MI.getOperand(
-                           TII->getOperandIdx(MI, R600Operands::SRC0)).getReg();
+                           TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
         unsigned Src1 = 0;
 
         // Determine the correct source registers
         if (!IsCube) {
-          int Src1Idx = TII->getOperandIdx(MI, R600Operands::SRC1);
+          int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
           if (Src1Idx != -1) {
             Src1 = MI.getOperand(Src1Idx).getReg();
           }
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 06c2100..ee13320 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -168,7 +168,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
   case AMDGPU::CONST_COPY: {
     MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
         MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
-    TII->setImmOperand(NewMI, R600Operands::SRC0_SEL,
+    TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel,
         MI->getOperand(1).getImm());
     break;
   }
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index 4f5cfcd..b9da74c 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -69,7 +69,7 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
 
     MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                   DestReg, SrcReg);
-    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
+    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                     .setIsKill(KillSrc);
   }
 }
@@ -170,22 +170,24 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
   SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
 
   if (MI->getOpcode() == AMDGPU::DOT_4) {
-    static const R600Operands::VecOps OpTable[8][2] = {
-      {R600Operands::SRC0_X, R600Operands::SRC0_SEL_X},
-      {R600Operands::SRC0_Y, R600Operands::SRC0_SEL_Y},
-      {R600Operands::SRC0_Z, R600Operands::SRC0_SEL_Z},
-      {R600Operands::SRC0_W, R600Operands::SRC0_SEL_W},
-      {R600Operands::SRC1_X, R600Operands::SRC1_SEL_X},
-      {R600Operands::SRC1_Y, R600Operands::SRC1_SEL_Y},
-      {R600Operands::SRC1_Z, R600Operands::SRC1_SEL_Z},
-      {R600Operands::SRC1_W, R600Operands::SRC1_SEL_W},
+    static const unsigned OpTable[8][2] = {
+      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
+      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
+      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
+      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
+      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
+      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
+      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
+      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
     };
 
     for (unsigned j = 0; j < 8; j++) {
-      MachineOperand &MO = MI->getOperand(OpTable[j][0] + 1);
+      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
+                                                        OpTable[j][0]));
       unsigned Reg = MO.getReg();
       if (Reg == AMDGPU::ALU_CONST) {
-        unsigned Sel = MI->getOperand(OpTable[j][1] + 1).getImm();
+        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
+                                                    OpTable[j][1])).getImm();
         Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
         continue;
       }
@@ -194,10 +196,10 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
     return Result;
   }
 
-  static const R600Operands::Ops OpTable[3][2] = {
-    {R600Operands::SRC0, R600Operands::SRC0_SEL},
-    {R600Operands::SRC1, R600Operands::SRC1_SEL},
-    {R600Operands::SRC2, R600Operands::SRC2_SEL},
+  static const unsigned OpTable[3][2] = {
+    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
+    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
+    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
   };
 
   for (unsigned j = 0; j < 3; j++) {
@@ -214,7 +216,7 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
     }
     if (Reg == AMDGPU::ALU_LITERAL_X) {
       unsigned Imm = MI->getOperand(
-          getOperandIdx(MI->getOpcode(), R600Operands::IMM)).getImm();
+          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
       Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
       continue;
     }
@@ -329,7 +331,7 @@ R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
   for (unsigned i = 0, e = IG.size(); i < e; ++i) {
     IGSrcs.push_back(ExtractSrcs(IG[i], PV));
     unsigned Op = getOperandIdx(IG[i]->getOpcode(),
-        R600Operands::BANK_SWIZZLE);
+        AMDGPU::OpName::bank_swizzle);
     ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
         IG[i]->getOperand(Op).getImm());
   }
@@ -812,13 +814,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
   unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
   MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                                AMDGPU::AR_X, OffsetReg);
-  setImmOperand(MOVA, R600Operands::WRITE, 0);
+  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
 
   MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                       AddrReg, ValueReg)
                                       .addReg(AMDGPU::AR_X,
                                            RegState::Implicit | RegState::Kill);
-  setImmOperand(Mov, R600Operands::DST_REL, 1);
+  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
   return Mov;
 }
 
@@ -830,13 +832,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
   MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                                        AMDGPU::AR_X,
                                                        OffsetReg);
-  setImmOperand(MOVA, R600Operands::WRITE, 0);
+  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
   MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                       ValueReg,
                                       AddrReg)
                                       .addReg(AMDGPU::AR_X,
                                            RegState::Implicit | RegState::Kill);
-  setImmOperand(Mov, R600Operands::SRC0_REL, 1);
+  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);
 
   return Mov;
 }
@@ -892,7 +894,7 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
 
 #define OPERAND_CASE(Label) \
   case Label: { \
-    static const R600Operands::VecOps Ops[] = \
+    static const unsigned Ops[] = \
     { \
       Label##_X, \
       Label##_Y, \
@@ -902,26 +904,25 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
     return Ops[Slot]; \
   }
 
-static R600Operands::VecOps
-getSlotedOps(R600Operands::Ops Op, unsigned Slot) {
+static unsigned getSlotedOps(unsigned  Op, unsigned Slot) {
   switch (Op) {
-  OPERAND_CASE(R600Operands::UPDATE_EXEC_MASK)
-  OPERAND_CASE(R600Operands::UPDATE_PREDICATE)
-  OPERAND_CASE(R600Operands::WRITE)
-  OPERAND_CASE(R600Operands::OMOD)
-  OPERAND_CASE(R600Operands::DST_REL)
-  OPERAND_CASE(R600Operands::CLAMP)
-  OPERAND_CASE(R600Operands::SRC0)
-  OPERAND_CASE(R600Operands::SRC0_NEG)
-  OPERAND_CASE(R600Operands::SRC0_REL)
-  OPERAND_CASE(R600Operands::SRC0_ABS)
-  OPERAND_CASE(R600Operands::SRC0_SEL)
-  OPERAND_CASE(R600Operands::SRC1)
-  OPERAND_CASE(R600Operands::SRC1_NEG)
-  OPERAND_CASE(R600Operands::SRC1_REL)
-  OPERAND_CASE(R600Operands::SRC1_ABS)
-  OPERAND_CASE(R600Operands::SRC1_SEL)
-  OPERAND_CASE(R600Operands::PRED_SEL)
+  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
+  OPERAND_CASE(AMDGPU::OpName::update_pred)
+  OPERAND_CASE(AMDGPU::OpName::write)
+  OPERAND_CASE(AMDGPU::OpName::omod)
+  OPERAND_CASE(AMDGPU::OpName::dst_rel)
+  OPERAND_CASE(AMDGPU::OpName::clamp)
+  OPERAND_CASE(AMDGPU::OpName::src0)
+  OPERAND_CASE(AMDGPU::OpName::src0_neg)
+  OPERAND_CASE(AMDGPU::OpName::src0_rel)
+  OPERAND_CASE(AMDGPU::OpName::src0_abs)
+  OPERAND_CASE(AMDGPU::OpName::src0_sel)
+  OPERAND_CASE(AMDGPU::OpName::src1)
+  OPERAND_CASE(AMDGPU::OpName::src1_neg)
+  OPERAND_CASE(AMDGPU::OpName::src1_rel)
+  OPERAND_CASE(AMDGPU::OpName::src1_abs)
+  OPERAND_CASE(AMDGPU::OpName::src1_sel)
+  OPERAND_CASE(AMDGPU::OpName::pred_sel)
   default:
     llvm_unreachable("Wrong Operand");
   }
@@ -929,12 +930,6 @@ getSlotedOps(R600Operands::Ops Op, unsigned Slot) {
 
 #undef OPERAND_CASE
 
-static int
-getVecOperandIdx(R600Operands::VecOps Op) {
-  return 1 + Op;
-}
-
-
 MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
     MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
     const {
@@ -947,31 +942,31 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
     Opcode = AMDGPU::DOT4_eg;
   MachineBasicBlock::iterator I = MI;
   MachineOperand &Src0 = MI->getOperand(
-      getVecOperandIdx(getSlotedOps(R600Operands::SRC0, Slot)));
+      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
   MachineOperand &Src1 = MI->getOperand(
-      getVecOperandIdx(getSlotedOps(R600Operands::SRC1, Slot)));
+      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
   MachineInstr *MIB = buildDefaultInstruction(
       MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
-  static const R600Operands::Ops Operands[14] = {
-    R600Operands::UPDATE_EXEC_MASK,
-    R600Operands::UPDATE_PREDICATE,
-    R600Operands::WRITE,
-    R600Operands::OMOD,
-    R600Operands::DST_REL,
-    R600Operands::CLAMP,
-    R600Operands::SRC0_NEG,
-    R600Operands::SRC0_REL,
-    R600Operands::SRC0_ABS,
-    R600Operands::SRC0_SEL,
-    R600Operands::SRC1_NEG,
-    R600Operands::SRC1_REL,
-    R600Operands::SRC1_ABS,
-    R600Operands::SRC1_SEL,
+  static const unsigned  Operands[14] = {
+    AMDGPU::OpName::update_exec_mask,
+    AMDGPU::OpName::update_pred,
+    AMDGPU::OpName::write,
+    AMDGPU::OpName::omod,
+    AMDGPU::OpName::dst_rel,
+    AMDGPU::OpName::clamp,
+    AMDGPU::OpName::src0_neg,
+    AMDGPU::OpName::src0_rel,
+    AMDGPU::OpName::src0_abs,
+    AMDGPU::OpName::src0_sel,
+    AMDGPU::OpName::src1_neg,
+    AMDGPU::OpName::src1_rel,
+    AMDGPU::OpName::src1_abs,
+    AMDGPU::OpName::src1_sel,
   };
 
   for (unsigned i = 0; i < 14; i++) {
     MachineOperand &MO = MI->getOperand(
-        getVecOperandIdx(getSlotedOps(Operands[i], Slot)));
+        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
     assert (MO.isImm());
     setImmOperand(MIB, Operands[i], MO.getImm());
   }
@@ -985,56 +980,19 @@ MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                          uint64_t Imm) const {
   MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                   AMDGPU::ALU_LITERAL_X);
-  setImmOperand(MovImm, R600Operands::IMM, Imm);
+  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
   return MovImm;
 }
 
-int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
-                                 R600Operands::Ops Op) const {
-  return getOperandIdx(MI.getOpcode(), Op);
-}
-
-int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
-                                 R600Operands::VecOps Op) const {
+int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
   return getOperandIdx(MI.getOpcode(), Op);
 }
 
-int R600InstrInfo::getOperandIdx(unsigned Opcode,
-                                 R600Operands::Ops Op) const {
-  unsigned TargetFlags = get(Opcode).TSFlags;
-  unsigned OpTableIdx;
-
-  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
-    switch (Op) {
-    case R600Operands::DST: return 0;
-    case R600Operands::SRC0: return 1;
-    case R600Operands::SRC1: return 2;
-    case R600Operands::SRC2: return 3;
-    default:
-      assert(!"Unknown operand type for instruction");
-      return -1;
-    }
-  }
-
-  if (TargetFlags & R600_InstFlag::OP1) {
-    OpTableIdx = 0;
-  } else if (TargetFlags & R600_InstFlag::OP2) {
-    OpTableIdx = 1;
-  } else {
-    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
-                                                 "for this instruction");
-    OpTableIdx = 2;
-  }
-
-  return R600Operands::ALUOpTable[OpTableIdx][Op];
-}
-
-int R600InstrInfo::getOperandIdx(unsigned Opcode,
-                                 R600Operands::VecOps Op) const {
-  return Op + 1;
+int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
+  return AMDGPU::getNamedOperandIdx(Opcode, Op);
 }
 
-void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
+void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                   int64_t Imm) const {
   int Idx = getOperandIdx(*MI, Op);
   assert(Idx != -1 && "Operand not supported for this instruction.");
@@ -1062,20 +1020,20 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
     bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
     switch (Flag) {
     case MO_FLAG_CLAMP:
-      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
+      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
       break;
     case MO_FLAG_MASK:
-      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
+      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
       break;
     case MO_FLAG_NOT_LAST:
     case MO_FLAG_LAST:
-      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
+      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
       break;
     case MO_FLAG_NEG:
       switch (SrcIdx) {
-      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
-      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
-      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
+      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
+      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
+      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
       }
       break;
 
@@ -1084,8 +1042,8 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                        "instructions.");
       (void)IsOP3;
       switch (SrcIdx) {
-      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
-      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
+      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
+      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
       }
       break;
 
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index 6a11c63..f06abf6 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -210,17 +210,15 @@ namespace llvm {
   /// \brief Get the index of Op in the MachineInstr.
   ///
   /// \returns -1 if the Instruction does not contain the specified \p Op.
-  int getOperandIdx(const MachineInstr &MI, R600Operands::Ops Op) const;
-  int getOperandIdx(const MachineInstr &MI, R600Operands::VecOps Op) const;
+  int getOperandIdx(const MachineInstr &MI, unsigned Op) const;
 
   /// \brief Get the index of \p Op for the given Opcode.
   ///
   /// \returns -1 if the Instruction does not contain the specified \p Op.
-  int getOperandIdx(unsigned Opcode, R600Operands::Ops Op) const;
-  int getOperandIdx(unsigned Opcode, R600Operands::VecOps Op) const;
+  int getOperandIdx(unsigned Opcode, unsigned Op) const;
 
   /// \brief Helper function for setting instruction flag values.
-  void setImmOperand(MachineInstr *MI, R600Operands::Ops Op, int64_t Imm) const;
+  void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
 
   /// \returns true if this instruction has an operand for storing target flags.
   bool hasFlagOperand(const MachineInstr &MI) const;
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
index da614c7..6024fd5 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/R600/R600Packetizer.cpp
@@ -79,7 +79,7 @@ private:
         continue;
       if (TII->isTransOnly(BI))
         continue;
-      int OperandIdx = TII->getOperandIdx(BI->getOpcode(), R600Operands::WRITE);
+      int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
       if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0)
         continue;
       unsigned Dst = BI->getOperand(0).getReg();
@@ -112,10 +112,10 @@ private:
 
   void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs)
       const {
-    R600Operands::Ops Ops[] = {
-      R600Operands::SRC0,
-      R600Operands::SRC1,
-      R600Operands::SRC2
+    unsigned Ops[] = {
+      AMDGPU::OpName::src0,
+      AMDGPU::OpName::src1,
+      AMDGPU::OpName::src2
     };
     for (unsigned i = 0; i < 3; i++) {
       int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
@@ -164,8 +164,8 @@ public:
     if (getSlot(MII) <= getSlot(MIJ))
       return false;
     // Does MII and MIJ share the same pred_sel ?
-    int OpI = TII->getOperandIdx(MII->getOpcode(), R600Operands::PRED_SEL),
-        OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600Operands::PRED_SEL);
+    int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel),
+        OpJ = TII->getOperandIdx(MIJ->getOpcode(), AMDGPU::OpName::pred_sel);
     unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
         PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
     if (PredI != PredJ)
@@ -191,7 +191,7 @@ public:
   bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {return false;}
 
   void setIsLastBit(MachineInstr *MI, unsigned Bit) const {
-    unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), R600Operands::LAST);
+    unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last);
     MI->getOperand(LastOp).setImm(Bit);
   }
 
@@ -230,7 +230,7 @@ public:
       for (unsigned i = 0, e = CurrentPacketMIs.size(); i < e; i++) {
         MachineInstr *MI = CurrentPacketMIs[i];
             unsigned Op = TII->getOperandIdx(MI->getOpcode(),
-                R600Operands::BANK_SWIZZLE);
+                AMDGPU::OpName::bank_swizzle);
             MI->getOperand(Op).setImm(BS[i]);
       }
     }
-- 
1.7.11.4

-------------- next part --------------
From ba6cb6d402f9cc1737da552b8007544644949074 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Wed, 12 Jun 2013 09:02:39 -0700
Subject: [PATCH 3/5] R600: Add ALUInst bit to tablegen definitions

---
 lib/Target/R600/R600Defines.h       | 3 ++-
 lib/Target/R600/R600InstrInfo.cpp   | 6 ++++++
 lib/Target/R600/R600Instructions.td | 5 +++++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h
index e30ea27..6bcf8ae 100644
--- a/lib/Target/R600/R600Defines.h
+++ b/lib/Target/R600/R600Defines.h
@@ -41,7 +41,8 @@ namespace R600_InstFlag {
     OP1 = (1 << 10),
     OP2 = (1 << 11),
     VTX_INST  = (1 << 12),
-    TEX_INST = (1 << 13)
+    TEX_INST = (1 << 13),
+    ALU_INST = (1 << 14)
   };
 }
 
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index b9da74c..6de47f7 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -133,6 +133,12 @@ bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
 bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
   unsigned TargetFlags = get(Opcode).TSFlags;
 
+  return (TargetFlags & R600_InstFlag::ALU_INST);
+}
+
+bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
+  unsigned TargetFlags = get(Opcode).TSFlags;
+
   return ((TargetFlags & R600_InstFlag::OP1) |
           (TargetFlags & R600_InstFlag::OP2) |
           (TargetFlags & R600_InstFlag::OP3));
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index b4131be..422e9914 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -28,6 +28,7 @@ class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
   bit HasNativeOperands = 0;
   bit VTXInst = 0;
   bit TEXInst = 0;
+  bit ALUInst = 0;
 
   let Namespace = "AMDGPU";
   let OutOperandList = outs;
@@ -49,6 +50,7 @@ class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
   let TSFlags{11} = Op2;
   let TSFlags{12} = VTXInst;
   let TSFlags{13} = TEXInst;
+  let TSFlags{14} = ALUInst;
 }
 
 class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
@@ -380,6 +382,7 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern,
   let update_pred = 0;
   let HasNativeOperands = 1;
   let Op1 = 1;
+  let ALUInst = 1;
   let DisableEncoding = "$literal";
 
   let Inst{31-0}  = Word0;
@@ -416,6 +419,7 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
 
   let HasNativeOperands = 1;
   let Op2 = 1;
+  let ALUInst = 1;
   let DisableEncoding = "$literal";
 
   let Inst{31-0}  = Word0;
@@ -456,6 +460,7 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern,
   let HasNativeOperands = 1;
   let DisableEncoding = "$literal";
   let Op3 = 1;
+  let ALUInst = 1;
 
   let Inst{31-0}  = Word0;
   let Inst{63-32} = Word1;
-- 
1.7.11.4

-------------- next part --------------
From 3ea28a2d2da52b769b6fc4a6e1a3a8527b01f5d4 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Wed, 12 Jun 2013 09:41:59 -0700
Subject: [PATCH 4/5] R600: Add support for GROUP_BARRIER instruction

---
 lib/Target/R600/AMDGPUIntrinsics.td            |  2 ++
 lib/Target/R600/R600EmitClauseMarkers.cpp      |  9 +++++++-
 lib/Target/R600/R600InstrInfo.cpp              | 18 +++++++++-------
 lib/Target/R600/R600InstrInfo.h                |  2 ++
 lib/Target/R600/R600Instructions.td            | 30 ++++++++++++++++++++++++++
 lib/Target/R600/R600MachineScheduler.cpp       |  6 +++++-
 lib/Target/R600/R600Packetizer.cpp             |  8 ++++++-
 test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll | 24 +++++++++++++++++++++
 8 files changed, 88 insertions(+), 11 deletions(-)
 create mode 100644 test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll

diff --git a/lib/Target/R600/AMDGPUIntrinsics.td b/lib/Target/R600/AMDGPUIntrinsics.td
index eecb25b..9f975bf 100644
--- a/lib/Target/R600/AMDGPUIntrinsics.td
+++ b/lib/Target/R600/AMDGPUIntrinsics.td
@@ -50,6 +50,8 @@ let TargetPrefix = "AMDGPU", isTarget = 1 in {
   def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
   def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
   def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+  def int_AMDGPU_barrier_local  : Intrinsic<[], [], []>;
 }
 
 let TargetPrefix = "TGSI", isTarget = 1 in {
diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp
index ff5ce5a..0aea2d7 100644
--- a/lib/Target/R600/R600EmitClauseMarkers.cpp
+++ b/lib/Target/R600/R600EmitClauseMarkers.cpp
@@ -177,7 +177,14 @@ private:
         AluInstCount ++;
         continue;
       }
-      if (I->getOpcode() == AMDGPU::KILLGT) {
+      // XXX: GROUP_BARRIER instructions cannot be in the same ALU clause as:
+      //
+      // * KILL or INTERP instructions
+      // * Any instruction that sets UPDATE_EXEC_MASK or UPDATE_PRED bits
+      // * Uses waterfalling (i.e. INDEX_MODE = AR.X)
+      //
+      // XXX: These checks have not been implemented yet.
+      if (TII->mustBeLastInClause(I->getOpcode())) {
         I++;
         break;
       }
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index 6de47f7..d0aeaa6 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -136,14 +136,6 @@ bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
   return (TargetFlags & R600_InstFlag::ALU_INST);
 }
 
-bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
-  unsigned TargetFlags = get(Opcode).TSFlags;
-
-  return ((TargetFlags & R600_InstFlag::OP1) |
-          (TargetFlags & R600_InstFlag::OP2) |
-          (TargetFlags & R600_InstFlag::OP3));
-}
-
 bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
   return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
 }
@@ -171,6 +163,16 @@ bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
          usesTextureCache(MI->getOpcode());
 }
 
+bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
+  switch (Opcode) {
+  case AMDGPU::KILLGT:
+  case AMDGPU::GROUP_BARRIER:
+    return true;
+  default:
+    return false;
+  }
+}
+
 SmallVector<std::pair<MachineOperand *, int64_t>, 3>
 R600InstrInfo::getSrcs(MachineInstr *MI) const {
   SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index f06abf6..3c2e50b 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -72,6 +72,8 @@ namespace llvm {
   bool usesTextureCache(unsigned Opcode) const;
   bool usesTextureCache(const MachineInstr *MI) const;
 
+  bool mustBeLastInClause(unsigned Opcode) const;
+
   /// \returns a pair for each src of an ALU instructions.
   /// The first member of a pair is the register id.
   /// If register is ALU_CONST, second member is SEL.
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 422e9914..d24682e 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -1960,8 +1960,38 @@ def CONSTANT_LOAD_eg : VTX_READ_32_eg <1,
   [(set i32:$dst, (constant_load ADDRVTX_READ:$ptr))]
 >;
 
+def GROUP_BARRIER : InstR600 <
+    (outs), (ins), "  GROUP_BARRIER", [(int_AMDGPU_barrier_local)], AnyALU>,
+    R600ALU_Word0,
+    R600ALU_Word1_OP2 <0x54> {
+
+  let dst = 0;
+  let dst_rel = 0;
+  let src0 = 0;
+  let src0_rel = 0;
+  let src0_neg = 0;
+  let src0_abs = 0;
+  let src1 = 0;
+  let src1_rel = 0;
+  let src1_neg = 0;
+  let src1_abs = 0;
+  let write = 0;
+  let omod = 0;
+  let clamp = 0;
+  let last = 1;
+  let bank_swizzle = 0;
+  let pred_sel = 0;
+  let update_exec_mask = 0;
+  let update_pred = 0;
+
+  let Inst{31-0}  = Word0;
+  let Inst{63-32} = Word1;
+
+  let ALUInst = 1;
 }
 
+} // End isEGorCayman
+
 //===----------------------------------------------------------------------===//
 // Regist loads and stores - for indirect addressing
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp
index a330d88..acc1b4d 100644
--- a/lib/Target/R600/R600MachineScheduler.cpp
+++ b/lib/Target/R600/R600MachineScheduler.cpp
@@ -269,10 +269,14 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
     }
 
     // Does the instruction take a whole IG ?
+    // XXX: Is it possible to add a helper function in R600InstrInfo that can
+    // be used here and in R600PacketizerList::isSoloInstruction() ?
     if(TII->isVector(*MI) ||
         TII->isCubeOp(MI->getOpcode()) ||
-        TII->isReductionOp(MI->getOpcode()))
+        TII->isReductionOp(MI->getOpcode()) ||
+        MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
       return AluT_XYZW;
+    }
 
     // Is the result already assigned to a channel ?
     unsigned DestSubReg = MI->getOperand(0).getSubReg();
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
index 6024fd5..4c72d22 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/R600/R600Packetizer.cpp
@@ -82,7 +82,11 @@ private:
       int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
       if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0)
         continue;
-      unsigned Dst = BI->getOperand(0).getReg();
+      int DstIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::dst);
+      if (DstIdx == -1) {
+        continue;
+      }
+      unsigned Dst = BI->getOperand(DstIdx).getReg();
       if (BI->getOpcode() == AMDGPU::DOT4_r600 ||
           BI->getOpcode() == AMDGPU::DOT4_eg) {
         Result[Dst] = AMDGPU::PV_X;
@@ -154,6 +158,8 @@ public:
       return true;
     if (TII->isTransOnly(MI))
       return true;
+    if (MI->getOpcode() == AMDGPU::GROUP_BARRIER)
+      return true;
     return false;
   }
 
diff --git a/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
new file mode 100644
index 0000000..8d3c9ca
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: GROUP_BARRIER
+
+define void @test(i32 addrspace(1)* %out) {
+entry:
+  %0 = call i32 @llvm.r600.read.tidig.x()
+  %1 = getelementptr i32 addrspace(1)* %out, i32 %0
+  store i32 %0, i32 addrspace(1)* %1
+  call void @llvm.AMDGPU.barrier.local()
+  %2 = call i32 @llvm.r600.read.local.size.x()
+  %3 = sub i32 %2, 1
+  %4 = sub i32 %3, %0
+  %5 = getelementptr i32 addrspace(1)* %out, i32 %4
+  %6 = load i32 addrspace(1)* %5
+  store i32 %6, i32 addrspace(1)* %1
+  ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #0
+declare void @llvm.AMDGPU.barrier.local()
+declare i32 @llvm.r600.read.local.size.x() #0
+
+attributes #0 = { readnone }
-- 
1.7.11.4

-------------- next part --------------
From f32730b07d1136a8adeb8705ff1ee3a5e2d0e2c7 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Thu, 15 Nov 2012 21:32:31 +0000
Subject: [PATCH 5/5] R600: Add local memory support via LDS

---
 lib/Target/R600/AMDGPUAsmPrinter.cpp      |   6 ++
 lib/Target/R600/AMDGPUISelLowering.cpp    |  23 +++++++
 lib/Target/R600/AMDGPUISelLowering.h      |   3 +
 lib/Target/R600/AMDGPUInstructions.td     |   9 +++
 lib/Target/R600/AMDGPUMachineFunction.cpp |   1 +
 lib/Target/R600/AMDGPUMachineFunction.h   |   2 +
 lib/Target/R600/AMDILISelDAGToDAG.cpp     |  15 ++--
 lib/Target/R600/R600Defines.h             |   6 +-
 lib/Target/R600/R600ISelLowering.cpp      |  18 ++++-
 lib/Target/R600/R600InstrInfo.cpp         |  28 ++++++++
 lib/Target/R600/R600InstrInfo.h           |   2 +
 lib/Target/R600/R600Instructions.td       | 111 ++++++++++++++++++++++++++++--
 lib/Target/R600/R600MachineScheduler.cpp  |  12 +++-
 lib/Target/R600/R600Packetizer.cpp        |   3 +
 lib/Target/R600/R600RegisterInfo.td       |   3 +-
 lib/Target/R600/R600Schedule.td           |   2 +
 test/CodeGen/R600/local-memory.ll         |  82 ++++++++++++++++++++++
 17 files changed, 310 insertions(+), 16 deletions(-)
 create mode 100644 test/CodeGen/R600/local-memory.ll

diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp
index f720c7e..996d2a6 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp
@@ -29,6 +29,7 @@
 #include "llvm/MC/MCSectionELF.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/Support/ELF.h"
+#include "llvm/Support/MathExtras.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 
@@ -130,6 +131,11 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
                            S_STACK_SIZE(MFI->StackSize), 4);
   OutStreamer.EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
   OutStreamer.EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
+
+  if (MFI->ShaderType == ShaderType::COMPUTE) {
+    OutStreamer.EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
+    OutStreamer.EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
+  }
 }
 
 void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index 02d6fab..804f991 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -24,6 +24,7 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/DataLayout.h"
 
 using namespace llvm;
 
@@ -70,6 +71,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::UDIV, MVT::i32, Expand);
   setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
   setOperationAction(ISD::UREM, MVT::i32, Expand);
+
+  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
 }
 
 //===---------------------------------------------------------------------===//
@@ -116,6 +119,26 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
   return Op;
 }
 
+SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
+                                                 SDValue Op,
+                                                 SelectionDAG &DAG) const {
+
+  const DataLayout *TD = getTargetMachine().getDataLayout();
+  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
+  // XXX: What does the value of G->getOffset() mean?
+  assert(G->getOffset() == 0 &&
+         "Do not know what to do with an non-zero offset");
+
+  unsigned Offset = MFI->LDSSize;
+  const GlobalValue *GV = G->getGlobal();
+  uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
+
+  // XXX: Account for alignment?
+  MFI->LDSSize += Size;
+
+  return DAG.getConstant(Offset, MVT::i32);
+}
+
 SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     SelectionDAG &DAG) const {
   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index 69a0ac9..d739a01 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -20,6 +20,7 @@
 
 namespace llvm {
 
+class AMDGPUMachineFunction;
 class MachineRegisterInfo;
 
 class AMDGPUTargetLowering : public TargetLowering {
@@ -36,6 +37,8 @@ protected:
   virtual SDValue CreateLiveInRegister(SelectionDAG &DAG,
                                        const TargetRegisterClass *RC,
                                        unsigned Reg, EVT VT) const;
+  SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
+                             SelectionDAG &DAG) const;
 
   bool isHWTrueValue(SDValue Op) const;
   bool isHWFalseValue(SDValue Op) const;
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index 29df374..234bb99 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -94,6 +94,15 @@ def zextloadi8_constant : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{
     return isGlobalLoad(dyn_cast<LoadSDNode>(N));
 }]>;
 
+def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+    return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def local_store : PatFrag<(ops node:$val, node:$ptr),
+                             (store node:$val, node:$ptr), [{
+    return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
 class Constants {
 int TWO_PI = 0x40c90fdb;
 int PI = 0x40490fdb;
diff --git a/lib/Target/R600/AMDGPUMachineFunction.cpp b/lib/Target/R600/AMDGPUMachineFunction.cpp
index 0461025..9a1e344 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.cpp
+++ b/lib/Target/R600/AMDGPUMachineFunction.cpp
@@ -10,6 +10,7 @@ const char *AMDGPUMachineFunction::ShaderTypeAttribute = "ShaderType";
 AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
     MachineFunctionInfo() {
   ShaderType = ShaderType::COMPUTE;
+  LDSSize = 0;
   AttributeSet Set = MF.getFunction()->getAttributes();
   Attribute A = Set.getAttribute(AttributeSet::FunctionIndex,
                                  ShaderTypeAttribute);
diff --git a/lib/Target/R600/AMDGPUMachineFunction.h b/lib/Target/R600/AMDGPUMachineFunction.h
index 21c8c51..5d5df12 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.h
+++ b/lib/Target/R600/AMDGPUMachineFunction.h
@@ -23,6 +23,8 @@ private:
 public:
   AMDGPUMachineFunction(const MachineFunction &MF);
   unsigned ShaderType;
+  /// Number of bytes in the LDS that are being used.
+  unsigned LDSSize;
 };
 
 }
diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
index a01879e..7ccb68a 100644
--- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
@@ -283,11 +283,16 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
 
         int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
                                         AMDGPU::OpName::literal);
-        assert(ImmIdx != -1);
+        if (ImmIdx == -1) {
+          continue;
+        }
 
-        // subtract one from ImmIdx, because the DST operand is usually index
-        // 0 for MachineInstrs, but we have no DST in the Ops vector.
-        ImmIdx--;
+        if (TII->getOperandIdx(Use->getMachineOpcode(),
+                               AMDGPU::OpName::dst) != -1) {
+          // subtract one from ImmIdx, because the DST operand is usually index
+          // 0 for MachineInstrs, but we have no DST in the Ops vector.
+          ImmIdx--;
+        }
 
         // Check that we aren't already using an immediate.
         // XXX: It's possible for an instruction to have more than one
@@ -337,7 +342,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
     }
     if (Result && Result->isMachineOpcode() &&
         !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
-        && TII->isALUInstr(Result->getMachineOpcode())) {
+        && TII->hasInstrModifiers(Result->getMachineOpcode())) {
       // Fold FNEG/FABS/CONST_ADDRESS
       // TODO: Isel can generate multiple MachineInst, we need to recursively
       // parse Result
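
(Illustration only, not part of the patch.)  The hunk above leans on the new
named-operand lookup: find the 'literal' operand by name, skip the use if the
instruction has none, and only shift the index when the instruction also has a
'dst' operand that the local Ops vector omits.  A minimal standalone C++
sketch of that pattern, with a made-up operand layout (this is not the LLVM
API):

#include <cstdio>
#include <map>
#include <string>

// Hypothetical stand-in for a per-opcode operand-name table.
static int getOperandIdx(const std::map<std::string, int> &NamedOps,
                         const std::string &Name) {
  std::map<std::string, int>::const_iterator It = NamedOps.find(Name);
  return It == NamedOps.end() ? -1 : It->second;
}

int main() {
  // Assumed layout for illustration: dst at index 0, literal at index 2.
  std::map<std::string, int> NamedOps;
  NamedOps["dst"] = 0;
  NamedOps["literal"] = 2;

  int ImmIdx = getOperandIdx(NamedOps, "literal");
  if (ImmIdx == -1)
    return 0;                  // No literal operand: nothing to fold.

  if (getOperandIdx(NamedOps, "dst") != -1)
    --ImmIdx;                  // The Ops vector has no dst, so shift by one.

  std::printf("literal lives at Ops[%d]\n", ImmIdx); // prints 1
  return 0;
}
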
diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h
index 6bcf8ae..90fc29c 100644
--- a/lib/Target/R600/R600Defines.h
+++ b/lib/Target/R600/R600Defines.h
@@ -42,7 +42,9 @@ namespace R600_InstFlag {
     OP2 = (1 << 11),
     VTX_INST  = (1 << 12),
     TEX_INST = (1 << 13),
-    ALU_INST = (1 << 14)
+    ALU_INST = (1 << 14),
+    LDS_1A = (1 << 15),
+    LDS_1A1D = (1 << 16)
   };
 }
 
@@ -162,4 +164,6 @@ namespace OpName {
 #define R_028878_SQ_PGM_RESOURCES_GS                 0x028878
 #define R_0288D4_SQ_PGM_RESOURCES_LS                 0x0288d4
 
+#define R_0288E8_SQ_LDS_ALLOC                        0x0288E8
+
 #endif // R600DEFINES_H_
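
As a quick cross-reference (illustration only, not part of the patch): the
".long 166120" lines checked by the new local-memory.ll test below are simply
the decimal form of this register offset:

// Not part of the patch; just confirms the constant used by the new test.
static_assert(0x0288E8 == 166120, "R_0288E8_SQ_LDS_ALLOC in decimal");
int main() { return 0; }
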
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index ee13320..0232533 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -156,6 +156,19 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
     break;
   }
 
+  case AMDGPU::LDS_READ_RET: {
+    MachineInstrBuilder NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
+                                        TII->get(MI->getOpcode()),
+                                        AMDGPU::OQAP);
+    for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) {
+      NewMI.addOperand(MI->getOperand(i));
+    }
+    TII->buildDefaultInstruction(*BB, I, AMDGPU::MOV,
+                                 MI->getOperand(0).getReg(),
+                                 AMDGPU::OQAP);
+    break;
+  }
+
   case AMDGPU::MOV_IMM_F32:
     TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
                      MI->getOperand(1).getFPImm()->getValueAPF()
@@ -474,6 +487,8 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
 //===----------------------------------------------------------------------===//
 
 SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
   switch (Op.getOpcode()) {
   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
   case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
@@ -481,14 +496,13 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
   case ISD::STORE: return LowerSTORE(Op, DAG);
   case ISD::LOAD: return LowerLOAD(Op, DAG);
   case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
+  case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
   case ISD::INTRINSIC_VOID: {
     SDValue Chain = Op.getOperand(0);
     unsigned IntrinsicID =
                          cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
     switch (IntrinsicID) {
     case AMDGPUIntrinsic::AMDGPU_store_output: {
-      MachineFunction &MF = DAG.getMachineFunction();
-      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
       int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
       unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
       MFI->LiveOuts.push_back(Reg);
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index d0aeaa6..014e0d5 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -136,6 +136,21 @@ bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
   return (TargetFlags & R600_InstFlag::ALU_INST);
 }
 
+bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
+  unsigned TargetFlags = get(Opcode).TSFlags;
+
+  return ((TargetFlags & R600_InstFlag::OP1) |
+          (TargetFlags & R600_InstFlag::OP2) |
+          (TargetFlags & R600_InstFlag::OP3));
+}
+
+bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
+  unsigned TargetFlags = get(Opcode).TSFlags;
+
+  return ((TargetFlags & R600_InstFlag::LDS_1A) |
+          (TargetFlags & R600_InstFlag::LDS_1A1D));
+}
+
 bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
   return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
 }
@@ -245,6 +260,9 @@ R600InstrInfo::ExtractSrcs(MachineInstr *MI,
     unsigned Reg = Srcs[i].first->getReg();
     unsigned Index = RI.getEncodingValue(Reg) & 0xff;
     unsigned Chan = RI.getHWRegChan(Reg);
+    if (Reg == AMDGPU::OQAP) {
+      Result.push_back(std::pair<int, unsigned>(Reg, 0));
+    }
     if (Index > 127) {
       Result.push_back(DummyPair);
       continue;
@@ -300,6 +318,16 @@ isLegal(const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
       const std::pair<int, unsigned> &Src = Srcs[j];
       if (Src.first < 0)
         continue;
+      if (Src.first == AMDGPU::OQAP) {
+        if (Swz[i] != R600InstrInfo::ALU_VEC_012 &&
+            Swz[i] != R600InstrInfo::ALU_VEC_021) {
+            // The value from output queue A (denoted by register OQAP) can
+            // only be fetched during the first cycle.
+            return false;
+        }
+        // OQAP does not count towards the normal read port restrictions
+        continue;
+      }
       if (Vector[Src.second][j] < 0)
         Vector[Src.second][j] = Src.first;
       if (Vector[Src.second][j] != Src.first)
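
The OQAP handling above boils down to one rule: an OQAP source is only legal
under the ALU_VEC_012 or ALU_VEC_021 bank swizzles (so the value is fetched in
the first cycle), and it does not consume a regular read port.  A standalone
sketch of that rule, using a mock enum rather than the real R600InstrInfo
types:

#include <cstdio>

// Mock of the bank-swizzle enumeration; the real one lives in R600InstrInfo.
enum BankSwizzle { ALU_VEC_012, ALU_VEC_021, ALU_VEC_120, ALU_VEC_102 };

// True if an OQAP source is allowed under the given swizzle, mirroring the
// check in isLegal(): OQAP may only be fetched during the first cycle.
static bool oqapSrcIsLegal(BankSwizzle Swz) {
  return Swz == ALU_VEC_012 || Swz == ALU_VEC_021;
}

int main() {
  std::printf("ALU_VEC_012: %d  ALU_VEC_120: %d\n",
              oqapSrcIsLegal(ALU_VEC_012), oqapSrcIsLegal(ALU_VEC_120));
  return 0;
}
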
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index 3c2e50b..688cd0c 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -63,6 +63,8 @@ namespace llvm {
 
   /// \returns true if this \p Opcode represents an ALU instruction.
   bool isALUInstr(unsigned Opcode) const;
+  bool hasInstrModifiers(unsigned Opcode) const;
+  bool isLDSInstr(unsigned Opcode) const;
 
   bool isTransOnly(unsigned Opcode) const;
   bool isTransOnly(const MachineInstr *MI) const;
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index d24682e..bd61031 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -25,6 +25,8 @@ class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
   bits<2> FlagOperandIdx = 0;
   bit Op1 = 0;
   bit Op2 = 0;
+  bit LDS_1A = 0;
+  bit LDS_1A1D = 0;
   bit HasNativeOperands = 0;
   bit VTXInst = 0;
   bit TEXInst = 0;
@@ -51,6 +53,8 @@ class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
   let TSFlags{12} = VTXInst;
   let TSFlags{13} = TEXInst;
   let TSFlags{14} = ALUInst;
+  let TSFlags{15} = LDS_1A;
+  let TSFlags{16} = LDS_1A1D;
 }
 
 class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
@@ -116,15 +120,13 @@ def ADDRGA_CONST_OFFSET : ComplexPattern<i32, 1, "SelectGlobalValueConstantOffse
 def ADDRGA_VAR_OFFSET : ComplexPattern<i32, 2, "SelectGlobalValueVariableOffset", [], []>;
 def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
 
-class R600ALU_Word0 {
+class R600_ALU_LDS_Word0 {
   field bits<32> Word0;
 
   bits<11> src0;
-  bits<1>  src0_neg;
   bits<1>  src0_rel;
   bits<11> src1;
   bits<1>  src1_rel;
-  bits<1>  src1_neg;
   bits<3>  index_mode = 0;
   bits<2>  pred_sel;
   bits<1>  last;
@@ -137,16 +139,23 @@ class R600ALU_Word0 {
   let Word0{8-0}   = src0_sel;
   let Word0{9}     = src0_rel;
   let Word0{11-10} = src0_chan;
-  let Word0{12}    = src0_neg;
   let Word0{21-13} = src1_sel;
   let Word0{22}    = src1_rel;
   let Word0{24-23} = src1_chan;
-  let Word0{25}    = src1_neg;
   let Word0{28-26} = index_mode;
   let Word0{30-29} = pred_sel;
   let Word0{31}    = last;
 }
 
+class R600ALU_Word0 : R600_ALU_LDS_Word0 {
+
+  bits<1>  src0_neg;
+  bits<1>  src1_neg;
+
+  let Word0{12}    = src0_neg;
+  let Word0{25}    = src1_neg;
+}
+
 class R600ALU_Word1 {
   field bits<32> Word1;
 
@@ -1990,6 +1999,98 @@ def GROUP_BARRIER : InstR600 <
   let ALUInst = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// LDS Instructions
+//===----------------------------------------------------------------------===//
+class R600_LDS  <bits<6> op, dag outs, dag ins, string asm,
+                 list<dag> pattern = []> :
+
+    InstR600 <outs, ins, asm, pattern, XALU>,
+    R600_ALU_LDS_Word0 {
+
+  field bits<32> Word1;
+
+  bits<11> src2;
+  bits<1>  src2_rel;
+  // offset specifies the stride offset to the second set of data to be
+  // read.  This is a dword offset.
+  bits<6>  offset = 0;
+  bits<5>  alu_inst = 17; // OP3_INST_LDS_IDX_OP
+  bits<3>  bank_swizzle;
+  bits<6>  lds_op = op;
+  bits<2>  dst_chan = 0;
+
+  bits<9> src2_sel  = src2{8-0};
+  bits<2> src2_chan = src2{10-9};
+
+  let Word0{12} = offset{4};
+  let Word0{25} = offset{5};
+
+  let Word1{8-0}   = src2_sel;
+  let Word1{9}     = src2_rel;
+  let Word1{11-10} = src2_chan;
+  let Word1{12}    = offset{1};
+  let Word1{17-13} = alu_inst;
+  let Word1{20-18} = bank_swizzle;
+  let Word1{26-21} = lds_op;
+  let Word1{27}    = offset{0};
+  let Word1{28}    = offset{2};
+  let Word1{30-29} = dst_chan;
+  let Word1{31}    = offset{3};
+
+  let Inst{31-0}  = Word0;
+  let Inst{63-32} = Word1;
+
+  let ALUInst = 1;
+  let HasNativeOperands = 1;
+}
+
+class R600_LDS_1A <bits<6> lds_op, string name, list<dag> pattern> : R600_LDS <
+  lds_op,
+  (outs R600_Reg32:$dst),
+  (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+       LAST:$last, R600_Pred:$pred_sel,
+       BANK_SWIZZLE:$bank_swizzle),
+  "  "#name#" $last OQAP, $src0$src0_rel $pred_sel",
+  pattern
+  > {
+
+  let src1 = 0;
+  let src1_rel = 0;
+  let src2 = 0;
+  let src2_rel = 0;
+
+  let Defs = [OQAP];
+  let usesCustomInserter = 1;
+  let LDS_1A = 1;
+  let DisableEncoding = "$dst";
+}
+
+class R600_LDS_1A1D <bits<6> lds_op, string name, list<dag> pattern> :
+    R600_LDS <
+  lds_op,
+  (outs),
+  (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+       R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
+       LAST:$last, R600_Pred:$pred_sel,
+       BANK_SWIZZLE:$bank_swizzle),
+  "  "#name#" $last $src0$src0_rel, $src1$src1_rel, $pred_sel",
+  pattern
+  > {
+
+  let src2 = 0;
+  let src2_rel = 0;
+  let LDS_1A1D = 1;
+}
+
+def LDS_READ_RET : R600_LDS_1A <0x32, "LDS_READ_RET",
+  [(set (i32 R600_Reg32:$dst), (local_load R600_Reg32:$src0))]
+>;
+
+def LDS_WRITE : R600_LDS_1A1D <0xD, "LDS_WRITE",
+  [(local_store (i32 R600_Reg32:$src1), R600_Reg32:$src0)]
+>;
+
 } // End isEGorCayman
 
 //===----------------------------------------------------------------------===//
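
The R600_LDS class above scatters the 6-bit 'offset' field across both
instruction words.  A standalone C++ sketch of just that bit placement,
derived from the 'let Word0/Word1' assignments (illustration only, not an
encoder):

#include <cstdint>
#include <cstdio>

// Place the 6-bit LDS 'offset' field into the two 32-bit instruction words
// the same way the 'let Word0{...}' / 'let Word1{...}' lines in R600_LDS do.
static void packLDSOffset(uint32_t Offset, uint32_t &Word0, uint32_t &Word1) {
  Word0 |= ((Offset >> 4) & 1) << 12;  // Word0{12} = offset{4}
  Word0 |= ((Offset >> 5) & 1) << 25;  // Word0{25} = offset{5}
  Word1 |= ((Offset >> 1) & 1) << 12;  // Word1{12} = offset{1}
  Word1 |= ((Offset >> 0) & 1) << 27;  // Word1{27} = offset{0}
  Word1 |= ((Offset >> 2) & 1) << 28;  // Word1{28} = offset{2}
  Word1 |= ((Offset >> 3) & 1) << 31;  // Word1{31} = offset{3}
}

int main() {
  uint32_t W0 = 0, W1 = 0;
  packLDSOffset(0x3F, W0, W1);         // All six offset bits set.
  std::printf("Word0=0x%08x Word1=0x%08x\n", (unsigned)W0, (unsigned)W1);
  return 0;
}
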
diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp
index acc1b4d..7e28f9d 100644
--- a/lib/Target/R600/R600MachineScheduler.cpp
+++ b/lib/Target/R600/R600MachineScheduler.cpp
@@ -278,6 +278,10 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
       return AluT_XYZW;
     }
 
+    if (TII->isLDSInstr(MI->getOpcode())) {
+      return AluT_X;
+    }
+
     // Is the result already assigned to a channel ?
     unsigned DestSubReg = MI->getOperand(0).getSubReg();
     switch (DestSubReg) {
@@ -371,14 +375,18 @@ void R600SchedStrategy::PrepareNextSlot() {
 }
 
 void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
-  unsigned DestReg = MI->getOperand(0).getReg();
+  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+  if (DstIndex == -1) {
+    return;
+  }
+  unsigned DestReg = MI->getOperand(DstIndex).getReg();
   // PressureRegister crashes if an operand is def and used in the same inst
   // and we try to constraint its regclass
   for (MachineInstr::mop_iterator It = MI->operands_begin(),
       E = MI->operands_end(); It != E; ++It) {
     MachineOperand &MO = *It;
     if (MO.isReg() && !MO.isDef() &&
-        MO.getReg() == MI->getOperand(0).getReg())
+        MO.getReg() == DestReg)
       return;
   }
   // Constrains the regclass of DestReg to assign it to Slot
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
index 4c72d22..6fc15de 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/R600/R600Packetizer.cpp
@@ -92,6 +92,9 @@ private:
         Result[Dst] = AMDGPU::PV_X;
         continue;
       }
+      if (Dst == AMDGPU::OQAP) {
+        continue;
+      }
       unsigned PVReg = 0;
       switch (TRI.getHWRegChan(Dst)) {
       case 0:
diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/R600/R600RegisterInfo.td
index a8b9b70..60a93e3 100644
--- a/lib/Target/R600/R600RegisterInfo.td
+++ b/lib/Target/R600/R600RegisterInfo.td
@@ -101,6 +101,7 @@ def PRED_SEL_OFF: R600Reg<"Pred_sel_off", 0>;
 def PRED_SEL_ZERO : R600Reg<"Pred_sel_zero", 2>;
 def PRED_SEL_ONE : R600Reg<"Pred_sel_one", 3>;
 def AR_X : R600Reg<"AR.x", 0>;
+def OQAP : R600Reg<"OQAP", 221>;
 
 def R600_ArrayBase : RegisterClass <"AMDGPU", [f32, i32], 32,
                           (add (sequence "ArrayBase%u", 448, 480))>;
@@ -170,7 +171,7 @@ def R600_Reg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add
     R600_ArrayBase,
     R600_Addr,
     ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF,
-    ALU_CONST, ALU_PARAM
+    ALU_CONST, ALU_PARAM, OQAP
     )>;
 
 def R600_Predicate : RegisterClass <"AMDGPU", [i32], 32, (add
diff --git a/lib/Target/R600/R600Schedule.td b/lib/Target/R600/R600Schedule.td
index 78a460a..bd23cf8 100644
--- a/lib/Target/R600/R600Schedule.td
+++ b/lib/Target/R600/R600Schedule.td
@@ -23,6 +23,7 @@ def TRANS : FuncUnit;
 def AnyALU : InstrItinClass;
 def VecALU : InstrItinClass;
 def TransALU : InstrItinClass;
+def XALU : InstrItinClass;
 
 def R600_VLIW5_Itin : ProcessorItineraries <
   [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS, ALU_NULL],
@@ -31,6 +32,7 @@ def R600_VLIW5_Itin : ProcessorItineraries <
     InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS]>]>,
     InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>,
     InstrItinData<TransALU, [InstrStage<1, [TRANS]>]>,
+    InstrItinData<XALU, [InstrStage<1, [ALU_X]>]>,
     InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]>
   ]
 >;
diff --git a/test/CodeGen/R600/local-memory.ll b/test/CodeGen/R600/local-memory.ll
new file mode 100644
index 0000000..0ff3848
--- /dev/null
+++ b/test/CodeGen/R600/local-memory.ll
@@ -0,0 +1,82 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+
+@local_memory.local_mem = internal addrspace(3) unnamed_addr global [16 x i32] zeroinitializer, align 4
+
+; CHECK: @local_memory
+
+; Check that the LDS size is emitted correctly
+; CHECK: .long 166120
+; CHECK-NEXT: .long 16
+
+; CHECK: LDS_WRITE
+
+; GROUP_BARRIER must be the last instruction in a clause
+; CHECK: GROUP_BARRIER
+; CHECK-NEXT: ALU clause
+
+; CHECK: LDS_READ_RET
+
+define void @local_memory(i32 addrspace(1)* %out) {
+entry:
+  %y.i = call i32 @llvm.r600.read.tidig.x() #0
+  %arrayidx = getelementptr inbounds [16 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %y.i
+  store i32 %y.i, i32 addrspace(3)* %arrayidx, align 4
+  %add = add nsw i32 %y.i, 1
+  %cmp = icmp eq i32 %add, 16
+  %.add = select i1 %cmp, i32 0, i32 %add
+  call void @llvm.AMDGPU.barrier.local()
+  %arrayidx1 = getelementptr inbounds [16 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %.add
+  %0 = load i32 addrspace(3)* %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %out, i32 %y.i
+  store i32 %0, i32 addrspace(1)* %arrayidx2, align 4
+  ret void
+}
+
+@local_memory_two_objects.local_mem0 = internal addrspace(3) unnamed_addr global [4 x i32] zeroinitializer, align 4
+@local_memory_two_objects.local_mem1 = internal addrspace(3) unnamed_addr global [4 x i32] zeroinitializer, align 4
+
+; CHECK: @local_memory_two_objects
+
+; Check that the LDS size is emitted correctly
+; CHECK: .long 166120
+; CHECK-NEXT: .long 8
+
+; Make sure the LDS writes use different addresses.
+; CHECK: LDS_WRITE {{[*]*}} {{PV|T}}[[ADDRW:[0-9]*\.[XYZW]]]
+; CHECK-NOT: LDS_WRITE {{[*]*}} T[[ADDRW]]
+
+; GROUP_BARRIER must be the last instruction in a clause
+; CHECK: GROUP_BARRIER
+; CHECK-NEXT: ALU clause
+
+; Make sure the LDS reads use different addresses.
+; CHECK: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
+; CHECK-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
+
+define void @local_memory_two_objects(i32 addrspace(1)* %out) {
+entry:
+  %x.i = call i32 @llvm.r600.read.tidig.x() #0
+  %arrayidx = getelementptr inbounds [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %x.i
+  store i32 %x.i, i32 addrspace(3)* %arrayidx, align 4
+  %mul = shl nsw i32 %x.i, 1
+  %arrayidx1 = getelementptr inbounds [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %x.i
+  store i32 %mul, i32 addrspace(3)* %arrayidx1, align 4
+  %sub = sub nsw i32 3, %x.i
+  call void @llvm.AMDGPU.barrier.local()
+  %arrayidx2 = getelementptr inbounds [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %sub
+  %0 = load i32 addrspace(3)* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32 addrspace(1)* %out, i32 %x.i
+  store i32 %0, i32 addrspace(1)* %arrayidx3, align 4
+  %arrayidx4 = getelementptr inbounds [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %sub
+  %1 = load i32 addrspace(3)* %arrayidx4, align 4
+  %add = add nsw i32 %x.i, 4
+  %arrayidx5 = getelementptr inbounds i32 addrspace(1)* %out, i32 %add
+  store i32 %1, i32 addrspace(1)* %arrayidx5, align 4
+  ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #0
+declare void @llvm.AMDGPU.barrier.local()
+
+attributes #0 = { readnone }
-- 
1.7.11.4


