[Mesa-dev] PATCH: R600: Fix handling of kernel arguments

Tom Stellard tom at stellard.net
Wed Oct 16 00:43:00 CEST 2013


On Tue, Oct 15, 2013 at 01:59:55PM -0400, Alex Deucher wrote:
> Did you forget the attachments?
> 

Yes I did.  Here they are.

-Tom

> On Tue, Oct 15, 2013 at 12:40 PM, Tom Stellard <tom at stellard.net> wrote:
> > Hi,
> >
> > The attached patches fix various bugs in the handling of kernel arguments
> > in the R600 backend.  The main problem with the old implementation was
> > that it disagreed with clover on type size and alignment, so vector
> > arguments were not being loaded correctly into the kernel.
> >
> > This series depends on this patch:
> >
> > http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20131014/191252.html
> >
> > Please review and test.
> >
> > -Tom
> > _______________________________________________
> > llvm-commits mailing list
> > llvm-commits at cs.uiuc.edu
> > http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
-------------- next part --------------
From f7c9f8f53ac1f7cd315ae142914113146b13ee76 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Tue, 27 Aug 2013 17:24:04 -0700
Subject: [PATCH 1/3] R600: Fix handling of vector kernel arguments

The SelectionDAGBuilder was promoting vector kernel arguments to legal
types, but this won't work for R600 and SI since kernel arguments are
stored in memory and can't be promoted.  In order to handle vector
arguments correctly we need to look at the original types from the LLVM IR
function.
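
An illustrative reduced case (assumed for exposition; the function name
and types are hypothetical, written in the style of the existing R600
tests):

  define void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
  entry:
    store <4 x i8> %in, <4 x i8> addrspace(1)* %out
    ret void
  }

Here <4 x i8> is not a legal type, so the SelectionDAGBuilder would
promote it (e.g. to <4 x i32>, 16 bytes), while clover stores the
argument in the kernel argument buffer as 4 bytes with 4-byte
alignment; every argument after it would then be read from the wrong
offset.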
---
 lib/Target/R600/AMDGPUCallingConv.td    |  6 +--
 lib/Target/R600/AMDGPUISelLowering.cpp  | 52 +++++++++++++++++++
 lib/Target/R600/AMDGPUISelLowering.h    | 12 ++++-
 lib/Target/R600/AMDGPUTargetMachine.cpp |  5 +-
 lib/Target/R600/R600ISelLowering.cpp    | 23 ++++++---
 lib/Target/R600/SIISelLowering.cpp      | 64 +++++++++++++++--------
 lib/Target/R600/SIISelLowering.h        |  2 +-
 lib/Target/R600/SIInstructions.td       | 14 +++++
 lib/Target/R600/SIRegisterInfo.td       |  2 +-
 test/CodeGen/R600/or.ll                 |  2 +-
 test/CodeGen/R600/short-args.ll         | 69 -------------------------
 test/CodeGen/R600/store.ll              | 90 +++++++++++++++++----------------
 test/CodeGen/R600/trunc.ll              |  4 +-
 13 files changed, 190 insertions(+), 155 deletions(-)
 delete mode 100644 test/CodeGen/R600/short-args.ll

diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td
index a194e6d..3535e35 100644
--- a/lib/Target/R600/AMDGPUCallingConv.td
+++ b/lib/Target/R600/AMDGPUCallingConv.td
@@ -44,11 +44,7 @@ def CC_SI : CallingConv<[
 
 // Calling convention for compute kernels
 def CC_AMDGPU_Kernel : CallingConv<[
-  CCIfType<[v4i32, v4f32],               CCAssignToStack <16, 16>>,
-  CCIfType<[i64, f64, v2f32, v2i32],     CCAssignToStack < 8, 8>>,
-  CCIfType<[i32, f32],                   CCAssignToStack < 4, 4>>,
-  CCIfType<[i16],                        CCAssignToStack < 2, 4>>,
-  CCIfType<[i8],                         CCAssignToStack < 1, 4>>
+  CCCustom<"allocateStack">
 ]>;
 
 def CC_AMDGPU : CallingConv<[
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index f8f0596..91d85d3 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -29,6 +29,14 @@
 #include "llvm/IR/DataLayout.h"
 
 using namespace llvm;
+static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
+                          CCValAssign::LocInfo LocInfo,
+                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
+  unsigned Offset = State.AllocateStack(ValVT.getSizeInBits() / 8, ArgFlags.getOrigAlign());
+  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+
+  return true;
+}
 
 #include "AMDGPUGenCallingConv.inc"
 
@@ -65,6 +73,12 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
 
+  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
+  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
+
+  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
+  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
+
   setOperationAction(ISD::STORE, MVT::f64, Promote);
   AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
 
@@ -91,6 +105,12 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
 
+  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
+  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
+
+  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
+  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
+
   setOperationAction(ISD::LOAD, MVT::f64, Promote);
   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
 
@@ -673,6 +693,38 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
 // Helper functions
 //===----------------------------------------------------------------------===//
 
+void AMDGPUTargetLowering::getOriginalFunctionArgs(
+                               SelectionDAG &DAG,
+                               const Function *F,
+                               const SmallVectorImpl<ISD::InputArg> &Ins,
+                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {
+
+  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
+    if (Ins[i].ArgVT == Ins[i].VT) {
+      OrigIns.push_back(Ins[i]);
+      continue;
+    }
+
+    EVT VT;
+    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
+      // Vector has been split into scalars.
+      VT = Ins[i].ArgVT.getVectorElementType();
+    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
+               Ins[i].ArgVT.getVectorElementType() !=
+               Ins[i].VT.getVectorElementType()) {
+      // Vector elements have been promoted.
+      VT = Ins[i].ArgVT;
+    } else {
+      // Vector has been split into smaller vectors.
+      VT = Ins[i].VT;
+    }
+
+    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
+                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
+    OrigIns.push_back(Arg);
+  }
+}
+
 bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
   if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
     return CFP->isExactlyValue(1.0);
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index 43f6389..e167f12 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -37,7 +37,6 @@ private:
   SDValue MergeVectorStore(const SDValue &Op, SelectionDAG &DAG) const;
   /// \brief Split a vector store into multiple scalar stores.
   /// \returns The resulting chain. 
-  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
 
 protected:
@@ -53,10 +52,21 @@ protected:
                              SelectionDAG &DAG) const;
   /// \brief Split a vector load into multiple scalar loads.
   SDValue SplitVectorLoad(const SDValue &Op, SelectionDAG &DAG) const;
+  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
   bool isHWTrueValue(SDValue Op) const;
   bool isHWFalseValue(SDValue Op) const;
 
+  /// The SelectionDAGBuilder will automatically promote function arguments
+  /// with illegal types.  However, this does not work for the AMDGPU targets
+  /// since the function arguments are stored in memory as these illegal types.
+  /// In order to handle this properly we need to get the original type sizes
+  /// from the LLVM IR Function and fix up the ISD::InputArg values before
+  /// passing them to AnalyzeFormalArguments().
+  void getOriginalFunctionArgs(SelectionDAG &DAG,
+                               const Function *F,
+                               const SmallVectorImpl<ISD::InputArg> &Ins,
+                               SmallVectorImpl<ISD::InputArg> &OrigIns) const;
   void AnalyzeFormalArguments(CCState &State,
                               const SmallVectorImpl<ISD::InputArg> &Ins) const;
 
diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp
index 9722e7d..b19277d 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/lib/Target/R600/AMDGPUTargetMachine.cpp
@@ -59,8 +59,9 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
   LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OptLevel),
   Subtarget(TT, CPU, FS),
   Layout(Subtarget.getDataLayout()),
-  FrameLowering(TargetFrameLowering::StackGrowsUp, 16 // Stack Alignment
-                                                 , 0),
+  FrameLowering(TargetFrameLowering::StackGrowsUp,
+                64 * 16 // Maximum stack alignment (long16)
+               , 0),
   IntrinsicInfo(this),
   InstrItins(&Subtarget.getInstrItineraryData()) {
   // TLInfo uses InstrInfo so it must be initialized after.
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 1765261..b46a283 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -1194,7 +1194,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
   }
 
   int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
-  if (ConstantBlock > -1) {
+  if (ConstantBlock > -1 && LoadNode->getExtensionType() != ISD::SEXTLOAD) {
     SDValue Result;
     if (dyn_cast<ConstantExpr>(LoadNode->getSrcValue()) ||
         dyn_cast<Constant>(LoadNode->getSrcValue()) ||
@@ -1325,22 +1325,29 @@ SDValue R600TargetLowering::LowerFormalArguments(
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), ArgLocs, *DAG.getContext());
 
-  AnalyzeFormalArguments(CCInfo, Ins);
+  SmallVector<ISD::InputArg, 8> LocalIns;
+
+  getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
+                          LocalIns);
+
+  AnalyzeFormalArguments(CCInfo, LocalIns);
 
   for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    EVT VT = VA.getLocVT();
+    EVT VT = Ins[i].VT;
+    EVT MemVT = LocalIns[i].VT;
 
     PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                                    AMDGPUAS::CONSTANT_BUFFER_0);
 
     // The first 36 bytes of the input buffer contains information about
     // thread group and global sizes.
-    SDValue Arg = DAG.getLoad(VT, DL, Chain,
-                           DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
-                           MachinePointerInfo(UndefValue::get(PtrTy)), false,
-                           false, false, 4); // 4 is the prefered alignment for
-                                             // the CONSTANT memory space.
+    SDValue Arg = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain,
+                                 DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
+                                 MachinePointerInfo(UndefValue::get(PtrTy)),
+                                 MemVT, false, false, 4);
+                                 // 4 is the preferred alignment for
+                                 // the CONSTANT memory space.
     InVals.push_back(Arg);
   }
   return Chain;
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 2e7e3a4..2eff175 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -75,13 +75,16 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::LOAD, MVT::i64, Custom);
   setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
+  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
+  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
 
   setOperationAction(ISD::STORE, MVT::i32, Custom);
   setOperationAction(ISD::STORE, MVT::i64, Custom);
   setOperationAction(ISD::STORE, MVT::i128, Custom);
   setOperationAction(ISD::STORE, MVT::v2i32, Custom);
   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
-
+  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
+  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
 
   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
@@ -91,6 +94,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
   setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
 
+  setOperationAction(ISD::ANY_EXTEND, MVT::i64, Custom);
   setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom);
   setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom);
 
@@ -103,10 +107,15 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
 
   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Expand);
   setLoadExtAction(ISD::EXTLOAD, MVT::i32, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, Expand);
 
   setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
+  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
+  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
 
   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
   setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
@@ -132,23 +141,22 @@ bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT  VT,
 }
 
 bool SITargetLowering::shouldSplitVectorElementType(EVT VT) const {
-  return VT.bitsLE(MVT::i8);
+  return VT.bitsLE(MVT::i16);
 }
 
-SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT,
+SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                          SDLoc DL, SDValue Chain,
                                          unsigned Offset) const {
   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
   PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                             AMDGPUAS::CONSTANT_ADDRESS);
-  EVT ArgVT = MVT::getIntegerVT(VT.getSizeInBits());
   SDValue BasePtr =  DAG.getCopyFromReg(Chain, DL,
                            MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64);
   SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
                                              DAG.getConstant(Offset, MVT::i64));
-  return DAG.getLoad(VT, DL, Chain, Ptr,
-                            MachinePointerInfo(UndefValue::get(PtrTy)),
-                            false, false, false, ArgVT.getSizeInBits() >> 3);
+  return DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain, Ptr,
+                            MachinePointerInfo(UndefValue::get(PtrTy)), MemVT,
+                            false, false, MemVT.getSizeInBits() >> 3);
 
 }
 
@@ -207,7 +215,7 @@ SDValue SITargetLowering::LowerFormalArguments(
         NewArg.PartOffset += NewArg.VT.getStoreSize();
       }
 
-    } else {
+    } else if (Info->ShaderType != ShaderType::COMPUTE) {
       Splits.push_back(Arg);
     }
   }
@@ -230,6 +238,11 @@ SDValue SITargetLowering::LowerFormalArguments(
     MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass);
   }
 
+  if (Info->ShaderType == ShaderType::COMPUTE) {
+    getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
+                            Splits);
+  }
+
   AnalyzeFormalArguments(CCInfo, Splits);
 
   for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
@@ -244,9 +257,11 @@ SDValue SITargetLowering::LowerFormalArguments(
     EVT VT = VA.getLocVT();
 
     if (VA.isMemLoc()) {
+      VT = Ins[i].VT;
+      EVT MemVT = Splits[i].VT;
       // The first 36 bytes of the input buffer contains information about
       // thread group and global sizes.
-      SDValue Arg = LowerParameter(DAG, VT, DL, DAG.getRoot(),
+      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(),
                                    36 + VA.getLocMemOffset());
       InVals.push_back(Arg);
       continue;
@@ -408,9 +423,9 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
   case ISD::LOAD: {
     LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
-    if ((Load->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
-         Load->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
-        Op.getValueType().isVector()) {
+    if (Op.getValueType().isVector() &&
+        (Load->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
+         Load->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)) {
       SDValue MergedValues[2] = {
         SplitVectorLoad(Op, DAG),
         Load->getChain()
@@ -423,6 +438,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
   case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG);
   case ISD::STORE: return LowerSTORE(Op, DAG);
+  case ISD::ANY_EXTEND: // Fall-through
   case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG);
   case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
   case ISD::INTRINSIC_WO_CHAIN: {
@@ -435,23 +451,23 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     switch (IntrinsicID) {
     default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
     case Intrinsic::r600_read_ngroups_x:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 0);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 0);
     case Intrinsic::r600_read_ngroups_y:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 4);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4);
     case Intrinsic::r600_read_ngroups_z:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 8);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 8);
     case Intrinsic::r600_read_global_size_x:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 12);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 12);
     case Intrinsic::r600_read_global_size_y:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 16);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 16);
     case Intrinsic::r600_read_global_size_z:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 20);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 20);
     case Intrinsic::r600_read_local_size_x:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 24);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 24);
     case Intrinsic::r600_read_local_size_y:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 28);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 28);
     case Intrinsic::r600_read_local_size_z:
-      return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 32);
+      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 32);
     case Intrinsic::r600_read_tgid_x:
       return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
                      AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT);
@@ -722,6 +738,12 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   if (Ret.getNode())
     return Ret;
 
+  if (VT.isVector() && VT.getVectorNumElements() >= 8) {
+    Ret = SplitVectorStore(Op, DAG);
+    if (Ret.getNode())
+      return Ret;
+  }
+
   if (Store->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
     return SDValue();
 
diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h
index ecfea15..384caf4 100644
--- a/lib/Target/R600/SIISelLowering.h
+++ b/lib/Target/R600/SIISelLowering.h
@@ -21,7 +21,7 @@
 namespace llvm {
 
 class SITargetLowering : public AMDGPUTargetLowering {
-  SDValue LowerParameter(SelectionDAG &DAG, EVT VT, SDLoc DL,
+  SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, SDLoc DL,
                          SDValue Chain, unsigned Offset) const;
   SDValue LowerSampleIntrinsic(unsigned Opcode, const SDValue &Op,
                                SelectionDAG &DAG) const;
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 64812dc..a0afb63 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -1614,9 +1614,12 @@ def : BitConvert <f64, i64, VReg_64>;
 
 def : BitConvert <v2f32, v2i32, VReg_64>;
 def : BitConvert <v2i32, v2f32, VReg_64>;
+def : BitConvert <v2i32, i64, VReg_64>;
 
 def : BitConvert <v4f32, v4i32, VReg_128>;
 def : BitConvert <v4i32, v4f32, VReg_128>;
+def : BitConvert <v4i32, i128,  VReg_128>;
+def : BitConvert <i128, v4i32,  VReg_128>;
 
 def : BitConvert <v8i32, v32i8, SReg_256>;
 def : BitConvert <v32i8, v8i32, SReg_256>;
@@ -2037,6 +2040,17 @@ def : Pat <
     (V_OR_B32_e32 (EXTRACT_SUBREG $a, sub1), (EXTRACT_SUBREG $b, sub1)), sub1)
 >;
 
+//===----------------------------------------------------------------------===//
+// Miscellaneous Patterns
+//===----------------------------------------------------------------------===//
+
+def : Pat <
+  (i64 (trunc i128:$x)),
+  (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+    (i32 (EXTRACT_SUBREG $x, sub0)), sub0),
+    (i32 (EXTRACT_SUBREG $x, sub1)), sub1)
+>;
+
 //============================================================================//
 // Miscellaneous Optimization Patterns
 //============================================================================//
diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td
index c8e3295..49bdbc9 100644
--- a/lib/Target/R600/SIRegisterInfo.td
+++ b/lib/Target/R600/SIRegisterInfo.td
@@ -174,7 +174,7 @@ def VReg_96 : RegisterClass<"AMDGPU", [untyped], 96, (add VGPR_96)> {
   let Size = 96;
 }
 
-def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32], 128, (add VGPR_128)>;
+def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, i128], 128, (add VGPR_128)>;
 
 def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add VGPR_256)>;
 
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index 6950ed0..6c70469 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -41,7 +41,7 @@ define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in)
 
 ; EG-CHECK-LABEL: @or_i64
 ; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
-; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[3].X
+; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
 ; SI-CHECK-LABEL: @or_i64
 ; SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]}}
 ; SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]}}
diff --git a/test/CodeGen/R600/short-args.ll b/test/CodeGen/R600/short-args.ll
deleted file mode 100644
index 8882978..0000000
--- a/test/CodeGen/R600/short-args.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-
-; EG-CHECK: @i8_arg
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: BUFFER_LOAD_UBYTE
-
-define void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
-entry:
-  %0 = zext i8 %in to i32
-  store i32 %0, i32 addrspace(1)* %out, align 4
-  ret void
-}
-
-; EG-CHECK: @i8_zext_arg
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
-
-define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
-entry:
-  %0 = zext i8 %in to i32
-  store i32 %0, i32 addrspace(1)* %out, align 4
-  ret void
-}
-
-; EG-CHECK: @i8_sext_arg
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
-
-define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
-entry:
-  %0 = sext i8 %in to i32
-  store i32 %0, i32 addrspace(1)* %out, align 4
-  ret void
-}
-
-; EG-CHECK: @i16_arg
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: BUFFER_LOAD_USHORT
-
-define void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
-entry:
-  %0 = zext i16 %in to i32
-  store i32 %0, i32 addrspace(1)* %out, align 4
-  ret void
-}
-
-; EG-CHECK: @i16_zext_arg
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
-
-define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
-entry:
-  %0 = zext i16 %in to i32
-  store i32 %0, i32 addrspace(1)* %out, align 4
-  ret void
-}
-
-; EG-CHECK: @i16_sext_arg
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
-
-define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
-entry:
-  %0 = sext i16 %in to i32
-  store i32 %0, i32 addrspace(1)* %out, align 4
-  ret void
-}
diff --git a/test/CodeGen/R600/store.ll b/test/CodeGen/R600/store.ll
index a4c025a..5e51d56 100644
--- a/test/CodeGen/R600/store.ll
+++ b/test/CodeGen/R600/store.ll
@@ -7,7 +7,7 @@
 ;===------------------------------------------------------------------------===;
 
 ; i8 store
-; EG-CHECK: @store_i8
+; EG-CHECK-LABEL: @store_i8
 ; EG-CHECK: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
 ; EG-CHECK: VTX_READ_8 [[VAL:T[0-9]\.X]], [[VAL]]
 ; IG 0: Get the byte index and truncate the value
@@ -26,7 +26,7 @@
 ; EG-CHECK: MOV T[[RW_GPR]].Y, 0.0
 ; EG-CHECK: MOV * T[[RW_GPR]].Z, 0.0
 
-; SI-CHECK: @store_i8
+; SI-CHECK-LABEL: @store_i8
 ; SI-CHECK: BUFFER_STORE_BYTE
 
 define void @store_i8(i8 addrspace(1)* %out, i8 %in) {
@@ -36,7 +36,7 @@ entry:
 }
 
 ; i16 store
-; EG-CHECK: @store_i16
+; EG-CHECK-LABEL: @store_i16
 ; EG-CHECK: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
 ; EG-CHECK: VTX_READ_16 [[VAL:T[0-9]\.X]], [[VAL]]
 ; IG 0: Get the byte index and truncate the value
@@ -55,7 +55,7 @@ entry:
 ; EG-CHECK: MOV T[[RW_GPR]].Y, 0.0
 ; EG-CHECK: MOV * T[[RW_GPR]].Z, 0.0
 
-; SI-CHECK: @store_i16
+; SI-CHECK-LABEL: @store_i16
 ; SI-CHECK: BUFFER_STORE_SHORT
 define void @store_i16(i16 addrspace(1)* %out, i16 %in) {
 entry:
@@ -63,10 +63,10 @@ entry:
   ret void
 }
 
-; EG-CHECK: @store_v2i8
+; EG-CHECK-LABEL: @store_v2i8
 ; EG-CHECK: MEM_RAT MSKOR
 ; EG-CHECK-NOT: MEM_RAT MSKOR
-; SI-CHECK: @store_v2i8
+; SI-CHECK-LABEL: @store_v2i8
 ; SI-CHECK: BUFFER_STORE_BYTE
 ; SI-CHECK: BUFFER_STORE_BYTE
 define void @store_v2i8(<2 x i8> addrspace(1)* %out, <2 x i32> %in) {
@@ -77,12 +77,13 @@ entry:
 }
 
 
-; EG-CHECK: @store_v2i16
+; EG-CHECK-LABEL: @store_v2i16
 ; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK: @store_v2i16
+; CM-CHECK-LABEL: @store_v2i16
 ; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK: @store_v2i16
-; SI-CHECK: BUFFER_STORE_DWORD
+; SI-CHECK-LABEL: @store_v2i16
+; SI-CHECK: BUFFER_STORE_SHORT
+; SI-CHECK: BUFFER_STORE_SHORT
 define void @store_v2i16(<2 x i16> addrspace(1)* %out, <2 x i32> %in) {
 entry:
   %0 = trunc <2 x i32> %in to <2 x i16>
@@ -90,11 +91,11 @@ entry:
   ret void
 }
 
-; EG-CHECK: @store_v4i8
+; EG-CHECK-LABEL: @store_v4i8
 ; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK: @store_v4i8
+; CM-CHECK-LABEL: @store_v4i8
 ; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK: @store_v4i8
+; SI-CHECK-LABEL: @store_v4i8
 ; SI-CHECK: BUFFER_STORE_BYTE
 ; SI-CHECK: BUFFER_STORE_BYTE
 ; SI-CHECK: BUFFER_STORE_BYTE
@@ -107,11 +108,11 @@ entry:
 }
 
 ; floating-point store
-; EG-CHECK: @store_f32
+; EG-CHECK-LABEL: @store_f32
 ; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.X, T[0-9]+\.X}}, 1
-; CM-CHECK: @store_f32
+; CM-CHECK-LABEL: @store_f32
 ; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK: @store_f32
+; SI-CHECK-LABEL: @store_f32
 ; SI-CHECK: BUFFER_STORE_DWORD
 
 define void @store_f32(float addrspace(1)* %out, float %in) {
@@ -119,13 +120,13 @@ define void @store_f32(float addrspace(1)* %out, float %in) {
   ret void
 }
 
-; EG-CHECK: @store_v4i16
+; EG-CHECK-LABEL: @store_v4i16
 ; EG-CHECK: MEM_RAT MSKOR
 ; EG-CHECK: MEM_RAT MSKOR
 ; EG-CHECK: MEM_RAT MSKOR
 ; EG-CHECK: MEM_RAT MSKOR
 ; EG-CHECK-NOT: MEM_RAT MSKOR
-; SI-CHECK: @store_v4i16
+; SI-CHECK-LABEL: @store_v4i16
 ; SI-CHECK: BUFFER_STORE_SHORT
 ; SI-CHECK: BUFFER_STORE_SHORT
 ; SI-CHECK: BUFFER_STORE_SHORT
@@ -139,11 +140,11 @@ entry:
 }
 
 ; vec2 floating-point stores
-; EG-CHECK: @store_v2f32
+; EG-CHECK-LABEL: @store_v2f32
 ; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK: @store_v2f32
+; CM-CHECK-LABEL: @store_v2f32
 ; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK: @store_v2f32
+; SI-CHECK-LABEL: @store_v2f32
 ; SI-CHECK: BUFFER_STORE_DWORDX2
 
 define void @store_v2f32(<2 x float> addrspace(1)* %out, float %a, float %b) {
@@ -154,13 +155,13 @@ entry:
   ret void
 }
 
-; EG-CHECK: @store_v4i32
+; EG-CHECK-LABEL: @store_v4i32
 ; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
 ; EG-CHECK-NOT: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK: @store_v4i32
+; CM-CHECK-LABEL: @store_v4i32
 ; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
 ; CM-CHECK-NOT: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK: @store_v4i32
+; SI-CHECK-LABEL: @store_v4i32
 ; SI-CHECK: BUFFER_STORE_DWORDX4
 define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
 entry:
@@ -172,41 +173,42 @@ entry:
 ; Local Address Space
 ;===------------------------------------------------------------------------===;
 
-; EG-CHECK: @store_local_i8
+; EG-CHECK-LABEL: @store_local_i8
 ; EG-CHECK: LDS_BYTE_WRITE
-; SI-CHECK: @store_local_i8
+; SI-CHECK-LABEL: @store_local_i8
 ; SI-CHECK: DS_WRITE_B8
 define void @store_local_i8(i8 addrspace(3)* %out, i8 %in) {
   store i8 %in, i8 addrspace(3)* %out
   ret void
 }
 
-; EG-CHECK: @store_local_i16
+; EG-CHECK-LABEL: @store_local_i16
 ; EG-CHECK: LDS_SHORT_WRITE
-; SI-CHECK: @store_local_i16
+; SI-CHECK-LABEL: @store_local_i16
 ; SI-CHECK: DS_WRITE_B16
 define void @store_local_i16(i16 addrspace(3)* %out, i16 %in) {
   store i16 %in, i16 addrspace(3)* %out
   ret void
 }
 
-; EG-CHECK: @store_local_v2i16
+; EG-CHECK-LABEL: @store_local_v2i16
 ; EG-CHECK: LDS_WRITE
-; CM-CHECK: @store_local_v2i16
+; CM-CHECK-LABEL: @store_local_v2i16
 ; CM-CHECK: LDS_WRITE
-; SI-CHECK: @store_local_v2i16
-; SI-CHECK: DS_WRITE_B32
+; SI-CHECK-LABEL: @store_local_v2i16
+; SI-CHECK: DS_WRITE_B16
+; SI-CHECK: DS_WRITE_B16
 define void @store_local_v2i16(<2 x i16> addrspace(3)* %out, <2 x i16> %in) {
 entry:
   store <2 x i16> %in, <2 x i16> addrspace(3)* %out
   ret void
 }
 
-; EG-CHECK: @store_local_v4i8
+; EG-CHECK-LABEL: @store_local_v4i8
 ; EG-CHECK: LDS_WRITE
-; CM-CHECK: @store_local_v4i8
+; CM-CHECK-LABEL: @store_local_v4i8
 ; CM-CHECK: LDS_WRITE
-; SI-CHECK: @store_local_v4i8
+; SI-CHECK-LABEL: @store_local_v4i8
 ; SI-CHECK: DS_WRITE_B8
 ; SI-CHECK: DS_WRITE_B8
 ; SI-CHECK: DS_WRITE_B8
@@ -217,13 +219,13 @@ entry:
   ret void
 }
 
-; EG-CHECK: @store_local_v2i32
+; EG-CHECK-LABEL: @store_local_v2i32
 ; EG-CHECK: LDS_WRITE
 ; EG-CHECK: LDS_WRITE
-; CM-CHECK: @store_local_v2i32
+; CM-CHECK-LABEL: @store_local_v2i32
 ; CM-CHECK: LDS_WRITE
 ; CM-CHECK: LDS_WRITE
-; SI-CHECK: @store_local_v2i32
+; SI-CHECK-LABEL: @store_local_v2i32
 ; SI-CHECK: DS_WRITE_B32
 ; SI-CHECK: DS_WRITE_B32
 define void @store_local_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> %in) {
@@ -232,17 +234,17 @@ entry:
   ret void
 }
 
-; EG-CHECK: @store_local_v4i32
+; EG-CHECK-LABEL: @store_local_v4i32
 ; EG-CHECK: LDS_WRITE
 ; EG-CHECK: LDS_WRITE
 ; EG-CHECK: LDS_WRITE
 ; EG-CHECK: LDS_WRITE
-; CM-CHECK: @store_local_v4i32
+; CM-CHECK-LABEL: @store_local_v4i32
 ; CM-CHECK: LDS_WRITE
 ; CM-CHECK: LDS_WRITE
 ; CM-CHECK: LDS_WRITE
 ; CM-CHECK: LDS_WRITE
-; SI-CHECK: @store_local_v4i32
+; SI-CHECK-LABEL: @store_local_v4i32
 ; SI-CHECK: DS_WRITE_B32
 ; SI-CHECK: DS_WRITE_B32
 ; SI-CHECK: DS_WRITE_B32
@@ -260,11 +262,11 @@ entry:
 ; Evergreen / Northern Islands don't support 64-bit stores yet, so there should
 ; be two 32-bit stores.
 
-; EG-CHECK: @vecload2
+; EG-CHECK-LABEL: @vecload2
 ; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK: @vecload2
+; CM-CHECK-LABEL: @vecload2
 ; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK: @vecload2
+; SI-CHECK-LABEL: @vecload2
 ; SI-CHECK: BUFFER_STORE_DWORDX2
 define void @vecload2(i32 addrspace(1)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
 entry:
diff --git a/test/CodeGen/R600/trunc.ll b/test/CodeGen/R600/trunc.ll
index be7a430..6e1a96c 100644
--- a/test/CodeGen/R600/trunc.ll
+++ b/test/CodeGen/R600/trunc.ll
@@ -20,8 +20,8 @@ define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
 ; SI-LABEL: @trunc_shl_i64:
 ; SI: S_LOAD_DWORDX2
 ; SI: S_LOAD_DWORDX2 [[SREG:SGPR[0-9]+_SGPR[0-9]+]]
-; SI: V_LSHL_B64 [[LO_VREG:VGPR[0-9]+]]_VGPR{{[0-9]+}}, [[SREG]], 2
-; SI-NOT: [[LO_VREG]]
+; SI: S_LSHL_B64 [[LO_SREG:SGPR[0-9]+]]_SGPR{{[0-9]+}}, [[SREG]], 2
+; SI: MOV_B32_e32 [[LO_VREG:VGPR[0-9]+]], [[LO_SREG]]
 ; SI: BUFFER_STORE_DWORD [[LO_VREG]],
 define void @trunc_shl_i64(i32 addrspace(1)* %out, i64 %a) {
   %b = shl i64 %a, 2
-- 
1.7.11.4

-------------- next part --------------
From 0dcc4d79f82511334ba4f7cfc50432f2309ed91a Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Mon, 14 Oct 2013 08:28:02 -0700
Subject: [PATCH 2/3] R600/SI: Use S_LOAD_DWORD instructions for v8i32 and
 v16i32
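
SMRD (S_LOAD_DWORD*) instructions require a uniform scalar address,
which the kernel argument pointer in SGPR0_SGPR1 always is, so wide
vector arguments can now be fetched with a single scalar load.  A
minimal sketch of the kind of kernel that benefits (an assumed example;
the function name is illustrative and the test is not part of this
patch):

  define void @v8i32_arg(<8 x i32> addrspace(1)* %out, <8 x i32> %in) {
  entry:
    store <8 x i32> %in, <8 x i32> addrspace(1)* %out
    ret void
  }

With the new SMRD patterns the <8 x i32> argument can be selected as
one S_LOAD_DWORDX8 rather than being split into smaller loads.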

---
 lib/Target/R600/AMDGPUAsmPrinter.cpp | 3 +++
 lib/Target/R600/SIInstructions.td    | 3 +++
 lib/Target/R600/SIRegisterInfo.td    | 4 ++--
 test/CodeGen/R600/fneg.ll            | 8 ++++----
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp
index f3ccce7..bd8435b 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp
@@ -230,6 +230,9 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
         } else if (AMDGPU::VReg_256RegClass.contains(reg)) {
           isSGPR = false;
           width = 8;
+        } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
+          isSGPR = true;
+          width = 16;
         } else if (AMDGPU::VReg_512RegClass.contains(reg)) {
           isSGPR = false;
           width = 16;
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 460c1ce..64812dc 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -1874,7 +1874,10 @@ defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
 defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, i64>;
 defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
 defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, i128>;
+defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
 defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
+defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
 
 //===----------------------------------------------------------------------===//
 // MUBUF Patterns
diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td
index 2d7bff0..c8e3295 100644
--- a/lib/Target/R600/SIRegisterInfo.td
+++ b/lib/Target/R600/SIRegisterInfo.td
@@ -159,11 +159,11 @@ def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, i1], 64,
   (add SGPR_64Regs, VCCReg, EXECReg)
 >;
 
-def SReg_128 : RegisterClass<"AMDGPU", [i128], 128, (add SGPR_128)>;
+def SReg_128 : RegisterClass<"AMDGPU", [i128, v4i32], 128, (add SGPR_128)>;
 
 def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add SGPR_256)>;
 
-def SReg_512 : RegisterClass<"AMDGPU", [v64i8], 512, (add SGPR_512)>;
+def SReg_512 : RegisterClass<"AMDGPU", [v64i8, v16i32], 512, (add SGPR_512)>;
 
 // Register class for all vector registers (VGPRs + Interpolation Registers)
 def VReg_32 : RegisterClass<"AMDGPU", [i32, f32, v1i32], 32, (add VGPR_32)>;
diff --git a/test/CodeGen/R600/fneg.ll b/test/CodeGen/R600/fneg.ll
index 702fb33..0eb728a 100644
--- a/test/CodeGen/R600/fneg.ll
+++ b/test/CodeGen/R600/fneg.ll
@@ -31,10 +31,10 @@ entry:
 ; R600-CHECK: -PV
 ; R600-CHECK: -PV
 ; SI-CHECK-LABEL: @fneg_v4
-; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, VGPR{{[0-9]}}, 0, 0, 0, 0, 1
-; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, VGPR{{[0-9]}}, 0, 0, 0, 0, 1
-; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, VGPR{{[0-9]}}, 0, 0, 0, 0, 1
-; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, VGPR{{[0-9]}}, 0, 0, 0, 0, 1
+; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
+; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
+; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
+; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
 define void @fneg_v4(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) {
 entry:
   %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %in
-- 
1.7.11.4

-------------- next part --------------
From 97acb72d15fd0dea0a0471b4e3b3da6dd18cf884 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Mon, 14 Oct 2013 08:28:38 -0700
Subject: [PATCH 3/3] R600/SI: Remove a stray debugging dump

---
 lib/Target/R600/SIInstrInfo.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index 15ba7d4..db737a4 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -307,7 +307,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
   if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
     unsigned ConstantBusCount = 0;
     unsigned SGPRUsed = AMDGPU::NoRegister;
-    MI->dump();
     for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
       const MachineOperand &MO = MI->getOperand(i);
       if (MO.isReg() && MO.isUse() &&
-- 
1.7.11.4
