[Mesa-dev] [PATCH 1/2] AMDGPU: Various coding style fixes

Tom Stellard tom at stellard.net
Thu Nov 29 14:47:51 PST 2012


From: Tom Stellard <thomas.stellard at amd.com>

- Fix more coding style errors where an opening brace was placed on its
  own line. Found using: grep -rn '^[ ]*{$'

- Remove underscore from start of macro defs (identifiers beginning with
  an underscore are reserved to the implementation)
- Remove all tabs
- Remove "end of" comments that don't refer to namespaces.
- Fix an issue with parens being on the wrong line.
- Fix warnings from building with clang
---
 lib/Target/AMDGPU/AMDGPUISelLowering.h             |   9 +-
 lib/Target/AMDGPU/AMDGPUInstrInfo.h                |   6 +-
 lib/Target/AMDGPU/AMDGPURegisterInfo.h             |   9 +-
 lib/Target/AMDGPU/AMDGPUSubtarget.cpp              |  28 +-
 lib/Target/AMDGPU/AMDGPUSubtarget.h                |  28 +-
 lib/Target/AMDGPU/AMDGPUTargetMachine.cpp          |   5 +-
 lib/Target/AMDGPU/AMDGPUTargetMachine.h            |   1 -
 lib/Target/AMDGPU/AMDIL.h                          |   6 +-
 lib/Target/AMDGPU/AMDIL7XXDevice.cpp               |   6 +-
 lib/Target/AMDGPU/AMDIL7XXDevice.h                 |   6 +-
 lib/Target/AMDGPU/AMDILBase.td                     |   8 +-
 lib/Target/AMDGPU/AMDILCFGStructurizer.cpp         |  56 ++--
 lib/Target/AMDGPU/AMDILDevice.cpp                  |   4 +-
 lib/Target/AMDGPU/AMDILDevice.h                    |   8 +-
 lib/Target/AMDGPU/AMDILDeviceInfo.h                |  12 +-
 lib/Target/AMDGPU/AMDILDevices.h                   |   6 +-
 lib/Target/AMDGPU/AMDILEvergreenDevice.cpp         |   8 +-
 lib/Target/AMDGPU/AMDILEvergreenDevice.h           |   6 +-
 lib/Target/AMDGPU/AMDILFrameLowering.h             |   6 +-
 lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp            | 172 ++++++-----
 lib/Target/AMDGPU/AMDILISelLowering.cpp            |  33 +-
 lib/Target/AMDGPU/AMDILIntrinsicInfo.h             |   6 +-
 lib/Target/AMDGPU/AMDILNIDevice.cpp                |   8 +-
 lib/Target/AMDGPU/AMDILNIDevice.h                  |   6 +-
 lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp       |   3 +-
 lib/Target/AMDGPU/AMDILRegisterInfo.td             |   9 +-
 lib/Target/AMDGPU/AMDILSIDevice.h                  |   6 +-
 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp |   9 +-
 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h   |   6 +-
 .../AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp      |  45 ++-
 lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp      |   6 +-
 lib/Target/AMDGPU/R600ISelLowering.cpp             | 336 ++++++++++-----------
 lib/Target/AMDGPU/R600ISelLowering.h               |   3 +-
 lib/Target/AMDGPU/R600InstrInfo.cpp                |   3 +-
 lib/Target/AMDGPU/R600Instructions.td              |  38 +--
 lib/Target/AMDGPU/R600MachineFunctionInfo.cpp      |   3 +-
 lib/Target/AMDGPU/R600RegisterInfo.h               |   3 +-
 lib/Target/AMDGPU/SIFixSGPRLiveness.cpp            |   3 +-
 lib/Target/AMDGPU/SIISelLowering.cpp               |  10 +-
 lib/Target/AMDGPU/SIISelLowering.h                 |   3 +-
 lib/Target/AMDGPU/SIInstrInfo.cpp                  |   2 +-
 lib/Target/AMDGPU/SIInstrInfo.td                   |   2 +-
 lib/Target/AMDGPU/SIInstructions.td                |  24 +-
 lib/Target/AMDGPU/SILowerControlFlow.cpp           |   6 +-
 lib/Target/AMDGPU/SIMachineFunctionInfo.h          |   4 +-
 lib/Target/AMDGPU/SIRegisterInfo.h                 |   3 +-
 46 files changed, 452 insertions(+), 518 deletions(-)

diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.h b/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 58d2287..7783326 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -21,8 +21,7 @@ namespace llvm {
 
 class MachineRegisterInfo;
 
-class AMDGPUTargetLowering : public TargetLowering
-{
+class AMDGPUTargetLowering : public TargetLowering {
 private:
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
@@ -96,11 +95,9 @@ private:
   SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
 };
 
-namespace AMDGPUISD
-{
+namespace AMDGPUISD {
 
-enum
-{
+enum {
   // AMDIL ISD Opcodes
   FIRST_NUMBER = ISD::BUILTIN_OP_END,
   MAD,         // 32bit Fused Multiply Add instruction
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index e81f73d..bd6a028 100644
--- a/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -12,8 +12,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef AMDGPUINSTRUCTIONINFO_H_
-#define AMDGPUINSTRUCTIONINFO_H_
+#ifndef AMDGPUINSTRUCTIONINFO_H
+#define AMDGPUINSTRUCTIONINFO_H
 
 #include "AMDGPURegisterInfo.h"
 #include "AMDGPUInstrInfo.h"
@@ -145,4 +145,4 @@ public:
 
 } // End llvm namespace
 
-#endif // AMDGPUINSTRINFO_H_
+#endif // AMDGPUINSTRINFO_H
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.h b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
index 326610d..14d618b 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterInfo.h
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
@@ -12,8 +12,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef AMDGPUREGISTERINFO_H_
-#define AMDGPUREGISTERINFO_H_
+#ifndef AMDGPUREGISTERINFO_H
+#define AMDGPUREGISTERINFO_H
 
 #include "llvm/ADT/BitVector.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -27,8 +27,7 @@ namespace llvm {
 class AMDGPUTargetMachine;
 class TargetInstrInfo;
 
-struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo
-{
+struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
   TargetMachine &TM;
   const TargetInstrInfo &TII;
   static const uint16_t CalleeSavedReg;
@@ -59,4 +58,4 @@ struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo
 
 } // End namespace llvm
 
-#endif // AMDIDSAREGISTERINFO_H_
+#endif // AMDIDSAREGISTERINFO_H
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index cc8f961..461a22f 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -21,24 +21,24 @@ using namespace llvm;
 #include "AMDGPUGenSubtargetInfo.inc"
 
 AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) :
-  AMDGPUGenSubtargetInfo(TT, CPU, FS), mDumpCode(false) {
+  AMDGPUGenSubtargetInfo(TT, CPU, FS), DumpCode(false) {
     InstrItins = getInstrItineraryForCPU(CPU);
 
   memset(CapsOverride, 0, sizeof(*CapsOverride)
       * AMDGPUDeviceInfo::MaxNumberCapabilities);
   // Default card
   StringRef GPU = CPU;
-  mIs64bit = false;
-  mDefaultSize[0] = 64;
-  mDefaultSize[1] = 1;
-  mDefaultSize[2] = 1;
+  Is64bit = false;
+  DefaultSize[0] = 64;
+  DefaultSize[1] = 1;
+  DefaultSize[2] = 1;
   ParseSubtargetFeatures(GPU, FS);
-  mDevName = GPU;
-  mDevice = AMDGPUDeviceInfo::getDeviceFromName(mDevName, this, mIs64bit);
+  DevName = GPU;
+  Device = AMDGPUDeviceInfo::getDeviceFromName(DevName, this, Is64bit);
 }
 
 AMDGPUSubtarget::~AMDGPUSubtarget() {
-  delete mDevice;
+  delete Device;
 }
 
 bool
@@ -49,7 +49,7 @@ AMDGPUSubtarget::isOverride(AMDGPUDeviceInfo::Caps caps) const {
 }
 bool
 AMDGPUSubtarget::is64bit() const  {
-  return mIs64bit;
+  return Is64bit;
 }
 bool
 AMDGPUSubtarget::isTargetELF() const {
@@ -60,27 +60,27 @@ AMDGPUSubtarget::getDefaultSize(uint32_t dim) const {
   if (dim > 3) {
     return 1;
   } else {
-    return mDefaultSize[dim];
+    return DefaultSize[dim];
   }
 }
 
 std::string
 AMDGPUSubtarget::getDataLayout() const {
-    if (!mDevice) {
+    if (!Device) {
         return std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
                 "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
                 "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
                 "-v96:128:128-v128:128:128-v192:256:256-v256:256:256"
                 "-v512:512:512-v1024:1024:1024-v2048:2048:2048-a0:0:64");
     }
-    return mDevice->getDataLayout();
+    return Device->getDataLayout();
 }
 
 std::string
 AMDGPUSubtarget::getDeviceName() const {
-  return mDevName;
+  return DevName;
 }
 const AMDGPUDevice *
 AMDGPUSubtarget::device() const {
-  return mDevice;
+  return Device;
 }
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 30bda83..79a7486 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -11,8 +11,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef _AMDGPUSUBTARGET_H_
-#define _AMDGPUSUBTARGET_H_
+#ifndef AMDGPUSUBTARGET_H
+#define AMDGPUSUBTARGET_H
 #include "AMDILDevice.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
@@ -25,18 +25,16 @@
 
 namespace llvm {
 
-class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo
-{
+class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
 private:
   bool CapsOverride[AMDGPUDeviceInfo::MaxNumberCapabilities];
-  const AMDGPUDevice *mDevice;
-  size_t mDefaultSize[3];
-  size_t mMinimumSize[3];
-  std::string mDevName;
-  bool mIs64bit;
-  bool mIs32on64bit;
-  bool mDumpCode;
-  bool mR600ALUInst;
+  const AMDGPUDevice *Device;
+  size_t DefaultSize[3];
+  std::string DevName;
+  bool Is64bit;
+  bool Is32on64bit;
+  bool DumpCode;
+  bool R600ALUInst;
 
   InstrItineraryData InstrItins;
 
@@ -56,11 +54,11 @@ public:
   std::string getDataLayout() const;
   std::string getDeviceName() const;
   virtual size_t getDefaultSize(uint32_t dim) const;
-  bool dumpCode() const { return mDumpCode; }
-  bool r600ALUEncoding() const { return mR600ALUInst; }
+  bool dumpCode() const { return DumpCode; }
+  bool r600ALUEncoding() const { return R600ALUInst; }
 
 };
 
 } // End namespace llvm
 
-#endif // AMDGPUSUBTARGET_H_
+#endif // AMDGPUSUBTARGET_H
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 113b7bf..50a3f62 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -51,10 +51,7 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
   FrameLowering(TargetFrameLowering::StackGrowsUp,
       Subtarget.device()->getStackAlignment(), 0),
   IntrinsicInfo(this),
-  InstrItins(&Subtarget.getInstrItineraryData()),
-  mDump(false)
-
-{
+  InstrItins(&Subtarget.getInstrItineraryData()) {
   // TLInfo uses InstrInfo so it must be initialized after.
   if (Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
     InstrInfo = new R600InstrInfo(*this);
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index 712bc6c..7924f10 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -35,7 +35,6 @@ class AMDGPUTargetMachine : public LLVMTargetMachine {
   const AMDGPUInstrInfo * InstrInfo;
   AMDGPUTargetLowering * TLInfo;
   const InstrItineraryData* InstrItins;
-  bool mDump;
 
 public:
    AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS,
diff --git a/lib/Target/AMDGPU/AMDIL.h b/lib/Target/AMDGPU/AMDIL.h
index e96b123..d8682fb 100644
--- a/lib/Target/AMDGPU/AMDIL.h
+++ b/lib/Target/AMDGPU/AMDIL.h
@@ -12,8 +12,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef AMDIL_H_
-#define AMDIL_H_
+#ifndef AMDIL_H
+#define AMDIL_H
 
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/Target/TargetMachine.h"
@@ -103,4 +103,4 @@ enum AddressSpaces {
 } // namespace AMDGPUAS
 
 } // end namespace llvm
-#endif // AMDIL_H_
+#endif // AMDIL_H
diff --git a/lib/Target/AMDGPU/AMDIL7XXDevice.cpp b/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
index 854d690..21879d6 100644
--- a/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
+++ b/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
@@ -16,11 +16,11 @@ AMDGPU7XXDevice::AMDGPU7XXDevice(AMDGPUSubtarget *ST) : AMDGPUDevice(ST) {
   setCaps();
   std::string name = mSTM->getDeviceName();
   if (name == "rv710") {
-    mDeviceFlag = OCL_DEVICE_RV710;
+    DeviceFlag = OCL_DEVICE_RV710;
   } else if (name == "rv730") {
-    mDeviceFlag = OCL_DEVICE_RV730;
+    DeviceFlag = OCL_DEVICE_RV730;
   } else {
-    mDeviceFlag = OCL_DEVICE_RV770;
+    DeviceFlag = OCL_DEVICE_RV770;
   }
 }
 
diff --git a/lib/Target/AMDGPU/AMDIL7XXDevice.h b/lib/Target/AMDGPU/AMDIL7XXDevice.h
index e848e2e..1befb92 100644
--- a/lib/Target/AMDGPU/AMDIL7XXDevice.h
+++ b/lib/Target/AMDGPU/AMDIL7XXDevice.h
@@ -14,8 +14,8 @@
 // implement in order to correctly answer queries on the capabilities of the
 // specific hardware.
 //===----------------------------------------------------------------------===//
-#ifndef _AMDIL7XXDEVICEIMPL_H_
-#define _AMDIL7XXDEVICEIMPL_H_
+#ifndef AMDIL7XXDEVICEIMPL_H
+#define AMDIL7XXDEVICEIMPL_H
 #include "AMDILDevice.h"
 
 namespace llvm {
@@ -67,4 +67,4 @@ public:
 }; // AMDGPU710Device
 
 } // namespace llvm
-#endif // _AMDILDEVICEIMPL_H_
+#endif // AMDILDEVICEIMPL_H
diff --git a/lib/Target/AMDGPU/AMDILBase.td b/lib/Target/AMDGPU/AMDILBase.td
index ffe9ce2..c12cedc 100644
--- a/lib/Target/AMDGPU/AMDILBase.td
+++ b/lib/Target/AMDGPU/AMDILBase.td
@@ -52,12 +52,12 @@ def FeatureNoInline : SubtargetFeature<"no-inline",
         "specify whether to not inline functions">;
 
 def Feature64BitPtr : SubtargetFeature<"64BitPtr",
-        "mIs64bit",
+        "Is64bit",
         "false",
         "Specify if 64bit addressing should be used.">;
 
 def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr",
-        "mIs32on64bit",
+        "Is32on64bit",
         "false",
         "Specify if 64bit sized pointers with 32bit addressing should be used.">;
 def FeatureDebug : SubtargetFeature<"debug",
@@ -65,12 +65,12 @@ def FeatureDebug : SubtargetFeature<"debug",
         "true",
         "Debug mode is enabled, so disable hardware accelerated address spaces.">;
 def FeatureDumpCode : SubtargetFeature <"DumpCode",
-        "mDumpCode",
+        "DumpCode",
         "true",
         "Dump MachineInstrs in the CodeEmitter">;
 
 def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
-        "mR600ALUInst",
+        "R600ALUInst",
         "false",
         "Older version of ALU instructions encoding.">;
 
diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
index 61d4c59..948bdb4 100644
--- a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
@@ -607,7 +607,7 @@ template<class PassT> void CFGStructurizer<PassT>::orderBlocks() {
     if (sccNum == INVALIDSCCNUM) {
       errs() << "unreachable block BB" << bb->getNumber() << "\n";
     }
-  } //end of for
+  }
 } //orderBlocks
 
 template<class PassT> int CFGStructurizer<PassT>::patternMatch(BlockT *curBlk) {
@@ -839,8 +839,7 @@ int CFGStructurizer<PassT>::loopbreakPatternMatch(LoopT *loopRep,
   int numCloned = 0;
   int numSerial = 0;
 
-  if (exitBlkSet.size() == 1)
-  {
+  if (exitBlkSet.size() == 1) {
     exitLandBlk = *exitBlkSet.begin();
   } else {
     exitLandBlk = findNearestCommonPostDom(exitBlkSet);
@@ -1396,7 +1395,7 @@ void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr,
   CFGTraits::insertCondBranchBefore(branchInstrPos,
                                     CFGTraits::getBranchNzeroOpcode(oldOpcode),
                                     passRep,
-									branchDL);
+                                    branchDL);
 
   if (trueBlk) {
     curBlk->splice(branchInstrPos, trueBlk, trueBlk->begin(), trueBlk->end());
@@ -1455,8 +1454,7 @@ void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk,
   // Loop breakInitRegs are init before entering the loop.
   for (typename std::set<RegiT>::const_iterator iter =
          loopLand->breakInitRegs.begin(),
-       iterEnd = loopLand->breakInitRegs.end(); iter != iterEnd; ++iter)
-  {
+       iterEnd = loopLand->breakInitRegs.end(); iter != iterEnd; ++iter) {
     CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
   }
   // Loop endbranchInitRegs are init before entering the loop.
@@ -1608,7 +1606,7 @@ void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk,
       CFGTraits::getInstrPos(contingBlk, branchInstr);
     BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr);
     int oldOpcode = branchInstr->getOpcode();
-	DebugLoc DL = branchInstr->getDebugLoc();
+    DebugLoc DL = branchInstr->getDebugLoc();
 
     //    transform contingBlk to
     //     if () {
@@ -1623,8 +1621,7 @@ void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk,
     bool useContinueLogical = 
       (setReg == INVALIDREGNUM && (&*contingBlk->rbegin()) == branchInstr);
 
-    if (useContinueLogical == false) 
-    {
+    if (useContinueLogical == false) {
       int branchOpcode =
         trueBranch == contBlk ? CFGTraits::getBranchNzeroOpcode(oldOpcode)
                               : CFGTraits::getBranchZeroOpcode(oldOpcode);
@@ -2325,8 +2322,7 @@ void CFGStructurizer<PassT>::addLoopEndbranchInitReg(LoopT *loopRep,
   }
   theEntry->endbranchInitRegs.insert(regNum);
 
-  if (DEBUGME)
-  {
+  if (DEBUGME) {
         errs() << "addLoopEndbranchInitReg loop-header = BB"
       << loopRep->getHeader()->getNumber()
       << "  regNum = " << regNum << "\n";
@@ -2485,14 +2481,12 @@ public:
 
 private:
 
-};   //end of class AMDGPUCFGStructurizer
+};
 
 } //end of namespace llvm
-AMDGPUCFGStructurizer::AMDGPUCFGStructurizer(char &pid, TargetMachine &tm
-                                          )
+AMDGPUCFGStructurizer::AMDGPUCFGStructurizer(char &pid, TargetMachine &tm)
 : MachineFunctionPass(pid), TM(tm), TII(tm.getInstrInfo()),
-  TRI(static_cast<const AMDGPURegisterInfo *>(tm.getRegisterInfo())
-  ) {
+  TRI(static_cast<const AMDGPURegisterInfo *>(tm.getRegisterInfo())) {
 }
 
 const TargetInstrInfo *AMDGPUCFGStructurizer::getTargetInstrInfo() const {
@@ -2522,7 +2516,7 @@ public:
 
 private:
 
-};   //end of class AMDGPUCFGPrepare
+};
 
 char AMDGPUCFGPrepare::ID = 0;
 } //end of namespace llvm
@@ -2564,7 +2558,7 @@ public:
 
 private:
 
-};   //end of class AMDGPUCFGPerform
+};
 
 char AMDGPUCFGPerform::ID = 0;
 } //end of namespace llvm
@@ -2605,7 +2599,7 @@ struct CFGStructTraits<AMDGPUCFGStructurizer> {
     case AMDGPU::SI_IF_NZ: return AMDGPU::SI_IF_NZ;
     default:
       assert(0 && "internal error");
-    };
+    }
     return -1;
   }
 
@@ -2617,12 +2611,11 @@ struct CFGStructTraits<AMDGPUCFGStructurizer> {
     case AMDGPU::SI_IF_Z: return AMDGPU::SI_IF_Z;
     default:
       assert(0 && "internal error");
-    };
+    }
     return -1;
   }
 
-  static int getContinueNzeroOpcode(int oldOpcode)
-  {
+  static int getContinueNzeroOpcode(int oldOpcode) {
     switch(oldOpcode) {
     case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
     default:
@@ -2636,7 +2629,7 @@ struct CFGStructTraits<AMDGPUCFGStructurizer> {
     case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
     default:
       assert(0 && "internal error");
-    };
+    }
     return -1;
   }
 
@@ -2689,11 +2682,11 @@ struct CFGStructTraits<AMDGPUCFGStructurizer> {
   static DebugLoc getLastDebugLocInBB(MachineBasicBlock *blk) {
     //get DebugLoc from the first MachineBasicBlock instruction with debug info
     DebugLoc DL;
-	for (MachineBasicBlock::iterator iter = blk->begin(); iter != blk->end(); ++iter) {
-	  MachineInstr *instr = &(*iter);
-	  if (instr->getDebugLoc().isUnknown() == false) {
-	    DL = instr->getDebugLoc();
-	  }
+    for (MachineBasicBlock::iterator iter = blk->begin(); iter != blk->end(); ++iter) {
+      MachineInstr *instr = &(*iter);
+      if (instr->getDebugLoc().isUnknown() == false) {
+        DL = instr->getDebugLoc();
+      }
     }
     return DL;
   }
@@ -2851,7 +2844,7 @@ struct CFGStructTraits<AMDGPUCFGStructurizer> {
   static void insertCondBranchBefore(MachineBasicBlock::iterator instrPos,
                                      int newOpcode,
                                      AMDGPUCFGStructurizer *passRep,
-									 DebugLoc DL) {
+                                     DebugLoc DL) {
     MachineInstr *oldInstr = &(*instrPos);
     const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
     MachineBasicBlock *blk = oldInstr->getParent();
@@ -2872,7 +2865,7 @@ struct CFGStructTraits<AMDGPUCFGStructurizer> {
                                      int newOpcode,
                                      AMDGPUCFGStructurizer *passRep,
                                      RegiT regNum,
-									 DebugLoc DL) {
+                                     DebugLoc DL) {
     const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
 
     MachineInstr *newInstr =
@@ -3054,6 +3047,3 @@ bool AMDGPUCFGPerform::runOnMachineFunction(MachineFunction &func) {
                                                                     *this,
                                                                     TRI);
 }
-
-//end of file newline goes below
-
diff --git a/lib/Target/AMDGPU/AMDILDevice.cpp b/lib/Target/AMDGPU/AMDILDevice.cpp
index 3ff62a8..3938681 100644
--- a/lib/Target/AMDGPU/AMDILDevice.cpp
+++ b/lib/Target/AMDGPU/AMDILDevice.cpp
@@ -15,7 +15,7 @@ AMDGPUDevice::AMDGPUDevice(AMDGPUSubtarget *ST) : mSTM(ST) {
   mHWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
   mSWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
   setCaps();
-  mDeviceFlag = OCL_DEVICE_ALL;
+  DeviceFlag = OCL_DEVICE_ALL;
 }
 
 AMDGPUDevice::~AMDGPUDevice() {
@@ -29,7 +29,7 @@ size_t AMDGPUDevice::getMaxGDSSize() const {
 
 uint32_t 
 AMDGPUDevice::getDeviceFlag() const {
-  return mDeviceFlag;
+  return DeviceFlag;
 }
 
 size_t AMDGPUDevice::getMaxNumCBs() const {
diff --git a/lib/Target/AMDGPU/AMDILDevice.h b/lib/Target/AMDGPU/AMDILDevice.h
index 05fe153..3740e3b 100644
--- a/lib/Target/AMDGPU/AMDILDevice.h
+++ b/lib/Target/AMDGPU/AMDILDevice.h
@@ -14,8 +14,8 @@
 // implement in order to correctly answer queries on the capabilities of the
 // specific hardware.
 //===----------------------------------------------------------------------===//
-#ifndef _AMDILDEVICEIMPL_H_
-#define _AMDILDEVICEIMPL_H_
+#ifndef AMDILDEVICEIMPL_H
+#define AMDILDEVICEIMPL_H
 #include "AMDIL.h"
 #include "llvm/ADT/BitVector.h"
 
@@ -106,11 +106,11 @@ protected:
   llvm::BitVector mHWBits;
   llvm::BitVector mSWBits;
   AMDGPUSubtarget *mSTM;
-  uint32_t mDeviceFlag;
+  uint32_t DeviceFlag;
 private:
   AMDGPUDeviceInfo::ExecutionMode
   getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const;
 }; // AMDGPUDevice
 
 } // namespace llvm
-#endif // _AMDILDEVICEIMPL_H_
+#endif // AMDILDEVICEIMPL_H
diff --git a/lib/Target/AMDGPU/AMDILDeviceInfo.h b/lib/Target/AMDGPU/AMDILDeviceInfo.h
index 4fa021e..0eb3989 100644
--- a/lib/Target/AMDGPU/AMDILDeviceInfo.h
+++ b/lib/Target/AMDGPU/AMDILDeviceInfo.h
@@ -6,18 +6,16 @@
 // License. See LICENSE.TXT for details.
 //
 //==-----------------------------------------------------------------------===//
-#ifndef _AMDILDEVICEINFO_H_
-#define _AMDILDEVICEINFO_H_
+#ifndef AMDILDEVICEINFO_H
+#define AMDILDEVICEINFO_H
 
 
 #include <string>
 
-namespace llvm
-{
+namespace llvm {
   class AMDGPUDevice;
   class AMDGPUSubtarget;
-  namespace AMDGPUDeviceInfo
-  {
+  namespace AMDGPUDeviceInfo {
     // Each Capabilities can be executed using a hardware instruction,
     // emulated with a sequence of software instructions, or not
     // supported at all.
@@ -87,4 +85,4 @@ namespace llvm
                       bool is64bit = false, bool is64on32bit = false);
   } // namespace AMDILDeviceInfo
 } // namespace llvm
-#endif // _AMDILDEVICEINFO_H_
+#endif // AMDILDEVICEINFO_H
diff --git a/lib/Target/AMDGPU/AMDILDevices.h b/lib/Target/AMDGPU/AMDILDevices.h
index cfcc330..befdf8f 100644
--- a/lib/Target/AMDGPU/AMDILDevices.h
+++ b/lib/Target/AMDGPU/AMDILDevices.h
@@ -6,8 +6,8 @@
 // License. See LICENSE.TXT for details.
 //
 //==-----------------------------------------------------------------------===//
-#ifndef __AMDIL_DEVICES_H_
-#define __AMDIL_DEVICES_H_
+#ifndef AMDIL_DEVICES_H
+#define AMDIL_DEVICES_H
 // Include all of the device specific header files
 // This file is for Internal use only!
 #include "AMDIL7XXDevice.h"
@@ -16,4 +16,4 @@
 #include "AMDILNIDevice.h"
 #include "AMDILSIDevice.h"
 
-#endif // _AMDIL_DEVICES_H_
+#endif // AMDIL_DEVICES_H
diff --git a/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp b/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
index 28e6e84..052238c 100644
--- a/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
+++ b/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
@@ -15,13 +15,13 @@ AMDGPUEvergreenDevice::AMDGPUEvergreenDevice(AMDGPUSubtarget *ST)
   setCaps();
   std::string name = ST->getDeviceName();
   if (name == "cedar") {
-    mDeviceFlag = OCL_DEVICE_CEDAR;
+    DeviceFlag = OCL_DEVICE_CEDAR;
   } else if (name == "redwood") {
-    mDeviceFlag = OCL_DEVICE_REDWOOD;
+    DeviceFlag = OCL_DEVICE_REDWOOD;
   } else if (name == "cypress") {
-    mDeviceFlag = OCL_DEVICE_CYPRESS;
+    DeviceFlag = OCL_DEVICE_CYPRESS;
   } else {
-    mDeviceFlag = OCL_DEVICE_JUNIPER;
+    DeviceFlag = OCL_DEVICE_JUNIPER;
   }
 }
 
diff --git a/lib/Target/AMDGPU/AMDILEvergreenDevice.h b/lib/Target/AMDGPU/AMDILEvergreenDevice.h
index 5def695..d1a42ef 100644
--- a/lib/Target/AMDGPU/AMDILEvergreenDevice.h
+++ b/lib/Target/AMDGPU/AMDILEvergreenDevice.h
@@ -14,8 +14,8 @@
 // implement in order to correctly answer queries on the capabilities of the
 // specific hardware.
 //===----------------------------------------------------------------------===//
-#ifndef _AMDILEVERGREENDEVICE_H_
-#define _AMDILEVERGREENDEVICE_H_
+#ifndef AMDILEVERGREENDEVICE_H
+#define AMDILEVERGREENDEVICE_H
 #include "AMDILDevice.h"
 #include "AMDGPUSubtarget.h"
 
@@ -84,4 +84,4 @@ private:
 }; // AMDGPURedwoodDevice
   
 } // namespace llvm
-#endif // _AMDILEVERGREENDEVICE_H_
+#endif // AMDILEVERGREENDEVICE_H
diff --git a/lib/Target/AMDGPU/AMDILFrameLowering.h b/lib/Target/AMDGPU/AMDILFrameLowering.h
index 934ee46..81c09a5 100644
--- a/lib/Target/AMDGPU/AMDILFrameLowering.h
+++ b/lib/Target/AMDGPU/AMDILFrameLowering.h
@@ -17,8 +17,8 @@
 // Interface to describe a layout of a stack frame on a AMDIL target machine
 //
 //===----------------------------------------------------------------------===//
-#ifndef _AMDILFRAME_LOWERING_H_
-#define _AMDILFRAME_LOWERING_H_
+#ifndef AMDILFRAME_LOWERING_H
+#define AMDILFRAME_LOWERING_H
 
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/Target/TargetFrameLowering.h"
@@ -43,4 +43,4 @@ namespace llvm {
       virtual bool hasFP(const MachineFunction &MF) const;
   }; // class AMDGPUFrameLowering
 } // namespace llvm
-#endif // _AMDILFRAME_LOWERING_H_
+#endif // AMDILFRAME_LOWERING_H
diff --git a/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
index 2a80f1b..e7115bc 100644
--- a/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
+++ b/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
@@ -157,110 +157,107 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
   }
   switch (Opc) {
   default: break;
-  case ISD::FrameIndex:
-    {
-      if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) {
-        unsigned int FI = FIN->getIndex();
-        EVT OpVT = N->getValueType(0);
-        unsigned int NewOpc = AMDGPU::COPY;
-        SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
-        return CurDAG->SelectNodeTo(N, NewOpc, OpVT, TFI);
-      }
+  case ISD::FrameIndex: {
+    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) {
+      unsigned int FI = FIN->getIndex();
+      EVT OpVT = N->getValueType(0);
+      unsigned int NewOpc = AMDGPU::COPY;
+      SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
+      return CurDAG->SelectNodeTo(N, NewOpc, OpVT, TFI);
     }
     break;
+  }
   case ISD::ConstantFP:
-  case ISD::Constant:
-    {
-      const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-      // XXX: Custom immediate lowering not implemented yet.  Instead we use
-      // pseudo instructions defined in SIInstructions.td
-      if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
-        break;
+  case ISD::Constant: {
+    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+    // XXX: Custom immediate lowering not implemented yet.  Instead we use
+    // pseudo instructions defined in SIInstructions.td
+    if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
+      break;
+    }
+    const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
+
+    uint64_t ImmValue = 0;
+    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
+
+    if (N->getOpcode() == ISD::ConstantFP) {
+      // XXX: 64-bit Immediates not supported yet
+      assert(N->getValueType(0) != MVT::f64);
+
+      ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
+      APFloat Value = C->getValueAPF();
+      float FloatValue = Value.convertToFloat();
+      if (FloatValue == 0.0) {
+        ImmReg = AMDGPU::ZERO;
+      } else if (FloatValue == 0.5) {
+        ImmReg = AMDGPU::HALF;
+      } else if (FloatValue == 1.0) {
+        ImmReg = AMDGPU::ONE;
+      } else {
+        ImmValue = Value.bitcastToAPInt().getZExtValue();
       }
-      const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
-
-      uint64_t ImmValue = 0;
-      unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
-
-      if (N->getOpcode() == ISD::ConstantFP) {
-        // XXX: 64-bit Immediates not supported yet
-        assert(N->getValueType(0) != MVT::f64);
-
-        ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
-        APFloat Value = C->getValueAPF();
-        float FloatValue = Value.convertToFloat();
-        if (FloatValue == 0.0) {
-          ImmReg = AMDGPU::ZERO;
-        } else if (FloatValue == 0.5) {
-          ImmReg = AMDGPU::HALF;
-        } else if (FloatValue == 1.0) {
-          ImmReg = AMDGPU::ONE;
-        } else {
-          ImmValue = Value.bitcastToAPInt().getZExtValue();
-        }
+    } else {
+      // XXX: 64-bit Immediates not supported yet
+      assert(N->getValueType(0) != MVT::i64);
+
+      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
+      if (C->getZExtValue() == 0) {
+        ImmReg = AMDGPU::ZERO;
+      } else if (C->getZExtValue() == 1) {
+        ImmReg = AMDGPU::ONE_INT;
       } else {
-        // XXX: 64-bit Immediates not supported yet
-        assert(N->getValueType(0) != MVT::i64);
-
-        ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
-        if (C->getZExtValue() == 0) {
-          ImmReg = AMDGPU::ZERO;
-        } else if (C->getZExtValue() == 1) {
-          ImmReg = AMDGPU::ONE_INT;
-        } else {
-          ImmValue = C->getZExtValue();
-        }
+        ImmValue = C->getZExtValue();
       }
+    }
 
-      for (SDNode::use_iterator Use = N->use_begin(), E = SDNode::use_end();
-                                                      Use != E; ++Use) {
-        std::vector<SDValue> Ops;
-        for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
-          Ops.push_back(Use->getOperand(i));
-        }
+    for (SDNode::use_iterator Use = N->use_begin(), E = SDNode::use_end();
+                                                    Use != E; ++Use) {
+      std::vector<SDValue> Ops;
+      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
+        Ops.push_back(Use->getOperand(i));
+      }
 
-        if (!Use->isMachineOpcode()) {
-            if (ImmReg == AMDGPU::ALU_LITERAL_X) {
-              // We can only use literal constants (e.g. AMDGPU::ZERO,
-              // AMDGPU::ONE, etc) in machine opcodes.
-              continue;
-            }
-        } else {
-          if (!TII->isALUInstr(Use->getMachineOpcode())) {
+      if (!Use->isMachineOpcode()) {
+          if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+            // We can only use literal constants (e.g. AMDGPU::ZERO,
+            // AMDGPU::ONE, etc) in machine opcodes.
             continue;
           }
+      } else {
+        if (!TII->isALUInstr(Use->getMachineOpcode())) {
+          continue;
+        }
 
-          int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM);
-          assert(ImmIdx != -1);
-
-          // subtract one from ImmIdx, because the DST operand is usually index
-          // 0 for MachineInstrs, but we have no DST in the Ops vector.
-          ImmIdx--;
+        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM);
+        assert(ImmIdx != -1);
 
-          // Check that we aren't already using an immediate.
-          // XXX: It's possible for an instruction to have more than one
-          // immediate operand, but this is not supported yet.
-          if (ImmReg == AMDGPU::ALU_LITERAL_X) {
-            ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
-            assert(C);
+        // subtract one from ImmIdx, because the DST operand is usually index
+        // 0 for MachineInstrs, but we have no DST in the Ops vector.
+        ImmIdx--;
 
-            if (C->getZExtValue() != 0) {
-              // This instruction is already using an immediate.
-              continue;
-            }
+        // Check that we aren't already using an immediate.
+        // XXX: It's possible for an instruction to have more than one
+        // immediate operand, but this is not supported yet.
+        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
+          assert(C);
 
-            // Set the immediate value
-            Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
+          if (C->getZExtValue() != 0) {
+            // This instruction is already using an immediate.
+            continue;
           }
-        }
-        // Set the immediate register
-        Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);
 
-        CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
+          // Set the immediate value
+          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
+        }
       }
-      break;
-    }
+      // Set the immediate register
+      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);
 
+      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
+    }
+    break;
+  }
   }
   return SelectCode(N);
 }
@@ -388,8 +385,7 @@ bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) {
       && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
       && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
       && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS))
-  {
+      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
     return true;
   }
   return false;
diff --git a/lib/Target/AMDGPU/AMDILISelLowering.cpp b/lib/Target/AMDGPU/AMDILISelLowering.cpp
index 23bd92a..e8cb703 100644
--- a/lib/Target/AMDGPU/AMDILISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDILISelLowering.cpp
@@ -44,8 +44,7 @@ using namespace llvm;
 // TargetLowering Class Implementation Begins
 //===----------------------------------------------------------------------===//
 void AMDGPUTargetLowering::InitAMDILLowering() {
-  int types[] =
-  {
+  int types[] = {
     (int)MVT::i8,
     (int)MVT::i16,
     (int)MVT::i32,
@@ -64,22 +63,19 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
     (int)MVT::v2i64
   };
 
-  int IntTypes[] =
-  {
+  int IntTypes[] = {
     (int)MVT::i8,
     (int)MVT::i16,
     (int)MVT::i32,
     (int)MVT::i64
   };
 
-  int FloatTypes[] =
-  {
+  int FloatTypes[] = {
     (int)MVT::f32,
     (int)MVT::f64
   };
 
-  int VectorTypes[] =
-  {
+  int VectorTypes[] = {
     (int)MVT::v2i8,
     (int)MVT::v4i8,
     (int)MVT::v2i16,
@@ -91,16 +87,16 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
     (int)MVT::v2f64,
     (int)MVT::v2i64
   };
-  size_t numTypes = sizeof(types) / sizeof(*types);
-  size_t numFloatTypes = sizeof(FloatTypes) / sizeof(*FloatTypes);
-  size_t numIntTypes = sizeof(IntTypes) / sizeof(*IntTypes);
-  size_t numVectorTypes = sizeof(VectorTypes) / sizeof(*VectorTypes);
+  size_t NumTypes = sizeof(types) / sizeof(*types);
+  size_t NumFloatTypes = sizeof(FloatTypes) / sizeof(*FloatTypes);
+  size_t NumIntTypes = sizeof(IntTypes) / sizeof(*IntTypes);
+  size_t NumVectorTypes = sizeof(VectorTypes) / sizeof(*VectorTypes);
 
   const AMDGPUSubtarget &STM = getTargetMachine().getSubtarget<AMDGPUSubtarget>();
   // These are the current register classes that are
   // supported
 
-  for (unsigned int x  = 0; x < numTypes; ++x) {
+  for (unsigned int x  = 0; x < NumTypes; ++x) {
     MVT::SimpleValueType VT = (MVT::SimpleValueType)types[x];
 
     //FIXME: SIGN_EXTEND_INREG is not meaningful for floating point types
@@ -121,7 +117,7 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
       setOperationAction(ISD::SDIV, VT, Custom);
     }
   }
-  for (unsigned int x = 0; x < numFloatTypes; ++x) {
+  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
     MVT::SimpleValueType VT = (MVT::SimpleValueType)FloatTypes[x];
 
     // IL does not have these operations for floating point types
@@ -136,7 +132,7 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
     setOperationAction(ISD::SETULE, VT, Expand);
   }
 
-  for (unsigned int x = 0; x < numIntTypes; ++x) {
+  for (unsigned int x = 0; x < NumIntTypes; ++x) {
     MVT::SimpleValueType VT = (MVT::SimpleValueType)IntTypes[x];
 
     // GPU also does not have divrem function for signed or unsigned
@@ -156,8 +152,7 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
     setOperationAction(ISD::CTLZ, VT, Expand);
   }
 
-  for ( unsigned int ii = 0; ii < numVectorTypes; ++ii )
-  {
+  for (unsigned int ii = 0; ii < NumVectorTypes; ++ii) {
     MVT::SimpleValueType VT = (MVT::SimpleValueType)VectorTypes[ii];
 
     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
@@ -229,10 +224,6 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
   maxStoresPerMemmove = 4096;
   maxStoresPerMemset  = 4096;
 
-#undef numTypes
-#undef numIntTypes
-#undef numVectorTypes
-#undef numFloatTypes
 }
 
 bool
diff --git a/lib/Target/AMDGPU/AMDILIntrinsicInfo.h b/lib/Target/AMDGPU/AMDILIntrinsicInfo.h
index 5d9e845..70c15d3 100644
--- a/lib/Target/AMDGPU/AMDILIntrinsicInfo.h
+++ b/lib/Target/AMDGPU/AMDILIntrinsicInfo.h
@@ -10,8 +10,8 @@
 //   Interface for the AMDGPU Implementation of the Intrinsic Info class.
 //
 //===-----------------------------------------------------------------------===//
-#ifndef _AMDIL_INTRINSICS_H_
-#define _AMDIL_INTRINSICS_H_
+#ifndef AMDIL_INTRINSICS_H
+#define AMDIL_INTRINSICS_H
 
 #include "llvm/Intrinsics.h"
 #include "llvm/Target/TargetIntrinsicInfo.h"
@@ -43,5 +43,5 @@ namespace llvm {
   }; // AMDGPUIntrinsicInfo
 }
 
-#endif // _AMDIL_INTRINSICS_H_
+#endif // AMDIL_INTRINSICS_H
 
diff --git a/lib/Target/AMDGPU/AMDILNIDevice.cpp b/lib/Target/AMDGPU/AMDILNIDevice.cpp
index a903f44..6681e19 100644
--- a/lib/Target/AMDGPU/AMDILNIDevice.cpp
+++ b/lib/Target/AMDGPU/AMDILNIDevice.cpp
@@ -16,13 +16,13 @@ AMDGPUNIDevice::AMDGPUNIDevice(AMDGPUSubtarget *ST)
   : AMDGPUEvergreenDevice(ST) {
   std::string name = ST->getDeviceName();
   if (name == "caicos") {
-    mDeviceFlag = OCL_DEVICE_CAICOS;
+    DeviceFlag = OCL_DEVICE_CAICOS;
   } else if (name == "turks") {
-    mDeviceFlag = OCL_DEVICE_TURKS;
+    DeviceFlag = OCL_DEVICE_TURKS;
   } else if (name == "cayman") {
-    mDeviceFlag = OCL_DEVICE_CAYMAN;
+    DeviceFlag = OCL_DEVICE_CAYMAN;
   } else {
-    mDeviceFlag = OCL_DEVICE_BARTS;
+    DeviceFlag = OCL_DEVICE_BARTS;
   }
 }
 AMDGPUNIDevice::~AMDGPUNIDevice() {
diff --git a/lib/Target/AMDGPU/AMDILNIDevice.h b/lib/Target/AMDGPU/AMDILNIDevice.h
index 387f7d1..0579489 100644
--- a/lib/Target/AMDGPU/AMDILNIDevice.h
+++ b/lib/Target/AMDGPU/AMDILNIDevice.h
@@ -14,8 +14,8 @@
 // implement in order to correctly answer queries on the capabilities of the
 // specific hardware.
 //===---------------------------------------------------------------------===//
-#ifndef _AMDILNIDEVICE_H_
-#define _AMDILNIDEVICE_H_
+#ifndef AMDILNIDEVICE_H
+#define AMDILNIDEVICE_H
 #include "AMDILEvergreenDevice.h"
 #include "AMDGPUSubtarget.h"
 
@@ -56,4 +56,4 @@ namespace llvm {
 
   static const unsigned int MAX_LDS_SIZE_900 = AMDGPUDevice::MAX_LDS_SIZE_800;
 } // namespace llvm
-#endif // _AMDILNIDEVICE_H_
+#endif // AMDILNIDEVICE_H
diff --git a/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp b/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
index f4611f6..6114ba3 100644
--- a/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
+++ b/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
@@ -159,8 +159,7 @@ Function safeNestedForEach(InputIterator First, InputIterator Last,
 
 namespace llvm {
   FunctionPass *
-  createAMDGPUPeepholeOpt(TargetMachine &tm) 
-  {
+  createAMDGPUPeepholeOpt(TargetMachine &tm) {
     return new AMDGPUPeepholeOpt(tm);
   }
 } // llvm namespace
diff --git a/lib/Target/AMDGPU/AMDILRegisterInfo.td b/lib/Target/AMDGPU/AMDILRegisterInfo.td
index 42235ff..b9d0334 100644
--- a/lib/Target/AMDGPU/AMDILRegisterInfo.td
+++ b/lib/Target/AMDGPU/AMDILRegisterInfo.td
@@ -85,24 +85,21 @@ def MEM : AMDILReg<999, "mem">, DwarfRegNum<[999]>;
 def RA : AMDILReg<998, "r998">, DwarfRegNum<[998]>;
 def FP : AMDILReg<997, "r997">, DwarfRegNum<[997]>;
 def GPRI16 : RegisterClass<"AMDGPU", [i16], 16,
-  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
-{
+  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> {
         let AltOrders = [(add (sequence "R%u", 1, 20))];
         let AltOrderSelect = [{
           return 1;
         }];
     }
 def GPRI32 : RegisterClass<"AMDGPU", [i32], 32,
-  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
-{
+  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> {
         let AltOrders = [(add (sequence "R%u", 1, 20))];
         let AltOrderSelect = [{
           return 1;
         }];
     }
 def GPRF32 : RegisterClass<"AMDGPU", [f32], 32,
-  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
-{
+  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)> {
         let AltOrders = [(add (sequence "R%u", 1, 20))];
         let AltOrderSelect = [{
           return 1;
diff --git a/lib/Target/AMDGPU/AMDILSIDevice.h b/lib/Target/AMDGPU/AMDILSIDevice.h
index 6a684cb..2272d85 100644
--- a/lib/Target/AMDGPU/AMDILSIDevice.h
+++ b/lib/Target/AMDGPU/AMDILSIDevice.h
@@ -14,8 +14,8 @@
 // implement in order to correctly answer queries on the capabilities of the
 // specific hardware.
 //===---------------------------------------------------------------------===//
-#ifndef _AMDILSIDEVICE_H_
-#define _AMDILSIDEVICE_H_
+#ifndef AMDILSIDEVICE_H
+#define AMDILSIDEVICE_H
 #include "AMDILEvergreenDevice.h"
 #include "AMDGPUSubtarget.h"
 
@@ -42,4 +42,4 @@ namespace llvm {
   }; // AMDGPUSIDevice
 
 } // namespace llvm
-#endif // _AMDILSIDEVICE_H_
+#endif // AMDILSIDEVICE_H
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index bbca34b..a0b7c32 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -10,8 +10,7 @@
 #include "AMDGPUMCAsmInfo.h"
 
 using namespace llvm;
-AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo()
-{
+AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
   HasSingleParameterDotFile = false;
   WeakDefDirective = 0;
   //===------------------------------------------------------------------===//
@@ -75,13 +74,11 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo()
 }
 
 const char*
-AMDGPUMCAsmInfo::getDataASDirective(unsigned int Size, unsigned int AS) const
-{
+AMDGPUMCAsmInfo::getDataASDirective(unsigned int Size, unsigned int AS) const {
   return 0;
 }
 
 const MCSection*
-AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const
-{
+AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const {
   return 0;
 }
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
index 0ca264b..d829f56 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
@@ -11,8 +11,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef AMDGPUMCASMINFO_H_
-#define AMDGPUMCASMINFO_H_
+#ifndef AMDGPUMCASMINFO_H
+#define AMDGPUMCASMINFO_H
 
 #include "llvm/MC/MCAsmInfo.h"
 namespace llvm {
@@ -27,4 +27,4 @@ namespace llvm {
       const MCSection* getNonexecutableStackSection(MCContext &CTX) const;
   };
 } // namespace llvm
-#endif // AMDGPUMCASMINFO_H_
+#endif // AMDGPUMCASMINFO_H
diff --git a/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
index 21b639b..d61bc0b 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -150,13 +150,12 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
   } else {
     switch(MI.getOpcode()) {
     case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
-    case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
-      {
-        uint64_t inst = getBinaryCodeForInstr(MI, Fixups);
-        EmitByte(INSTR_NATIVE, OS);
-        Emit(inst, OS);
-        break;
-      }
+    case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
+      uint64_t inst = getBinaryCodeForInstr(MI, Fixups);
+      EmitByte(INSTR_NATIVE, OS);
+      Emit(inst, OS);
+      break;
+    }
     case AMDGPU::CONSTANT_LOAD_eg:
     case AMDGPU::VTX_READ_PARAM_i32_eg:
     case AMDGPU::VTX_READ_PARAM_f32_eg:
@@ -164,24 +163,22 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
     case AMDGPU::VTX_READ_GLOBAL_i32_eg:
     case AMDGPU::VTX_READ_GLOBAL_f32_eg:
     case AMDGPU::VTX_READ_GLOBAL_v4i32_eg:
-    case AMDGPU::VTX_READ_GLOBAL_v4f32_eg:
-      {
-        uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
-        uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
-
-        EmitByte(INSTR_VTX, OS);
-        Emit(InstWord01, OS);
-        Emit(InstWord2, OS);
-        break;
-      }
+    case AMDGPU::VTX_READ_GLOBAL_v4f32_eg: {
+      uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
+      uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
+
+      EmitByte(INSTR_VTX, OS);
+      Emit(InstWord01, OS);
+      Emit(InstWord2, OS);
+      break;
+    }
     case AMDGPU::EG_Export:
-    case AMDGPU::R600_Export:
-      {
-        uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
-        EmitByte(INSTR_EXPORT, OS);
-        Emit(Inst, OS);
-        break;
-      }
+    case AMDGPU::R600_Export: {
+      uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
+      EmitByte(INSTR_EXPORT, OS);
+      Emit(Inst, OS);
+      break;
+    }
 
     default:
       EmitALUInstr(MI, Fixups, OS);
diff --git a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
index df5c53a..dc73fa8 100644
--- a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
+++ b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
@@ -98,7 +98,7 @@ bool R600ExpandSpecialInstrsPass::ExpandInputPerspective(MachineInstr &MI) {
         MI.getOperand(2).getImm());
 
 
-    unsigned Sel;
+    unsigned Sel = AMDGPU::sel_x;
     switch (i % 4) {
     case 0:Sel = AMDGPU::sel_x;break;
     case 1:Sel = AMDGPU::sel_y;break;
@@ -140,7 +140,7 @@ bool R600ExpandSpecialInstrsPass::ExpandInputConstant(MachineInstr &MI) {
     unsigned ReadReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
         MI.getOperand(1).getImm());
 
-    unsigned Sel;
+    unsigned Sel = AMDGPU::sel_x;
     switch (i % 4) {
     case 0:Sel = AMDGPU::sel_x;break;
     case 1:Sel = AMDGPU::sel_y;break;
@@ -220,7 +220,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
 
       bool IsReduction = TII->isReductionOp(MI.getOpcode());
       bool IsVector = TII->isVector(MI);
-	    bool IsCube = TII->isCubeOp(MI.getOpcode());
+      bool IsCube = TII->isCubeOp(MI.getOpcode());
       if (!IsReduction && !IsVector && !IsCube) {
         continue;
       }
diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
index 18d3d14..d6c18b0 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -80,53 +80,49 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
   switch (MI->getOpcode()) {
   default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
   case AMDGPU::SHADER_TYPE: break;
-  case AMDGPU::CLAMP_R600:
-    {
-      MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
-                                                    AMDGPU::MOV,
-                                                    MI->getOperand(0).getReg(),
-                                                    MI->getOperand(1).getReg());
-      TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
-      break;
-    }
-  case AMDGPU::FABS_R600:
-    {
-      MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
+  case AMDGPU::CLAMP_R600: {
+    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
+                                                   AMDGPU::MOV,
+                                                   MI->getOperand(0).getReg(),
+                                                   MI->getOperand(1).getReg());
+    TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
+    break;
+  }
+
+  case AMDGPU::FABS_R600: {
+    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                     AMDGPU::MOV,
                                                     MI->getOperand(0).getReg(),
                                                     MI->getOperand(1).getReg());
-      TII->addFlag(NewMI, 0, MO_FLAG_ABS);
-      break;
-    }
+    TII->addFlag(NewMI, 0, MO_FLAG_ABS);
+    break;
+  }
 
-  case AMDGPU::FNEG_R600:
-    {
-      MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
+  case AMDGPU::FNEG_R600: {
+    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
                                                     AMDGPU::MOV,
                                                     MI->getOperand(0).getReg(),
                                                     MI->getOperand(1).getReg());
-      TII->addFlag(NewMI, 0, MO_FLAG_NEG);
+    TII->addFlag(NewMI, 0, MO_FLAG_NEG);
     break;
-    }
+  }
 
-  case AMDGPU::R600_LOAD_CONST:
-    {
-      int64_t RegIndex = MI->getOperand(1).getImm();
-      unsigned ConstantReg = AMDGPU::R600_CReg32RegClass.getRegister(RegIndex);
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::COPY))
-                  .addOperand(MI->getOperand(0))
-                  .addReg(ConstantReg);
-      break;
-    }
+  case AMDGPU::R600_LOAD_CONST: {
+    int64_t RegIndex = MI->getOperand(1).getImm();
+    unsigned ConstantReg = AMDGPU::R600_CReg32RegClass.getRegister(RegIndex);
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::COPY))
+                .addOperand(MI->getOperand(0))
+                .addReg(ConstantReg);
+    break;
+  }
 
-  case AMDGPU::MASK_WRITE:
-    {
-      unsigned maskedRegister = MI->getOperand(0).getReg();
-      assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
-      MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
-      TII->addFlag(defInstr, 0, MO_FLAG_MASK);
-      break;
-    }
+  case AMDGPU::MASK_WRITE: {
+    unsigned maskedRegister = MI->getOperand(0).getReg();
+    assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
+    MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
+    TII->addFlag(defInstr, 0, MO_FLAG_MASK);
+    break;
+  }
 
   case AMDGPU::MOV_IMM_F32:
     TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
@@ -140,156 +136,154 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
 
 
   case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
-  case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
-    {
-      // Convert to DWORD address
-      unsigned NewAddr = MRI.createVirtualRegister(
+  case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
+    // Convert to DWORD address
+    unsigned NewAddr = MRI.createVirtualRegister(
                                              &AMDGPU::R600_TReg32_XRegClass);
-      unsigned ShiftValue = MRI.createVirtualRegister(
+    unsigned ShiftValue = MRI.createVirtualRegister(
                                               &AMDGPU::R600_TReg32RegClass);
-      unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
-
-      // XXX In theory, we should be able to pass ShiftValue directly to
-      // the LSHR_eg instruction as an inline literal, but I tried doing it
-      // this way and it didn't produce the correct results.
-      TII->buildMovImm(*BB, I, ShiftValue, 2);
-      TII->buildDefaultInstruction(*BB, I, AMDGPU::LSHR_eg, NewAddr,
-                                   MI->getOperand(1).getReg(),
-                                   ShiftValue);
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
-              .addOperand(MI->getOperand(0))
-              .addReg(NewAddr)
-              .addImm(EOP); // Set End of program bit
-      break;
-    }
+    unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
+
+    // XXX In theory, we should be able to pass ShiftValue directly to
+    // the LSHR_eg instruction as an inline literal, but I tried doing it
+    // this way and it didn't produce the correct results.
+    TII->buildMovImm(*BB, I, ShiftValue, 2);
+    TII->buildDefaultInstruction(*BB, I, AMDGPU::LSHR_eg, NewAddr,
+                                 MI->getOperand(1).getReg(),
+                                 ShiftValue);
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
+            .addOperand(MI->getOperand(0))
+            .addReg(NewAddr)
+            .addImm(EOP); // Set End of program bit
+    break;
+  }
 
-  case AMDGPU::RESERVE_REG:
-    {
-      R600MachineFunctionInfo * MFI = MF->getInfo<R600MachineFunctionInfo>();
-      int64_t ReservedIndex = MI->getOperand(0).getImm();
-      unsigned ReservedReg =
-                          AMDGPU::R600_TReg32RegClass.getRegister(ReservedIndex);
-      MFI->ReservedRegs.push_back(ReservedReg);
-      unsigned SuperReg =
+  case AMDGPU::RESERVE_REG: {
+    R600MachineFunctionInfo * MFI = MF->getInfo<R600MachineFunctionInfo>();
+    int64_t ReservedIndex = MI->getOperand(0).getImm();
+    unsigned ReservedReg =
+                         AMDGPU::R600_TReg32RegClass.getRegister(ReservedIndex);
+    MFI->ReservedRegs.push_back(ReservedReg);
+    unsigned SuperReg =
           AMDGPU::R600_Reg128RegClass.getRegister(ReservedIndex / 4);
-      MFI->ReservedRegs.push_back(SuperReg);
-      break;
-    }
+    MFI->ReservedRegs.push_back(SuperReg);
+    break;
+  }
+
+  case AMDGPU::TXD: {
+    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
+            .addOperand(MI->getOperand(3))
+            .addOperand(MI->getOperand(4))
+            .addOperand(MI->getOperand(5))
+            .addOperand(MI->getOperand(6));
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
+            .addOperand(MI->getOperand(2))
+            .addOperand(MI->getOperand(4))
+            .addOperand(MI->getOperand(5))
+            .addOperand(MI->getOperand(6));
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
+            .addOperand(MI->getOperand(0))
+            .addOperand(MI->getOperand(1))
+            .addOperand(MI->getOperand(4))
+            .addOperand(MI->getOperand(5))
+            .addOperand(MI->getOperand(6))
+            .addReg(T0, RegState::Implicit)
+            .addReg(T1, RegState::Implicit);
+    break;
+  }
+
+  case AMDGPU::TXD_SHADOW: {
+    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
+            .addOperand(MI->getOperand(3))
+            .addOperand(MI->getOperand(4))
+            .addOperand(MI->getOperand(5))
+            .addOperand(MI->getOperand(6));
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
+            .addOperand(MI->getOperand(2))
+            .addOperand(MI->getOperand(4))
+            .addOperand(MI->getOperand(5))
+            .addOperand(MI->getOperand(6));
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
+            .addOperand(MI->getOperand(0))
+            .addOperand(MI->getOperand(1))
+            .addOperand(MI->getOperand(4))
+            .addOperand(MI->getOperand(5))
+            .addOperand(MI->getOperand(6))
+            .addReg(T0, RegState::Implicit)
+            .addReg(T1, RegState::Implicit);
+    break;
+  }
 
-  case AMDGPU::TXD:
-    {
-      unsigned t0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-      unsigned t1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), t0)
-              .addOperand(MI->getOperand(3))
-              .addOperand(MI->getOperand(4))
-              .addOperand(MI->getOperand(5))
-              .addOperand(MI->getOperand(6));
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), t1)
-              .addOperand(MI->getOperand(2))
-              .addOperand(MI->getOperand(4))
-              .addOperand(MI->getOperand(5))
-              .addOperand(MI->getOperand(6));
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
-              .addOperand(MI->getOperand(0))
-              .addOperand(MI->getOperand(1))
-              .addOperand(MI->getOperand(4))
-              .addOperand(MI->getOperand(5))
-              .addOperand(MI->getOperand(6))
-              .addReg(t0, RegState::Implicit)
-              .addReg(t1, RegState::Implicit);
-      break;
-    }
-  case AMDGPU::TXD_SHADOW:
-    {
-      unsigned t0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-      unsigned t1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), t0)
-              .addOperand(MI->getOperand(3))
-              .addOperand(MI->getOperand(4))
-              .addOperand(MI->getOperand(5))
-              .addOperand(MI->getOperand(6));
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), t1)
-              .addOperand(MI->getOperand(2))
-              .addOperand(MI->getOperand(4))
-              .addOperand(MI->getOperand(5))
-              .addOperand(MI->getOperand(6));
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
-              .addOperand(MI->getOperand(0))
-              .addOperand(MI->getOperand(1))
-              .addOperand(MI->getOperand(4))
-              .addOperand(MI->getOperand(5))
-              .addOperand(MI->getOperand(6))
-              .addReg(t0, RegState::Implicit)
-              .addReg(t1, RegState::Implicit);
-      break;
-    }
   case AMDGPU::BRANCH:
       BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
               .addOperand(MI->getOperand(0))
               .addReg(0);
       break;
-  case AMDGPU::BRANCH_COND_f32:
-    {
-      MachineInstr *NewMI =
-        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
-                AMDGPU::PREDICATE_BIT)
-                .addOperand(MI->getOperand(1))
-                .addImm(OPCODE_IS_NOT_ZERO)
-                .addImm(0); // Flags
-      TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
-              .addOperand(MI->getOperand(0))
-              .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
-      break;
-    }
-  case AMDGPU::BRANCH_COND_i32:
-    {
-      MachineInstr *NewMI =
-        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
+
+  case AMDGPU::BRANCH_COND_f32: {
+    MachineInstr *NewMI =
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
               AMDGPU::PREDICATE_BIT)
               .addOperand(MI->getOperand(1))
-              .addImm(OPCODE_IS_NOT_ZERO_INT)
+              .addImm(OPCODE_IS_NOT_ZERO)
               .addImm(0); // Flags
-      TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
-             .addOperand(MI->getOperand(0))
-              .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
+            .addOperand(MI->getOperand(0))
+            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+    break;
+  }
+
+  case AMDGPU::BRANCH_COND_i32: {
+    MachineInstr *NewMI =
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
+            AMDGPU::PREDICATE_BIT)
+            .addOperand(MI->getOperand(1))
+            .addImm(OPCODE_IS_NOT_ZERO_INT)
+            .addImm(0); // Flags
+    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
+            .addOperand(MI->getOperand(0))
+            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+    break;
+  }
+
+  case AMDGPU::input_perspective: {
+    R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
+
+    // XXX Be finer-grained about register reservation
+    for (unsigned i = 0; i < 4; i++) {
+      unsigned ReservedReg = AMDGPU::R600_TReg32RegClass.getRegister(i);
+      MFI->ReservedRegs.push_back(ReservedReg);
+    }
+
+    switch (MI->getOperand(1).getImm()) {
+    case 0: // Perspective
+      MFI->HasPerspectiveInterpolation = true;
       break;
+    case 1: // Linear
+      MFI->HasLinearInterpolation = true;
+      break;
+    default:
+      assert(0 && "Unknown ij index");
     }
-  case AMDGPU::input_perspective:
-    {
-      R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
-
-      // XXX Be more fine about register reservation
-      for (unsigned i = 0; i < 4; i ++) {
-        unsigned ReservedReg = AMDGPU::R600_TReg32RegClass.getRegister(i);
-        MFI->ReservedRegs.push_back(ReservedReg);
-      }
 
-      switch (MI->getOperand(1).getImm()) {
-      case 0:// Perspective
-        MFI->HasPerspectiveInterpolation = true;
-        break;
-      case 1:// Linear
-        MFI->HasLinearInterpolation = true;
-        break;
-      default:
-        assert(0 && "Unknow ij index");
-      }
+    return BB;
+  }
 
-      return BB;
-    }
   case AMDGPU::EG_Export:
-  case AMDGPU::R600_Export:
-    {
-      bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN)? 1 : 0;
-      if (!EOP)
-        return BB;
-      unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_Export)? 84 : 40;
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
+  case AMDGPU::R600_Export: {
+    bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN)? 1 : 0;
+    if (!EOP)
+      return BB;
+    unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_Export)? 84 : 40;
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
             .addOperand(MI->getOperand(0))
             .addOperand(MI->getOperand(1))
             .addOperand(MI->getOperand(2))
@@ -299,8 +293,8 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
             .addOperand(MI->getOperand(6))
             .addImm(CfInst)
             .addImm(1);
-      break;
-    }
+    break;
+  }
   }
 
   MI->eraseFromParent();
diff --git a/lib/Target/AMDGPU/R600ISelLowering.h b/lib/Target/AMDGPU/R600ISelLowering.h
index d1dfe9f..53e7988 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/lib/Target/AMDGPU/R600ISelLowering.h
@@ -20,8 +20,7 @@ namespace llvm {
 
 class R600InstrInfo;
 
-class R600TargetLowering : public AMDGPUTargetLowering
-{
+class R600TargetLowering : public AMDGPUTargetLowering {
 public:
   R600TargetLowering(TargetMachine &TM);
   virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
diff --git a/lib/Target/AMDGPU/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp
index 814e0a2..b9ade1e 100644
--- a/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -127,8 +127,7 @@ bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
   }
 }
 
-bool R600InstrInfo::isALUInstr(unsigned Opcode) const
-{
+bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
   unsigned TargetFlags = get(Opcode).TSFlags;
 
   return ((TargetFlags & R600_InstFlag::OP1) |
diff --git a/lib/Target/AMDGPU/R600Instructions.td b/lib/Target/AMDGPU/R600Instructions.td
index 35c0b6a..39b49f6 100644
--- a/lib/Target/AMDGPU/R600Instructions.td
+++ b/lib/Target/AMDGPU/R600Instructions.td
@@ -48,8 +48,7 @@ class InstR600 <bits<11> inst, dag outs, dag ins, string asm, list<dag> pattern,
 }
 
 class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
-    AMDGPUInst <outs, ins, asm, pattern>
-{
+    AMDGPUInst <outs, ins, asm, pattern> {
   field bits<64> Inst;
 
   let Namespace = "AMDGPU";
@@ -344,8 +343,7 @@ def TEX_SHADOW : PatLeaf<
 
 class EG_CF_RAT <bits <8> cf_inst, bits <6> rat_inst, bits<4> rat_id, dag outs,
                  dag ins, string asm, list<dag> pattern> :
-    InstR600ISA <outs, ins, asm, pattern>
-{
+    InstR600ISA <outs, ins, asm, pattern> {
   bits<7>  RW_GPR;
   bits<7>  INDEX_GPR;
 
@@ -410,8 +408,8 @@ def isCayman : Predicate<"Subtarget.device()"
                             "->getDeviceFlag() == OCL_DEVICE_CAYMAN">;
 def isEGorCayman : Predicate<"Subtarget.device()"
                             "->getGeneration() == AMDGPUDeviceInfo::HD5XXX"
-			    "|| Subtarget.device()->getGeneration() =="
-			    "AMDGPUDeviceInfo::HD6XXX">;
+                            "|| Subtarget.device()->getGeneration() =="
+                            "AMDGPUDeviceInfo::HD6XXX">;
 
 def isR600toCayman : Predicate<
                      "Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX">;
@@ -444,13 +442,11 @@ def input_constant :  AMDGPUShaderInst <
 
 
 
-def INTERP_XY : R600_2OP <0xD6, "INTERP_XY", []>
-{
+def INTERP_XY : R600_2OP <0xD6, "INTERP_XY", []> {
   let bank_swizzle = 5;
 }
 
-def INTERP_ZW : R600_2OP <0xD7, "INTERP_ZW", []>
-{
+def INTERP_ZW : R600_2OP <0xD7, "INTERP_ZW", []> {
   let bank_swizzle = 5;
 }
 
@@ -499,8 +495,7 @@ class ExportInst : InstR600ISA<(
     i32imm:$sw_x, i32imm:$sw_y, i32imm:$sw_z, i32imm:$sw_w, i32imm:$inst,
     i32imm:$eop),
     !strconcat("EXPORT", " $src"),
-    []>
-{
+    []> {
   bits<13> arraybase;
   bits<2> type;
   bits<7> src;
@@ -638,8 +633,8 @@ def SETGT_INT : R600_2OP <
 >;
 
 def SETGE_INT : R600_2OP <
-	0x3C, "SETGE_INT",
-	[(set (i32 R600_Reg32:$dst),
+  0x3C, "SETGE_INT",
+  [(set (i32 R600_Reg32:$dst),
    (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGE))]
 >;
 
@@ -667,7 +662,7 @@ def PRED_SETGE_INT : R600_2OP <0x44, "PRED_SETGE_INT", []>;
 def PRED_SETNE_INT : R600_2OP <0x45, "PRED_SETNE_INT", []>;
 
 def CNDE_INT : R600_3OP <
-	0x1C, "CNDE_INT",
+  0x1C, "CNDE_INT",
   [(set (i32 R600_Reg32:$dst),
    (selectcc (i32 R600_Reg32:$src0), 0,
        (i32 R600_Reg32:$src1), (i32 R600_Reg32:$src2),
@@ -675,7 +670,7 @@ def CNDE_INT : R600_3OP <
 >;
 
 def CNDGE_INT : R600_3OP <
-	0x1E, "CNDGE_INT",
+  0x1E, "CNDGE_INT",
   [(set (i32 R600_Reg32:$dst),
    (selectcc (i32 R600_Reg32:$src0), 0,
        (i32 R600_Reg32:$src1), (i32 R600_Reg32:$src2),
@@ -683,7 +678,7 @@ def CNDGE_INT : R600_3OP <
 >;
 
 def CNDGT_INT : R600_3OP <
-	0x1D, "CNDGT_INT",
+  0x1D, "CNDGT_INT",
   [(set (i32 R600_Reg32:$dst),
    (selectcc (i32 R600_Reg32:$src0), 0,
        (i32 R600_Reg32:$src1), (i32 R600_Reg32:$src2),
@@ -966,8 +961,7 @@ let Predicates = [isR600] in {
   defm DIV_r600 : DIV_Common<RECIP_IEEE_r600>;
   def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
 
-  def R600_Export : ExportInst
-  {
+  def R600_Export : ExportInst {
     let Inst{52-49} = 1; // BURST_COUNT
     let Inst{53} = eop;
     let Inst{54} = 1; // VALID_PIXEL_MODE
@@ -1109,8 +1103,7 @@ let Predicates = [isEGorCayman] in {
   def : Pat<(fp_to_uint R600_Reg32:$src0),
     (FLT_TO_UINT_eg (TRUNC R600_Reg32:$src0))>;
 
-  def EG_Export : ExportInst
-  {
+  def EG_Export : ExportInst {
     let Inst{51-48} = 1; // BURST_COUNT
     let Inst{52} = 1; // VALID_PIXEL_MODE
     let Inst{53} = eop;
@@ -1128,8 +1121,7 @@ let Predicates = [isEGorCayman] in {
 let usesCustomInserter = 1 in {
 
 class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> comp_mask, string name> : EG_CF_RAT <
-  0x57, 0x2, 0, (outs), ins, !strconcat(name, " $rw_gpr, $index_gpr, $eop"), []>
-{
+  0x57, 0x2, 0, (outs), ins, !strconcat(name, " $rw_gpr, $index_gpr, $eop"), []> {
   let RIM         = 0;
   // XXX: Have a separate instruction for non-indexed writes.
   let TYPE        = 1;
diff --git a/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
index c65b347..d710e3a 100644
--- a/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
@@ -14,8 +14,7 @@ using namespace llvm;
 R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
   : MachineFunctionInfo(),
     HasLinearInterpolation(false),
-    HasPerspectiveInterpolation(false)
-  {
+    HasPerspectiveInterpolation(false) {
     memset(Outputs, 0, sizeof(Outputs));
   }
 
diff --git a/lib/Target/AMDGPU/R600RegisterInfo.h b/lib/Target/AMDGPU/R600RegisterInfo.h
index e6ac2aa..9188b93 100644
--- a/lib/Target/AMDGPU/R600RegisterInfo.h
+++ b/lib/Target/AMDGPU/R600RegisterInfo.h
@@ -22,8 +22,7 @@ namespace llvm {
 class R600TargetMachine;
 class TargetInstrInfo;
 
-struct R600RegisterInfo : public AMDGPURegisterInfo
-{
+struct R600RegisterInfo : public AMDGPURegisterInfo {
   AMDGPUTargetMachine &TM;
   const TargetInstrInfo &TII;
 
diff --git a/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp b/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp
index 56db55c..de3a060 100644
--- a/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp
@@ -32,8 +32,7 @@ private:
   MachineDominatorTree *MD;
   MachinePostDominatorTree *MPD;
 
-  bool isSGPR(const TargetRegisterClass *RegClass)
-  {
+  bool isSGPR(const TargetRegisterClass *RegClass) {
     return RegClass == &AMDGPU::SReg_1RegClass ||
            RegClass == &AMDGPU::SReg_32RegClass ||
            RegClass == &AMDGPU::SReg_64RegClass ||
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index d24db1e..889eeb4 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -179,7 +179,7 @@ void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
           .addOperand(iReg)
           .addOperand(attr_chan)
           .addOperand(attr)
-	  .addReg(M0);
+          .addReg(M0);
 
   BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P2_F32))
           .addOperand(dst)
@@ -187,7 +187,7 @@ void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
           .addOperand(jReg)
           .addOperand(attr_chan)
           .addOperand(attr)
-	  .addReg(M0);
+          .addReg(M0);
 
   MI->eraseFromParent();
 }
@@ -208,7 +208,7 @@ void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
           .addOperand(dst)
           .addOperand(attr_chan)
           .addOperand(attr)
-	  .addReg(M0);
+          .addReg(M0);
 
   MI->eraseFromParent();
 }
@@ -229,8 +229,8 @@ void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
   unsigned VCC = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
 
   BuildMI(BB, I, BB.findDebugLoc(I),
-	  TII->get(AMDGPU::V_CMP_GT_F32_e32),
-	  VCC)
+          TII->get(AMDGPU::V_CMP_GT_F32_e32),
+          VCC)
           .addReg(AMDGPU::SREG_LIT_0)
           .addOperand(MI->getOperand(1));
 
diff --git a/lib/Target/AMDGPU/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h
index 4407bf0..c82af86 100644
--- a/lib/Target/AMDGPU/SIISelLowering.h
+++ b/lib/Target/AMDGPU/SIISelLowering.h
@@ -19,8 +19,7 @@
 
 namespace llvm {
 
-class SITargetLowering : public AMDGPUTargetLowering
-{
+class SITargetLowering : public AMDGPUTargetLowering {
   const SIInstrInfo * TII;
 
   /// AppendS_WAITCNT - Memory reads and writes are syncronized using the
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 60e7be4..3c773a0 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -47,7 +47,7 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
             .addReg(SrcReg, getKillRegState(KillSrc));
   } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
     assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
-	   AMDGPU::SReg_32RegClass.contains(SrcReg));
+           AMDGPU::SReg_32RegClass.contains(SrcReg));
     BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
             .addReg(SrcReg, getKillRegState(KillSrc));
   } else {
diff --git a/lib/Target/AMDGPU/SIInstrInfo.td b/lib/Target/AMDGPU/SIInstrInfo.td
index ea8a33f..873a451 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/lib/Target/AMDGPU/SIInstrInfo.td
@@ -562,7 +562,7 @@ class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBU
 }
 
 multiclass SMRD_Helper <bits<5> op, string asm, RegisterClass dstClass,
-			ValueType vt> {
+                        ValueType vt> {
   def _IMM : SMRD <
               op,
               (outs dstClass:$dst),
diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
index d6f71f6..a2123a7 100644
--- a/lib/Target/AMDGPU/SIInstructions.td
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -1058,10 +1058,10 @@ def LOAD_CONST : AMDGPUShaderInst <
 let usesCustomInserter = 1 in {
 
 def SI_V_CNDLT : InstSI <
-	(outs VReg_32:$dst),
-	(ins VReg_32:$src0, VReg_32:$src1, VReg_32:$src2),
-	"SI_V_CNDLT $dst, $src0, $src1, $src2",
-	[(set VReg_32:$dst, (int_AMDGPU_cndlt VReg_32:$src0, VReg_32:$src1, VReg_32:$src2))]
+  (outs VReg_32:$dst),
+  (ins VReg_32:$src0, VReg_32:$src1, VReg_32:$src2),
+  "SI_V_CNDLT $dst, $src0, $src1, $src2",
+  [(set VReg_32:$dst, (int_AMDGPU_cndlt VReg_32:$src0, VReg_32:$src1, VReg_32:$src2))]
 >;
 
 def SI_INTERP : InstSI <
@@ -1080,17 +1080,17 @@ def SI_INTERP_CONST : InstSI <
 >;
 
 def SI_KIL : InstSI <
-	(outs),
-	(ins VReg_32:$src),
-	"SI_KIL $src",
-	[(int_AMDGPU_kill VReg_32:$src)]
+  (outs),
+  (ins VReg_32:$src),
+  "SI_KIL $src",
+  [(int_AMDGPU_kill VReg_32:$src)]
 >;
 
 def SI_WQM : InstSI <
-	(outs),
-	(ins),
-	"SI_WQM",
-	[(int_SI_wqm)]
+  (outs),
+  (ins),
+  "SI_WQM",
+  [(int_SI_wqm)]
 >;
 
 } // end usesCustomInserter 
diff --git a/lib/Target/AMDGPU/SILowerControlFlow.cpp b/lib/Target/AMDGPU/SILowerControlFlow.cpp
index f91e3ea..5e7c3fa 100644
--- a/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -146,8 +146,8 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
                   .addReg(Reg);
           freeReg(Reg);
 
-	  if (MF.getInfo<SIMachineFunctionInfo>()->ShaderType == ShaderType::PIXEL &&
-	      PredicateStack.empty()) {
+          if (MF.getInfo<SIMachineFunctionInfo>()->ShaderType == ShaderType::PIXEL &&
+              PredicateStack.empty()) {
             // If the exec mask is non-zero, skip the next two instructions
             BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::S_CBRANCH_EXECNZ))
                     .addImm(3)
@@ -167,7 +167,7 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
 
             // ... and terminate wavefront
             BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::S_ENDPGM));
-	  }
+          }
           MI.eraseFromParent();
           break;
       }
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 68097b4..26902fa 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -14,8 +14,8 @@
 //===----------------------------------------------------------------------===//
 
 
-#ifndef _SIMACHINEFUNCTIONINFO_H_
-#define _SIMACHINEFUNCTIONINFO_H_
+#ifndef SIMACHINEFUNCTIONINFO_H_
+#define SIMACHINEFUNCTIONINFO_H_
 
 #include "llvm/CodeGen/MachineFunction.h"
 
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.h b/lib/Target/AMDGPU/SIRegisterInfo.h
index b571da9..e70ab95 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -22,8 +22,7 @@ namespace llvm {
 class AMDGPUTargetMachine;
 class TargetInstrInfo;
 
-struct SIRegisterInfo : public AMDGPURegisterInfo
-{
+struct SIRegisterInfo : public AMDGPURegisterInfo {
   AMDGPUTargetMachine &TM;
   const TargetInstrInfo &TII;
 
-- 
1.7.11.4



More information about the mesa-dev mailing list