[Mesa-dev] [PATCH 1/5] AMDGPU: Coding style - put braces on same line as function headers

Tom Stellard tom at stellard.net
Wed Nov 28 14:50:07 PST 2012


From: Tom Stellard <thomas.stellard at amd.com>

---
 lib/Target/AMDGPU/AMDGPUConvertToISA.cpp      |   3 +-
 lib/Target/AMDGPU/AMDGPUISelLowering.cpp      |  33 +++------
 lib/Target/AMDGPU/AMDGPUInstrInfo.cpp         |   6 +-
 lib/Target/AMDGPU/AMDGPUSubtarget.cpp         |  24 ++----
 lib/Target/AMDGPU/AMDGPUTargetMachine.cpp     |   6 +-
 lib/Target/AMDGPU/AMDIL7XXDevice.cpp          |  45 ++++--------
 lib/Target/AMDGPU/AMDILCFGStructurizer.cpp    |  45 ++++--------
 lib/Target/AMDGPU/AMDILDevice.cpp             |  42 ++++-------
 lib/Target/AMDGPU/AMDILDeviceInfo.cpp         |   3 +-
 lib/Target/AMDGPU/AMDILEvergreenDevice.cpp    |   3 +-
 lib/Target/AMDGPU/AMDILFrameLowering.cpp      |  18 ++---
 lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp       |   9 +--
 lib/Target/AMDGPU/AMDILISelLowering.cpp       |  51 +++++--------
 lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp      |  15 ++--
 lib/Target/AMDGPU/AMDILNIDevice.cpp           |  21 ++----
 lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp  |  78 +++++++-------------
 lib/Target/AMDGPU/AMDILSIDevice.cpp           |  15 ++--
 lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp |   6 +-
 lib/Target/AMDGPU/R600ISelLowering.cpp        |  45 ++++--------
 lib/Target/AMDGPU/R600InstrInfo.cpp           | 102 +++++++++-----------------
 lib/Target/AMDGPU/R600MachineFunctionInfo.cpp |   6 +-
 lib/Target/AMDGPU/R600RegisterInfo.cpp        |  15 ++--
 lib/Target/AMDGPU/SIAssignInterpRegs.cpp      |   6 +-
 lib/Target/AMDGPU/SIFixSGPRLiveness.cpp       |  24 ++----
 lib/Target/AMDGPU/SIISelLowering.cpp          |  45 ++++--------
 lib/Target/AMDGPU/SIInstrInfo.cpp             |  12 +--
 lib/Target/AMDGPU/SIRegisterInfo.cpp          |   9 +--
 27 files changed, 229 insertions(+), 458 deletions(-)

diff --git a/lib/Target/AMDGPU/AMDGPUConvertToISA.cpp b/lib/Target/AMDGPU/AMDGPUConvertToISA.cpp
index fbca0a7..dd5f786 100644
--- a/lib/Target/AMDGPU/AMDGPUConvertToISA.cpp
+++ b/lib/Target/AMDGPU/AMDGPUConvertToISA.cpp
@@ -44,8 +44,7 @@ FunctionPass *llvm::createAMDGPUConvertToISAPass(TargetMachine &tm) {
   return new AMDGPUConvertToISAPass(tm);
 }
 
-bool AMDGPUConvertToISAPass::runOnMachineFunction(MachineFunction &MF)
-{
+bool AMDGPUConvertToISAPass::runOnMachineFunction(MachineFunction &MF) {
   const AMDGPUInstrInfo * TII =
                       static_cast<const AMDGPUInstrInfo*>(TM.getInstrInfo());
 
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 57dcaac..d815ef8 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -21,8 +21,7 @@
 using namespace llvm;
 
 AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
-  TargetLowering(TM, new TargetLoweringObjectFileELF())
-{
+  TargetLowering(TM, new TargetLoweringObjectFileELF()) {
 
   // Initialize target lowering borrowed from AMDIL
   InitAMDILLowering();
@@ -55,8 +54,7 @@ SDValue AMDGPUTargetLowering::LowerFormalArguments(
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       DebugLoc DL, SelectionDAG &DAG,
-                                      SmallVectorImpl<SDValue> &InVals) const
-{
+                                      SmallVectorImpl<SDValue> &InVals) const {
   for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
     InVals.push_back(SDValue());
   }
@@ -69,8 +67,7 @@ SDValue AMDGPUTargetLowering::LowerReturn(
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                                      const SmallVectorImpl<SDValue> &OutVals,
-                                     DebugLoc DL, SelectionDAG &DAG) const
-{
+                                     DebugLoc DL, SelectionDAG &DAG) const {
   return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
 }
 
@@ -79,8 +76,7 @@ SDValue AMDGPUTargetLowering::LowerReturn(
 //===---------------------------------------------------------------------===//
 
 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
-    const
-{
+    const {
   switch (Op.getOpcode()) {
   default:
     Op.getNode()->dump();
@@ -100,8 +96,7 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
 }
 
 SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
-    SelectionDAG &DAG) const
-{
+    SelectionDAG &DAG) const {
   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
   DebugLoc DL = Op.getDebugLoc();
   EVT VT = Op.getValueType();
@@ -144,8 +139,7 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
 
 ///IABS(a) = SMAX(sub(0, a), a)
 SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
-    SelectionDAG &DAG) const
-{
+    SelectionDAG &DAG) const {
 
   DebugLoc DL = Op.getDebugLoc();
   EVT VT = Op.getValueType();
@@ -158,8 +152,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
 /// Linear Interpolation
 /// LRP(a, b, c) = muladd(a,  b, (1 - a) * c)
 SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
-    SelectionDAG &DAG) const
-{
+    SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT VT = Op.getValueType();
   SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
@@ -175,8 +168,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
 
 
 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
-    SelectionDAG &DAG) const
-{
+    SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT VT = Op.getValueType();
 
@@ -284,8 +276,7 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
 // Helper functions
 //===----------------------------------------------------------------------===//
 
-bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const
-{
+bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
   if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
     return CFP->isExactlyValue(1.0);
   }
@@ -295,8 +286,7 @@ bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const
   return false;
 }
 
-bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const
-{
+bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
   if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
     return CFP->getValueAPF().isZero();
   }
@@ -323,8 +313,7 @@ SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
 
 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
 
-const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const
-{
+const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
   switch (Opcode) {
   default: return 0;
   // AMDIL DAG nodes
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
index 9aae09a..22b0794 100644
--- a/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -155,8 +155,7 @@ AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
 }
 bool
 AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                     const SmallVectorImpl<unsigned> &Ops) const
-{
+                                     const SmallVectorImpl<unsigned> &Ops) const {
   // TODO: Implement this function
   return false;
 }
@@ -236,8 +235,7 @@ AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
 }
  
 void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
-    DebugLoc DL) const
-{
+    DebugLoc DL) const {
   MachineRegisterInfo &MRI = MF.getRegInfo();
   const AMDGPURegisterInfo & RI = getRegisterInfo();
 
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index d4a70b6..cc8f961 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -37,31 +37,26 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) :
   mDevice = AMDGPUDeviceInfo::getDeviceFromName(mDevName, this, mIs64bit);
 }
 
-AMDGPUSubtarget::~AMDGPUSubtarget()
-{
+AMDGPUSubtarget::~AMDGPUSubtarget() {
   delete mDevice;
 }
 
 bool
-AMDGPUSubtarget::isOverride(AMDGPUDeviceInfo::Caps caps) const
-{
+AMDGPUSubtarget::isOverride(AMDGPUDeviceInfo::Caps caps) const {
   assert(caps < AMDGPUDeviceInfo::MaxNumberCapabilities &&
       "Caps index is out of bounds!");
   return CapsOverride[caps];
 }
 bool
-AMDGPUSubtarget::is64bit() const 
-{
+AMDGPUSubtarget::is64bit() const {
   return mIs64bit;
 }
 bool
-AMDGPUSubtarget::isTargetELF() const
-{
+AMDGPUSubtarget::isTargetELF() const {
   return false;
 }
 size_t
-AMDGPUSubtarget::getDefaultSize(uint32_t dim) const
-{
+AMDGPUSubtarget::getDefaultSize(uint32_t dim) const {
   if (dim > 3) {
     return 1;
   } else {
@@ -70,8 +65,7 @@ AMDGPUSubtarget::getDefaultSize(uint32_t dim) const
 }
 
 std::string
-AMDGPUSubtarget::getDataLayout() const
-{
+AMDGPUSubtarget::getDataLayout() const {
     if (!mDevice) {
         return std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
                 "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
@@ -83,12 +77,10 @@ AMDGPUSubtarget::getDataLayout() const
 }
 
 std::string
-AMDGPUSubtarget::getDeviceName() const
-{
+AMDGPUSubtarget::getDeviceName() const {
   return mDevName;
 }
 const AMDGPUDevice *
-AMDGPUSubtarget::device() const
-{
+AMDGPUSubtarget::device() const {
   return mDevice;
 }
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 5c4af91..113b7bf 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -65,8 +65,7 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
   }
 }
 
-AMDGPUTargetMachine::~AMDGPUTargetMachine()
-{
+AMDGPUTargetMachine::~AMDGPUTargetMachine() {
 }
 
 namespace {
@@ -93,8 +92,7 @@ TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) {
 }
 
 bool
-AMDGPUPassConfig::addPreISel()
-{
+AMDGPUPassConfig::addPreISel() {
   return false;
 }
 
diff --git a/lib/Target/AMDGPU/AMDIL7XXDevice.cpp b/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
index 8561f0b..854d690 100644
--- a/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
+++ b/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
@@ -12,8 +12,7 @@
 
 using namespace llvm;
 
-AMDGPU7XXDevice::AMDGPU7XXDevice(AMDGPUSubtarget *ST) : AMDGPUDevice(ST)
-{
+AMDGPU7XXDevice::AMDGPU7XXDevice(AMDGPUSubtarget *ST) : AMDGPUDevice(ST) {
   setCaps();
   std::string name = mSTM->getDeviceName();
   if (name == "rv710") {
@@ -25,35 +24,29 @@ AMDGPU7XXDevice::AMDGPU7XXDevice(AMDGPUSubtarget *ST) : AMDGPUDevice(ST)
   }
 }
 
-AMDGPU7XXDevice::~AMDGPU7XXDevice()
-{
+AMDGPU7XXDevice::~AMDGPU7XXDevice() {
 }
 
-void AMDGPU7XXDevice::setCaps()
-{
+void AMDGPU7XXDevice::setCaps() {
   mSWBits.set(AMDGPUDeviceInfo::LocalMem);
 }
 
-size_t AMDGPU7XXDevice::getMaxLDSSize() const
-{
+size_t AMDGPU7XXDevice::getMaxLDSSize() const {
   if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
     return MAX_LDS_SIZE_700;
   }
   return 0;
 }
 
-size_t AMDGPU7XXDevice::getWavefrontSize() const
-{
+size_t AMDGPU7XXDevice::getWavefrontSize() const {
   return AMDGPUDevice::HalfWavefrontSize;
 }
 
-uint32_t AMDGPU7XXDevice::getGeneration() const
-{
+uint32_t AMDGPU7XXDevice::getGeneration() const {
   return AMDGPUDeviceInfo::HD4XXX;
 }
 
-uint32_t AMDGPU7XXDevice::getResourceID(uint32_t DeviceID) const
-{
+uint32_t AMDGPU7XXDevice::getResourceID(uint32_t DeviceID) const {
   switch (DeviceID) {
   default:
     assert(0 && "ID type passed in is unknown!");
@@ -84,22 +77,18 @@ uint32_t AMDGPU7XXDevice::getResourceID(uint32_t DeviceID) const
   return 0;
 }
 
-uint32_t AMDGPU7XXDevice::getMaxNumUAVs() const
-{
+uint32_t AMDGPU7XXDevice::getMaxNumUAVs() const {
   return 1;
 }
 
-AMDGPU770Device::AMDGPU770Device(AMDGPUSubtarget *ST): AMDGPU7XXDevice(ST)
-{
+AMDGPU770Device::AMDGPU770Device(AMDGPUSubtarget *ST) : AMDGPU7XXDevice(ST) {
   setCaps();
 }
 
-AMDGPU770Device::~AMDGPU770Device()
-{
+AMDGPU770Device::~AMDGPU770Device() {
 }
 
-void AMDGPU770Device::setCaps()
-{
+void AMDGPU770Device::setCaps() {
   if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
     mSWBits.set(AMDGPUDeviceInfo::FMA);
     mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
@@ -110,20 +99,16 @@ void AMDGPU770Device::setCaps()
   mSWBits.set(AMDGPUDeviceInfo::LocalMem);
 }
 
-size_t AMDGPU770Device::getWavefrontSize() const
-{
+size_t AMDGPU770Device::getWavefrontSize() const {
   return AMDGPUDevice::WavefrontSize;
 }
 
-AMDGPU710Device::AMDGPU710Device(AMDGPUSubtarget *ST) : AMDGPU7XXDevice(ST)
-{
+AMDGPU710Device::AMDGPU710Device(AMDGPUSubtarget *ST) : AMDGPU7XXDevice(ST) {
 }
 
-AMDGPU710Device::~AMDGPU710Device()
-{
+AMDGPU710Device::~AMDGPU710Device() {
 }
 
-size_t AMDGPU710Device::getWavefrontSize() const
-{
+size_t AMDGPU710Device::getWavefrontSize() const {
   return AMDGPUDevice::QuarterWavefrontSize;
 }
diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
index 01a5d89..e3d1990 100644
--- a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
@@ -58,8 +58,7 @@ STATISTIC(numClonedInstr,           "CFGStructurizer cloned instructions");
 // Miscellaneous utility for CFGStructurizer.
 //
 //===----------------------------------------------------------------------===//
-namespace llvmCFGStruct
-{
+namespace llvmCFGStruct {
 #define SHOWNEWINSTR(i) \
   if (DEBUGME) errs() << "New instr: " << *i << "\n"
 
@@ -108,8 +107,7 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
 //
 //===----------------------------------------------------------------------===//
 
-namespace llvmCFGStruct
-{
+namespace llvmCFGStruct {
 template<class PassT>
 struct CFGStructTraits {
 };
@@ -153,12 +151,10 @@ public:
 //
 //===----------------------------------------------------------------------===//
 
-namespace llvmCFGStruct
-{
+namespace llvmCFGStruct {
 // bixia TODO: port it to BasicBlock, not just MachineBasicBlock.
 template<class PassT>
-class  CFGStructurizer
-{
+class  CFGStructurizer {
 public:
   typedef enum {
     Not_SinglePath = 0,
@@ -1570,8 +1566,7 @@ void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk,
 } //mergeLooplandBlock
 
 template<class PassT>
-void CFGStructurizer<PassT>::reversePredicateSetter(typename BlockT::iterator I)
-{
+void CFGStructurizer<PassT>::reversePredicateSetter(typename BlockT::iterator I) {
   while (I--) {
     if (I->getOpcode() == AMDGPU::PRED_X) {
       switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
@@ -2549,10 +2544,8 @@ CFGStructurizer<PassT>::findNearestCommonPostDom
 
 using namespace llvmCFGStruct;
 
-namespace llvm
-{
-class AMDGPUCFGStructurizer : public MachineFunctionPass
-{
+namespace llvm {
+class AMDGPUCFGStructurizer : public MachineFunctionPass {
 public:
   typedef MachineInstr              InstructionType;
   typedef MachineFunction           FunctionType;
@@ -2598,10 +2591,8 @@ const TargetInstrInfo *AMDGPUCFGStructurizer::getTargetInstrInfo() const {
 
 using namespace llvmCFGStruct;
 
-namespace llvm
-{
-class AMDGPUCFGPrepare : public AMDGPUCFGStructurizer
-{
+namespace llvm {
+class AMDGPUCFGPrepare : public AMDGPUCFGStructurizer {
 public:
   static char ID;
 
@@ -2621,8 +2612,7 @@ char AMDGPUCFGPrepare::ID = 0;
 } //end of namespace llvm
 
 AMDGPUCFGPrepare::AMDGPUCFGPrepare(TargetMachine &tm)
-  : AMDGPUCFGStructurizer(ID, tm ) 
-{
+  : AMDGPUCFGStructurizer(ID, tm) {
 }
 const char *AMDGPUCFGPrepare::getPassName() const {
   return "AMD IL Control Flow Graph Preparation Pass";
@@ -2645,10 +2635,8 @@ void AMDGPUCFGPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
 
 using namespace llvmCFGStruct;
 
-namespace llvm
-{
-class AMDGPUCFGPerform : public AMDGPUCFGStructurizer
-{
+namespace llvm {
+class AMDGPUCFGPerform : public AMDGPUCFGStructurizer {
 public:
   static char ID;
 
@@ -2666,8 +2654,7 @@ char AMDGPUCFGPerform::ID = 0;
 } //end of namespace llvm
 
   AMDGPUCFGPerform::AMDGPUCFGPerform(TargetMachine &tm)
-: AMDGPUCFGStructurizer(ID, tm)
-{
+: AMDGPUCFGStructurizer(ID, tm) {
 }
 
 const char *AMDGPUCFGPerform::getPassName() const {
@@ -2688,12 +2675,10 @@ void AMDGPUCFGPerform::getAnalysisUsage(AnalysisUsage &AU) const {
 //
 //===----------------------------------------------------------------------===//
 
-namespace llvmCFGStruct
-{
+namespace llvmCFGStruct {
 // this class is tailor to the AMDGPU backend
 template<>
-struct CFGStructTraits<AMDGPUCFGStructurizer>
-{
+struct CFGStructTraits<AMDGPUCFGStructurizer> {
   typedef int RegiT;
 
   static int getBreakNzeroOpcode(int oldOpcode) {
diff --git a/lib/Target/AMDGPU/AMDILDevice.cpp b/lib/Target/AMDGPU/AMDILDevice.cpp
index 3955828..3ff62a8 100644
--- a/lib/Target/AMDGPU/AMDILDevice.cpp
+++ b/lib/Target/AMDGPU/AMDILDevice.cpp
@@ -11,33 +11,28 @@
 
 using namespace llvm;
 // Default implementation for all of the classes.
-AMDGPUDevice::AMDGPUDevice(AMDGPUSubtarget *ST) : mSTM(ST)
-{
+AMDGPUDevice::AMDGPUDevice(AMDGPUSubtarget *ST) : mSTM(ST) {
   mHWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
   mSWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
   setCaps();
   mDeviceFlag = OCL_DEVICE_ALL;
 }
 
-AMDGPUDevice::~AMDGPUDevice()
-{
+AMDGPUDevice::~AMDGPUDevice() {
     mHWBits.clear();
     mSWBits.clear();
 }
 
-size_t AMDGPUDevice::getMaxGDSSize() const
-{
+size_t AMDGPUDevice::getMaxGDSSize() const {
   return 0;
 }
 
 uint32_t 
-AMDGPUDevice::getDeviceFlag() const
-{
+AMDGPUDevice::getDeviceFlag() const {
   return mDeviceFlag;
 }
 
-size_t AMDGPUDevice::getMaxNumCBs() const
-{
+size_t AMDGPUDevice::getMaxNumCBs() const {
   if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) {
     return HW_MAX_NUM_CB;
   }
@@ -45,8 +40,7 @@ size_t AMDGPUDevice::getMaxNumCBs() const
   return 0;
 }
 
-size_t AMDGPUDevice::getMaxCBSize() const
-{
+size_t AMDGPUDevice::getMaxCBSize() const {
   if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) {
     return MAX_CB_SIZE;
   }
@@ -54,18 +48,15 @@ size_t AMDGPUDevice::getMaxCBSize() const
   return 0;
 }
 
-size_t AMDGPUDevice::getMaxScratchSize() const
-{
+size_t AMDGPUDevice::getMaxScratchSize() const {
   return 65536;
 }
 
-uint32_t AMDGPUDevice::getStackAlignment() const
-{
+uint32_t AMDGPUDevice::getStackAlignment() const {
   return 16;
 }
 
-void AMDGPUDevice::setCaps()
-{
+void AMDGPUDevice::setCaps() {
   mSWBits.set(AMDGPUDeviceInfo::HalfOps);
   mSWBits.set(AMDGPUDeviceInfo::ByteOps);
   mSWBits.set(AMDGPUDeviceInfo::ShortOps);
@@ -94,8 +85,7 @@ void AMDGPUDevice::setCaps()
 }
 
 AMDGPUDeviceInfo::ExecutionMode
-AMDGPUDevice::getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const
-{
+AMDGPUDevice::getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const {
   if (mHWBits[Caps]) {
     assert(!mSWBits[Caps] && "Cannot set both SW and HW caps");
     return AMDGPUDeviceInfo::Hardware;
@@ -110,24 +100,20 @@ AMDGPUDevice::getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const
 
 }
 
-bool AMDGPUDevice::isSupported(AMDGPUDeviceInfo::Caps Mode) const
-{
+bool AMDGPUDevice::isSupported(AMDGPUDeviceInfo::Caps Mode) const {
   return getExecutionMode(Mode) != AMDGPUDeviceInfo::Unsupported;
 }
 
-bool AMDGPUDevice::usesHardware(AMDGPUDeviceInfo::Caps Mode) const
-{
+bool AMDGPUDevice::usesHardware(AMDGPUDeviceInfo::Caps Mode) const {
   return getExecutionMode(Mode) == AMDGPUDeviceInfo::Hardware;
 }
 
-bool AMDGPUDevice::usesSoftware(AMDGPUDeviceInfo::Caps Mode) const
-{
+bool AMDGPUDevice::usesSoftware(AMDGPUDeviceInfo::Caps Mode) const {
   return getExecutionMode(Mode) == AMDGPUDeviceInfo::Software;
 }
 
 std::string
-AMDGPUDevice::getDataLayout() const
-{
+AMDGPUDevice::getDataLayout() const {
     return std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
       "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
       "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
diff --git a/lib/Target/AMDGPU/AMDILDeviceInfo.cpp b/lib/Target/AMDGPU/AMDILDeviceInfo.cpp
index b2f7cfb..e110ddd 100644
--- a/lib/Target/AMDGPU/AMDILDeviceInfo.cpp
+++ b/lib/Target/AMDGPU/AMDILDeviceInfo.cpp
@@ -18,8 +18,7 @@ namespace llvm {
 namespace AMDGPUDeviceInfo {
     AMDGPUDevice*
 getDeviceFromName(const std::string &deviceName, AMDGPUSubtarget *ptr,
-                  bool is64bit, bool is64on32bit)
-{
+                  bool is64bit, bool is64on32bit) {
     if (deviceName.c_str()[2] == '7') {
         switch (deviceName.c_str()[3]) {
             case '1':
diff --git a/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp b/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
index 3532a28..28e6e84 100644
--- a/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
+++ b/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
@@ -156,8 +156,7 @@ AMDGPURedwoodDevice::AMDGPURedwoodDevice(AMDGPUSubtarget *ST)
   setCaps();
 }
 
-AMDGPURedwoodDevice::~AMDGPURedwoodDevice()
-{
+AMDGPURedwoodDevice::~AMDGPURedwoodDevice() {
 }
 
 void AMDGPURedwoodDevice::setCaps() {
diff --git a/lib/Target/AMDGPU/AMDILFrameLowering.cpp b/lib/Target/AMDGPU/AMDILFrameLowering.cpp
index f2a0fe5..7410108 100644
--- a/lib/Target/AMDGPU/AMDILFrameLowering.cpp
+++ b/lib/Target/AMDGPU/AMDILFrameLowering.cpp
@@ -16,12 +16,10 @@
 using namespace llvm;
 AMDGPUFrameLowering::AMDGPUFrameLowering(StackDirection D, unsigned StackAl,
     int LAO, unsigned TransAl)
-  : TargetFrameLowering(D, StackAl, LAO, TransAl)
-{
+  : TargetFrameLowering(D, StackAl, LAO, TransAl) {
 }
 
-AMDGPUFrameLowering::~AMDGPUFrameLowering()
-{
+AMDGPUFrameLowering::~AMDGPUFrameLowering() {
 }
 
 /// getFrameIndexOffset - Returns the displacement from the frame register to
@@ -33,21 +31,17 @@ int AMDGPUFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
 }
 
 const TargetFrameLowering::SpillSlot *
-AMDGPUFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const
-{
+AMDGPUFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
   NumEntries = 0;
   return 0;
 }
 void
-AMDGPUFrameLowering::emitPrologue(MachineFunction &MF) const
-{
+AMDGPUFrameLowering::emitPrologue(MachineFunction &MF) const {
 }
 void
-AMDGPUFrameLowering::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
-{
+AMDGPUFrameLowering::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
 }
 bool
-AMDGPUFrameLowering::hasFP(const MachineFunction &MF) const
-{
+AMDGPUFrameLowering::hasFP(const MachineFunction &MF) const {
   return false;
 }
diff --git a/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
index 8071131..fd8bc2b 100644
--- a/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
+++ b/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
@@ -86,8 +86,7 @@ FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
 
 AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM
                                      )
-  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>())
-{
+  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
 }
 
 AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
@@ -181,8 +180,7 @@ bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
   return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
 }
 
-const Value * AMDGPUDAGToDAGISel::getBasePointerValue(const Value *V)
-{
+const Value * AMDGPUDAGToDAGISel::getBasePointerValue(const Value *V) {
   if (!V) {
     return NULL;
   }
@@ -353,8 +351,7 @@ bool AMDGPUDAGToDAGISel::SelectADDR8BitOffset(SDValue Addr, SDValue& Base,
 }
 
 bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
-                                           SDValue &Offset)
-{
+                                           SDValue &Offset) {
   ConstantSDNode * IMMOffset;
 
   if (Addr.getOpcode() == ISD::ADD
diff --git a/lib/Target/AMDGPU/AMDILISelLowering.cpp b/lib/Target/AMDGPU/AMDILISelLowering.cpp
index ce7a5e8..29e679c 100644
--- a/lib/Target/AMDGPU/AMDILISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDILISelLowering.cpp
@@ -44,8 +44,7 @@ using namespace llvm;
 //===----------------------------------------------------------------------===//
 // TargetLowering Class Implementation Begins
 //===----------------------------------------------------------------------===//
-void AMDGPUTargetLowering::InitAMDILLowering()
-{
+void AMDGPUTargetLowering::InitAMDILLowering() {
   int types[] =
   {
     (int)MVT::i8,
@@ -239,15 +238,13 @@ void AMDGPUTargetLowering::InitAMDILLowering()
 
 bool
 AMDGPUTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
-    const CallInst &I, unsigned Intrinsic) const
-{
+    const CallInst &I, unsigned Intrinsic) const {
   return false;
 }
 
 // The backend supports 32 and 64 bit floating point immediates
 bool
-AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const
-{
+AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
   if (VT.getScalarType().getSimpleVT().SimpleTy == MVT::f32
       || VT.getScalarType().getSimpleVT().SimpleTy == MVT::f64) {
     return true;
@@ -257,8 +254,7 @@ AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const
 }
 
 bool
-AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const
-{
+AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
   if (VT.getScalarType().getSimpleVT().SimpleTy == MVT::f32
       || VT.getScalarType().getSimpleVT().SimpleTy == MVT::f64) {
     return false;
@@ -278,8 +274,7 @@ AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
     APInt &KnownZero,
     APInt &KnownOne,
     const SelectionDAG &DAG,
-    unsigned Depth) const
-{
+    unsigned Depth) const {
   APInt KnownZero2;
   APInt KnownOne2;
   KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything
@@ -313,8 +308,7 @@ AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
 //===----------------------------------------------------------------------===//
 
 SDValue
-AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
   EVT OVT = Op.getValueType();
   SDValue DST;
   if (OVT.getScalarType() == MVT::i64) {
@@ -331,8 +325,7 @@ AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
   EVT OVT = Op.getValueType();
   SDValue DST;
   if (OVT.getScalarType() == MVT::i64) {
@@ -350,8 +343,7 @@ AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const {
   SDValue Data = Op.getOperand(0);
   VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1));
   DebugLoc DL = Op.getDebugLoc();
@@ -381,8 +373,7 @@ AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) cons
   return Data;
 }
 EVT
-AMDGPUTargetLowering::genIntType(uint32_t size, uint32_t numEle) const
-{
+AMDGPUTargetLowering::genIntType(uint32_t size, uint32_t numEle) const {
   int iSize = (size * numEle);
   int vEle = (iSize >> ((size == 64) ? 6 : 5));
   if (!vEle) {
@@ -404,8 +395,7 @@ AMDGPUTargetLowering::genIntType(uint32_t size, uint32_t numEle) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
   SDValue Chain = Op.getOperand(0);
   SDValue Cond  = Op.getOperand(1);
   SDValue Jump  = Op.getOperand(2);
@@ -419,8 +409,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT OVT = Op.getValueType();
   SDValue LHS = Op.getOperand(0);
@@ -500,8 +489,7 @@ AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT OVT = Op.getValueType();
   SDValue LHS = Op.getOperand(0);
@@ -567,14 +555,12 @@ AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
   return SDValue(Op.getNode(), 0);
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT OVT = Op.getValueType();
   MVT INTTY = MVT::i32;
@@ -591,8 +577,7 @@ AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT OVT = Op.getValueType();
   MVT INTTY = MVT::i32;
@@ -609,8 +594,7 @@ AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT OVT = Op.getValueType();
   SDValue LHS = Op.getOperand(0);
@@ -672,7 +656,6 @@ AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue
-AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const
-{
+AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
   return SDValue(Op.getNode(), 0);
 }
diff --git a/lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp b/lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp
index 8485c50..75bdd37 100644
--- a/lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp
+++ b/lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp
@@ -25,14 +25,12 @@ using namespace llvm;
 #undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
 
 AMDGPUIntrinsicInfo::AMDGPUIntrinsicInfo(TargetMachine *tm) 
-  : TargetIntrinsicInfo()
-{
+  : TargetIntrinsicInfo() {
 }
 
 std::string 
 AMDGPUIntrinsicInfo::getName(unsigned int IntrID, Type **Tys,
-    unsigned int numTys) const 
-{
+    unsigned int numTys) const {
   static const char* const names[] = {
 #define GET_INTRINSIC_NAME_TABLE
 #include "AMDGPUGenIntrinsics.inc"
@@ -52,8 +50,7 @@ AMDGPUIntrinsicInfo::getName(unsigned int IntrID, Type **Tys,
 }
 
 unsigned int
-AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const 
-{
+AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const {
 #define GET_FUNCTION_RECOGNIZER
 #include "AMDGPUGenIntrinsics.inc"
 #undef GET_FUNCTION_RECOGNIZER
@@ -68,8 +65,7 @@ AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const
 }
 
 bool 
-AMDGPUIntrinsicInfo::isOverloaded(unsigned id) const 
-{
+AMDGPUIntrinsicInfo::isOverloaded(unsigned id) const {
   // Overload Table
 #define GET_INTRINSIC_OVERLOAD_TABLE
 #include "AMDGPUGenIntrinsics.inc"
@@ -79,7 +75,6 @@ AMDGPUIntrinsicInfo::isOverloaded(unsigned id) const
 Function*
 AMDGPUIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
     Type **Tys,
-    unsigned numTys) const 
-{
+    unsigned numTys) const {
   assert(!"Not implemented");
 }
diff --git a/lib/Target/AMDGPU/AMDILNIDevice.cpp b/lib/Target/AMDGPU/AMDILNIDevice.cpp
index 0ebbc9d..a903f44 100644
--- a/lib/Target/AMDGPU/AMDILNIDevice.cpp
+++ b/lib/Target/AMDGPU/AMDILNIDevice.cpp
@@ -13,8 +13,7 @@
 using namespace llvm;
 
 AMDGPUNIDevice::AMDGPUNIDevice(AMDGPUSubtarget *ST)
-  : AMDGPUEvergreenDevice(ST)
-{
+  : AMDGPUEvergreenDevice(ST) {
   std::string name = ST->getDeviceName();
   if (name == "caicos") {
     mDeviceFlag = OCL_DEVICE_CAICOS;
@@ -26,13 +25,11 @@ AMDGPUNIDevice::AMDGPUNIDevice(AMDGPUSubtarget *ST)
     mDeviceFlag = OCL_DEVICE_BARTS;
   }
 }
-AMDGPUNIDevice::~AMDGPUNIDevice()
-{
+AMDGPUNIDevice::~AMDGPUNIDevice() {
 }
 
 size_t
-AMDGPUNIDevice::getMaxLDSSize() const
-{
+AMDGPUNIDevice::getMaxLDSSize() const {
   if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
     return MAX_LDS_SIZE_900;
   } else {
@@ -41,25 +38,21 @@ AMDGPUNIDevice::getMaxLDSSize() const
 }
 
 uint32_t
-AMDGPUNIDevice::getGeneration() const
-{
+AMDGPUNIDevice::getGeneration() const {
   return AMDGPUDeviceInfo::HD6XXX;
 }
 
 
 AMDGPUCaymanDevice::AMDGPUCaymanDevice(AMDGPUSubtarget *ST)
-  : AMDGPUNIDevice(ST)
-{
+  : AMDGPUNIDevice(ST) {
   setCaps();
 }
 
-AMDGPUCaymanDevice::~AMDGPUCaymanDevice()
-{
+AMDGPUCaymanDevice::~AMDGPUCaymanDevice() {
 }
 
 void
-AMDGPUCaymanDevice::setCaps()
-{
+AMDGPUCaymanDevice::setCaps() {
   if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
     mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
     mHWBits.set(AMDGPUDeviceInfo::FMA);
diff --git a/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp b/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
index 758ed34..f4611f6 100644
--- a/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
+++ b/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
@@ -142,8 +142,7 @@ private:
 // function with a pointer to the current iterator.
 template<class InputIterator, class SecondIterator, class Function>
 Function safeNestedForEach(InputIterator First, InputIterator Last,
-                              SecondIterator S, Function F)
-{
+                              SecondIterator S, Function F) {
   for ( ; First != Last; ++First) {
     SecondIterator sf, sl;
     for (sf = First->begin(), sl = First->end();
@@ -167,26 +166,22 @@ namespace llvm {
 } // llvm namespace
 
 AMDGPUPeepholeOpt::AMDGPUPeepholeOpt(TargetMachine &tm)
-  : FunctionPass(ID), TM(tm) 
-{
+  : FunctionPass(ID), TM(tm) {
   mDebug = DEBUGME;
   optLevel = TM.getOptLevel();
 
 }
 
-AMDGPUPeepholeOpt::~AMDGPUPeepholeOpt() 
-{
+AMDGPUPeepholeOpt::~AMDGPUPeepholeOpt() {
 }
 
 const char *
-AMDGPUPeepholeOpt::getPassName() const 
-{
+AMDGPUPeepholeOpt::getPassName() const {
   return "AMDGPU PeepHole Optimization Pass";
 }
 
 bool 
-containsPointerType(Type *Ty) 
-{
+containsPointerType(Type *Ty) {
   if (!Ty) {
     return false;
   }
@@ -214,8 +209,7 @@ containsPointerType(Type *Ty)
 }
 
 bool 
-AMDGPUPeepholeOpt::dumpAllIntoArena(Function &F) 
-{
+AMDGPUPeepholeOpt::dumpAllIntoArena(Function &F) {
   bool dumpAll = false;
   for (Function::const_arg_iterator cab = F.arg_begin(),
        cae = F.arg_end(); cab != cae; ++cab) {
@@ -240,8 +234,7 @@ AMDGPUPeepholeOpt::dumpAllIntoArena(Function &F)
   return dumpAll;
 }
 void
-AMDGPUPeepholeOpt::doIsConstCallConversionIfNeeded()
-{
+AMDGPUPeepholeOpt::doIsConstCallConversionIfNeeded() {
   if (isConstVec.empty()) {
     return;
   }
@@ -257,8 +250,7 @@ AMDGPUPeepholeOpt::doIsConstCallConversionIfNeeded()
   isConstVec.clear();
 }
 void 
-AMDGPUPeepholeOpt::doAtomicConversionIfNeeded(Function &F) 
-{
+AMDGPUPeepholeOpt::doAtomicConversionIfNeeded(Function &F) {
   // Don't do anything if we don't have any atomic operations.
   if (atomicFuncs.empty()) {
     return;
@@ -278,8 +270,7 @@ AMDGPUPeepholeOpt::doAtomicConversionIfNeeded(Function &F)
 }
 
 bool 
-AMDGPUPeepholeOpt::runOnFunction(Function &MF) 
-{
+AMDGPUPeepholeOpt::runOnFunction(Function &MF) {
   mChanged = false;
   mF = &MF;
   mSTM = &TM.getSubtarget<AMDGPUSubtarget>();
@@ -302,8 +293,7 @@ AMDGPUPeepholeOpt::runOnFunction(Function &MF)
 }
 
 bool 
-AMDGPUPeepholeOpt::optimizeCallInst(BasicBlock::iterator *bbb) 
-{
+AMDGPUPeepholeOpt::optimizeCallInst(BasicBlock::iterator *bbb) {
   Instruction *inst = (*bbb);
   CallInst *CI = dyn_cast<CallInst>(inst);
   if (!CI) {
@@ -397,8 +387,7 @@ bool
 AMDGPUPeepholeOpt::setupBitInsert(Instruction *base, 
     Instruction *&src, 
     Constant *&mask, 
-    Constant *&shift)
-{
+    Constant *&shift) {
   if (!base) {
     if (mDebug) {
       dbgs() << "Null pointer passed into function.\n";
@@ -447,8 +436,7 @@ AMDGPUPeepholeOpt::setupBitInsert(Instruction *base,
   return true;
 }
 bool
-AMDGPUPeepholeOpt::optimizeBitInsert(Instruction *inst) 
-{
+AMDGPUPeepholeOpt::optimizeBitInsert(Instruction *inst) {
   if (!inst) {
     return false;
   }
@@ -687,8 +675,7 @@ AMDGPUPeepholeOpt::optimizeBitInsert(Instruction *inst)
 }
 
 bool 
-AMDGPUPeepholeOpt::optimizeBitExtract(Instruction *inst) 
-{
+AMDGPUPeepholeOpt::optimizeBitExtract(Instruction *inst) {
   if (!inst) {
     return false;
   }
@@ -846,8 +833,7 @@ AMDGPUPeepholeOpt::optimizeBitExtract(Instruction *inst)
 }
 
 bool
-AMDGPUPeepholeOpt::expandBFI(CallInst *CI)
-{
+AMDGPUPeepholeOpt::expandBFI(CallInst *CI) {
   if (!CI) {
     return false;
   }
@@ -885,8 +871,7 @@ AMDGPUPeepholeOpt::expandBFI(CallInst *CI)
 }
 
 bool
-AMDGPUPeepholeOpt::expandBFM(CallInst *CI)
-{
+AMDGPUPeepholeOpt::expandBFM(CallInst *CI) {
   if (!CI) {
     return false;
   }
@@ -929,8 +914,7 @@ AMDGPUPeepholeOpt::expandBFM(CallInst *CI)
 }
 
 bool
-AMDGPUPeepholeOpt::instLevelOptimizations(BasicBlock::iterator *bbb) 
-{
+AMDGPUPeepholeOpt::instLevelOptimizations(BasicBlock::iterator *bbb) {
   Instruction *inst = (*bbb);
   if (optimizeCallInst(bbb)) {
     return true;
@@ -947,8 +931,7 @@ AMDGPUPeepholeOpt::instLevelOptimizations(BasicBlock::iterator *bbb)
   return false;
 }
 bool
-AMDGPUPeepholeOpt::correctMisalignedMemOp(Instruction *inst)
-{
+AMDGPUPeepholeOpt::correctMisalignedMemOp(Instruction *inst) {
   LoadInst *linst = dyn_cast<LoadInst>(inst);
   StoreInst *sinst = dyn_cast<StoreInst>(inst);
   unsigned alignment;
@@ -981,8 +964,7 @@ AMDGPUPeepholeOpt::correctMisalignedMemOp(Instruction *inst)
   return false;
 }
 bool 
-AMDGPUPeepholeOpt::isSigned24BitOps(CallInst *CI) 
-{
+AMDGPUPeepholeOpt::isSigned24BitOps(CallInst *CI) {
   if (!CI) {
     return false;
   }
@@ -999,8 +981,7 @@ AMDGPUPeepholeOpt::isSigned24BitOps(CallInst *CI)
 }
 
 void 
-AMDGPUPeepholeOpt::expandSigned24BitOps(CallInst *CI) 
-{
+AMDGPUPeepholeOpt::expandSigned24BitOps(CallInst *CI) {
   assert(isSigned24BitOps(CI) && "Must be a "
       "signed 24 bit operation to call this function!");
   Value *LHS = CI->getOperand(CI->getNumOperands()-1);
@@ -1071,16 +1052,14 @@ AMDGPUPeepholeOpt::expandSigned24BitOps(CallInst *CI)
 }
 
 bool 
-AMDGPUPeepholeOpt::isRWGLocalOpt(CallInst *CI) 
-{
+AMDGPUPeepholeOpt::isRWGLocalOpt(CallInst *CI) {
   return (CI != NULL
           && CI->getOperand(CI->getNumOperands() - 1)->getName() 
           == "__amdil_get_local_size_int");
 }
 
 bool 
-AMDGPUPeepholeOpt::convertAccurateDivide(CallInst *CI) 
-{
+AMDGPUPeepholeOpt::convertAccurateDivide(CallInst *CI) {
   if (!CI) {
     return false;
   }
@@ -1093,8 +1072,7 @@ AMDGPUPeepholeOpt::convertAccurateDivide(CallInst *CI)
 }
 
 void 
-AMDGPUPeepholeOpt::expandAccurateDivide(CallInst *CI) 
-{
+AMDGPUPeepholeOpt::expandAccurateDivide(CallInst *CI) {
   assert(convertAccurateDivide(CI)
          && "expanding accurate divide can only happen if it is expandable!");
   BinaryOperator *divOp =
@@ -1104,8 +1082,7 @@ AMDGPUPeepholeOpt::expandAccurateDivide(CallInst *CI)
 }
 
 bool
-AMDGPUPeepholeOpt::propagateSamplerInst(CallInst *CI)
-{
+AMDGPUPeepholeOpt::propagateSamplerInst(CallInst *CI) {
   if (optLevel != CodeGenOpt::None) {
     return false;
   }
@@ -1159,20 +1136,17 @@ AMDGPUPeepholeOpt::propagateSamplerInst(CallInst *CI)
 }
 
 bool 
-AMDGPUPeepholeOpt::doInitialization(Module &M) 
-{
+AMDGPUPeepholeOpt::doInitialization(Module &M) {
   return false;
 }
 
 bool 
-AMDGPUPeepholeOpt::doFinalization(Module &M) 
-{
+AMDGPUPeepholeOpt::doFinalization(Module &M) {
   return false;
 }
 
 void 
-AMDGPUPeepholeOpt::getAnalysisUsage(AnalysisUsage &AU) const 
-{
+AMDGPUPeepholeOpt::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<MachineFunctionAnalysis>();
   FunctionPass::getAnalysisUsage(AU);
   AU.setPreservesAll();
diff --git a/lib/Target/AMDGPU/AMDILSIDevice.cpp b/lib/Target/AMDGPU/AMDILSIDevice.cpp
index 856b00f..a8314c0 100644
--- a/lib/Target/AMDGPU/AMDILSIDevice.cpp
+++ b/lib/Target/AMDGPU/AMDILSIDevice.cpp
@@ -14,16 +14,13 @@
 using namespace llvm;
 
 AMDGPUSIDevice::AMDGPUSIDevice(AMDGPUSubtarget *ST)
-  : AMDGPUEvergreenDevice(ST)
-{
+  : AMDGPUEvergreenDevice(ST) {
 }
-AMDGPUSIDevice::~AMDGPUSIDevice()
-{
+AMDGPUSIDevice::~AMDGPUSIDevice() {
 }
 
 size_t
-AMDGPUSIDevice::getMaxLDSSize() const
-{
+AMDGPUSIDevice::getMaxLDSSize() const {
   if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
     return MAX_LDS_SIZE_900;
   } else {
@@ -32,14 +29,12 @@ AMDGPUSIDevice::getMaxLDSSize() const
 }
 
 uint32_t
-AMDGPUSIDevice::getGeneration() const
-{
+AMDGPUSIDevice::getGeneration() const {
   return AMDGPUDeviceInfo::HD7XXX;
 }
 
 std::string
-AMDGPUSIDevice::getDataLayout() const
-{
+AMDGPUSIDevice::getDataLayout() const {
     return std::string("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16"
       "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
       "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
diff --git a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
index e040e4c..dee4f6c 100644
--- a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
+++ b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
@@ -52,8 +52,7 @@ FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
   return new R600ExpandSpecialInstrsPass(TM);
 }
 
-bool R600ExpandSpecialInstrsPass::ExpandInputPerspective(MachineInstr &MI)
-{
+bool R600ExpandSpecialInstrsPass::ExpandInputPerspective(MachineInstr &MI) {
   const R600RegisterInfo &TRI = TII->getRegisterInfo();
   if (MI.getOpcode() != AMDGPU::input_perspective)
     return false;
@@ -129,8 +128,7 @@ bool R600ExpandSpecialInstrsPass::ExpandInputPerspective(MachineInstr &MI)
   return true;
 }
 
-bool R600ExpandSpecialInstrsPass::ExpandInputConstant(MachineInstr &MI)
-{
+bool R600ExpandSpecialInstrsPass::ExpandInputConstant(MachineInstr &MI) {
   const R600RegisterInfo &TRI = TII->getRegisterInfo();
   if (MI.getOpcode() != AMDGPU::input_constant)
     return false;
diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
index d306cfe..615f1b3 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -25,8 +25,7 @@ using namespace llvm;
 
 R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
     AMDGPUTargetLowering(TM),
-    TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo()))
-{
+    TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo())) {
   setOperationAction(ISD::MUL, MVT::i64, Expand);
   addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
   addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
@@ -73,8 +72,7 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
 }
 
 MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
-    MachineInstr * MI, MachineBasicBlock * BB) const
-{
+    MachineInstr * MI, MachineBasicBlock * BB) const {
   MachineFunction * MF = BB->getParent();
   MachineRegisterInfo &MRI = MF->getRegInfo();
   MachineBasicBlock::iterator I = *MI;
@@ -310,8 +308,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
 using namespace llvm::Intrinsic;
 using namespace llvm::AMDGPUIntrinsic;
 
-SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
   case ISD::BR_CC: return LowerBR_CC(Op, DAG);
@@ -481,16 +478,14 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
 
 void R600TargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
-                                            SelectionDAG &DAG) const
-{
+                                            SelectionDAG &DAG) const {
   switch (N->getOpcode()) {
   default: return;
   case ISD::FP_TO_UINT: Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
   }
 }
 
-SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
   return DAG.getNode(
       ISD::SETCC,
       Op.getDebugLoc(),
@@ -500,8 +495,7 @@ SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const
       );
 }
 
-SDValue R600TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue R600TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
   SDValue Chain = Op.getOperand(0);
   SDValue CC = Op.getOperand(1);
   SDValue LHS   = Op.getOperand(2);
@@ -541,8 +535,7 @@ SDValue R600TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const
 
 SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                                                    DebugLoc DL,
-                                                   unsigned DwordOffset) const
-{
+                                                   unsigned DwordOffset) const {
   unsigned ByteOffset = DwordOffset * 4;
   PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                       AMDGPUAS::PARAM_I_ADDRESS);
@@ -556,8 +549,7 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                      false, false, false, 0);
 }
 
-SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT VT = Op.getValueType();
 
@@ -569,8 +561,7 @@ SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const
                                  Op.getOperand(1)));
 }
 
-bool R600TargetLowering::isZero(SDValue Op) const
-{
+bool R600TargetLowering::isZero(SDValue Op) const {
   if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
     return Cst->isNullValue();
   } else if(ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op)){
@@ -580,8 +571,7 @@ bool R600TargetLowering::isZero(SDValue Op) const
   }
 }
 
-SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT VT = Op.getValueType();
 
@@ -714,8 +704,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
       DAG.getCondCode(ISD::SETNE));
 }
 
-SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
   return DAG.getNode(ISD::SELECT_CC,
       Op.getDebugLoc(),
       Op.getValueType(),
@@ -726,8 +715,7 @@ SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const
       DAG.getCondCode(ISD::SETNE));
 }
 
-SDValue R600TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue R600TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
   SDValue Cond;
   SDValue LHS = Op.getOperand(0);
   SDValue RHS = Op.getOperand(1);
@@ -770,8 +758,7 @@ SDValue R600TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const
 }
 
 SDValue R600TargetLowering::LowerFPOW(SDValue Op,
-    SelectionDAG &DAG) const
-{
+    SelectionDAG &DAG) const {
   DebugLoc DL = Op.getDebugLoc();
   EVT VT = Op.getValueType();
   SDValue LogBase = DAG.getNode(ISD::FLOG2, DL, VT, Op.getOperand(0));
@@ -789,8 +776,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       DebugLoc DL, SelectionDAG &DAG,
-                                      SmallVectorImpl<SDValue> &InVals) const
-{
+                                      SmallVectorImpl<SDValue> &InVals) const {
   unsigned ParamOffsetBytes = 36;
   for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
     EVT VT = Ins[i].VT;
@@ -816,8 +802,7 @@ EVT R600TargetLowering::getSetCCResultType(EVT VT) const {
 //===----------------------------------------------------------------------===//
 
 SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
-                                              DAGCombinerInfo &DCI) const
-{
+                                              DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
 
   switch (N->getOpcode()) {
diff --git a/lib/Target/AMDGPU/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp
index e437313..d9b9f2b 100644
--- a/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -29,18 +29,15 @@ R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
     RI(tm, *this)
   { }
 
-const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const
-{
+const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
   return RI;
 }
 
-bool R600InstrInfo::isTrig(const MachineInstr &MI) const
-{
+bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
   return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
 }
 
-bool R600InstrInfo::isVector(const MachineInstr &MI) const
-{
+bool R600InstrInfo::isVector(const MachineInstr &MI) const {
   return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
 }
 
@@ -48,8 +45,7 @@ void
 R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, DebugLoc DL,
                            unsigned DestReg, unsigned SrcReg,
-                           bool KillSrc) const
-{
+                           bool KillSrc) const {
   if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
       && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
     for (unsigned I = 0; I < 4; I++) {
@@ -74,8 +70,7 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
 }
 
 MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
-                                             unsigned DstReg, int64_t Imm) const
-{
+                                             unsigned DstReg, int64_t Imm) const {
   MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
   MachineInstrBuilder(MI).addReg(DstReg, RegState::Define);
   MachineInstrBuilder(MI).addReg(AMDGPU::ALU_LITERAL_X);
@@ -85,13 +80,11 @@ MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
   return MI;
 }
 
-unsigned R600InstrInfo::getIEQOpcode() const
-{
+unsigned R600InstrInfo::getIEQOpcode() const {
   return AMDGPU::SETE_INT;
 }
 
-bool R600InstrInfo::isMov(unsigned Opcode) const
-{
+bool R600InstrInfo::isMov(unsigned Opcode) const {
 
 
   switch(Opcode) {
@@ -106,8 +99,7 @@ bool R600InstrInfo::isMov(unsigned Opcode) const
 // Some instructions act as place holders to emulate operations that the GPU
 // hardware does automatically. This function can be used to check if
 // an opcode falls into this category.
-bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const
-{
+bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
   switch (Opcode) {
   default: return false;
   case AMDGPU::RETURN:
@@ -116,8 +108,7 @@ bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const
   }
 }
 
-bool R600InstrInfo::isReductionOp(unsigned Opcode) const
-{
+bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
   switch(Opcode) {
     default: return false;
     case AMDGPU::DOT4_r600_pseudo:
@@ -126,8 +117,7 @@ bool R600InstrInfo::isReductionOp(unsigned Opcode) const
   }
 }
 
-bool R600InstrInfo::isCubeOp(unsigned Opcode) const
-{
+bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
   switch(Opcode) {
     default: return false;
     case AMDGPU::CUBE_r600_pseudo:
@@ -139,15 +129,13 @@ bool R600InstrInfo::isCubeOp(unsigned Opcode) const
 }
 
 DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
-    const ScheduleDAG *DAG) const
-{
+    const ScheduleDAG *DAG) const {
   const InstrItineraryData *II = TM->getInstrItineraryData();
   return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
 }
 
 static bool
-isPredicateSetter(unsigned Opcode)
-{
+isPredicateSetter(unsigned Opcode) {
   switch (Opcode) {
   case AMDGPU::PRED_X:
     return true;
@@ -158,8 +146,7 @@ isPredicateSetter(unsigned Opcode)
 
 static MachineInstr *
 findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
-                             MachineBasicBlock::iterator I)
-{
+                             MachineBasicBlock::iterator I) {
   while (I != MBB.begin()) {
     --I;
     MachineInstr *MI = I;
@@ -175,8 +162,7 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                              MachineBasicBlock *&TBB,
                              MachineBasicBlock *&FBB,
                              SmallVectorImpl<MachineOperand> &Cond,
-                             bool AllowModify) const
-{
+                             bool AllowModify) const {
   // Most of the following comes from the ARM implementation of AnalyzeBranch
 
   // If the block has no terminators, it just falls into the block after it.
@@ -259,8 +245,7 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *TBB,
                             MachineBasicBlock *FBB,
                             const SmallVectorImpl<MachineOperand> &Cond,
-                            DebugLoc DL) const
-{
+                            DebugLoc DL) const {
   assert(TBB && "InsertBranch must not be told to insert a fallthrough");
 
   if (FBB == 0) {
@@ -292,8 +277,7 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
 }
 
 unsigned
-R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const
-{
+R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
 
   // Note : we leave PRED* instructions there.
   // They may be needed when predicating instructions.
@@ -337,8 +321,7 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const
 }
 
 bool
-R600InstrInfo::isPredicated(const MachineInstr *MI) const
-{
+R600InstrInfo::isPredicated(const MachineInstr *MI) const {
   int idx = MI->findFirstPredOperandIdx();
   if (idx < 0)
     return false;
@@ -354,8 +337,7 @@ R600InstrInfo::isPredicated(const MachineInstr *MI) const
 }
 
 bool
-R600InstrInfo::isPredicable(MachineInstr *MI) const
-{
+R600InstrInfo::isPredicable(MachineInstr *MI) const {
   // XXX: KILL* instructions can be predicated, but they must be the last
   // instruction in a clause, so this means any instructions after them cannot
   // be predicated.  Until we have proper support for instruction clauses in the
@@ -384,8 +366,7 @@ R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                    MachineBasicBlock &FMBB,
                                    unsigned NumFCycles,
                                    unsigned ExtraFCycles,
-                                   const BranchProbability &Probability) const
-{
+                                   const BranchProbability &Probability) const {
   return true;
 }
 
@@ -393,22 +374,19 @@ bool
 R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                          unsigned NumCyles,
                                          const BranchProbability &Probability)
-                                         const
-{
+                                         const {
   return true;
 }
 
 bool
 R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
-                                         MachineBasicBlock &FMBB) const
-{
+                                         MachineBasicBlock &FMBB) const {
   return false;
 }
 
 
 bool
-R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
-{
+R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
   MachineOperand &MO = Cond[1];
   switch (MO.getImm()) {
   case OPCODE_IS_ZERO_INT:
@@ -443,24 +421,21 @@ R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) con
 
 bool
 R600InstrInfo::DefinesPredicate(MachineInstr *MI,
-                                std::vector<MachineOperand> &Pred) const
-{
+                                std::vector<MachineOperand> &Pred) const {
   return isPredicateSetter(MI->getOpcode());
 }
 
 
 bool
 R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
-                       const SmallVectorImpl<MachineOperand> &Pred2) const
-{
+                       const SmallVectorImpl<MachineOperand> &Pred2) const {
   return false;
 }
 
 
 bool
 R600InstrInfo::PredicateInstruction(MachineInstr *MI,
-                      const SmallVectorImpl<MachineOperand> &Pred) const
-{
+                      const SmallVectorImpl<MachineOperand> &Pred) const {
   int PIdx = MI->findFirstPredOperandIdx();
 
   if (PIdx != -1) {
@@ -475,8 +450,7 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI,
 
 unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                             const MachineInstr *MI,
-                                            unsigned *PredCost) const
-{
+                                            unsigned *PredCost) const {
   if (PredCost)
     *PredCost = 2;
   return 2;
@@ -487,8 +461,7 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
                                                   unsigned Opcode,
                                                   unsigned DstReg,
                                                   unsigned Src0Reg,
-                                                  unsigned Src1Reg) const
-{
+                                                  unsigned Src1Reg) const {
   MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
     DstReg);           // $dst
 
@@ -524,8 +497,7 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
 MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                          MachineBasicBlock::iterator I,
                                          unsigned DstReg,
-                                         uint64_t Imm) const
-{
+                                         uint64_t Imm) const {
   MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                   AMDGPU::ALU_LITERAL_X);
   setImmOperand(MovImm, R600Operands::IMM, Imm);
@@ -533,8 +505,7 @@ MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
 }
 
 int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
-                                 R600Operands::Ops Op) const
-{
+                                 R600Operands::Ops Op) const {
   const static int OpTable[3][R600Operands::COUNT] = {
 //            W        C     S  S  S     S  S  S     S  S
 //            R  O  D  L  S  R  R  R  S  R  R  R  S  R  R  L  P
@@ -574,8 +545,7 @@ int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
 }
 
 void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
-                                  int64_t Imm) const
-{
+                                  int64_t Imm) const {
   int Idx = getOperandIdx(*MI, Op);
   assert(Idx != -1 && "Operand not supported for this instruction.");
   assert(MI->getOperand(Idx).isImm());
@@ -586,14 +556,12 @@ void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
 // Instruction flag getters/setters
 //===----------------------------------------------------------------------===//
 
-bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const
-{
+bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
   return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
 }
 
 MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
-                                         unsigned Flag) const
-{
+                                         unsigned Flag) const {
   unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
   int FlagIndex = 0;
   if (Flag != 0) {
@@ -647,8 +615,7 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
 }
 
 void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
-                            unsigned Flag) const
-{
+                            unsigned Flag) const {
   unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
   if (Flag == 0) {
     return;
@@ -669,8 +636,7 @@ void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
 }
 
 void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
-                              unsigned Flag) const
-{
+                              unsigned Flag) const {
   unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
   if (HAS_NATIVE_OPERANDS(TargetFlags)) {
     MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
diff --git a/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
index 49e662f..c65b347 100644
--- a/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
@@ -19,14 +19,12 @@ R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
     memset(Outputs, 0, sizeof(Outputs));
   }
 
-unsigned R600MachineFunctionInfo::GetIJPerspectiveIndex() const
-{
+unsigned R600MachineFunctionInfo::GetIJPerspectiveIndex() const {
   assert(HasPerspectiveInterpolation);
   return 0;
 }
 
-unsigned R600MachineFunctionInfo::GetIJLinearIndex() const
-{
+unsigned R600MachineFunctionInfo::GetIJLinearIndex() const {
   assert(HasLinearInterpolation);
   if (HasPerspectiveInterpolation)
     return 1;
diff --git a/lib/Target/AMDGPU/R600RegisterInfo.cpp b/lib/Target/AMDGPU/R600RegisterInfo.cpp
index ef15183..c7c8150 100644
--- a/lib/Target/AMDGPU/R600RegisterInfo.cpp
+++ b/lib/Target/AMDGPU/R600RegisterInfo.cpp
@@ -25,8 +25,7 @@ R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm,
   TII(tii)
   { }
 
-BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const
-{
+BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   BitVector Reserved(getNumRegs());
   const R600MachineFunctionInfo * MFI = MF.getInfo<R600MachineFunctionInfo>();
 
@@ -57,8 +56,7 @@ BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const
 }
 
 const TargetRegisterClass *
-R600RegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
-{
+R600RegisterInfo::getISARegClass(const TargetRegisterClass * rc) const {
   switch (rc->getID()) {
   case AMDGPU::GPRF32RegClassID:
   case AMDGPU::GPRI32RegClassID:
@@ -67,22 +65,19 @@ R600RegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
   }
 }
 
-unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const
-{
+unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const {
   return this->getEncodingValue(reg) >> HW_CHAN_SHIFT;
 }
 
 const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
-                                                                   MVT VT) const
-{
+                                                                   MVT VT) const {
   switch(VT.SimpleTy) {
   default:
   case MVT::i32: return &AMDGPU::R600_TReg32RegClass;
   }
 }
 
-unsigned R600RegisterInfo::getSubRegFromChannel(unsigned Channel) const
-{
+unsigned R600RegisterInfo::getSubRegFromChannel(unsigned Channel) const {
   switch (Channel) {
     default: assert(!"Invalid channel index"); return 0;
     case 0: return AMDGPU::sel_x;
diff --git a/lib/Target/AMDGPU/SIAssignInterpRegs.cpp b/lib/Target/AMDGPU/SIAssignInterpRegs.cpp
index 1fc0a87..03d2eaf 100644
--- a/lib/Target/AMDGPU/SIAssignInterpRegs.cpp
+++ b/lib/Target/AMDGPU/SIAssignInterpRegs.cpp
@@ -65,8 +65,7 @@ FunctionPass *llvm::createSIAssignInterpRegsPass(TargetMachine &tm) {
   return new SIAssignInterpRegsPass(tm);
 }
 
-bool SIAssignInterpRegsPass::runOnMachineFunction(MachineFunction &MF)
-{
+bool SIAssignInterpRegsPass::runOnMachineFunction(MachineFunction &MF) {
 
   struct InterpInfo InterpUse[INTERP_VALUES] = {
     {false, {AMDGPU::PERSP_SAMPLE_I, AMDGPU::PERSP_SAMPLE_J}, 2},
@@ -136,8 +135,7 @@ bool SIAssignInterpRegsPass::runOnMachineFunction(MachineFunction &MF)
 
 void SIAssignInterpRegsPass::addLiveIn(MachineFunction * MF,
                            MachineRegisterInfo & MRI,
-                           unsigned physReg, unsigned virtReg)
-{
+                           unsigned physReg, unsigned virtReg) {
     const TargetInstrInfo * TII = TM.getInstrInfo();
     if (!MRI.isLiveIn(physReg)) {
       MRI.addLiveIn(physReg, virtReg);
diff --git a/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp b/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp
index 71641d1..56db55c 100644
--- a/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRLiveness.cpp
@@ -66,21 +66,18 @@ char SIFixSGPRLiveness::ID = 0;
 
 SIFixSGPRLiveness::SIFixSGPRLiveness(TargetMachine &tm):
   MachineFunctionPass(ID),
-  TII(tm.getInstrInfo())
-{
+  TII(tm.getInstrInfo()) {
   initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
 }
 
-void SIFixSGPRLiveness::getAnalysisUsage(AnalysisUsage &AU) const
-{
+void SIFixSGPRLiveness::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<MachineDominatorTree>();
   AU.addRequired<MachinePostDominatorTree>();
   AU.setPreservesCFG();
   MachineFunctionPass::getAnalysisUsage(AU);
 }
 
-void SIFixSGPRLiveness::addKill(MachineBasicBlock::iterator I, unsigned Reg)
-{
+void SIFixSGPRLiveness::addKill(MachineBasicBlock::iterator I, unsigned Reg) {
   MachineBasicBlock *MBB = I->getParent();
 
   BuildMI(*MBB, I, DebugLoc(), TII->get(TargetOpcode::KILL)).addReg(Reg);
@@ -88,8 +85,7 @@ void SIFixSGPRLiveness::addKill(MachineBasicBlock::iterator I, unsigned Reg)
 
 // Find the common post dominator of all uses
 MachineBasicBlock *SIFixSGPRLiveness::handleUses(unsigned VirtReg,
-                                                 MachineBasicBlock *Begin)
-{
+                                                 MachineBasicBlock *Begin) {
   MachineBasicBlock *LastUse = Begin, *End = Begin;
   bool EndUsesReg = true;
 
@@ -116,8 +112,7 @@ MachineBasicBlock *SIFixSGPRLiveness::handleUses(unsigned VirtReg,
 // Handles predecessors separately, only add KILLs to dominated ones
 void SIFixSGPRLiveness::handlePreds(MachineBasicBlock *Begin,
                                     MachineBasicBlock *End,
-                                    unsigned VirtReg)
-{
+                                    unsigned VirtReg) {
   MachineBasicBlock::pred_iterator i, e;
   for (i = End->pred_begin(), e = End->pred_end(); i != e; ++i) {
 
@@ -137,8 +132,7 @@ void SIFixSGPRLiveness::handlePreds(MachineBasicBlock *Begin,
   }
 }
 
-bool SIFixSGPRLiveness::handleVirtReg(unsigned VirtReg)
-{
+bool SIFixSGPRLiveness::handleVirtReg(unsigned VirtReg) {
 
   MachineInstr *Def = MRI->getVRegDef(VirtReg);
   if (!Def || MRI->use_empty(VirtReg))
@@ -159,8 +153,7 @@ bool SIFixSGPRLiveness::handleVirtReg(unsigned VirtReg)
   return true;
 }
 
-bool SIFixSGPRLiveness::runOnMachineFunction(MachineFunction &MF)
-{
+bool SIFixSGPRLiveness::runOnMachineFunction(MachineFunction &MF) {
   bool Changes = false;
 
   MRI = &MF.getRegInfo();
@@ -180,7 +173,6 @@ bool SIFixSGPRLiveness::runOnMachineFunction(MachineFunction &MF)
   return Changes;
 }
 
-FunctionPass *llvm::createSIFixSGPRLivenessPass(TargetMachine &tm)
-{
+FunctionPass *llvm::createSIFixSGPRLivenessPass(TargetMachine &tm) {
   return new SIFixSGPRLiveness(tm);
 }
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 45f180f..d24db1e 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -26,8 +26,7 @@ using namespace llvm;
 
 SITargetLowering::SITargetLowering(TargetMachine &TM) :
     AMDGPUTargetLowering(TM),
-    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo()))
-{
+    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())) {
   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
   addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
   addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
@@ -64,8 +63,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
 }
 
 MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
-    MachineInstr * MI, MachineBasicBlock * BB) const
-{
+    MachineInstr * MI, MachineBasicBlock * BB) const {
   const TargetInstrInfo * TII = getTargetMachine().getInstrInfo();
   MachineRegisterInfo & MRI = BB->getParent()->getRegInfo();
   MachineBasicBlock::iterator I = MI;
@@ -149,16 +147,14 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
 }
 
 void SITargetLowering::AppendS_WAITCNT(MachineInstr *MI, MachineBasicBlock &BB,
-    MachineBasicBlock::iterator I) const
-{
+    MachineBasicBlock::iterator I) const {
   BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WAITCNT))
           .addImm(0);
 }
 
 
 void SITargetLowering::LowerSI_WQM(MachineInstr *MI, MachineBasicBlock &BB,
-    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
-{
+    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
   BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WQM_B64), AMDGPU::EXEC)
           .addReg(AMDGPU::EXEC);
 
@@ -166,8 +162,7 @@ void SITargetLowering::LowerSI_WQM(MachineInstr *MI, MachineBasicBlock &BB,
 }
 
 void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
-    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
-{
+    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
   unsigned tmp = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
   unsigned M0 = MRI.createVirtualRegister(&AMDGPU::M0RegRegClass);
   MachineOperand dst = MI->getOperand(0);
@@ -199,8 +194,7 @@ void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
 
 void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
     MachineBasicBlock &BB, MachineBasicBlock::iterator I,
-    MachineRegisterInfo &MRI) const
-{
+    MachineRegisterInfo &MRI) const {
   MachineOperand dst = MI->getOperand(0);
   MachineOperand attr_chan = MI->getOperand(1);
   MachineOperand attr = MI->getOperand(2);
@@ -220,8 +214,7 @@ void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
 }
 
 void SITargetLowering::LowerSI_KIL(MachineInstr *MI, MachineBasicBlock &BB,
-    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
-{
+    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
   // Clear this pixel from the exec mask if the operand is negative
   BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CMPX_LE_F32_e32),
           AMDGPU::VCC)
@@ -232,8 +225,7 @@ void SITargetLowering::LowerSI_KIL(MachineInstr *MI, MachineBasicBlock &BB,
 }
 
 void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
-    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
-{
+    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const {
   unsigned VCC = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
 
   BuildMI(BB, I, BB.findDebugLoc(I),
@@ -251,8 +243,7 @@ void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
   MI->eraseFromParent();
 }
 
-EVT SITargetLowering::getSetCCResultType(EVT VT) const
-{
+EVT SITargetLowering::getSetCCResultType(EVT VT) const {
   return MVT::i1;
 }
 
@@ -260,8 +251,7 @@ EVT SITargetLowering::getSetCCResultType(EVT VT) const
 // Custom DAG Lowering Operations
 //===----------------------------------------------------------------------===//
 
-SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
   case ISD::BR_CC: return LowerBR_CC(Op, DAG);
@@ -294,8 +284,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
 ///
 SDValue SITargetLowering::Loweri1ContextSwitch(SDValue Op,
                                                SelectionDAG &DAG,
-                                               unsigned VCCNode) const
-{
+                                               unsigned VCCNode) const {
   DebugLoc DL = Op.getDebugLoc();
 
   SDValue OpNode = DAG.getNode(VCCNode, DL, MVT::i64,
@@ -307,8 +296,7 @@ SDValue SITargetLowering::Loweri1ContextSwitch(SDValue Op,
   return DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i1, OpNode);
 }
 
-SDValue SITargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue SITargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
   SDValue Chain = Op.getOperand(0);
   SDValue CC = Op.getOperand(1);
   SDValue LHS   = Op.getOperand(2);
@@ -331,8 +319,7 @@ SDValue SITargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const
   return Result;
 }
 
-SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   EVT VT = Op.getValueType();
   LoadSDNode *Ptr = dyn_cast<LoadSDNode>(Op);
 
@@ -373,8 +360,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
   return SDValue();
 }
 
-SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
   SDValue LHS = Op.getOperand(0);
   SDValue RHS = Op.getOperand(1);
   SDValue True = Op.getOperand(2);
@@ -439,8 +425,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
 
 #define NODE_NAME_CASE(node) case SIISD::node: return #node;
 
-const char* SITargetLowering::getTargetNodeName(unsigned Opcode) const
-{
+const char* SITargetLowering::getTargetNodeName(unsigned Opcode) const {
   switch (Opcode) {
   default: return AMDGPUTargetLowering::getTargetNodeName(Opcode);
   NODE_NAME_CASE(VCC_AND)
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index bed9e77..60e7be4 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -27,8 +27,7 @@ SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
     RI(tm, *this)
     { }
 
-const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const
-{
+const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
   return RI;
 }
 
@@ -36,8 +35,7 @@ void
 SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, DebugLoc DL,
                            unsigned DestReg, unsigned SrcReg,
-                           bool KillSrc) const
-{
+                           bool KillSrc) const {
   // If we are trying to copy to or from SCC, there is a bug somewhere else in
   // the backend.  While it may be theoretically possible to do this, it should
   // never be necessary.
@@ -61,8 +59,7 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
 }
 
 MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
-                                           int64_t Imm) const
-{
+                                           int64_t Imm) const {
   MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_IMM_I32), DebugLoc());
   MachineInstrBuilder(MI).addReg(DstReg, RegState::Define);
   MachineInstrBuilder(MI).addImm(Imm);
@@ -71,8 +68,7 @@ MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
 
 }
 
-bool SIInstrInfo::isMov(unsigned Opcode) const
-{
+bool SIInstrInfo::isMov(unsigned Opcode) const {
   switch(Opcode) {
   default: return false;
   case AMDGPU::S_MOV_B32:
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 3d6dc83..7ffd29a 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -24,15 +24,13 @@ SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm,
   TII(tii)
   { }
 
-BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const
-{
+BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   BitVector Reserved(getNumRegs());
   return Reserved;
 }
 
 const TargetRegisterClass *
-SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
-{
+SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const {
   switch (rc->getID()) {
   case AMDGPU::GPRF32RegClassID:
     return &AMDGPU::VReg_32RegClass;
@@ -41,8 +39,7 @@ SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
 }
 
 const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
-                                                                   MVT VT) const
-{
+                                                                   MVT VT) const {
   switch(VT.SimpleTy) {
     default:
     case MVT::i32: return &AMDGPU::VReg_32RegClass;
-- 
1.7.11.4



More information about the mesa-dev mailing list