Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -285,6 +285,38 @@
   return N;
 }
 
+static unsigned selectVectorRegClassID(unsigned NumVectorElts, bool UseVGPR) {
+  if (UseVGPR) {
+    switch (NumVectorElts) {
+    case 1:
+      return AMDGPU::VGPR_32RegClassID;
+    case 2:
+      return AMDGPU::VReg_64RegClassID;
+    case 4:
+      return AMDGPU::VReg_128RegClassID;
+    case 8:
+      return AMDGPU::VReg_256RegClassID;
+    case 16:
+      return AMDGPU::VReg_512RegClassID;
+    }
+  }
+
+  switch (NumVectorElts) {
+  case 1:
+    return AMDGPU::SReg_32RegClassID;
+  case 2:
+    return AMDGPU::SReg_64RegClassID;
+  case 4:
+    return AMDGPU::SReg_128RegClassID;
+  case 8:
+    return AMDGPU::SReg_256RegClassID;
+  case 16:
+    return AMDGPU::SReg_512RegClassID;
+  }
+
+  llvm_unreachable("invalid vector size");
+}
+
 SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
   unsigned int Opc = N->getOpcode();
   if (N->isMachineOpcode()) {
@@ -318,7 +350,8 @@
     EVT EltVT = VT.getVectorElementType();
     assert(EltVT.bitsEq(MVT::i32));
     if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-      bool UseVReg = true;
+      bool UseVReg = false;
+
       for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                     U != E; ++U) {
         if (!U->isMachineOpcode()) {
@@ -332,24 +365,8 @@
           UseVReg = false;
         }
       }
-      switch(NumVectorElts) {
-      case 1: RegClassID = UseVReg ? AMDGPU::VGPR_32RegClassID :
-                                     AMDGPU::SReg_32RegClassID;
-        break;
-      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
-                                     AMDGPU::SReg_64RegClassID;
-        break;
-      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
-                                     AMDGPU::SReg_128RegClassID;
-        break;
-      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
-                                     AMDGPU::SReg_256RegClassID;
-        break;
-      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
-                                      AMDGPU::SReg_512RegClassID;
-        break;
-      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
-      }
+
+      RegClassID = selectVectorRegClassID(NumVectorElts, UseVReg);
     } else {
       // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
       // that adds a 128 bits reg copy when going through TwoAddressInstructions
Index: lib/Target/AMDGPU/SIFixSGPRCopies.cpp
===================================================================
--- lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -242,12 +242,15 @@
       static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
   const SIInstrInfo *TII =
       static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+
+  SmallVector<MachineInstr *, 16> Worklist;
+
   for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                   BI != BE; ++BI) {
 
     MachineBasicBlock &MBB = *BI;
     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
-                                                      I != E; ++I) {
+                                     I != E; ++I) {
       MachineInstr &MI = *I;
 
       switch (MI.getOpcode()) {
Index: test/CodeGen/AMDGPU/split-scalar-i64-add.ll
===================================================================
--- test/CodeGen/AMDGPU/split-scalar-i64-add.ll
+++ test/CodeGen/AMDGPU/split-scalar-i64-add.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 
 declare i32 @llvm.r600.read.tidig.x() readnone
 
@@ -8,9 +8,22 @@
 ; scc instead.
 
 ; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_0:
-; SI: v_add_i32
-; SI: v_addc_u32
-define void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
+; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, 0x18f, v{{[0-9]+}}
+; SI: v_addc_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc
+define void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %s.val) {
+  %v.val = load volatile i32, i32 addrspace(1)* %in
+  %vec.0 = insertelement <2 x i32> undef, i32 %s.val, i32 0
+  %vec.1 = insertelement <2 x i32> %vec.0, i32 %v.val, i32 1
+  %bc = bitcast <2 x i32> %vec.1 to i64
+  %add = add i64 %bc, 399
+  store i64 %add, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}s_imp_def_vcc_split_i64_add_0:
+; SI: s_add_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x18f
+; SI: s_addc_u32 {{s[0-9]+}}, 0xf423f, 0
+define void @s_imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
   %vec.0 = insertelement <2 x i32> undef, i32 %val, i32 0
   %vec.1 = insertelement <2 x i32> %vec.0, i32 999999, i32 1
   %bc = bitcast <2 x i32> %vec.1 to i64
@@ -22,7 +35,20 @@
 ; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_1:
 ; SI: v_add_i32
 ; SI: v_addc_u32
-define void @imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 %val0, i64 %val1) {
+define void @imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
+  %v.val = load volatile i32, i32 addrspace(1)* %in
+  %vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
+  %vec.1 = insertelement <2 x i32> %vec.0, i32 %v.val, i32 1
+  %bc = bitcast <2 x i32> %vec.1 to i64
+  %add = add i64 %bc, %val1
+  store i64 %add, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}s_imp_def_vcc_split_i64_add_1:
+; SI: s_add_u32 {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; SI: s_addc_u32 {{s[0-9]+}}, 0x1869f, {{s[0-9]+}}
+define void @s_imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 %val0, i64 %val1) {
   %vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
   %vec.1 = insertelement <2 x i32> %vec.0, i32 99999, i32 1
   %bc = bitcast <2 x i32> %vec.1 to i64
@@ -32,9 +58,9 @@
 }
 
 ; Doesn't use constants
-; FUNC-LABEL @imp_def_vcc_split_i64_add_2
-; SI: v_add_i32
-; SI: v_addc_u32
+; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_2:
+; SI: v_add_i32_e32 {{v[0-9]+}}, vcc, {{s[0-9]+}}, {{v[0-9]+}}
+; SI: v_addc_u32_e32 {{v[0-9]+}}, vcc, {{v[0-9]+}}, {{v[0-9]+}}, vcc
 define void @imp_def_vcc_split_i64_add_2(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
   %tid = call i32 @llvm.r600.read.tidig.x() readnone
   %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid