Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -157,6 +157,7 @@
                    SDValue &Omod) const;
 
   void SelectADD_SUB_I64(SDNode *N);
+  void SelectUADDO_USUBO(SDNode *N);
   void SelectDIV_SCALE(SDNode *N);
   void SelectFMA_W_CHAIN(SDNode *N);
   void SelectFMUL_W_CHAIN(SDNode *N);
@@ -319,6 +320,11 @@
     SelectADD_SUB_I64(N);
     return;
   }
+  case ISD::UADDO:
+  case ISD::USUBO: {
+    SelectUADDO_USUBO(N);
+    return;
+  }
   case AMDGPUISD::FMUL_W_CHAIN: {
     SelectFMUL_W_CHAIN(N);
     return;
   }
@@ -685,6 +691,17 @@
   CurDAG->RemoveDeadNode(N);
 }
 
+void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
+  // The names of these opcodes are misleading. v_add_i32/v_sub_i32 have an
+  // unsigned carry out despite the _i32 name. These were renamed in VI to _U32.
+  // FIXME: We should probably rename the opcodes here.
+  unsigned Opc = N->getOpcode() == ISD::UADDO ?
+    AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
+
+  CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
+                       { N->getOperand(0), N->getOperand(1) });
+}
+
 void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
   SDLoc SL(N);
   // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -141,6 +141,9 @@
   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
   setOperationAction(ISD::BR_CC, MVT::f64, Expand);
 
+  setOperationAction(ISD::UADDO, MVT::i32, Legal);
+  setOperationAction(ISD::USUBO, MVT::i32, Legal);
+
   // We only support LOAD/STORE and vector manipulation ops for vectors
   // with > 4 elements.
   for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                  MVT::v2i64, MVT::v2f64}) {
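Note (illustrative, not part of the patch): with UADDO/USUBO marked Legal for MVT::i32, the DAG legalizer no longer expands the i32 overflow intrinsics into an add/sub followed by a setcc; SelectUADDO_USUBO maps the node directly onto V_ADD_I32_e64/V_SUB_I32_e64, whose second result is the carry-out. A minimal IR sketch of an input that now takes this path (function and value names are illustrative, not from the patch):

  declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)

  define void @uaddo_sketch(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) {
    ; Expected to select to a single v_add_i32 that defines both the sum
    ; and the carry bit, rather than an add plus a separate compare.
    %u = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
    %val = extractvalue { i32, i1 } %u, 0
    %carry = extractvalue { i32, i1 } %u, 1
    store i32 %val, i32 addrspace(1)* %out
    store i1 %carry, i1 addrspace(1)* %carryout
    ret void
  }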
Index: test/CodeGen/AMDGPU/uaddo.ll
===================================================================
--- test/CodeGen/AMDGPU/uaddo.ll
+++ test/CodeGen/AMDGPU/uaddo.ll
@@ -1,19 +1,16 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
 
-declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
-declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
-
-; FUNC-LABEL: {{^}}uaddo_i64_zext:
-; SI: add
-; SI: addc
-; SI: addc
+; FUNC-LABEL: {{^}}s_uaddo_i64_zext:
+; GCN: s_add_u32
+; GCN: s_addc_u32
+; GCN: v_cmp_lt_u64_e32 vcc
 
 ; EG: ADDC_UINT
 ; EG: ADDC_UINT
-define void @uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
-  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+define void @s_uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
   %val = extractvalue { i64, i1 } %uadd, 0
   %carry = extractvalue { i64, i1 } %uadd, 1
   %ext = zext i1 %carry to i64
@@ -22,13 +19,16 @@
   ret void
 }
 
+; FIXME: Could do scalar
+
 ; FUNC-LABEL: {{^}}s_uaddo_i32:
-; SI: s_add_i32
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
 
 ; EG: ADDC_UINT
 ; EG: ADD_INT
-define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
-  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+define void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
+  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
   %val = extractvalue { i32, i1 } %uadd, 0
   %carry = extractvalue { i32, i1 } %uadd, 1
   store i32 %val, i32 addrspace(1)* %out, align 4
@@ -37,14 +37,19 @@
 }
 
 ; FUNC-LABEL: {{^}}v_uaddo_i32:
-; SI: v_add_i32
+; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
 
 ; EG: ADDC_UINT
 ; EG: ADD_INT
-define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
-  %a = load i32, i32 addrspace(1)* %aptr, align 4
-  %b = load i32, i32 addrspace(1)* %bptr, align 4
-  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) nounwind
+define void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i32, i32 addrspace(1)* %a.gep, align 4
+  %b = load i32, i32 addrspace(1)* %b.gep, align 4
+  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
   %val = extractvalue { i32, i1 } %uadd, 0
   %carry = extractvalue { i32, i1 } %uadd, 1
   store i32 %val, i32 addrspace(1)* %out, align 4
@@ -52,14 +57,36 @@
   ret void
 }
 
+; FUNC-LABEL: {{^}}v_uaddo_i32_novcc:
+; GCN: v_add_i32_e64 v{{[0-9]+}}, [[COND:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[COND]]
+
+; EG: ADDC_UINT
+; EG: ADD_INT
+define void @v_uaddo_i32_novcc(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i32, i32 addrspace(1)* %a.gep, align 4
+  %b = load i32, i32 addrspace(1)* %b.gep, align 4
+  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue { i32, i1 } %uadd, 0
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  store volatile i32 %val, i32 addrspace(1)* %out, align 4
+  call void asm sideeffect "", "~{VCC}"() #0
+  store volatile i1 %carry, i1 addrspace(1)* %carryout
+  ret void
+}
+
 ; FUNC-LABEL: {{^}}s_uaddo_i64:
-; SI: s_add_u32
-; SI: s_addc_u32
+; GCN: s_add_u32
+; GCN: s_addc_u32
 
 ; EG: ADDC_UINT
 ; EG: ADD_INT
-define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
-  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+define void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) #0 {
+  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
   %val = extractvalue { i64, i1 } %uadd, 0
   %carry = extractvalue { i64, i1 } %uadd, 1
   store i64 %val, i64 addrspace(1)* %out, align 8
@@ -68,18 +95,48 @@
 }
 
 ; FUNC-LABEL: {{^}}v_uaddo_i64:
-; SI: v_add_i32
-; SI: v_addc_u32
+; GCN: v_add_i32
+; GCN: v_addc_u32
 
 ; EG: ADDC_UINT
 ; EG: ADD_INT
-define void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
-  %a = load i64, i64 addrspace(1)* %aptr, align 4
-  %b = load i64, i64 addrspace(1)* %bptr, align 4
-  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) nounwind
+define void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %a.ptr, i64 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i64, i64 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i64, i64 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i64, i64 addrspace(1)* %a.gep
+  %b = load i64, i64 addrspace(1)* %b.gep
+  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
   %val = extractvalue { i64, i1 } %uadd, 0
   %carry = extractvalue { i64, i1 } %uadd, 1
-  store i64 %val, i64 addrspace(1)* %out, align 8
+  store i64 %val, i64 addrspace(1)* %out
   store i1 %carry, i1 addrspace(1)* %carryout
   ret void
 }
+
+; FUNC-LABEL: {{^}}v_uaddo_i16:
+; VI: v_add_u16_e32
+; VI: v_cmp_lt_u16_e32
+define void @v_uaddo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i16, i16 addrspace(1)* %a.gep
+  %b = load i16, i16 addrspace(1)* %b.gep
+  %uadd = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
+  %val = extractvalue { i16, i1 } %uadd, 0
+  %carry = extractvalue { i16, i1 } %uadd, 1
+  store i16 %val, i16 addrspace(1)* %out
+  store i1 %carry, i1 addrspace(1)* %carryout
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare { i16, i1 } @llvm.uadd.with.overflow.i16(i16, i16) #1
+declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) #1
+declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
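Note (illustrative, not part of the patch): the *_novcc tests in both files exercise the _e64 path. The empty inline asm clobbers VCC, so the carry produced by the add/sub cannot live in VCC and selection has to use the _e64 encoding, which writes the carry to an arbitrary SGPR pair (the [[COND]] capture in the checks); the volatile stores keep the sum and the carry live across the clobber. The clobber idiom by itself:

  call void asm sideeffect "", "~{VCC}"()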
Index: test/CodeGen/AMDGPU/usubo.ll
===================================================================
--- test/CodeGen/AMDGPU/usubo.ll
+++ test/CodeGen/AMDGPU/usubo.ll
@@ -1,16 +1,16 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
 
-declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
-declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
-
-; FUNC-LABEL: {{^}}usubo_i64_zext:
+; FUNC-LABEL: {{^}}s_usubo_i64_zext:
+; GCN: s_sub_u32
+; GCN: s_subb_u32
+; GCN: v_cmp_gt_u64_e32 vcc
 
 ; EG: SUBB_UINT
 ; EG: ADDC_UINT
-define void @usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
-  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+define void @s_usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) #0 {
+  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
   %val = extractvalue { i64, i1 } %usub, 0
   %carry = extractvalue { i64, i1 } %usub, 1
   %ext = zext i1 %carry to i64
@@ -19,13 +19,16 @@
   ret void
 }
 
+; FIXME: Could do scalar
+
 ; FUNC-LABEL: {{^}}s_usubo_i32:
-; SI: s_sub_i32
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
 
 ; EG-DAG: SUBB_UINT
 ; EG-DAG: SUB_INT
-define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
-  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) #0 {
+  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
   %val = extractvalue { i32, i1 } %usub, 0
   %carry = extractvalue { i32, i1 } %usub, 1
   store i32 %val, i32 addrspace(1)* %out, align 4
@@ -34,14 +37,19 @@
 }
 
 ; FUNC-LABEL: {{^}}v_usubo_i32:
-; SI: v_subrev_i32_e32
+; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
 
 ; EG-DAG: SUBB_UINT
 ; EG-DAG: SUB_INT
-define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
-  %a = load i32, i32 addrspace(1)* %aptr, align 4
-  %b = load i32, i32 addrspace(1)* %bptr, align 4
-  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
+define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i32, i32 addrspace(1)* %a.gep, align 4
+  %b = load i32, i32 addrspace(1)* %b.gep, align 4
+  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
   %val = extractvalue { i32, i1 } %usub, 0
   %carry = extractvalue { i32, i1 } %usub, 1
   store i32 %val, i32 addrspace(1)* %out, align 4
@@ -49,16 +57,38 @@
   ret void
 }
 
+; FUNC-LABEL: {{^}}v_usubo_i32_novcc:
+; GCN: v_sub_i32_e64 v{{[0-9]+}}, [[COND:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[COND]]
+
+; EG-DAG: SUBB_UINT
+; EG-DAG: SUB_INT
+define void @v_usubo_i32_novcc(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %a.ptr, i32 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i32, i32 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i32, i32 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i32, i32 addrspace(1)* %a.gep, align 4
+  %b = load i32, i32 addrspace(1)* %b.gep, align 4
+  %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue { i32, i1 } %usub, 0
+  %carry = extractvalue { i32, i1 } %usub, 1
+  store volatile i32 %val, i32 addrspace(1)* %out, align 4
+  call void asm sideeffect "", "~{VCC}"() #0
+  store volatile i1 %carry, i1 addrspace(1)* %carryout
+  ret void
+}
+
 ; FUNC-LABEL: {{^}}s_usubo_i64:
-; SI: s_sub_u32
-; SI: s_subb_u32
+; GCN: s_sub_u32
+; GCN: s_subb_u32
 
 ; EG-DAG: SUBB_UINT
 ; EG-DAG: SUB_INT
 ; EG-DAG: SUB_INT
 ; EG: SUB_INT
-define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
-  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+define void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) #0 {
+  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
   %val = extractvalue { i64, i1 } %usub, 0
   %carry = extractvalue { i64, i1 } %usub, 1
   store i64 %val, i64 addrspace(1)* %out, align 8
@@ -67,20 +97,50 @@
 }
 
 ; FUNC-LABEL: {{^}}v_usubo_i64:
-; SI: v_sub_i32
-; SI: v_subb_u32
+; GCN: v_sub_i32
+; GCN: v_subb_u32
 
 ; EG-DAG: SUBB_UINT
 ; EG-DAG: SUB_INT
 ; EG-DAG: SUB_INT
 ; EG: SUB_INT
-define void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
-  %a = load i64, i64 addrspace(1)* %aptr, align 4
-  %b = load i64, i64 addrspace(1)* %bptr, align 4
-  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
+define void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %a.ptr, i64 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i64, i64 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i64, i64 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i64, i64 addrspace(1)* %a.gep
+  %b = load i64, i64 addrspace(1)* %b.gep
+  %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
   %val = extractvalue { i64, i1 } %usub, 0
   %carry = extractvalue { i64, i1 } %usub, 1
   store i64 %val, i64 addrspace(1)* %out, align 8
   store i1 %carry, i1 addrspace(1)* %carryout
   ret void
 }
+
+; FUNC-LABEL: {{^}}v_usubo_i16:
+; VI: v_subrev_u16_e32
+; VI: v_cmp_gt_u16_e32
+define void @v_usubo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %a.gep = getelementptr inbounds i16, i16 addrspace(1)* %a.ptr, i64 %tid.ext
+  %b.gep = getelementptr inbounds i16, i16 addrspace(1)* %b.ptr, i64 %tid.ext
+  %a = load i16, i16 addrspace(1)* %a.gep
+  %b = load i16, i16 addrspace(1)* %b.gep
+  %usub = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
+  %val = extractvalue { i16, i1 } %usub, 0
+  %carry = extractvalue { i16, i1 } %usub, 1
+  store i16 %val, i16 addrspace(1)* %out
+  store i1 %carry, i1 addrspace(1)* %carryout
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare { i16, i1 } @llvm.usub.with.overflow.i16(i16, i16) #1
+declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #1
+declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
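Note (illustrative, not part of the patch): only the i32 forms become Legal here, so the other widths in these tests still go through legalization. The i64 cases are expanded to s_add_u32/s_addc_u32 or s_sub_u32/s_subb_u32 (or the VALU pairs), with the zext variants checking the carry via a 64-bit compare. The i16 cases are legalized on VI with the overflow bit recomputed by a compare: an unsigned add overflows exactly when the 16-bit sum is less than an operand, and an unsigned subtract borrows exactly when the second operand is greater than the first, which is why the checks expect v_cmp_lt_u16 and v_cmp_gt_u16. An IR sketch of the equivalent recomputation for the add case:

  define { i16, i1 } @uaddo_i16_manual(i16 %a, i16 %b) {
    ; Same result as llvm.uadd.with.overflow.i16: the carry out of a
    ; 16-bit unsigned add is (sum < a).
    %sum = add i16 %a, %b
    %ovf = icmp ult i16 %sum, %a
    %r0 = insertvalue { i16, i1 } undef, i16 %sum, 0
    %r1 = insertvalue { i16, i1 } %r0, i1 %ovf, 1
    ret { i16, i1 } %r1
  }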