Index: clang/include/clang/Basic/BuiltinsNVPTX.def =================================================================== --- clang/include/clang/Basic/BuiltinsNVPTX.def +++ clang/include/clang/Basic/BuiltinsNVPTX.def @@ -413,6 +413,13 @@ TARGET_BUILTIN(__nvvm_vote_uni_sync, "bUib", "", "ptx60") TARGET_BUILTIN(__nvvm_vote_ballot_sync, "UiUib", "", "ptx60") +// Match +TARGET_BUILTIN(__nvvm_match_any_sync_i32, "UiUiUi", "", "ptx60") +TARGET_BUILTIN(__nvvm_match_any_sync_i64, "WiUiWi", "", "ptx60") +// These return a pair {value, predicate}, which requires custom lowering. +TARGET_BUILTIN(__nvvm_match_all_sync_i32p, "UiUiUii*", "", "ptx60") +TARGET_BUILTIN(__nvvm_match_all_sync_i64p, "WiUiWii*", "", "ptx60") + // Membar BUILTIN(__nvvm_membar_cta, "v", "") Index: clang/lib/CodeGen/CGBuiltin.cpp =================================================================== --- clang/lib/CodeGen/CGBuiltin.cpp +++ clang/lib/CodeGen/CGBuiltin.cpp @@ -9589,6 +9589,21 @@ {Ptr->getType()->getPointerElementType(), Ptr->getType()}), {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); } + case NVPTX::BI__nvvm_match_all_sync_i32p: + case NVPTX::BI__nvvm_match_all_sync_i64p: { + Value *Mask = EmitScalarExpr(E->getArg(0)); + Value *Val = EmitScalarExpr(E->getArg(1)); + Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2)); + Value *ResultPair = Builder.CreateCall( + CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p + ? 
Intrinsic::nvvm_match_all_sync_i32p + : Intrinsic::nvvm_match_all_sync_i64p), + {Mask, Val}); + Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1), + PredOutPtr.getElementType()); + Builder.CreateStore(Pred, PredOutPtr); + return Builder.CreateExtractValue(ResultPair, 0); + } default: return nullptr; } Index: clang/lib/Headers/__clang_cuda_intrinsics.h =================================================================== --- clang/lib/Headers/__clang_cuda_intrinsics.h +++ clang/lib/Headers/__clang_cuda_intrinsics.h @@ -92,8 +92,9 @@ #endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 +#if CUDA_VERSION >= 9000 +#if (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300) // __shfl_sync_* variants available in CUDA-9 -#if CUDA_VERSION >= 9000 && (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300) #pragma push_macro("__MAKE_SYNC_SHUFFLES") #define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, \ __Mask) \ @@ -170,25 +171,50 @@ } inline __device__ int __all_sync(unsigned int mask, int pred) { - return __nvvm_vote_sync_all(mask, pred); + return __nvvm_vote_all_sync(mask, pred); } inline __device__ int __any_sync(unsigned int mask, int pred) { - return __nvvm_vote_sync_any(mask, pred); + return __nvvm_vote_any_sync(mask, pred); } inline __device__ int __uni_sync(unsigned int mask, int pred) { - return __nvvm_vote_sync_uni(mask, pred); + return __nvvm_vote_uni_sync(mask, pred); } inline __device__ unsigned int __ballot_sync(unsigned int mask, int pred) { - return __nvvm_vote_sync_ballot(mask, pred); + return __nvvm_vote_ballot_sync(mask, pred); } -inline __device__ activemask() { return __nvvm_vote.ballot(1); } +inline __device__ unsigned int activemask() { return __nvvm_vote_ballot(1); } -#endif // __CUDA_VERSION >= 9000 && (!defined(__CUDA_ARCH__) || - // __CUDA_ARCH__ >= 300) +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +// Define __match* builtins CUDA-9 headers expect to see. 
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 +inline __device__ unsigned int __match32_any_sync(unsigned int mask, + unsigned int value) { + return __nvvm_match_any_sync_i32(mask, value); +} + +inline __device__ unsigned long long +__match64_any_sync(unsigned int mask, unsigned long long value) { + return __nvvm_match_any_sync_i64(mask, value); +} + +inline __device__ unsigned int +__match32_all_sync(unsigned int mask, unsigned int value, int *pred) { + return __nvvm_match_all_sync_i32p(mask, value, pred); +} + +inline __device__ unsigned long long +__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) { + return __nvvm_match_all_sync_i64p(mask, value, pred); +} +#include "crt/sm_70_rt.hpp" + +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 +#endif // CUDA_VERSION >= 9000 // sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}. Index: clang/test/CodeGen/builtins-nvptx-ptx60.cu =================================================================== --- clang/test/CodeGen/builtins-nvptx-ptx60.cu +++ clang/test/CodeGen/builtins-nvptx-ptx60.cu @@ -10,6 +10,8 @@ #define __shared__ __attribute__((shared)) #define __constant__ __attribute__((constant)) +typedef unsigned long long uint64_t; + // We have to keep all builtins that depend on particular target feature in the // same function, because the codegen will stop after the very first function // that encounters an error, so -verify will not be able to find errors in @@ -17,7 +19,8 @@ // CHECK-LABEL: nvvm_sync __device__ void nvvm_sync(unsigned mask, int i, float f, int a, int b, - bool pred) { + bool pred, uint64_t i64) { + // CHECK: call void @llvm.nvvm.bar.warp.sync(i32 // expected-error@+1 {{'__nvvm_bar_warp_sync' needs target feature ptx60}} __nvvm_bar_warp_sync(mask); @@ -73,5 +76,22 @@ // expected-error@+1 {{'__nvvm_vote_ballot_sync' needs target feature ptx60}} __nvvm_vote_ballot_sync(mask, pred); + // + // MATCH.{ALL,ANY}.SYNC + // + + // CHECK: call i32 
@llvm.nvvm.match.any.sync.i32(i32 + // expected-error@+1 {{'__nvvm_match_any_sync_i32' needs target feature ptx60}} + __nvvm_match_any_sync_i32(mask, i); + // CHECK: call i64 @llvm.nvvm.match.any.sync.i64(i32 + // expected-error@+1 {{'__nvvm_match_any_sync_i64' needs target feature ptx60}} + __nvvm_match_any_sync_i64(mask, i64); + // CHECK: call { i32, i1 } @llvm.nvvm.match.all.sync.i32p(i32 + // expected-error@+1 {{'__nvvm_match_all_sync_i32p' needs target feature ptx60}} + __nvvm_match_all_sync_i32p(mask, i, &i); + // CHECK: call { i64, i1 } @llvm.nvvm.match.all.sync.i64p(i32 + // expected-error@+1 {{'__nvvm_match_all_sync_i64p' needs target feature ptx60}} + __nvvm_match_all_sync_i64p(mask, i64, &i); + // CHECK: ret void } Index: llvm/include/llvm/IR/IntrinsicsNVVM.td =================================================================== --- llvm/include/llvm/IR/IntrinsicsNVVM.td +++ llvm/include/llvm/IR/IntrinsicsNVVM.td @@ -3842,4 +3842,31 @@ [IntrNoMem, IntrConvergent], "llvm.nvvm.vote.ballot.sync">, GCCBuiltin<"__nvvm_vote_ballot_sync">; +// +// MATCH.SYNC +// +// match.any.sync.b32 mask, value +def int_nvvm_match_any_sync_i32 : + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, IntrConvergent], "llvm.nvvm.match.any.sync.i32">, + GCCBuiltin<"__nvvm_match_any_sync_i32">; +// match.any.sync.b64 mask, value +def int_nvvm_match_any_sync_i64 : + Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty], + [IntrNoMem, IntrConvergent], "llvm.nvvm.match.any.sync.i64">, + GCCBuiltin<"__nvvm_match_any_sync_i64">; + +// The match.all instruction has two variants -- one returns a single value, another +// returns a pair {value, predicate}. We currently only implement the latter as +// that's the variant exposed by the CUDA API. 
+ +// match.all.sync.b32p mask, value +def int_nvvm_match_all_sync_i32p : + Intrinsic<[llvm_i32_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, IntrConvergent], "llvm.nvvm.match.all.sync.i32p">; +// match.all.sync.b64p mask, value +def int_nvvm_match_all_sync_i64p : + Intrinsic<[llvm_i64_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty], + [IntrNoMem, IntrConvergent], "llvm.nvvm.match.all.sync.i64p">; + } // let TargetPrefix = "nvvm" Index: llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h =================================================================== --- llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h +++ llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h @@ -58,6 +58,7 @@ bool tryIntrinsicNoChain(SDNode *N); bool tryIntrinsicChain(SDNode *N); void SelectTexSurfHandle(SDNode *N); + void SelectMatchAll(SDNode *N); bool tryLoad(SDNode *N); bool tryLoadVector(SDNode *N); bool tryLDGLDU(SDNode *N); Index: llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp =================================================================== --- llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -715,6 +715,10 @@ case Intrinsic::nvvm_texsurf_handle_internal: SelectTexSurfHandle(N); return true; + case Intrinsic::nvvm_match_all_sync_i32p: + case Intrinsic::nvvm_match_all_sync_i64p: + SelectMatchAll(N); + return true; } } @@ -726,6 +730,36 @@ MVT::i64, GlobalVal)); } +void NVPTXDAGToDAGISel::SelectMatchAll(SDNode *N) { + SDLoc DL(N); + enum { IS_I64 = 4, HAS_CONST_VALUE = 2, HAS_CONST_MASK = 1 }; + unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned OpcodeIndex = + (IID == Intrinsic::nvvm_match_all_sync_i64p) ? 
IS_I64 : 0; + SDValue MaskOp = N->getOperand(1); + SDValue ValueOp = N->getOperand(2); + if (ConstantSDNode *ValueConst = dyn_cast(ValueOp)) { + OpcodeIndex |= HAS_CONST_VALUE; + ValueOp = CurDAG->getTargetConstant(ValueConst->getZExtValue(), DL, + ValueConst->getValueType(0)); + } + if (ConstantSDNode *MaskConst = dyn_cast(MaskOp)) { + OpcodeIndex |= HAS_CONST_MASK; + MaskOp = CurDAG->getTargetConstant(MaskConst->getZExtValue(), DL, + MaskConst->getValueType(0)); + } + // Maps {IS_I64, HAS_CONST_VALUE, HAS_CONST_MASK} -> opcode + unsigned Opcodes[8] = { + NVPTX::MATCH_ALLP_SYNC_32rr, NVPTX::MATCH_ALLP_SYNC_32ri, + NVPTX::MATCH_ALLP_SYNC_32ir, NVPTX::MATCH_ALLP_SYNC_32ii, + NVPTX::MATCH_ALLP_SYNC_64rr, NVPTX::MATCH_ALLP_SYNC_64ri, + NVPTX::MATCH_ALLP_SYNC_64ir, NVPTX::MATCH_ALLP_SYNC_64ii}; + SDNode *NewNode = CurDAG->getMachineNode(Opcodes[OpcodeIndex], DL, + {ValueOp->getValueType(0), MVT::i1}, + {MaskOp, ValueOp}); + ReplaceNode(N, NewNode); +} + void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) { SDValue Src = N->getOperand(0); AddrSpaceCastSDNode *CastN = cast(N); Index: llvm/lib/Target/NVPTX/NVPTXInstrInfo.td =================================================================== --- llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -158,6 +158,7 @@ def hasPTX60 : Predicate<"Subtarget->getPTXVersion() >= 60">; def hasSM30 : Predicate<"Subtarget->getSmVersion() >= 30">; +def hasSM70 : Predicate<"Subtarget->getSmVersion() >= 70">; def useFP16Math: Predicate<"Subtarget->allowFP16Math()">; Index: llvm/lib/Target/NVPTX/NVPTXIntrinsics.td =================================================================== --- llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -247,6 +247,63 @@ defm VOTE_SYNC_UNI : VOTE_SYNC; defm VOTE_SYNC_BALLOT : VOTE_SYNC; +multiclass MATCH_ANY_SYNC { + def ii : NVPTXInst<(outs regclass:$dest), (ins i32imm:$mask, ImmOp:$value), + "match.any.sync." 
# ptxtype # " \t$dest, $value, $mask;", + [(set regclass:$dest, (IntOp imm:$mask, imm:$value))]>, + Requires<[hasPTX60, hasSM70]>; + def ir : NVPTXInst<(outs regclass:$dest), (ins Int32Regs:$mask, ImmOp:$value), + "match.any.sync." # ptxtype # " \t$dest, $value, $mask;", + [(set regclass:$dest, (IntOp Int32Regs:$mask, imm:$value))]>, + Requires<[hasPTX60, hasSM70]>; + def ri : NVPTXInst<(outs regclass:$dest), (ins i32imm:$mask, regclass:$value), + "match.any.sync." # ptxtype # " \t$dest, $value, $mask;", + [(set regclass:$dest, (IntOp imm:$mask, regclass:$value))]>, + Requires<[hasPTX60, hasSM70]>; + def rr : NVPTXInst<(outs regclass:$dest), (ins Int32Regs:$mask, regclass:$value), + "match.any.sync." # ptxtype # " \t$dest, $value, $mask;", + [(set regclass:$dest, (IntOp Int32Regs:$mask, regclass:$value))]>, + Requires<[hasPTX60, hasSM70]>; +} + +defm MATCH_ANY_SYNC_32 : MATCH_ANY_SYNC; +defm MATCH_ANY_SYNC_64 : MATCH_ANY_SYNC; + +multiclass MATCH_ALLP_SYNC { + def ii : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred), + (ins i32imm:$mask, ImmOp:$value), + "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;", + // It would be nice if tablegen could match multiple return values, + // but it does not seem to be the case. Thus we have an empty pattern and + // lower intrinsic to instruction manually. + // [(set regclass:$dest, Int1Regs:$pred, (IntOp imm:$value, imm:$mask))]>, + []>, + Requires<[hasPTX60, hasSM70]>; + def ir : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred), + (ins Int32Regs:$mask, ImmOp:$value), + "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;", + []>, + Requires<[hasPTX60, hasSM70]>; + def ri : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred), + (ins i32imm:$mask, regclass:$value), + "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;", + []>, + Requires<[hasPTX60, hasSM70]>; + def rr : NVPTXInst<(outs regclass:$dest, Int1Regs:$pred), + (ins Int32Regs:$mask, regclass:$value), + "match.all.sync." 
# ptxtype # " \t$dest|$pred, $value, $mask;", + []>, + Requires<[hasPTX60, hasSM70]>; +} +defm MATCH_ALLP_SYNC_32 : MATCH_ALLP_SYNC; +defm MATCH_ALLP_SYNC_64 : MATCH_ALLP_SYNC; + } // isConvergent = 1 //----------------------------------- Index: llvm/test/CodeGen/NVPTX/match.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/NVPTX/match.ll @@ -0,0 +1,117 @@ +; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx60 | FileCheck %s + +declare i32 @llvm.nvvm.match.any.sync.i32(i32, i32) +declare i64 @llvm.nvvm.match.any.sync.i64(i32, i64) + +; CHECK-LABEL: .func{{.*}}match.any.sync.i32 +define i32 @match.any.sync.i32(i32 %mask, i32 %value) { + ; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.any.sync.i32_param_0]; + ; CHECK: ld.param.u32 [[VALUE:%r[0-9]+]], [match.any.sync.i32_param_1]; + + ; CHECK: match.any.sync.b32 [[V0:%r[0-9]+]], [[VALUE]], [[MASK]]; + %v0 = call i32 @llvm.nvvm.match.any.sync.i32(i32 %mask, i32 %value) + ; CHECK: match.any.sync.b32 [[V1:%r[0-9]+]], [[VALUE]], 1; + %v1 = call i32 @llvm.nvvm.match.any.sync.i32(i32 1, i32 %value) + ; CHECK: match.any.sync.b32 [[V2:%r[0-9]+]], 2, [[MASK]]; + %v2 = call i32 @llvm.nvvm.match.any.sync.i32(i32 %mask, i32 2) + ; CHECK: match.any.sync.b32 [[V3:%r[0-9]+]], 4, 3; + %v3 = call i32 @llvm.nvvm.match.any.sync.i32(i32 3, i32 4) + %sum1 = add i32 %v0, %v1 + %sum2 = add i32 %v2, %v3 + %sum3 = add i32 %sum1, %sum2 + ret i32 %sum3; +} + +; CHECK-LABEL: .func{{.*}}match.any.sync.i64 +define i64 @match.any.sync.i64(i32 %mask, i64 %value) { + ; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.any.sync.i64_param_0]; + ; CHECK: ld.param.u64 [[VALUE:%rd[0-9]+]], [match.any.sync.i64_param_1]; + + ; CHECK: match.any.sync.b64 [[V0:%rd[0-9]+]], [[VALUE]], [[MASK]]; + %v0 = call i64 @llvm.nvvm.match.any.sync.i64(i32 %mask, i64 %value) + ; CHECK: match.any.sync.b64 [[V1:%rd[0-9]+]], [[VALUE]], 1; + %v1 = call i64 @llvm.nvvm.match.any.sync.i64(i32 1, i64 %value) + ; CHECK: 
match.any.sync.b64 [[V2:%rd[0-9]+]], 2, [[MASK]]; + %v2 = call i64 @llvm.nvvm.match.any.sync.i64(i32 %mask, i64 2) + ; CHECK: match.any.sync.b64 [[V3:%rd[0-9]+]], 4, 3; + %v3 = call i64 @llvm.nvvm.match.any.sync.i64(i32 3, i64 4) + %sum1 = add i64 %v0, %v1 + %sum2 = add i64 %v2, %v3 + %sum3 = add i64 %sum1, %sum2 + ret i64 %sum3; +} + +declare {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32, i32) +declare {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32, i64) + +; CHECK-LABEL: .func{{.*}}match.all.sync.i32p( +define {i32,i1} @match.all.sync.i32p(i32 %mask, i32 %value) { + ; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.all.sync.i32p_param_0]; + ; CHECK: ld.param.u32 [[VALUE:%r[0-9]+]], [match.all.sync.i32p_param_1]; + + ; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, [[VALUE]], [[MASK]]; + %r1 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 %mask, i32 %value) + %v1 = extractvalue {i32, i1} %r1, 0 + %p1 = extractvalue {i32, i1} %r1, 1 + + ; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, 1, [[MASK]]; + %r2 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 %mask, i32 1) + %v2 = extractvalue {i32, i1} %r2, 0 + %p2 = extractvalue {i32, i1} %r2, 1 + + ; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, [[VALUE]], 2; + %r3 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 2, i32 %value) + %v3 = extractvalue {i32, i1} %r3, 0 + %p3 = extractvalue {i32, i1} %r3, 1 + + ; CHECK: match.all.sync.b32 {{%r[0-9]+\|%p[0-9]+}}, 4, 3; + %r4 = call {i32, i1} @llvm.nvvm.match.all.sync.i32p(i32 3, i32 4) + %v4 = extractvalue {i32, i1} %r4, 0 + %p4 = extractvalue {i32, i1} %r4, 1 + + %vsum1 = add i32 %v1, %v2 + %vsum2 = add i32 %v3, %v4 + %vsum3 = add i32 %vsum1, %vsum2 + %psum1 = add i1 %p1, %p2 + %psum2 = add i1 %p3, %p4 + %psum3 = add i1 %psum1, %psum2 + %ret0 = insertvalue {i32, i1} undef, i32 %vsum3, 0 + %ret1 = insertvalue {i32, i1} %ret0, i1 %psum3, 1 + ret {i32, i1} %ret1; +} + +; CHECK-LABEL: .func{{.*}}match.all.sync.i64p( +define {i64,i1} @match.all.sync.i64p(i32 %mask, 
i64 %value) { + ; CHECK: ld.param.u32 [[MASK:%r[0-9]+]], [match.all.sync.i64p_param_0]; + ; CHECK: ld.param.u64 [[VALUE:%rd[0-9]+]], [match.all.sync.i64p_param_1]; + + ; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, [[VALUE]], [[MASK]]; + %r1 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 %mask, i64 %value) + %v1 = extractvalue {i64, i1} %r1, 0 + %p1 = extractvalue {i64, i1} %r1, 1 + + ; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, 1, [[MASK]]; + %r2 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 %mask, i64 1) + %v2 = extractvalue {i64, i1} %r2, 0 + %p2 = extractvalue {i64, i1} %r2, 1 + + ; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, [[VALUE]], 2; + %r3 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 2, i64 %value) + %v3 = extractvalue {i64, i1} %r3, 0 + %p3 = extractvalue {i64, i1} %r3, 1 + + ; CHECK: match.all.sync.b64 {{%rd[0-9]+\|%p[0-9]+}}, 4, 3; + %r4 = call {i64, i1} @llvm.nvvm.match.all.sync.i64p(i32 3, i64 4) + %v4 = extractvalue {i64, i1} %r4, 0 + %p4 = extractvalue {i64, i1} %r4, 1 + + %vsum1 = add i64 %v1, %v2 + %vsum2 = add i64 %v3, %v4 + %vsum3 = add i64 %vsum1, %vsum2 + %psum1 = add i1 %p1, %p2 + %psum2 = add i1 %p3, %p4 + %psum3 = add i1 %psum1, %psum2 + %ret0 = insertvalue {i64, i1} undef, i64 %vsum3, 0 + %ret1 = insertvalue {i64, i1} %ret0, i1 %psum3, 1 + ret {i64, i1} %ret1; +}