Index: llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -762,6 +762,7 @@
   void SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N, SDValue &Lo,
                                   SDValue &Hi);
+  void SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi);
 
   // Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>.
   bool SplitVectorOperand(SDNode *N, unsigned OpNo);
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -849,6 +849,9 @@
   case ISD::VECTOR_SHUFFLE:
     SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
     break;
+  case ISD::VAARG:
+    SplitVecRes_VAARG(N, Lo, Hi);
+    break;
 
   case ISD::ANY_EXTEND_VECTOR_INREG:
   case ISD::SIGN_EXTEND_VECTOR_INREG:
@@ -1842,6 +1845,28 @@
   }
 }
 
+// Split the vector result of a VAARG node that is illegal for the target:
+// read the two halves with two consecutive VAARG nodes threaded on the chain.
+void DAGTypeLegalizer::SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
+  EVT OVT = N->getValueType(0);
+  EVT NVT = OVT.getHalfNumVectorElementsVT(*DAG.getContext());
+  SDValue Chain = N->getOperand(0);
+  SDValue Ptr = N->getOperand(1);
+  SDValue SV = N->getOperand(2);
+  SDLoc dl(N);
+  const unsigned Align = N->getConstantOperandVal(3);
+
+  Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, SV, Align);
+  // The second half is read immediately after the first, so it does not carry
+  // the original over-alignment request; 0 asks for no extra alignment.
+  Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, SV, 0);
+  Chain = Hi.getValue(1);
+
+  // Modified the chain - switch anything that used the old chain to use
+  // the new one.
+  ReplaceValueWith(SDValue(N, 1), Chain);
+}
+
 //===----------------------------------------------------------------------===//
 //  Operand Vector Splitting: <128 x ty> -> 2 x <64 x ty>.
 //===----------------------------------------------------------------------===//
Index: llvm/test/CodeGen/PowerPC/legalize-vaarg.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/legalize-vaarg.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;RUN: llc < %s --mtriple=powerpc64-- -mattr=+altivec | FileCheck %s -check-prefix=BE
+;RUN: llc < %s --mtriple=powerpc64le-- -mattr=+altivec | FileCheck %s -check-prefix=LE
+define <8 x i32> @test_large_vec_vaarg(i32 %n, ...) {
+; BE-LABEL: test_large_vec_vaarg:
+; BE:       # %bb.0:
+; BE-NEXT:    std 4, 56(1)
+; BE-NEXT:    std 5, 64(1)
+; BE-NEXT:    std 6, 72(1)
+; BE-NEXT:    std 7, 80(1)
+; BE-NEXT:    std 8, 88(1)
+; BE-NEXT:    std 9, 96(1)
+; BE-NEXT:    std 10, 104(1)
+; BE-NEXT:    ld 3, -8(1)
+; BE-NEXT:    addi 3, 3, 31
+; BE-NEXT:    rldicr 3, 3, 0, 58
+; BE-NEXT:    ori 4, 3, 16
+; BE-NEXT:    std 4, -8(1)
+; BE-NEXT:    addi 5, 4, 16
+; BE-NEXT:    lvx 2, 0, 3
+; BE-NEXT:    std 5, -8(1)
+; BE-NEXT:    lvx 3, 0, 4
+; BE-NEXT:    blr
+;
+; LE-LABEL: test_large_vec_vaarg:
+; LE:       # %bb.0:
+; LE-NEXT:    std 4, 40(1)
+; LE-NEXT:    std 5, 48(1)
+; LE-NEXT:    std 6, 56(1)
+; LE-NEXT:    std 7, 64(1)
+; LE-NEXT:    std 8, 72(1)
+; LE-NEXT:    std 9, 80(1)
+; LE-NEXT:    std 10, 88(1)
+; LE-NEXT:    ld 3, -8(1)
+; LE-NEXT:    addi 3, 3, 31
+; LE-NEXT:    rldicr 3, 3, 0, 58
+; LE-NEXT:    ori 4, 3, 16
+; LE-NEXT:    std 4, -8(1)
+; LE-NEXT:    addi 5, 4, 16
+; LE-NEXT:    lvx 2, 0, 3
+; LE-NEXT:    std 5, -8(1)
+; LE-NEXT:    lvx 3, 0, 4
+; LE-NEXT:    blr
+  %args = alloca i8*, align 4
+  %x = va_arg i8** %args, <8 x i32>
+  ret <8 x i32> %x
+}
Index: llvm/test/CodeGen/X86/legalize-vaarg.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/legalize-vaarg.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;RUN: llc < %s -mtriple=x86_64-- -mattr=avx | FileCheck %s
+
+define <32 x i32> @test_large_vec_vaarg(i32 %n, ...) {
+; CHECK-LABEL: test_large_vec_vaarg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT:    cmpl $24, %ecx
+; CHECK-NEXT:    jae .LBB0_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    addl $8, %ecx
+; CHECK-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    jmp .LBB0_3
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    movq (%rsp), %rax
+; CHECK-NEXT:    addq $127, %rax
+; CHECK-NEXT:    andq $-128, %rax
+; CHECK-NEXT:    leaq 32(%rax), %rcx
+; CHECK-NEXT:    movq %rcx, (%rsp)
+; CHECK-NEXT:  .LBB0_3:
+; CHECK-NEXT:    vmovaps (%rax), %ymm0
+; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT:    cmpl $24, %ecx
+; CHECK-NEXT:    jae .LBB0_5
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    addl $8, %ecx
+; CHECK-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    jmp .LBB0_6
+; CHECK-NEXT:  .LBB0_5:
+; CHECK-NEXT:    movq (%rsp), %rax
+; CHECK-NEXT:    movq %rax, %rcx
+; CHECK-NEXT:    addq $32, %rcx
+; CHECK-NEXT:    movq %rcx, (%rsp)
+; CHECK-NEXT:  .LBB0_6:
+; CHECK-NEXT:    vmovaps (%rax), %ymm1
+; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT:    cmpl $24, %ecx
+; CHECK-NEXT:    jae .LBB0_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    addl $8, %ecx
+; CHECK-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    jmp .LBB0_9
+; CHECK-NEXT:  .LBB0_8:
+; CHECK-NEXT:    movq (%rsp), %rax
+; CHECK-NEXT:    movq %rax, %rcx
+; CHECK-NEXT:    addq $32, %rcx
+; CHECK-NEXT:    movq %rcx, (%rsp)
+; CHECK-NEXT:  .LBB0_9:
+; CHECK-NEXT:    vmovaps (%rax), %ymm2
+; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT:    cmpl $24, %ecx
+; CHECK-NEXT:    jae .LBB0_11
+; CHECK-NEXT:  # %bb.10:
+; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    addl $8, %ecx
+; CHECK-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps (%rax), %ymm3
+; CHECK-NEXT:    retq
+; CHECK-NEXT:  .LBB0_11:
+; CHECK-NEXT:    movq (%rsp), %rax
+; CHECK-NEXT:    movq %rax, %rcx
+; CHECK-NEXT:    addq $32, %rcx
+; CHECK-NEXT:    movq %rcx, (%rsp)
+; CHECK-NEXT:    vmovaps (%rax), %ymm3
+; CHECK-NEXT:    retq
+  %args = alloca i8*, align 4
+  %x = va_arg i8** %args, <32 x i32>
+  ret <32 x i32> %x
+}