diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1079,8 +1079,7 @@
     setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

-    if (Subtarget.hasDirectMove())
-      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

     // Handle constrained floating-point operations of vector.
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
@@ -12,127 +12,126 @@
 @__const.caller.t = private unnamed_addr constant %struct.Test { double 0.000000e+00, double 1.000000e+00, double 2.000000e+00, double 3.000000e+00 }, align 8

 define double @caller() {
-; MIR32-LABEL: name: caller
-; MIR32: bb.0.entry:
-; MIR32: renamable $r3 = LWZtoc @__const.caller.t, $r2 :: (load (s32) from got)
-; MIR32: renamable $r4 = LI 31
-; MIR32: renamable $v2 = LVX renamable $r3, killed renamable $r4
-; MIR32: renamable $r4 = LI 16
-; MIR32: renamable $v3 = LVX renamable $r3, killed renamable $r4
-; MIR32: renamable $v4 = LVSL $zero, renamable $r3
-; MIR32: renamable $v2 = VPERM renamable $v3, killed renamable $v2, renamable $v4
-; MIR32: renamable $r4 = LI 172
-; MIR32: STXVW4X killed renamable $v2, $r1, killed renamable $r4 :: (store (s128) into unknown-address + 16, align 4)
-; MIR32: renamable $v2 = LVX $zero, killed renamable $r3
-; MIR32: renamable $v2 = VPERM killed renamable $v2, killed renamable $v3, killed renamable $v4
-; MIR32: renamable $r3 = LI 156
-; MIR32: STXVW4X killed renamable $v2, $r1, killed renamable $r3 :: (store (s128), align 4)
-; MIR32: ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
-; MIR32: renamable $vsl0 = XXLXORz
-; MIR32: $f1 = XXLXORdpz
-; MIR32: $f2 = XXLXORdpz
-; MIR32: $v2 = XXLXORz
-; MIR32: $v3 = XXLXORz
-; MIR32: $v4 = XXLXORz
-; MIR32: $v5 = XXLXORz
-; MIR32: $v6 = XXLXORz
-; MIR32: $v7 = XXLXORz
-; MIR32: $v8 = XXLXORz
-; MIR32: $v9 = XXLXORz
-; MIR32: $v10 = XXLXORz
-; MIR32: $v11 = XXLXORz
-; MIR32: $v12 = XXLXORz
-; MIR32: $v13 = XXLXORz
-; MIR32: $f3 = XXLXORdpz
-; MIR32: $f4 = XXLXORdpz
-; MIR32: $f5 = XXLXORdpz
-; MIR32: $f6 = XXLXORdpz
-; MIR32: $f7 = XXLXORdpz
-; MIR32: renamable $r3 = LI 136
-; MIR32: $f8 = XXLXORdpz
-; MIR32: renamable $r4 = LI 120
-; MIR32: renamable $r5 = LWZtoc %const.0, $r2 :: (load (s32) from got)
-; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-; MIR32: $f9 = XXLXORdpz
-; MIR32: renamable $r3 = LI 104
-; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r4 :: (store (s128), align 8)
-; MIR32: $f10 = XXLXORdpz
-; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-; MIR32: renamable $r3 = LI 88
-; MIR32: $f11 = XXLXORdpz
-; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-; MIR32: renamable $r3 = LI 72
-; MIR32: renamable $v0 = LXVD2X $zero, killed renamable $r5 :: (load (s128) from constant-pool)
-; MIR32: $f12 = XXLXORdpz
-; MIR32: STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-; MIR32: $f13 = XXLXORdpz
-; MIR32: renamable $r5 = LI 48
-; MIR32: renamable $r6 = LI 512
-; MIR32: $r3 = LI 128
-; MIR32: $r4 = LI 256
-; MIR32: STXVD2X killed renamable $v0, $r1, killed renamable $r5 :: (store (s128))
-; MIR32: STW killed renamable $r6, 152, $r1 :: (store (s32))
-; MIR32: BL_NOP , csr_aix32_altivec, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def $f1
-; MIR32: ADJCALLSTACKUP 188, 0, implicit-def dead $r1, implicit $r1
-; MIR32: BLR implicit $lr, implicit $rm, implicit $f1
-; MIR64-LABEL: name: caller
-; MIR64: bb.0.entry:
-; MIR64: renamable $x3 = LDtoc @__const.caller.t, $x2 :: (load (s64) from got)
-; MIR64: renamable $x4 = LI8 16
-; MIR64: renamable $vsl0 = LXVD2X renamable $x3, killed renamable $x4 :: (load (s128) from unknown-address + 16, align 8)
-; MIR64: renamable $x4 = LI8 208
-; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x4 :: (store (s128) into unknown-address + 16, align 4)
-; MIR64: renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128), align 8)
-; MIR64: renamable $x3 = LI8 192
-; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 4)
-; MIR64: ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
-; MIR64: $f1 = XXLXORdpz
-; MIR64: $f2 = XXLXORdpz
-; MIR64: $v2 = XXLXORz
-; MIR64: $v3 = XXLXORz
-; MIR64: $v4 = XXLXORz
-; MIR64: $v5 = XXLXORz
-; MIR64: $v6 = XXLXORz
-; MIR64: $v7 = XXLXORz
-; MIR64: $v8 = XXLXORz
-; MIR64: $v9 = XXLXORz
-; MIR64: $v10 = XXLXORz
-; MIR64: $v11 = XXLXORz
-; MIR64: $v12 = XXLXORz
-; MIR64: $v13 = XXLXORz
-; MIR64: $f3 = XXLXORdpz
-; MIR64: renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
-; MIR64: $f4 = XXLXORdpz
-; MIR64: $f5 = XXLXORdpz
-; MIR64: $f6 = XXLXORdpz
-; MIR64: renamable $x4 = LDtocCPT %const.1, $x2 :: (load (s64) from got)
-; MIR64: renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
-; MIR64: $f7 = XXLXORdpz
-; MIR64: $f8 = XXLXORdpz
-; MIR64: renamable $x3 = LI8 160
-; MIR64: $f9 = XXLXORdpz
-; MIR64: renamable $x5 = LI8 144
-; MIR64: renamable $vsl13 = LXVD2X $zero8, killed renamable $x4 :: (load (s128) from constant-pool)
-; MIR64: STXVD2X renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
-; MIR64: $f10 = XXLXORdpz
-; MIR64: renamable $x3 = LI8 128
-; MIR64: STXVD2X renamable $vsl0, $x1, killed renamable $x5 :: (store (s128), align 8)
-; MIR64: $f11 = XXLXORdpz
-; MIR64: renamable $x4 = LI8 80
-; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
-; MIR64: $f12 = XXLXORdpz
-; MIR64: STXVD2X killed renamable $vsl13, $x1, killed renamable $x4 :: (store (s128))
-; MIR64: $f13 = XXLXORdpz
-; MIR64: renamable $x5 = LI8 512
-; MIR64: renamable $x6 = LI8 0
-; MIR64: $x3 = LI8 128
-; MIR64: $x4 = LI8 256
-; MIR64: STD killed renamable $x5, 184, $x1 :: (store (s64))
-; MIR64: STD killed renamable $x6, 176, $x1 :: (store (s64))
-; MIR64: BL8_NOP , csr_ppc64_altivec, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $f1, implicit $f2, implicit killed $v2, implicit killed $v3, implicit killed $v4, implicit killed $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $x2, implicit-def $r1, implicit-def $f1
-; MIR64: ADJCALLSTACKUP 224, 0, implicit-def dead $r1, implicit $r1
-; MIR64: BLR8 implicit $lr8, implicit $rm, implicit $f1
+  ; MIR32-LABEL: name: caller
+  ; MIR32: bb.0.entry:
+  ; MIR32: renamable $r3 = LWZtoc @__const.caller.t, $r2 :: (load (s32) from got)
+  ; MIR32: renamable $r4 = LI 31
+  ; MIR32: renamable $v2 = LVX renamable $r3, killed renamable $r4
+  ; MIR32: renamable $r4 = LI 16
+  ; MIR32: renamable $v3 = LVX renamable $r3, killed renamable $r4
+  ; MIR32: renamable $v4 = LVSL $zero, renamable $r3
+  ; MIR32: renamable $v2 = VPERM renamable $v3, killed renamable $v2, renamable $v4
+  ; MIR32: renamable $r4 = LI 172
+  ; MIR32: STXVW4X killed renamable $v2, $r1, killed renamable $r4 :: (store (s128) into unknown-address + 16, align 4)
+  ; MIR32: renamable $v2 = LVX $zero, killed renamable $r3
+  ; MIR32: renamable $v2 = VPERM killed renamable $v2, killed renamable $v3, killed renamable $v4
+  ; MIR32: renamable $r3 = LI 156
+  ; MIR32: STXVW4X killed renamable $v2, $r1, killed renamable $r3 :: (store (s128), align 4)
+  ; MIR32: ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
+  ; MIR32: renamable $vsl0 = XXLXORz
+  ; MIR32: $f1 = XXLXORdpz
+  ; MIR32: $f2 = XXLXORdpz
+  ; MIR32: $v2 = XXLXORz
+  ; MIR32: $v3 = XXLXORz
+  ; MIR32: $v4 = XXLXORz
+  ; MIR32: $v5 = XXLXORz
+  ; MIR32: $v6 = XXLXORz
+  ; MIR32: $v7 = XXLXORz
+  ; MIR32: $v8 = XXLXORz
+  ; MIR32: $v9 = XXLXORz
+  ; MIR32: $v10 = XXLXORz
+  ; MIR32: $v11 = XXLXORz
+  ; MIR32: $v12 = XXLXORz
+  ; MIR32: $v13 = XXLXORz
+  ; MIR32: $f3 = XXLXORdpz
+  ; MIR32: $f4 = XXLXORdpz
+  ; MIR32: $f5 = XXLXORdpz
+  ; MIR32: $f6 = XXLXORdpz
+  ; MIR32: $f7 = XXLXORdpz
+  ; MIR32: renamable $r3 = LI 136
+  ; MIR32: $f8 = XXLXORdpz
+  ; MIR32: renamable $r4 = LI 120
+  ; MIR32: renamable $r5 = LWZtoc %const.0, $r2 :: (load (s32) from got)
+  ; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32: $f9 = XXLXORdpz
+  ; MIR32: renamable $r3 = LI 104
+  ; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r4 :: (store (s128), align 8)
+  ; MIR32: $f10 = XXLXORdpz
+  ; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32: renamable $r3 = LI 88
+  ; MIR32: $f11 = XXLXORdpz
+  ; MIR32: STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32: renamable $r3 = LI 72
+  ; MIR32: renamable $v0 = LXVD2X $zero, killed renamable $r5 :: (load (s128) from constant-pool)
+  ; MIR32: $f12 = XXLXORdpz
+  ; MIR32: STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32: $f13 = XXLXORdpz
+  ; MIR32: renamable $r5 = LI 48
+  ; MIR32: renamable $r6 = LI 512
+  ; MIR32: $r3 = LI 128
+  ; MIR32: $r4 = LI 256
+  ; MIR32: STXVD2X killed renamable $v0, $r1, killed renamable $r5 :: (store (s128))
+  ; MIR32: STW killed renamable $r6, 152, $r1 :: (store (s32))
+  ; MIR32: BL_NOP , csr_aix32_altivec, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def $f1
+  ; MIR32: ADJCALLSTACKUP 188, 0, implicit-def dead $r1, implicit $r1
+  ; MIR32: BLR implicit $lr, implicit $rm, implicit $f1
+  ; MIR64-LABEL: name: caller
+  ; MIR64: bb.0.entry:
+  ; MIR64: renamable $x3 = LDtoc @__const.caller.t, $x2 :: (load (s64) from got)
+  ; MIR64: renamable $x4 = LI8 16
+  ; MIR64: renamable $vsl0 = LXVD2X renamable $x3, killed renamable $x4 :: (load (s128) from unknown-address + 16, align 8)
+  ; MIR64: renamable $x4 = LI8 208
+  ; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x4 :: (store (s128) into unknown-address + 16, align 4)
+  ; MIR64: renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128), align 8)
+  ; MIR64: renamable $x3 = LI8 192
+  ; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 4)
+  ; MIR64: ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
+  ; MIR64: renamable $vsl0 = XXLXORz
+  ; MIR64: $f1 = XXLXORdpz
+  ; MIR64: $f2 = XXLXORdpz
+  ; MIR64: $v2 = XXLXORz
+  ; MIR64: $v3 = XXLXORz
+  ; MIR64: $v4 = XXLXORz
+  ; MIR64: $v5 = XXLXORz
+  ; MIR64: $v6 = XXLXORz
+  ; MIR64: $v7 = XXLXORz
+  ; MIR64: $v8 = XXLXORz
+  ; MIR64: $v9 = XXLXORz
+  ; MIR64: $v10 = XXLXORz
+  ; MIR64: $v11 = XXLXORz
+  ; MIR64: $v12 = XXLXORz
+  ; MIR64: $v13 = XXLXORz
+  ; MIR64: $f3 = XXLXORdpz
+  ; MIR64: $f4 = XXLXORdpz
+  ; MIR64: $f5 = XXLXORdpz
+  ; MIR64: $f6 = XXLXORdpz
+  ; MIR64: renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
+  ; MIR64: $f7 = XXLXORdpz
+  ; MIR64: $f8 = XXLXORdpz
+  ; MIR64: renamable $x4 = LI8 160
+  ; MIR64: $f9 = XXLXORdpz
+  ; MIR64: renamable $x5 = LI8 144
+  ; MIR64: STXVW4X renamable $vsl0, $x1, killed renamable $x4 :: (store (s128), align 8)
+  ; MIR64: renamable $vsl13 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
+  ; MIR64: $f10 = XXLXORdpz
+  ; MIR64: renamable $x3 = LI8 128
+  ; MIR64: STXVW4X renamable $vsl0, $x1, killed renamable $x5 :: (store (s128), align 8)
+  ; MIR64: $f11 = XXLXORdpz
+  ; MIR64: renamable $x4 = LI8 80
+  ; MIR64: STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
+  ; MIR64: $f12 = XXLXORdpz
+  ; MIR64: STXVD2X killed renamable $vsl13, $x1, killed renamable $x4 :: (store (s128))
+  ; MIR64: $f13 = XXLXORdpz
+  ; MIR64: renamable $x5 = LI8 512
+  ; MIR64: renamable $x6 = LI8 0
+  ; MIR64: $x3 = LI8 128
+  ; MIR64: $x4 = LI8 256
+  ; MIR64: STD killed renamable $x5, 184, $x1 :: (store (s64))
+  ; MIR64: STD killed renamable $x6, 176, $x1 :: (store (s64))
+  ; MIR64: BL8_NOP , csr_ppc64_altivec, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $f1, implicit $f2, implicit killed $v2, implicit killed $v3, implicit killed $v4, implicit killed $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $x2, implicit-def $r1, implicit-def $f1
+  ; MIR64: ADJCALLSTACKUP 224, 0, implicit-def dead $r1, implicit $r1
+  ; MIR64: BLR8 implicit $lr8, implicit $rm, implicit $f1
 entry:
   %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, %struct.Test* nonnull byval(%struct.Test) align 4 @__const.caller.t)
   ret double %call
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
@@ -16,10 +16,10 @@
 ; 32BIT-NEXT: mflr 0
 ; 32BIT-NEXT: stw 0, 8(1)
 ; 32BIT-NEXT: stwu 1, -192(1)
-; 32BIT-NEXT: lwz 3, L..C0(2)
+; 32BIT-NEXT: lwz 3, L..C0(2) # @__const.caller.t
 ; 32BIT-NEXT: li 4, 31
 ; 32BIT-NEXT: xxlxor 0, 0, 0
-; 32BIT-NEXT: lwz 5, L..C1(2)
+; 32BIT-NEXT: lwz 5, L..C1(2) # %const.0
 ; 32BIT-NEXT: li 6, 512
 ; 32BIT-NEXT: xxlxor 1, 1, 1
 ; 32BIT-NEXT: xxlxor 2, 2, 2
@@ -79,13 +79,13 @@
 ; 32BIT-NEXT: lwz 0, 8(1)
 ; 32BIT-NEXT: mtlr 0
 ; 32BIT-NEXT: blr
-
+;
 ; 64BIT-LABEL: caller:
 ; 64BIT: # %bb.0: # %entry
 ; 64BIT-NEXT: mflr 0
 ; 64BIT-NEXT: std 0, 16(1)
 ; 64BIT-NEXT: stdu 1, -224(1)
-; 64BIT-NEXT: ld 3, L..C0(2)
+; 64BIT-NEXT: ld 3, L..C0(2) # @__const.caller.t
 ; 64BIT-NEXT: li 4, 16
 ; 64BIT-NEXT: li 5, 144
 ; 64BIT-NEXT: xxlxor 1, 1, 1
@@ -98,39 +98,38 @@
 ; 64BIT-NEXT: xxlxor 36, 36, 36
 ; 64BIT-NEXT: xxlxor 37, 37, 37
 ; 64BIT-NEXT: stxvd2x 0, 1, 4
-; 64BIT-NEXT: ld 4, L..C1(2)
+; 64BIT-NEXT: li 4, 160
 ; 64BIT-NEXT: xxlxor 38, 38, 38
 ; 64BIT-NEXT: lxvd2x 0, 0, 3
 ; 64BIT-NEXT: li 3, 192
 ; 64BIT-NEXT: xxlxor 39, 39, 39
 ; 64BIT-NEXT: xxlxor 40, 40, 40
-; 64BIT-NEXT: lxvd2x 13, 0, 4
-; 64BIT-NEXT: li 4, 80
 ; 64BIT-NEXT: xxlxor 41, 41, 41
 ; 64BIT-NEXT: stxvd2x 0, 1, 3
-; 64BIT-NEXT: ld 3, L..C2(2)
+; 64BIT-NEXT: ld 3, L..C1(2) # %const.0
+; 64BIT-NEXT: xxlxor 0, 0, 0
 ; 64BIT-NEXT: xxlxor 42, 42, 42
+; 64BIT-NEXT: stxvw4x 0, 1, 4
+; 64BIT-NEXT: li 4, 80
 ; 64BIT-NEXT: xxlxor 43, 43, 43
+; 64BIT-NEXT: lxvd2x 13, 0, 3
+; 64BIT-NEXT: li 3, 128
 ; 64BIT-NEXT: xxlxor 44, 44, 44
-; 64BIT-NEXT: lxvd2x 0, 0, 3
-; 64BIT-NEXT: li 3, 160
+; 64BIT-NEXT: stxvw4x 0, 1, 5
 ; 64BIT-NEXT: xxlxor 45, 45, 45
+; 64BIT-NEXT: stxvw4x 0, 1, 3
+; 64BIT-NEXT: li 5, 512
 ; 64BIT-NEXT: xxlxor 3, 3, 3
 ; 64BIT-NEXT: xxlxor 4, 4, 4
-; 64BIT-NEXT: stxvd2x 0, 1, 3
-; 64BIT-NEXT: li 3, 128
+; 64BIT-NEXT: stxvd2x 13, 1, 4
+; 64BIT-NEXT: li 4, 256
+; 64BIT-NEXT: std 5, 184(1)
 ; 64BIT-NEXT: xxlxor 5, 5, 5
+; 64BIT-NEXT: std 6, 176(1)
 ; 64BIT-NEXT: xxlxor 6, 6, 6
-; 64BIT-NEXT: stxvd2x 0, 1, 5
-; 64BIT-NEXT: li 5, 512
 ; 64BIT-NEXT: xxlxor 7, 7, 7
-; 64BIT-NEXT: stxvd2x 0, 1, 3
 ; 64BIT-NEXT: xxlxor 8, 8, 8
-; 64BIT-NEXT: stxvd2x 13, 1, 4
-; 64BIT-NEXT: li 4, 256
-; 64BIT-NEXT: std 5, 184(1)
 ; 64BIT-NEXT: xxlxor 9, 9, 9
-; 64BIT-NEXT: std 6, 176(1)
 ; 64BIT-NEXT: xxlxor 10, 10, 10
 ; 64BIT-NEXT: xxlxor 11, 11, 11
 ; 64BIT-NEXT: xxlxor 12, 12, 12
@@ -141,6 +140,7 @@
 ; 64BIT-NEXT: ld 0, 16(1)
 ; 64BIT-NEXT: mtlr 0
 ; 64BIT-NEXT: blr
+
 entry:
  %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , <2 x double> , double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, %struct.Test* nonnull byval(%struct.Test) align 4 @__const.caller.t)
  ret double %call
diff --git a/llvm/test/CodeGen/PowerPC/build-vector-allones.ll b/llvm/test/CodeGen/PowerPC/build-vector-allones.ll
--- a/llvm/test/CodeGen/PowerPC/build-vector-allones.ll
+++ b/llvm/test/CodeGen/PowerPC/build-vector-allones.ll
@@ -33,9 +33,7 @@
 define <2 x i64> @One2i64() {
 ; P7BE-LABEL: One2i64:
 ; P7BE: # %bb.0: # %entry
-; P7BE-NEXT: addis r3, r2, .LCPI1_0@toc@ha
-; P7BE-NEXT: addi r3, r3, .LCPI1_0@toc@l
-; P7BE-NEXT: lxvd2x vs34, 0, r3
+; P7BE-NEXT: vspltisb v2, -1
 ; P7BE-NEXT: blr
 ;
 ; P8LE-LABEL: One2i64:
diff --git a/llvm/test/CodeGen/PowerPC/load-and-splat.ll b/llvm/test/CodeGen/PowerPC/load-and-splat.ll
--- a/llvm/test/CodeGen/PowerPC/load-and-splat.ll
+++ b/llvm/test/CodeGen/PowerPC/load-and-splat.ll
@@ -124,11 +124,8 @@
 ;
 ; P7-LABEL: test4:
 ; P7: # %bb.0: # %entry
-; P7-NEXT: ld r4, 24(r4)
-; P7-NEXT: addi r5, r1, -16
-; P7-NEXT: std r4, -8(r1)
-; P7-NEXT: std r4, -16(r1)
-; P7-NEXT: lxvd2x vs0, 0, r5
+; P7-NEXT: addi r4, r4, 24
+; P7-NEXT: lxvdsx vs0, 0, r4
 ; P7-NEXT: stxvd2x vs0, 0, r3
 ; P7-NEXT: blr
 entry:
diff --git a/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll b/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
--- a/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-popcnt-128-ult-ugt.ll
@@ -11969,6 +11969,7 @@
 ; PWR7-LABEL: ugt_1_v2i64:
 ; PWR7: # %bb.0:
 ; PWR7-NEXT: addi 3, 1, -32
+; PWR7-NEXT: xxlxor 35, 35, 35
 ; PWR7-NEXT: stxvd2x 34, 0, 3
 ; PWR7-NEXT: ld 3, -24(1)
 ; PWR7-NEXT: addi 3, 3, -1
@@ -11978,9 +11979,6 @@
 ; PWR7-NEXT: std 3, -16(1)
 ; PWR7-NEXT: addi 3, 1, -16
 ; PWR7-NEXT: lxvw4x 0, 0, 3
-; PWR7-NEXT: addis 3, 2, .LCPI100_0@toc@ha
-; PWR7-NEXT: addi 3, 3, .LCPI100_0@toc@l
-; PWR7-NEXT: lxvw4x 35, 0, 3
 ; PWR7-NEXT: xxland 34, 34, 0
 ; PWR7-NEXT: vcmpequw 2, 2, 3
 ; PWR7-NEXT: xxlnor 34, 34, 34
@@ -12037,6 +12035,7 @@
 ; PWR7-LABEL: ult_2_v2i64:
 ; PWR7: # %bb.0:
 ; PWR7-NEXT: addi 3, 1, -32
+; PWR7-NEXT: xxlxor 35, 35, 35
 ; PWR7-NEXT: stxvd2x 34, 0, 3
 ; PWR7-NEXT: ld 3, -24(1)
 ; PWR7-NEXT: addi 3, 3, -1
@@ -12046,9 +12045,6 @@
 ; PWR7-NEXT: std 3, -16(1)
 ; PWR7-NEXT: addi 3, 1, -16
 ; PWR7-NEXT: lxvw4x 0, 0, 3
-; PWR7-NEXT: addis 3, 2, .LCPI101_0@toc@ha
-; PWR7-NEXT: addi 3, 3, .LCPI101_0@toc@l
-; PWR7-NEXT: lxvw4x 35, 0, 3
 ; PWR7-NEXT: xxland 34, 34, 0
 ; PWR7-NEXT: vcmpequw 2, 2, 3
 ; PWR7-NEXT: blr
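
Illustrative note (not part of the patch): the functional change is that BUILD_VECTOR of v2i64 is now custom-lowered on all VSX subtargets, not only those with direct moves (Power8 and later), so constants such as the all-ones vector in build-vector-allones.ll are materialized in registers instead of being loaded from the TOC/constant pool. A minimal IR sketch that exercises this path, assuming llc is run with -mcpu=pwr7 on a 64-bit PowerPC triple (the function name is hypothetical):

; Sketch only; mirrors the One2i64 test above. Assumes:
;   llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7
; With the unconditional custom lowering, this all-ones v2i64 constant is
; expected to become a single vspltisb (splat of the byte -1) instead of the
; removed addis/addi/lxvd2x constant-pool sequence.
define <2 x i64> @allones_sketch() {
entry:
  ret <2 x i64> <i64 -1, i64 -1>
}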