diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vand.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vand.ll
--- a/llvm/test/CodeGen/VE/VELIntrinsics/vand.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vand.ll
@@ -4,8 +4,8 @@
 ;;;
 ;;; Note:
 ;;; We test VAND*vvl, VAND*vvl_v, VAND*rvl, VAND*rvl_v, VAND*vvml_v,
-;;; VAND*rvml_v, PVAND*vvl, PVAND*vvl_v, PVAND*rvl, PVAND*rvl_v, PVAND*vvml_v, and
-;;; PVAND*rvml_v instructions.
+;;; VAND*rvml_v, PVAND*vvl, PVAND*vvl_v, PVAND*rvl, PVAND*rvl_v, PVAND*vvml_v,
+;;; and PVAND*rvml_v instructions.

 ; Function Attrs: nounwind readnone
 define fastcc <256 x double> @vand_vvvl(<256 x double> %0, <256 x double> %1) {
diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll
--- a/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll
@@ -3,7 +3,7 @@
 ;;; Test vector load intrinsic instructions
 ;;;
 ;;; Note:
-;;; We test VLD*rrl, VLD*irl, VLD*rrl_v, VLD*irl_v
+;;; We test VLD*rrl, VLD*irl, VLD*rrl_v, and VLD*irl_v instructions.

 ; Function Attrs: nounwind
 define void @vld_vssl(i8* %0, i64 %1) {
@@ -17,7 +17,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -38,7 +38,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -91,7 +91,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -112,7 +112,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -131,7 +131,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -149,7 +149,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -165,7 +165,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -186,7 +186,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -223,7 +223,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -239,7 +239,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -260,7 +260,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -279,7 +279,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -313,7 +313,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -334,7 +334,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -353,7 +353,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -371,7 +371,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -387,7 +387,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -408,7 +408,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -461,7 +461,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -482,7 +482,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -501,7 +501,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -519,7 +519,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -535,7 +535,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -556,7 +556,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -575,7 +575,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -593,7 +593,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -630,7 +630,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -649,7 +649,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -683,7 +683,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -704,7 +704,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -723,7 +723,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -741,7 +741,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -778,7 +778,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -797,7 +797,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -815,7 +815,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -831,7 +831,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -852,7 +852,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -871,7 +871,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -889,7 +889,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -926,7 +926,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -945,7 +945,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -963,7 +963,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -1000,7 +1000,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -1037,7 +1037,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -1053,7 +1053,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -1074,7 +1074,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }

@@ -1127,7 +1127,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }

@@ -1148,7 +1148,7 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }

@@ -1167,7 +1167,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }

@@ -1185,6 +1185,6 @@
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
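; For reference, a minimal standalone sketch of the load/store-back pattern the
; updated tests use: load a vector with a VEL load intrinsic, then store it
; through inline asm so the register contents are observable in the generated
; code. The intrinsic signature and asm string/constraints are taken from the
; patch above; the function and value names here are illustrative only, and the
; claim that the `v` modifier in ${0:v} prints the operand as a vector register
; is an assumption based on how these tests use it.
declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)

define void @vld_roundtrip_sketch(i8* %base, i64 %stride) {
  ; Strided load of 256 elements starting at %base (vector length 256).
  %v = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %stride, i8* %base, i32 256)
  ; Store the loaded vector back: operand 0 is the vector ("v" constraint),
  ; and $2/$1 pass the stride and base address in scalar registers, matching
  ; the "vst vector, stride, base" operand order of the emitted instruction.
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %v, i8* %base, i64 %stride)
  ret void
}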