Index: llvm/test/CodeGen/AArch64/sve-multiply-add-accumulate.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-multiply-add-accumulate.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 2 x i64> @muladd_i64_positiveAddend(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+; CHECK-LABEL: muladd_i64_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov z2.d, #0xffffffff
+; CHECK-NEXT:    mla z2.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 2 x i64> %a, %b
+  %2 = add <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 2 x i64> @muladd_i64_negativeAddend(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+; CHECK-LABEL: muladd_i64_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov z2.d, #0xffffffff00000001
+; CHECK-NEXT:    mla z2.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 2 x i64> %a, %b
+  %2 = add <vscale x 2 x i64> %1, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 -4294967295, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  ret <vscale x 2 x i64> %2
+}
+
+define <vscale x 4 x i32> @muladd_i32_positiveAddend(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+; CHECK-LABEL: muladd_i32_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, #0x10000
+; CHECK-NEXT:    mla z2.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 4 x i32> %a, %b
+  %2 = add <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @muladd_i32_negativeAddend(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+; CHECK-LABEL: muladd_i32_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, #0xffff0000
+; CHECK-NEXT:    mla z2.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 4 x i32> %a, %b
+  %2 = add <vscale x 4 x i32> %1, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -65536, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 8 x i16> @muladd_i16_positiveAddend(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_i16_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    add z0.h, z0.h, #255 // =0xff
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  ret <vscale x 8 x i16> %2
+}
+
+define <vscale x 8 x i16> @muladd_i16_negativeAddend(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_i16_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z2.h, #-255 // =0xffffffffffffff01
+; CHECK-NEXT:    mla z2.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -255, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  ret <vscale x 8 x i16> %2
+}
+
+define <vscale x 16 x i8> @muladd_i8_positiveAddend(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+; CHECK-LABEL: muladd_i8_positiveAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    mul z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    add z0.b, z0.b, #15 // =0xf
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 16 x i8> %a, %b
+  %2 = add <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
+  ret <vscale x 16 x i8> %2
+}
+
+define <vscale x 16 x i8> @muladd_i8_negativeAddend(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+; CHECK-LABEL: muladd_i8_negativeAddend:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    mul z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    add z0.b, z0.b, #241 // =0xf1
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 16 x i8> %a, %b
+  %2 = add <vscale x 16 x i8> %1, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -15, i8 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
+  ret <vscale x 16 x i8> %2
+}
+
+; both mul operands have a use
+define <vscale x 8 x i16> @muladd_generic_test1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    add z2.h, z2.h, #200 // =0xc8
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    sub z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %3 = mul <vscale x 8 x i16> %2, %a
+  %4 = sub <vscale x 8 x i16> %3, %b
+  ret <vscale x 8 x i16> %4
+}
+
+; only the first mul operand has a use
+define <vscale x 8 x i16> @muladd_generic_test2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z0.h
+; CHECK-NEXT:    add z1.h, z1.h, #200 // =0xc8
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %3 = mul <vscale x 8 x i16> %2, %a
+  ret <vscale x 8 x i16> %3
+}
+
+; only the second mul operand has a use
+define <vscale x 8 x i16> @muladd_generic_test3(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    add z0.h, z0.h, #200 // =0xc8
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  %3 = mul <vscale x 8 x i16> %2, %b
+  ret <vscale x 8 x i16> %3
+}
+
+; negative integer splat as the addend
+define <vscale x 8 x i16> @muladd_generic_test4(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+; CHECK-LABEL: muladd_generic_test4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #-200
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    mla z2.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    mov z0.d, z2.d
+; CHECK-NEXT:    ret
+{
+  %1 = mul <vscale x 8 x i16> %a, %b
+  %2 = add <vscale x 8 x i16> %1, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -200, i16 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  ret <vscale x 8 x i16> %2
+}
+
+define void @fused_mul_add_in_loop(ptr noalias %a, ptr noalias %b, ptr noalias %c, i32 %n)
+; CHECK-LABEL: fused_mul_add_in_loop:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w3, #1
+; CHECK-NEXT:    b.lt .LBB12_3
+; CHECK-NEXT:  // %bb.1: // %for.body.preheader
+; CHECK-NEXT:    mov w9, w3
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    cntw x10
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    whilelo p1.s, xzr, x9
+; CHECK-NEXT:  .LBB12_2: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z1.s }, p1/z, [x1, x8, lsl #2]
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    add z0.s, z0.s, #1 // =0x1
+; CHECK-NEXT:    st1w { z0.s }, p1, [x2, x8, lsl #2]
+; CHECK-NEXT:    add x8, x8, x10
+; CHECK-NEXT:    whilelo p1.s, x8, x9
+; CHECK-NEXT:    b.mi .LBB12_2
+; CHECK-NEXT:  .LBB12_3: // %for.cond.cleanup
+; CHECK-NEXT:    ret
+{
+entry:
+  %cmp9 = icmp sgt i32 %n, 0
+  br i1 %cmp9, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %n to i64
+  %active.lane.mask.entry = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 %wide.trip.count)
+  %0 = tail call i64 @llvm.vscale.i64()
+  %1 = shl nuw nsw i64 %0, 2
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %for.body.preheader
+  %index = phi i64 [ 0, %for.body.preheader ], [ %index.next, %vector.body ]
+  %active.lane.mask = phi <vscale x 4 x i1> [ %active.lane.mask.entry, %for.body.preheader ], [ %active.lane.mask.next, %vector.body ]
+  %2 = getelementptr inbounds i32, ptr %a, i64 %index
+  %wide.masked.load = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %2, i32 4, <vscale x 4 x i1> %active.lane.mask, <vscale x 4 x i32> poison)
+  %3 = getelementptr inbounds i32, ptr %b, i64 %index
+  %wide.masked.load12 = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %3, i32 4, <vscale x 4 x i1> %active.lane.mask, <vscale x 4 x i32> poison)
+  %4 = mul nsw <vscale x 4 x i32> %wide.masked.load12, %wide.masked.load
+  %5 = add nsw <vscale x 4 x i32> %4, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %6 = getelementptr inbounds i32, ptr %c, i64 %index
+  tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %5, ptr %6, i32 4, <vscale x 4 x i1> %active.lane.mask)
+  %index.next = add i64 %index, %1
+  %active.lane.mask.next = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 %index.next, i64 %wide.trip.count)
+  %7 = extractelement <vscale x 4 x i1> %active.lane.mask.next, i64 0
+  br i1 %7, label %vector.body, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+declare i64 @llvm.vscale.i64()
+declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr nocapture, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32>, ptr nocapture, i32 immarg, <vscale x 4 x i1>)