diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3329,6 +3329,19 @@
     if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg))) &&
         II->getType() == ArgArg->getType())
       return replaceInstUsesWith(*II, ArgArg);
+    Constant *XorMask;
+    if (match(Arg,
+              m_Xor(m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg)),
+                    m_Constant(XorMask))) &&
+        II->getType() == ArgArg->getType()) {
+      if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
+        if (CI->getValue().trunc(16).isAllOnesValue()) {
+          auto TrueVector = Builder.CreateVectorSplat(
+              II->getType()->getVectorNumElements(), Builder.getTrue());
+          return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
+        }
+      }
+    }
     KnownBits ScalarKnown(32);
     if (SimplifyDemandedBits(II, 0, APInt::getLowBitsSet(32, 16),
                              ScalarKnown, 0))
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll b/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
@@ -1,5 +1,7 @@
 ; RUN: opt -instcombine %s | llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs -o - | FileCheck %s
 
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
 define arm_aapcs_vfpcc <8 x i16> @test_vpt_block(<8 x i16> %v_inactive, <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
 ; CHECK-LABEL: test_vpt_block:
 ; CHECK:       @ %bb.0: @ %entry
@@ -16,7 +18,27 @@
   ret <8 x i16> %5
 }
 
+define arm_aapcs_vfpcc <8 x i16> @test_vpnot(<8 x i16> %v, <8 x i16> %w, <8 x i16> %x, i32 %n) {
+; CHECK-LABEL: test_vpnot:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vctp.16 r0
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vaddt.i16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i1> @llvm.arm.vctp16(i32 %n)
+  %1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0)
+  %2 = trunc i32 %1 to i16
+  %3 = xor i16 %2, -1
+  %4 = zext i16 %3 to i32
+  %5 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %4)
+  %6 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %w, <8 x i16> %x, <8 x i1> %5, <8 x i16> %v)
+  ret <8 x i16> %6
+}
+
 declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
 declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
 declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)
+declare <8 x i1> @llvm.arm.vctp16(i32)
diff --git a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
--- a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
+++ b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -instcombine -S -o - %s | FileCheck %s
 
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
 declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>)
 declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
 declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>)
@@ -234,3 +236,95 @@
   %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
   ret <4 x i1> %vout
 }
+
+; If a predicate vector is round-tripped to an integer and back, and
+; complemented while it's in integer form, we should collapse that to
+; a complement of the vector itself. (Rationale: this is likely to
+; allow it to be code-generated as MVE VPNOT.)
+
+define <4 x i1> @vpnot_4(<4 x i1> %vin) {
+; CHECK-LABEL: @vpnot_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %flipped = xor i32 %int, 65535
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
+  ret <4 x i1> %vout
+}
+
+define <8 x i1> @vpnot_8(<8 x i1> %vin) {
+; CHECK-LABEL: @vpnot_8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+  %flipped = xor i32 %int, 65535
+  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
+  ret <8 x i1> %vout
+}
+
+define <16 x i1> @vpnot_16(<16 x i1> %vin) {
+; CHECK-LABEL: @vpnot_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+  %flipped = xor i32 %int, 65535
+  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
+  ret <16 x i1> %vout
+}
+
+; And this still works even if the i32 is narrowed to i16 and back on
+; opposite sides of the xor.
+
+define <4 x i1> @vpnot_narrow_4(<4 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %narrow = trunc i32 %int to i16
+  %flipped_narrow = xor i16 %narrow, -1
+  %flipped = zext i16 %flipped_narrow to i32
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
+  ret <4 x i1> %vout
+}
+
+define <8 x i1> @vpnot_narrow_8(<8 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+  %narrow = trunc i32 %int to i16
+  %flipped_narrow = xor i16 %narrow, -1
+  %flipped = zext i16 %flipped_narrow to i32
+  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
+  ret <8 x i1> %vout
+}
+
+define <16 x i1> @vpnot_narrow_16(<16 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+  %narrow = trunc i32 %int to i16
+  %flipped_narrow = xor i16 %narrow, -1
+  %flipped = zext i16 %flipped_narrow to i32
+  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
+  ret <16 x i1> %vout
+}
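
For reference, a minimal before/after sketch of the fold this patch adds, written for the <8 x i1> case; the value names here are illustrative, not taken from the patch:

    ; before: the predicate is moved to an i32, complemented there, and moved back
    %int     = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
    %flipped = xor i32 %int, 65535
    %vout    = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)

    ; after: the v2i/i2v round trip collapses to a complement of the vector
    ; itself, which the Thumb2 backend can then select as a single VPNOT
    %vout = xor <8 x i1> %vin, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>

The mask check is deliberately loose: any ConstantInt whose low 16 bits are all ones qualifies (65535, -1, and so on), since only the low 16 bits of the lane mask are meaningful, and SimplifyDemandedBits already reasons about the upper bits.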