diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -187,6 +187,8 @@
                              unsigned AS,
                              Instruction *I = nullptr) const override;
 
+  bool hasAndNotCompare(SDValue Y) const override;
+
 private:
   /// Target-specific function used to lower LoongArch calling conventions.
   typedef bool LoongArchCCAssignFn(const DataLayout &DL, LoongArchABI::ABI ABI,
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -3187,3 +3187,11 @@
 
   return true;
 }
+
+bool LoongArchTargetLowering::hasAndNotCompare(SDValue Y) const {
+  // TODO: Support vectors.
+  if (Y.getValueType().isVector())
+    return false;
+
+  return !isa<ConstantSDNode>(Y);
+}
diff --git a/llvm/test/CodeGen/LoongArch/andn-icmp.ll b/llvm/test/CodeGen/LoongArch/andn-icmp.ll
--- a/llvm/test/CodeGen/LoongArch/andn-icmp.ll
+++ b/llvm/test/CodeGen/LoongArch/andn-icmp.ll
@@ -2,22 +2,16 @@
 ; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
 ; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
 
-;; TODO: Enables the following transforms:
-;; (X & Y) == Y ---> (~X & Y) == 0
-;; (X & Y) != Y ---> (~X & Y) != 0.
-
 define i1 @andn_icmp_eq_i8(i8 signext %a, i8 signext %b) nounwind {
 ; LA32-LABEL: andn_icmp_eq_i8:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    andn $a0, $a1, $a0
 ; LA32-NEXT:    sltui $a0, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_icmp_eq_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
   %and = and i8 %a, %b
@@ -28,15 +22,13 @@
 define i1 @andn_icmp_eq_i16(i16 signext %a, i16 signext %b) nounwind {
 ; LA32-LABEL: andn_icmp_eq_i16:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    andn $a0, $a1, $a0
 ; LA32-NEXT:    sltui $a0, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_icmp_eq_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
   %and = and i16 %a, %b
@@ -47,15 +39,13 @@
 define i1 @andn_icmp_eq_i32(i32 signext %a, i32 signext %b) nounwind {
 ; LA32-LABEL: andn_icmp_eq_i32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    andn $a0, $a1, $a0
 ; LA32-NEXT:    sltui $a0, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_icmp_eq_i32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
   %and = and i32 %a, %b
@@ -74,8 +64,7 @@
 ;
 ; LA64-LABEL: andn_icmp_eq_i64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
   %and = and i64 %a, %b
@@ -86,15 +75,13 @@
 define i1 @andn_icmp_ne_i8(i8 signext %a, i8 signext %b) nounwind {
 ; LA32-LABEL: andn_icmp_ne_i8:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    andn $a0, $a1, $a0
 ; LA32-NEXT:    sltu $a0, $zero, $a0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_icmp_ne_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltu $a0, $zero, $a0
 ; LA64-NEXT:    ret
   %and = and i8 %a, %b
@@ -105,15 +92,13 @@
 define i1 @andn_icmp_ne_i16(i16 signext %a, i16 signext %b) nounwind {
 ; LA32-LABEL: andn_icmp_ne_i16:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    andn $a0, $a1, $a0
 ; LA32-NEXT:    sltu $a0, $zero, $a0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_icmp_ne_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltu $a0, $zero, $a0
 ; LA64-NEXT:    ret
   %and = and i16 %a, %b
@@ -124,15 +109,13 @@
 define i1 @andn_icmp_ne_i32(i32 signext %a, i32 signext %b) nounwind {
 ; LA32-LABEL: andn_icmp_ne_i32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    and $a0, $a0, $a1
-; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    andn $a0, $a1, $a0
 ; LA32-NEXT:    sltu $a0, $zero, $a0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: andn_icmp_ne_i32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltu $a0, $zero, $a0
 ; LA64-NEXT:    ret
   %and = and i32 %a, %b
@@ -151,8 +134,7 @@
 ;
 ; LA64-LABEL: andn_icmp_ne_i64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    and $a0, $a0, $a1
-; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    andn $a0, $a1, $a0
 ; LA64-NEXT:    sltu $a0, $zero, $a0
 ; LA64-NEXT:    ret
   %and = and i64 %a, %b