diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -45,8 +45,7 @@
   dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
 }
 
-multiclass VPatUSLoadStoreSDNode;
 }
+multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
+{
+  defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#m.BX);
+  defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#m.BX);
+  // Load
+  def : Pat<(m.Mask (load RVVBaseAddr:$rs1)),
+            (load_instr RVVBaseAddr:$rs1, m.AVL, m.SEW)>;
+  // Store
+  def : Pat<(store m.Mask:$rs2, RVVBaseAddr:$rs1),
+            (store_instr VR:$rs2, RVVBaseAddr:$rs1, m.AVL, m.SEW)>;
+}
 
 class VPatBinarySDNode_VV<SDNode vop,
@@ ... @@
+foreach mti = AllMasks in
+  defm "" : VPatUSLoadStoreMaskSDNode<mti>;
 
 // 12.1. Vector Single-Width Integer Add and Subtract
 defm "" : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
+; RUN:   -verify-machineinstrs | FileCheck %s
+
+define void @test_load_mask_64(<vscale x 64 x i1>* %pa, <vscale x 64 x i1>* %pb) {
+; CHECK-LABEL: test_load_mask_64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vse1.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <vscale x 64 x i1>, <vscale x 64 x i1>* %pa
+  store <vscale x 64 x i1> %a, <vscale x 64 x i1>* %pb
+  ret void
+}
+
+define void @test_load_mask_32(<vscale x 32 x i1>* %pa, <vscale x 32 x i1>* %pb) {
+; CHECK-LABEL: test_load_mask_32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vse1.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <vscale x 32 x i1>, <vscale x 32 x i1>* %pa
+  store <vscale x 32 x i1> %a, <vscale x 32 x i1>* %pb
+  ret void
+}
+
+define void @test_load_mask_16(<vscale x 16 x i1>* %pa, <vscale x 16 x i1>* %pb) {
+; CHECK-LABEL: test_load_mask_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vse1.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <vscale x 16 x i1>, <vscale x 16 x i1>* %pa
+  store <vscale x 16 x i1> %a, <vscale x 16 x i1>* %pb
+  ret void
+}
+
+define void @test_load_mask_8(<vscale x 8 x i1>* %pa, <vscale x 8 x i1>* %pb) {
+; CHECK-LABEL: test_load_mask_8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vse1.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <vscale x 8 x i1>, <vscale x 8 x i1>* %pa
+  store <vscale x 8 x i1> %a, <vscale x 8 x i1>* %pb
+  ret void
+}
+
+define void @test_load_mask_4(<vscale x 4 x i1>* %pa, <vscale x 4 x i1>* %pb) {
+; CHECK-LABEL: test_load_mask_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vse1.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <vscale x 4 x i1>, <vscale x 4 x i1>* %pa
+  store <vscale x 4 x i1> %a, <vscale x 4 x i1>* %pb
+  ret void
+}
+
+define void @test_load_mask_2(<vscale x 2 x i1>* %pa, <vscale x 2 x i1>* %pb) {
+; CHECK-LABEL: test_load_mask_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vse1.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <vscale x 2 x i1>, <vscale x 2 x i1>* %pa
+  store <vscale x 2 x i1> %a, <vscale x 2 x i1>* %pb
+  ret void
+}
+
+define void @test_load_mask_1(<vscale x 1 x i1>* %pa, <vscale x 1 x i1>* %pb) {
+; CHECK-LABEL: test_load_mask_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vse1.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <vscale x 1 x i1>, <vscale x 1 x i1>* %pa
+  store <vscale x 1 x i1> %a, <vscale x 1 x i1>* %pb
+  ret void
+}