diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3162,24 +3162,26 @@
     def "_" # MInfo.MX : VPseudoTernaryNoMask;
     def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy;
   }
 }
 
-multiclass VPseudoTernaryWithTailPolicy {
+multiclass VPseudoTernaryWithTailPolicy_E {
   let VLMul = MInfo.value in {
-    let isCommutable = Commutable in
-    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy;
-    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryTailPolicy;
+    defvar mx = MInfo.MX;
+    defvar sews = SchedSEWSet.val;
+    foreach e = sews in {
+      let isCommutable = Commutable in
+      def "_" # mx # "_E" # e : VPseudoTernaryNoMaskWithPolicy;
+      def "_" # mx # "_E" # e # "_MASK" : VPseudoBinaryTailPolicy;
+    }
   }
 }
 
 multiclass VPseudoTernaryWithPolicy
@@ -3435,7 +3437,7 @@
   foreach m = MxList in {
     defvar mx = m.MX;
     defvar WriteVIRedV_From_MX = !cast<SchedWrite>("WriteVIRedV_From_" # mx);
-    defm _VS : VPseudoTernaryWithTailPolicy,
+    defm _VS : VPseudoTernaryWithTailPolicy_E,
                Sched<[WriteVIRedV_From_MX, ReadVIRedV, ReadVIRedV,
                       ReadVIRedV, ReadVMask]>;
   }
@@ -3445,7 +3447,7 @@
   foreach m = MxList in {
     defvar mx = m.MX;
     defvar WriteVIWRedV_From_MX = !cast<SchedWrite>("WriteVIWRedV_From_" # mx);
-    defm _VS : VPseudoTernaryWithTailPolicy,
+    defm _VS : VPseudoTernaryWithTailPolicy_E,
                Sched<[WriteVIWRedV_From_MX, ReadVIWRedV, ReadVIWRedV,
                       ReadVIWRedV, ReadVMask]>;
   }
@@ -3455,7 +3457,7 @@
   foreach m = MxListF in {
     defvar mx = m.MX;
     defvar WriteVFRedV_From_MX = !cast<SchedWrite>("WriteVFRedV_From_" # mx);
-    defm _VS : VPseudoTernaryWithTailPolicy,
+    defm _VS : VPseudoTernaryWithTailPolicy_E,
                Sched<[WriteVFRedV_From_MX, ReadVFRedV, ReadVFRedV,
                       ReadVFRedV, ReadVMask]>;
   }
@@ -3465,7 +3467,7 @@
   foreach m = MxListF in {
     defvar mx = m.MX;
     defvar WriteVFRedOV_From_MX = !cast<SchedWrite>("WriteVFRedOV_From_" # mx);
-    defm _VS : VPseudoTernaryWithTailPolicy,
+    defm _VS : VPseudoTernaryWithTailPolicy_E,
                Sched<[WriteVFRedOV_From_MX, ReadVFRedOV, ReadVFRedOV,
                       ReadVFRedOV, ReadVMask]>;
   }
@@ -3475,7 +3477,7 @@
   foreach m = MxListF in {
     defvar mx = m.MX;
     defvar WriteVFWRedV_From_MX = !cast<SchedWrite>("WriteVFWRedV_From_" # mx);
-    defm _VS : VPseudoTernaryWithTailPolicy,
+    defm _VS : VPseudoTernaryWithTailPolicy_E,
                Sched<[WriteVFWRedV_From_MX, ReadVFWRedV, ReadVFWRedV,
                       ReadVFWRedV, ReadVMask]>;
   }
@@ -4271,27 +4273,28 @@
                       op2_kind:$rs2,
                       GPR:$vl, sew)>;
 
-class VPatTernaryNoMaskTA :
+class VPatTernaryNoMaskTA_E :
   Pat<(result_type (!cast<Intrinsic>(intrinsic)
                     (result_type result_reg_class:$rs3),
                     (op1_type op1_reg_class:$rs1),
                     (op2_type op2_kind:$rs2),
                     VLOpFrag)),
-      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew)
       result_reg_class:$rs3,
       (op1_type op1_reg_class:$rs1),
       op2_kind:$rs2,
-      GPR:$vl, sew, TAIL_AGNOSTIC)>;
+      GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
 
 class VPatTernaryNoMaskWithPolicy
 
-class VPatTernaryMaskTA :
+class VPatTernaryMaskTA_E :
   Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                     (result_type result_reg_class:$rs3),
                     (op1_type op1_reg_class:$rs1),
                     (op2_type op2_kind:$rs2),
                     (mask_type V0),
                     VLOpFrag)),
-      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#sew# "_MASK")
      result_reg_class:$rs3,
      (op1_type op1_reg_class:$rs1),
      op2_kind:$rs2,
      (mask_type V0),
-      GPR:$vl, sew, TAIL_AGNOSTIC)>;
+      GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
 
 multiclass VPatUnaryS_M
@@ -5133,24 +5137,25 @@
                      op2_kind>;
 }
 
-multiclass VPatTernaryTA {
-  def : VPatTernaryNoMaskTA;
-  def : VPatTernaryMaskTA;
+multiclass VPatTernaryTA_E {
+  def : VPatTernaryNoMaskTA_E;
+  def : VPatTernaryMaskTA_E;
 }
 
 multiclass VPatTernaryV_VV_AAXA
   foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
     defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
-    defm : VPatTernaryTA;
+    defm : VPatTernaryTA_E;
   }
   foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
-    defm : VPatTernaryTA;
+    defm : VPatTernaryTA_E;
   }
 }
@@ -5279,12 +5284,12 @@
     defvar wtiSEW = !mul(vti.SEW, 2);
     if !le(wtiSEW, 64) then {
       defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
-      defm : VPatTernaryTA;
+      defm : VPatTernaryTA_E;
     }
   }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1108,7 +1108,7 @@
            (vti.Vector vti.RegClass:$rs1), VR:$rs2,
            (vti.Mask true_mask), VLOpFrag,
            (XLenVT timm:$policy))),
-        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
+        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW)
            (vti_m1.Vector VR:$merge),
            (vti.Vector vti.RegClass:$rs1),
            (vti_m1.Vector VR:$rs2),
@@ -1118,7 +1118,7 @@
            (vti.Vector vti.RegClass:$rs1), VR:$rs2,
            (vti.Mask V0), VLOpFrag,
            (XLenVT timm:$policy))),
-        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
+        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
            (vti_m1.Vector VR:$merge),
            (vti.Vector vti.RegClass:$rs1),
            (vti_m1.Vector VR:$rs2),
@@ -1178,7 +1178,7 @@
            (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
            VR:$rs2, (vti.Mask true_mask), VLOpFrag,
            (XLenVT timm:$policy))),
-        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
+        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW)
            (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
            (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
            (XLenVT timm:$policy))>;
@@ -1186,7 +1186,7 @@
            (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
            VR:$rs2, (vti.Mask V0), VLOpFrag,
            (XLenVT timm:$policy))),
-        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
+        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
            (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
            (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
            (XLenVT timm:$policy))>;
@@ -1202,7 +1202,7 @@
            (wti.Vector (extop (vti.Vector vti.RegClass:$rs1),
                               (vti.Mask true_mask), VLOpFrag)),
            VR:$rs2, (vti.Mask true_mask), VLOpFrag, (XLenVT timm:$policy))),
-        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
+        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW)
            (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
            (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW,
            (XLenVT timm:$policy))>;
@@ -1210,7 +1210,7 @@
            (wti.Vector (extop (vti.Vector vti.RegClass:$rs1),
                               (vti.Mask true_mask), VLOpFrag)),
            VR:$rs2, (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
-        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
+        (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
            (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
            (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
            (XLenVT timm:$policy))>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -214,12 +214,12 @@
     ; CHECK: liveins: $x10, $v8, $v26, $v27
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v26 = VMV1R_V killed $v8
    ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75 /* e16, m8, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: $v8m8 = VL8RE8_V killed $x10
    $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
-    $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, 1, implicit $vl, implicit $vtype
+    $v8 = PseudoVWREDSUM_VS_M1_E8 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, 1, implicit $vl, implicit $vtype
    $v26 = COPY killed renamable $v8
    $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype
    $v8m8 = VL8RE8_V killed $x10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -507,9 +507,9 @@
   ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
   ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
-  ; CHECK-NEXT: dead %12:gpr = PseudoVSETVLIX0 $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 -1, 6 /* e64 */, implicit $vl, implicit $vtype
-  ; CHECK-NEXT: dead %13:gpr = PseudoVSETVLIX0 $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT: dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -593,7 +593,7 @@
   ; CHECK-NEXT: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
   ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
   ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-  ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6 /* e64 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT: {{  $}}
@@ -661,7 +661,7 @@
   ; CHECK-NEXT: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
   ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
   ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-  ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6 /* e64 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT: {{  $}}
@@ -781,9 +781,9 @@
   ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
   ; CHECK-NEXT: [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
-  ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
+  ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
   ; CHECK-NEXT: PseudoRET
   bb.0.entry:
     liveins: $x10, $x12
@@ -815,7 +815,7 @@
     %21:vr = IMPLICIT_DEF
     %20:vr = PseudoVMV_S_X_M1 %21, %19, 1, 5
     %24:vr = IMPLICIT_DEF
-    %23:vr = PseudoVREDSUM_VS_M1 %24, %16, killed %20, 4, 5, 1
+    %23:vr = PseudoVREDSUM_VS_M1_E8 %24, %16, killed %20, 4, 5, 1
     PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
     PseudoRET
@@ -837,7 +837,7 @@
   ; CHECK-NEXT: %t3:vr = COPY $v2
   ; CHECK-NEXT: %t4:vr = COPY $v3
   ; CHECK-NEXT: %t5:vrnov0 = COPY $v1
-  ; CHECK-NEXT: dead %14:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: PseudoBR %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -919,7 +919,7 @@
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x15
   ; CHECK-NEXT: %vlenb:gpr = PseudoReadVLENB
   ; CHECK-NEXT: %inc:gpr = SRLI killed %vlenb, 3
-  ; CHECK-NEXT: dead %21:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6 /* e64 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x0
   ; CHECK-NEXT: PseudoBR %bb.1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -307,19 +307,19 @@
   ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
-  ; CHECK-NEXT: dead %6:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
   ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
   ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
-  ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
-  ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6 /* e64 */, implicit $vtype
+  ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype
   ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
   ; CHECK-NEXT: PseudoRET implicit $x10
   %0:gpr = COPY $x10
   %1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
   %2:vr = PseudoVMV_V_I_M1 0, -1, 6
   %4:vr = IMPLICIT_DEF
-  %3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6, 1
+  %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 6, 1
   %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
   $x10 = COPY %5
   PseudoRET implicit $x10
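Note on the naming scheme above: each reduction pseudo is now instantiated once per element width in the scheduling SEW set for its LMUL (the foreach e = sews loop), with "_E" # e appended to the pseudo name, so every SEW variant is a distinct opcode. A minimal TableGen sketch of the expansion for an LMUL=1 integer reduction follows; the literal SEW list is an assumption standing in for SchedSEWSet, and the real definitions also carry the register classes, policy operands, and Sched resources shown in the patch:

    // Hypothetical, simplified expansion: produces PseudoVREDSUM_VS_M1_E8,
    // PseudoVREDSUM_VS_M1_E16, PseudoVREDSUM_VS_M1_E32, PseudoVREDSUM_VS_M1_E64.
    foreach e = [8, 16, 32, 64] in
      def "PseudoVREDSUM_VS_M1_E" # e;

The intrinsic patterns select the per-SEW variant by name concatenation (inst#"_"#kind#"_"#vlmul.MX#"_E"#sew) while still passing the log2-encoded SEW (log2sew) as the immediate operand, which is why the MIR tests rename PseudoVREDSUM_VS_M1 to PseudoVREDSUM_VS_M1_E8 but leave the SEW immediates unchanged.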