diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -172,36 +172,40 @@ string Prototype = prototype; // This builtin has a masked form. - bit HasMask = true; + bit HasMasked = true; - // If HasMask, this flag states that this builtin has a maskedoff operand. It + // If HasMasked, this flag states that this builtin has a maskedoff operand. It // is always the first operand in builtin and IR intrinsic. bit HasMaskedOffOperand = true; // This builtin has a granted vector length parameter. bit HasVL = true; - // The masked intrinsic IR have the policy operand. - // The policy argument is located at the last position. - bit HasPolicy = true; + // The policy scheme for masked intrinsic IR. + // It could be NonePolicy or HasPolicyOperand. + // HasPolicyOperand: Has a policy operand. 0 is tail and mask undisturbed, 1 is + // tail agnostic, 2 is mask undisturbed, and 3 is tail and mask agnostic. The + // policy operand is located at the last position. Policy MaskedPolicy = HasPolicyOperand; - // The policy scheme for nomask intrinsic IR. + // The policy scheme for unmasked intrinsic IR. + // It could be NonePolicy, HasPassthruOperand or HasPolicyOperand. // HasPassthruOperand: Has a passthru operand to decide tail policy. If it is // undef, tail policy is tail agnostic, otherwise policy is tail undisturbed. // HasPolicyOperand: Has a policy operand. 1 is tail agnostic and 0 is tail // undisturbed. - Policy NoMaskPolicy = NonePolicy; + Policy UnMaskedPolicy = NonePolicy; // This builtin supports non-masked function overloading api. // All masked operations support overloading api. - bit HasNoMaskedOverloaded = true; + bit HasUnMaskedOverloaded = true; // This builtin is valid for the given Log2LMULs. list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3]; // Manual code in clang codegen riscv_vector_builtin_cg.inc code ManualCodegen = [{}]; - code ManualCodegenMask = [{}]; + code MaskedManualCodegen = [{}]; // When emit the automatic clang codegen, it describes what types we have to use // to obtain the specific LLVM intrinsic. -1 means the return type, otherwise, @@ -213,12 +217,11 @@ // we want to lower to. string IRName = NAME; - // If HasMask, this is the ID of the LLVM intrinsic we want to lower to. - string IRNameMask = NAME #"_mask"; + // If HasMasked, this is the ID of the LLVM intrinsic we want to lower to. + string MaskedIRName = NAME #"_mask"; - // If non empty, this is the code emitted in the header, otherwise - // an automatic definition in header is emitted. - string HeaderCode = ""; + // Use clang_builtin_alias to save the number of builtins. + bit HasBuiltinAlias = true; // Features required to enable for this builtin. list<string> RequiredFeatures = []; @@ -227,8 +230,8 @@ int NF = 1; } -class RVVHeader -{ +// This is the code emitted in the header.
+class RVVHeader { code HeaderCode; } @@ -259,7 +262,7 @@ multiclass RVVBuiltinSet<string intrinsic_name, string type_range, list<list<string>> suffixes_prototypes, list<int> intrinsic_types> { - let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask", + let IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask", IntrinsicTypes = intrinsic_types in { foreach s_p = suffixes_prototypes in { let Name = NAME # "_" # s_p[0] in { @@ -374,7 +377,7 @@ string type_range> : RVVBuiltin<suffix, prototype, type_range> { let IRName = intrinsic_name; - let IRNameMask = intrinsic_name # "_mask"; + let MaskedIRName = intrinsic_name # "_mask"; let MangledName = NAME; let IntrinsicTypes = [-1, 0]; } @@ -422,7 +425,7 @@ class RVVMaskBinBuiltin : RVVOutBuiltin<"m", "mmm", "c"> { let Name = NAME # "_mm"; - let HasMask = false; + let HasMasked = false; } class RVVMaskUnaryBuiltin : RVVOutBuiltin<"m", "mm", "c"> { @@ -431,8 +434,8 @@ class RVVMaskNullaryBuiltin : RVVOutBuiltin<"m", "m", "c"> { let Name = NAME # "_m"; - let HasMask = false; - let HasNoMaskedOverloaded = false; + let HasMasked = false; + let HasUnMaskedOverloaded = false; } class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> { @@ -509,7 +512,7 @@ multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range, list<list<string>> suffixes_prototypes> { let Log2LMUL = [-3, -2, -1, 0, 1, 2], - IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in { + IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask" in { foreach s_p = suffixes_prototypes in { let Name = NAME # "_" # s_p[0], MangledName = NAME # "_" # s_p[0] in { @@ -525,7 +528,7 @@ multiclass RVVWidenWOp0BuiltinSet<string intrinsic_name, string type_range, list<list<string>> suffixes_prototypes> { let Log2LMUL = [-3, -2, -1, 0, 1, 2], - IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in { + IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask" in { foreach s_p = suffixes_prototypes in { let Name = NAME # "_" # s_p[0], MangledName = NAME # "_" # s_p[0] in { @@ -577,13 +580,13 @@ bit val = !or(!eq(type, "x"), !eq(type, "f"), !eq(type, "d")); } -let HasNoMaskedOverloaded = false, - HasPolicy = false, +let HasUnMaskedOverloaded = false, + MaskedPolicy = NonePolicy, ManualCodegen = [{ IntrinsicTypes = {ResultType, Ops[1]->getType()}; Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo()); }], - ManualCodegenMask= [{ + MaskedManualCodegen= [{ // Move mask to right before vl. std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); IntrinsicTypes = {ResultType, Ops[3]->getType()}; @@ -592,17 +595,17 @@ class RVVVLEMaskBuiltin : RVVBuiltin<"m", "mPCUe", "c"> { let Name = "vlm_v"; let IRName = "vlm"; - let HasMask = false; + let HasMasked = false; } } -let HasNoMaskedOverloaded = false, +let HasUnMaskedOverloaded = false, ManualCodegen = [{ IntrinsicTypes = {ResultType, Ops[1]->getType()}; Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo()); Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); }], - ManualCodegenMask= [{ + MaskedManualCodegen= [{ // Move mask to right before vl.
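// (Editorial worked example, not part of the patch: for the masked unit-stride
// load, the builtin operands arrive as (mask, maskedoff, ptr, vl); the rotate
// below produces (maskedoff, ptr, mask, vl), matching the IR intrinsic order,
// and the TAIL_UNDISTURBED policy constant is then appended.)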
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -612,7 +615,7 @@ multiclass RVVVLEBuiltin<list<string> types> { let Name = NAME # "_v", IRName = "vle", - IRNameMask ="vle_mask" in { + MaskedIRName ="vle_mask" in { foreach type = types in { def : RVVBuiltin<"v", "vPCe", type>; if !not(IsFloat<type>.val) then { @@ -626,8 +629,8 @@ multiclass RVVVLEFFBuiltin<list<string> types> { let Name = NAME # "_v", IRName = "vleff", - IRNameMask = "vleff_mask", - HasNoMaskedOverloaded = false, + MaskedIRName = "vleff_mask", + HasUnMaskedOverloaded = false, ManualCodegen = [{ { IntrinsicTypes = {ResultType, Ops[2]->getType()}; @@ -646,7 +649,7 @@ return V; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // Move mask to right before vl. std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); @@ -679,14 +682,14 @@ multiclass RVVVLSEBuiltin<list<string> types> { let Name = NAME # "_v", IRName = "vlse", - IRNameMask ="vlse_mask", - HasNoMaskedOverloaded = false, + MaskedIRName ="vlse_mask", + HasUnMaskedOverloaded = false, ManualCodegen = [{ IntrinsicTypes = {ResultType, Ops[2]->getType()}; Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo()); Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); }], - ManualCodegenMask= [{ + MaskedManualCodegen= [{ // Move mask to right before vl. std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -708,7 +711,7 @@ Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo()); Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ // Move mask to right before vl. std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -719,7 +722,7 @@ foreach eew_list = EEWList[0-2] in { defvar eew = eew_list[0]; defvar eew_type = eew_list[1]; - let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in { + let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask" in { def: RVVBuiltin<"v", "vPCe" # eew_type # "Uv", type>; if !not(IsFloat<type>.val) then { def: RVVBuiltin<"Uv", "UvPCUe" # eew_type # "Uv", type>; @@ -728,7 +731,7 @@ } defvar eew64 = "64"; defvar eew64_type = "(Log2EEW:6)"; - let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask", + let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask", RequiredFeatures = ["RV64"] in { def: RVVBuiltin<"v", "vPCe" # eew64_type # "Uv", type>; if !not(IsFloat<type>.val) then { @@ -740,14 +743,14 @@ } let HasMaskedOffOperand = false, - HasPolicy = false, + MaskedPolicy = NonePolicy, ManualCodegen = [{ // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl) std::swap(Ops[0], Ops[1]); Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo()); IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()}; }], - ManualCodegenMask= [{ + MaskedManualCodegen= [{ // Builtin: (mask, ptr, value, vl).
Intrinsic: (value, ptr, mask, vl) std::swap(Ops[0], Ops[2]); Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo()); @@ -756,12 +759,12 @@ class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> { let Name = "vsm_v"; let IRName = "vsm"; - let HasMask = false; + let HasMasked = false; } multiclass RVVVSEBuiltin<list<string> types> { let Name = NAME # "_v", IRName = "vse", - IRNameMask = "vse_mask" in { + MaskedIRName = "vse_mask" in { foreach type = types in { def : RVVBuiltin<"v", "0Pev", type>; if !not(IsFloat<type>.val) then { @@ -775,16 +778,16 @@ multiclass RVVVSSEBuiltin<list<string> types> { let Name = NAME # "_v", IRName = "vsse", - IRNameMask = "vsse_mask", + MaskedIRName = "vsse_mask", HasMaskedOffOperand = false, - HasPolicy = false, + MaskedPolicy = NonePolicy, ManualCodegen = [{ // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl) std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3); Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo()); IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()}; }], - ManualCodegenMask= [{ + MaskedManualCodegen= [{ // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl) std::swap(Ops[0], Ops[3]); Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo()); @@ -801,14 +804,14 @@ multiclass RVVIndexedStore<string op> { let HasMaskedOffOperand = false, - HasPolicy = false, + MaskedPolicy = NonePolicy, ManualCodegen = [{ // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl) std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3); Ops[1] = Builder.CreateBitCast(Ops[1],Ops[0]->getType()->getPointerTo()); IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()}; }], - ManualCodegenMask= [{ + MaskedManualCodegen= [{ // Builtin: (mask, ptr, index, value, vl).
Intrinsic: (value, ptr, index, mask, vl) std::swap(Ops[0], Ops[3]); Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo()); @@ -818,7 +821,7 @@ foreach eew_list = EEWList[0-2] in { defvar eew = eew_list[0]; defvar eew_type = eew_list[1]; - let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in { + let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask" in { def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>; if !not(IsFloat<type>.val) then { def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>; @@ -827,7 +830,7 @@ } defvar eew64 = "64"; defvar eew64_type = "(Log2EEW:6)"; - let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask", + let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask", RequiredFeatures = ["RV64"] in { def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>; if !not(IsFloat<type>.val) then { @@ -863,9 +866,9 @@ foreach nf = NFList in { let Name = op # nf # "e" # eew # "_v", IRName = op # nf, - IRNameMask = op # nf # "_mask", + MaskedIRName = op # nf # "_mask", NF = nf, - HasNoMaskedOverloaded = false, + HasUnMaskedOverloaded = false, ManualCodegen = [{ { // builtin: (val0 address, val1 address, ..., ptr, vl) @@ -885,7 +888,7 @@ return V; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, vl) // intrinsic: (maskedoff0, ..., ptr, mask, vl) @@ -934,9 +937,9 @@ foreach nf = NFList in { let Name = op # nf # "e" # eew # "ff_v", IRName = op # nf # "ff", - IRNameMask = op # nf # "ff_mask", + MaskedIRName = op # nf # "ff_mask", NF = nf, - HasNoMaskedOverloaded = false, + HasUnMaskedOverloaded = false, ManualCodegen = [{ { // builtin: (val0 address, val1 address, ..., ptr, new_vl, vl) @@ -958,7 +961,7 @@ return Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align)); } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, new_vl, vl) // intrinsic: (maskedoff0, ..., ptr, mask, vl) @@ -1009,9 +1012,9 @@ foreach nf = NFList in { let Name = op # nf # "e" # eew # "_v", IRName = op # nf, - IRNameMask = op # nf # "_mask", + MaskedIRName = op # nf # "_mask", NF = nf, - HasNoMaskedOverloaded = false, + HasUnMaskedOverloaded = false, ManualCodegen = [{ { // builtin: (val0 address, val1 address, ..., ptr, stride, vl) @@ -1031,7 +1034,7 @@ return V; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, stride, vl) // intrinsic: (maskedoff0, ..., ptr, stride, mask, vl) @@ -1077,7 +1080,7 @@ foreach nf = NFList in { let Name = op # nf # "ei" # eew # "_v", IRName = op # nf, - IRNameMask = op # nf # "_mask", + MaskedIRName = op # nf # "_mask", NF = nf, ManualCodegen = [{ { @@ -1098,7 +1101,7 @@ return V; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, index, vl) IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()), @@ -1159,10 +1162,10 @@ foreach nf = NFList in { let Name = op # nf # "e" # eew # "_v", IRName = op # nf, - IRNameMask = op # nf # "_mask", + MaskedIRName = op # nf # "_mask", NF = nf, HasMaskedOffOperand = false, - HasPolicy = false, + MaskedPolicy = NonePolicy, ManualCodegen = [{ { // Builtin: (ptr, val0, val1, ..., vl) @@ -1172,7 +1175,7 @@ assert(Ops.size() == NF + 2); } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // Builtin: (mask, ptr, val0, val1, ..., vl) // Intrinsic: (val0, val1, ..., ptr, mask, vl) @@
-1205,10 +1208,10 @@ foreach nf = NFList in { let Name = op # nf # "e" # eew # "_v", IRName = op # nf, - IRNameMask = op # nf # "_mask", + MaskedIRName = op # nf # "_mask", NF = nf, HasMaskedOffOperand = false, - HasPolicy = false, + MaskedPolicy = NonePolicy, ManualCodegen = [{ { // Builtin: (ptr, stride, val0, val1, ..., vl). @@ -1218,7 +1221,7 @@ assert(Ops.size() == NF + 3); } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // Builtin: (mask, ptr, stride, val0, val1, ..., vl). // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl) @@ -1247,10 +1250,10 @@ foreach nf = NFList in { let Name = op # nf # "ei" # eew # "_v", IRName = op # nf, - IRNameMask = op # nf # "_mask", + MaskedIRName = op # nf # "_mask", NF = nf, HasMaskedOffOperand = false, - HasPolicy = false, + MaskedPolicy = NonePolicy, ManualCodegen = [{ { // Builtin: (ptr, index, val0, val1, ..., vl) @@ -1261,7 +1264,7 @@ assert(Ops.size() == NF + 3); } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { // Builtin: (mask, ptr, index, val0, val1, ..., vl) // Intrinsic: (val0, val1, ..., ptr, index, mask, vl) @@ -1287,7 +1290,7 @@ multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> { let Name = NAME, IRName = IR, - IRNameMask = IR # "_mask", + MaskedIRName = IR # "_mask", ManualCodegen = [{ { // op1, vl @@ -1300,7 +1303,7 @@ break; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -1319,7 +1322,7 @@ multiclass RVVPseudoVNotBuiltin<string IR, string type_range> { let Name = NAME, IRName = IR, - IRNameMask = IR # "_mask", + MaskedIRName = IR # "_mask", ManualCodegen = [{ { // op1, vl @@ -1333,7 +1336,7 @@ break; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -1354,7 +1357,7 @@ multiclass RVVPseudoMaskBuiltin<string IR, string type_range> { let Name = NAME, IRName = IR, - HasMask = false, + HasMasked = false, ManualCodegen = [{ { // op1, vl @@ -1371,7 +1374,7 @@ multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> { let Name = NAME, IRName = IR, - IRNameMask = IR # "_mask", + MaskedIRName = IR # "_mask", ManualCodegen = [{ { // op1, vl @@ -1383,7 +1386,7 @@ break; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -1404,7 +1407,7 @@ let Name = NAME, MangledName = MName, IRName = IR, - IRNameMask = IR # "_mask", + MaskedIRName = IR # "_mask", ManualCodegen = [{ { // op1, vl @@ -1418,7 +1421,7 @@ break; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -1442,7 +1445,7 @@ let Name = NAME, MangledName = MName, IRName = IR, - IRNameMask = IR # "_mask", + MaskedIRName = IR # "_mask", ManualCodegen = [{ { // op1, vl @@ -1456,7 +1459,7 @@ break; } }], - ManualCodegenMask = [{ + MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); @@ -1477,14 +1480,10 @@ // 6. Configuration-Setting Instructions // 6.1.
vsetvli/vsetvl instructions -let HasVL = false, - HasMask = false, - HasPolicy = false, - Log2LMUL = [0], - ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type -{ - // vsetvl is a macro because for it require constant integers in SEW and LMUL. - let HeaderCode = + +// vsetvl/vsetvlmax are macros because they require constant integers in SEW +// and LMUL. +let HeaderCode = [{ #define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5) #define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6) @@ -1512,11 +1511,6 @@ #define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2) #define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3) -}] in - def vsetvli : RVVBuiltin<"", "zzKzKz", "i">; - - let HeaderCode = -[{ #define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5) #define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6) #define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7) @@ -1542,8 +1536,17 @@ #define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1) #define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2) #define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3) - }] in +def vsetvl_macro: RVVHeader; + +let HasBuiltinAlias = false, + HasVL = false, + HasMasked = false, + MaskedPolicy = NonePolicy, + Log2LMUL = [0], + ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type +{ + def vsetvli : RVVBuiltin<"", "zzKzKz", "i">; def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">; } @@ -1598,7 +1601,7 @@ // 12. Vector Integer Arithmetic Instructions // 12.1. Vector Single-Width Integer Add and Subtract -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { defm vadd : RVVIntBinBuiltinSet; defm vsub : RVVIntBinBuiltinSet; defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil", @@ -1609,7 +1612,7 @@ // 12.2. Vector Widening Integer Add/Subtract // Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { defm vwaddu : RVVUnsignedWidenBinBuiltinSet; defm vwsubu : RVVUnsignedWidenBinBuiltinSet; // Widening signed integer add/subtract, 2*SEW = SEW +/- SEW @@ -1628,7 +1631,7 @@ [["w", "wv"]]>; // 12.3. Vector Integer Extension -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">; def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">; @@ -1644,8 +1647,8 @@ } // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions -let HasMask = false, HasPolicy = false in { - let NoMaskPolicy = HasPassthruOperand in { +let HasMasked = false, MaskedPolicy = NonePolicy in { + let UnMaskedPolicy = HasPassthruOperand in { defm vadc : RVVCarryinBuiltinSet; defm vsbc : RVVCarryinBuiltinSet; } @@ -1656,7 +1659,7 @@ } // 12.5. Vector Bitwise Logical Instructions -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { defm vand : RVVIntBinBuiltinSet; defm vxor : RVVIntBinBuiltinSet; defm vor : RVVIntBinBuiltinSet; @@ -1664,7 +1667,7 @@ defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">; // 12.6. Vector Single-Width Bit Shift Instructions -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { defm vsll : RVVShiftBuiltinSet; defm vsrl : RVVUnsignedShiftBuiltinSet; defm vsra : RVVSignedShiftBuiltinSet; @@ -1690,7 +1693,7 @@ defm vmsge : RVVSignedMaskOutBuiltinSet; // 12.9.
Vector Integer Min/Max Instructions -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { defm vminu : RVVUnsignedBinBuiltinSet; defm vmin : RVVSignedBinBuiltinSet; defm vmaxu : RVVUnsignedBinBuiltinSet; @@ -1714,7 +1717,7 @@ } // 12.12. Vector Widening Integer Multiply Instructions -let Log2LMUL = [-3, -2, -1, 0, 1, 2], NoMaskPolicy = HasPassthruOperand in { +let Log2LMUL = [-3, -2, -1, 0, 1, 2], UnMaskedPolicy = HasPassthruOperand in { defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi", [["vv", "w", "wvv"], ["vx", "w", "wve"]]>; @@ -1727,7 +1730,7 @@ } // 12.13. Vector Single-Width Integer Multiply-Add Instructions -let NoMaskPolicy = HasPolicyOperand in { +let UnMaskedPolicy = HasPolicyOperand in { defm vmacc : RVVIntTerBuiltinSet; defm vnmsac : RVVIntTerBuiltinSet; defm vmadd : RVVIntTerBuiltinSet; @@ -1752,7 +1755,7 @@ // 12.15. Vector Integer Merge Instructions // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl) -let HasMask = false, HasPolicy = false, +let HasMasked = false, MaskedPolicy = NonePolicy, ManualCodegen = [{ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3); IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()}; @@ -1767,14 +1770,14 @@ } // 12.16. Vector Integer Move Instructions -let HasMask = false, NoMaskPolicy = HasPassthruOperand, HasPolicy = false in { +let HasMasked = false, UnMaskedPolicy = HasPassthruOperand, MaskedPolicy = NonePolicy in { let MangledName = "vmv_v" in { defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil", [["v", "Uv", "UvUv"]]>; defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilxfd", [["v", "v", "vv"]]>; } - let HasNoMaskedOverloaded = false in + let HasUnMaskedOverloaded = false in defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil", [["x", "v", "ve"], ["x", "Uv", "UvUe"]]>; @@ -1782,7 +1785,7 @@ // 13. Vector Fixed-Point Arithmetic Instructions // 13.1. Vector Single-Width Saturating Add and Subtract -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { defm vsaddu : RVVUnsignedBinBuiltinSet; defm vsadd : RVVSignedBinBuiltinSet; defm vssubu : RVVUnsignedBinBuiltinSet; @@ -1835,7 +1838,7 @@ } // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions -let NoMaskPolicy = HasPolicyOperand in { +let UnMaskedPolicy = HasPolicyOperand in { defm vfmacc : RVVFloatingTerBuiltinSet; defm vfnmacc : RVVFloatingTerBuiltinSet; defm vfmsac : RVVFloatingTerBuiltinSet; @@ -1853,7 +1856,7 @@ } // 14.8. Vector Floating-Point Square-Root Instruction -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { def vfsqrt : RVVFloatingUnaryVVBuiltin; // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction @@ -1883,12 +1886,12 @@ defm vmfge : RVVFloatingMaskOutBuiltinSet; // 14.14. Vector Floating-Point Classify Instruction -let Name = "vfclass_v", NoMaskPolicy = HasPassthruOperand in +let Name = "vfclass_v", UnMaskedPolicy = HasPassthruOperand in def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">; // 14.15. Vector Floating-Point Merge Instructio // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl) -let HasMask = false, HasPolicy = false, +let HasMasked = false, MaskedPolicy = NonePolicy, ManualCodegen = [{ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3); IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()}; @@ -1902,13 +1905,13 @@ } // 14.16. 
Vector Floating-Point Move Instruction -let HasMask = false, NoMaskPolicy = HasPassthruOperand, - HasNoMaskedOverloaded = false, HasPolicy = false in +let HasMasked = false, UnMaskedPolicy = HasPassthruOperand, + HasUnMaskedOverloaded = false, MaskedPolicy = NonePolicy in defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd", [["f", "v", "ve"]]>; // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">; def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">; def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">; @@ -1942,7 +1945,7 @@ // 15. Vector Reduction Operations // 15.1. Vector Single-Width Integer Reduction Instructions -let HasPolicy = false in { +let MaskedPolicy = NonePolicy in { defm vredsum : RVVIntReductionBuiltinSet; defm vredmaxu : RVVUnsignedReductionBuiltin; defm vredmax : RVVSignedReductionBuiltin; @@ -1988,7 +1991,7 @@ defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">; defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">; -let HasPolicy = false in { +let MaskedPolicy = NonePolicy in { // 16.2. Vector count population in mask vcpop.m def vcpop : RVVMaskOp0Builtin<"um">; @@ -2005,7 +2008,7 @@ // 16.6. vmsof.m set-only-first mask bit def vmsof : RVVMaskUnaryBuiltin; -let NoMaskPolicy = HasPassthruOperand, HasNoMaskedOverloaded = false in { +let UnMaskedPolicy = HasPassthruOperand, HasUnMaskedOverloaded = false in { // 16.8. Vector Iota Instruction defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>; @@ -2016,7 +2019,7 @@ // 17. Vector Permutation Instructions // 17.1. Integer Scalar Move Instructions -let HasMask = false, HasPolicy = false in { +let HasMasked = false, MaskedPolicy = NonePolicy in { let HasVL = false, MangledName = "vmv_x" in defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil", [["s", "ve", "ev"], @@ -2028,7 +2031,7 @@ } // 17.2. Floating-Point Scalar Move Instructions -let HasMask = false, HasPolicy = false in { +let HasMasked = false, MaskedPolicy = NonePolicy in { let HasVL = false, MangledName = "vfmv_f" in defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd", [["s", "ve", "ev"]]>; @@ -2045,7 +2048,7 @@ defm vslidedown : RVVSlideBuiltinSet; // 17.3.3. Vector Slide1up Instructions -let NoMaskPolicy = HasPassthruOperand in { +let UnMaskedPolicy = HasPassthruOperand in { defm vslide1up : RVVSlideOneBuiltinSet; defm vfslide1up : RVVFloatingBinVFBuiltinSet; @@ -2071,7 +2074,7 @@ } // 17.5. 
Vector Compress Instruction -let HasMask = false, HasPolicy = false, +let HasMasked = false, MaskedPolicy = NonePolicy, ManualCodegen = [{ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3); IntrinsicTypes = {ResultType, Ops[3]->getType()}; @@ -2085,8 +2088,8 @@ } // Miscellaneous -let HasMask = false, HasVL = false, IRName = "" in { - let Name = "vreinterpret_v", HasPolicy = false, +let HasMasked = false, HasVL = false, IRName = "" in { + let Name = "vreinterpret_v", MaskedPolicy = NonePolicy, ManualCodegen = [{ return Builder.CreateBitCast(Ops[0], ResultType); }] in { @@ -2108,7 +2111,7 @@ } } - let Name = "vundefined", HasNoMaskedOverloaded = false, HasPolicy = false, + let Name = "vundefined", HasUnMaskedOverloaded = false, MaskedPolicy = NonePolicy, ManualCodegen = [{ return llvm::UndefValue::get(ResultType); }] in { @@ -2118,7 +2121,7 @@ // LMUL truncation // C/C++ Operand: VecTy, IR Operand: VecTy, Index - let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc", HasPolicy = false, + let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc", MaskedPolicy = NonePolicy, ManualCodegen = [{ { ID = Intrinsic::experimental_vector_extract; IntrinsicTypes = {ResultType, Ops[0]->getType()}; @@ -2136,7 +2139,7 @@ // LMUL extension // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index - let Name = "vlmul_ext_v", MangledName = "vlmul_ext", HasPolicy = false, + let Name = "vlmul_ext_v", MangledName = "vlmul_ext", MaskedPolicy = NonePolicy, ManualCodegen = [{ ID = Intrinsic::experimental_vector_insert; IntrinsicTypes = {ResultType, Ops[0]->getType()}; @@ -2154,7 +2157,7 @@ } } - let Name = "vget_v", HasPolicy = false, + let Name = "vget_v", MaskedPolicy = NonePolicy, ManualCodegen = [{ { ID = Intrinsic::experimental_vector_extract; @@ -2177,7 +2180,7 @@ } } - let Name = "vset_v", Log2LMUL = [0, 1, 2], HasPolicy = false, + let Name = "vset_v", Log2LMUL = [0, 1, 2], MaskedPolicy = NonePolicy, ManualCodegen = [{ { ID = Intrinsic::experimental_vector_insert; @@ -2201,9 +2204,3 @@ } } } - -let HeaderCode = [{ -#define VE_TAIL_UNDISTURBED 0 -#define VE_TAIL_AGNOSTIC 1 -}] in -def policy : RVVHeader; diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -149,8 +149,8 @@ VectorMaxELenFp64 = 1 << 6, }; -enum Policy : uint8_t { - None, +enum PolicyScheme : uint8_t { + SchemeNone, HasPassthruOperand, HasPolicyOperand, }; @@ -165,12 +165,11 @@ std::string Name; // C intrinsic name. 
std::string MangledName; std::string IRName; - bool IsMask; + bool IsMasked; bool HasVL; - bool HasPolicy; - Policy NoMaskPolicy; - bool HasNoMaskedOverloaded; - bool HasAutoDef; // There is automiatic definition in header + PolicyScheme Scheme; + bool HasUnMaskedOverloaded; + bool HasBuiltinAlias; std::string ManualCodegen; RVVTypePtr OutputType; // Builtin output type RVVTypes InputTypes; // Builtin input types @@ -182,10 +181,10 @@ public: RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, - StringRef MangledSuffix, StringRef IRName, bool IsMask, - bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, - Policy NoMaskPolicy, bool HasNoMaskedOverloaded, - bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, + StringRef MangledSuffix, StringRef IRName, bool IsMasked, + bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme, + bool HasUnMaskedOverloaded, bool HasBuiltinAlias, + StringRef ManualCodegen, const RVVTypes &Types, const std::vector<int64_t> &IntrinsicTypes, const std::vector<StringRef> &RequiredFeatures, unsigned NF); ~RVVIntrinsic() = default; @@ -194,16 +193,16 @@ StringRef getName() const { return Name; } StringRef getMangledName() const { return MangledName; } bool hasVL() const { return HasVL; } - bool hasPolicy() const { return HasPolicy; } - bool hasNoMaskPassthru() const { return NoMaskPolicy == HasPassthruOperand; } - bool hasNoMaskPolicy() const { return NoMaskPolicy == HasPolicyOperand; } - bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; } + bool hasPolicy() const { return Scheme != SchemeNone; } + bool hasPassthruOperand() const { return Scheme == HasPassthruOperand; } + bool hasPolicyOperand() const { return Scheme == HasPolicyOperand; } + bool hasUnMaskedOverloaded() const { return HasUnMaskedOverloaded; } + bool hasBuiltinAlias() const { return HasBuiltinAlias; } bool hasManualCodegen() const { return !ManualCodegen.empty(); } - bool hasAutoDef() const { return HasAutoDef; } - bool isMask() const { return IsMask; } + bool isMasked() const { return IsMasked; } StringRef getIRName() const { return IRName; } StringRef getManualCodegen() const { return ManualCodegen; } - Policy getNoMaskPolicy() const { return NoMaskPolicy; } + PolicyScheme getPolicyScheme() const { return Scheme; } RISCVPredefinedMacroT getRISCVPredefinedMacros() const { return RISCVPredefinedMacros; } @@ -229,7 +228,6 @@ class RVVEmitter { private: RecordKeeper &Records; - std::string HeaderCode; // Concat BasicType, LMUL and Proto as key StringMap<RVVType> LegalTypes; StringSet<> IllegalTypes; @@ -251,8 +249,8 @@ private: /// Create all intrinsics and add them to \p Out void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out); - /// Create Headers and add them to \p Out - void createRVVHeaders(raw_ostream &OS); + /// Print HeaderCode in RVVHeader Record to \p Out + void printHeaderCode(raw_ostream &OS); /// Compute output and input types by applying different config (basic type /// and LMUL with type transformers). It also record result of type in legal /// or illegal set to avoid compute the same config again.
The result maybe @@ -778,14 +776,15 @@ //===----------------------------------------------------------------------===// RVVIntrinsic::RVVIntrinsic( StringRef NewName, StringRef Suffix, StringRef NewMangledName, - StringRef MangledSuffix, StringRef IRName, bool IsMask, - bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, Policy NoMaskPolicy, - bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, + StringRef MangledSuffix, StringRef IRName, bool IsMasked, + bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme, + bool HasUnMaskedOverloaded, bool HasBuiltinAlias, StringRef ManualCodegen, const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes, const std::vector<StringRef> &RequiredFeatures, unsigned NF) - : IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy), - NoMaskPolicy(NoMaskPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded), - HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) { + : IRName(IRName), IsMasked(IsMasked), HasVL(HasVL), Scheme(Scheme), + HasUnMaskedOverloaded(HasUnMaskedOverloaded), + HasBuiltinAlias(HasBuiltinAlias), ManualCodegen(ManualCodegen.str()), + NF(NF) { // Init BuiltinName, Name and MangledName BuiltinName = NewName.str(); @@ -798,7 +797,7 @@ Name += "_" + Suffix.str(); if (!MangledSuffix.empty()) MangledName += "_" + MangledSuffix.str(); - if (IsMask) { + if (IsMasked) { BuiltinName += "_m"; Name += "_m"; } @@ -828,10 +827,11 @@ OutputType = OutInTypes[0]; InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end()); - // IntrinsicTypes is nonmasked version index. Need to update it - // if there is maskedoff operand (It is always in first operand). + // IntrinsicTypes is the unmasked TA version index. It needs to be updated + // if there is a merge operand (it is always the first operand).
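// (Editorial worked example, not part of the patch: suppose NF == 1 and the
// unmasked TA form takes (op0, op1, vl) with IntrinsicTypes = {-1, 2}, i.e.
// the result type and the type of vl. Prepending a maskedoff/merge operand
// gives (maskedoff, op0, op1, vl), so the loop below rewrites the
// non-negative index 2 to 3, while -1, the result type, stays put.)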
IntrinsicTypes = NewIntrinsicTypes; - if ((IsMask && HasMaskedOffOperand) || (!IsMask && hasNoMaskPassthru())) { + if ((IsMasked && HasMaskedOffOperand) || + (!IsMasked && hasPassthruOperand())) { for (auto &I : IntrinsicTypes) { if (I >= 0) I += NF; @@ -859,20 +859,20 @@ return; } - if (isMask()) { + if (isMasked()) { if (hasVL()) { OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n"; - if (hasPolicy()) + if (hasPolicyOperand()) OS << " Ops.push_back(ConstantInt::get(Ops.back()->getType()," - " TAIL_UNDISTURBED));\n"; + " TAIL_UNDISTURBED));\n"; } else { OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n"; } } else { - if (hasNoMaskPolicy()) + if (hasPolicyOperand()) OS << " Ops.push_back(ConstantInt::get(Ops.back()->getType(), " "TAIL_UNDISTURBED));\n"; - else if (hasNoMaskPassthru()) { + else if (hasPassthruOperand()) { OS << " Ops.push_back(llvm::UndefValue::get(ResultType));\n"; OS << " std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n"; } @@ -953,16 +953,11 @@ OS << "extern \"C\" {\n"; OS << "#endif\n\n"; - createRVVHeaders(OS); + printHeaderCode(OS); std::vector<std::unique_ptr<RVVIntrinsic>> Defs; createRVVIntrinsics(Defs); - // Print header code - if (!HeaderCode.empty()) { - OS << HeaderCode; - } - auto printType = [&](auto T) { OS << "typedef " << T->getClangBuiltinStr() << " " << T->getTypeStr() << ";\n"; @@ -1033,7 +1028,7 @@ "__attribute__((__overloadable__))\n"; emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) { - if (!Inst.isMask() && !Inst.hasNoMaskedOverloaded()) + if (!Inst.isMasked() && !Inst.hasUnMaskedOverloaded()) return; OS << "__rvv_aio "; Inst.emitMangledFuncDef(OS); @@ -1062,18 +1057,16 @@ auto P = BuiltinMap.insert(std::make_pair(Def->getBuiltinName(), Def.get())); if (!P.second) { - // Verify that this would have produced the same builtin definition. - if (P.first->second->hasAutoDef() != Def->hasAutoDef()) { + // Verify that this would have produced the same builtin definition.
+ if (P.first->second->hasBuiltinAlias() != Def->hasBuiltinAlias()) PrintFatalError("Builtin with same name has different hasAutoDef"); - } else if (!Def->hasAutoDef() && P.first->second->getBuiltinTypeStr() != - Def->getBuiltinTypeStr()) { + else if (!Def->hasBuiltinAlias() && + P.first->second->getBuiltinTypeStr() != Def->getBuiltinTypeStr()) PrintFatalError("Builtin with same name has different type string"); - } continue; } - OS << "RISCVV_BUILTIN(__builtin_rvv_" << Def->getBuiltinName() << ",\""; - if (!Def->hasAutoDef()) + if (!Def->hasBuiltinAlias()) OS << Def->getBuiltinTypeStr(); OS << "\", \"n\")\n"; } @@ -1117,14 +1110,12 @@ PrintFatalError("Builtin with same name has different ManualCodegen"); else if (P.first->second->getNF() != Def->getNF()) PrintFatalError("Builtin with same name has different NF"); - else if (P.first->second->isMask() != Def->isMask()) - PrintFatalError("Builtin with same name has different isMask"); + else if (P.first->second->isMasked() != Def->isMasked()) + PrintFatalError("Builtin with same name has different isMasked"); else if (P.first->second->hasVL() != Def->hasVL()) - PrintFatalError("Builtin with same name has different HasPolicy"); - else if (P.first->second->hasPolicy() != Def->hasPolicy()) - PrintFatalError("Builtin with same name has different HasPolicy"); + PrintFatalError("Builtin with same name has different hasVL"); + else if (P.first->second->getPolicyScheme() != Def->getPolicyScheme()) + PrintFatalError("Builtin with same name has different getPolicyScheme"); else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes()) PrintFatalError("Builtin with same name has different IntrinsicTypes"); } @@ -1168,30 +1159,28 @@ StringRef MangledSuffixProto = R->getValueAsString("MangledSuffix"); StringRef Prototypes = R->getValueAsString("Prototype"); StringRef TypeRange = R->getValueAsString("TypeRange"); - bool HasMask = R->getValueAsBit("HasMask"); + bool HasMasked = R->getValueAsBit("HasMasked"); bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand"); bool HasVL = R->getValueAsBit("HasVL"); - bool HasPolicy = R->getValueAsBit("HasPolicy"); - Record* NoMaskPolicyRecord = R->getValueAsDef("NoMaskPolicy"); - Policy NoMaskPolicy = - static_cast<Policy>(NoMaskPolicyRecord->getValueAsInt("Value")); - bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded"); + Record *MaskedPolicyRecord = R->getValueAsDef("MaskedPolicy"); + PolicyScheme MaskedPolicy = + static_cast<PolicyScheme>(MaskedPolicyRecord->getValueAsInt("Value")); + Record *UnMaskedPolicyRecord = R->getValueAsDef("UnMaskedPolicy"); + PolicyScheme UnMaskedPolicy = + static_cast<PolicyScheme>(UnMaskedPolicyRecord->getValueAsInt("Value")); + bool HasUnMaskedOverloaded = R->getValueAsBit("HasUnMaskedOverloaded"); std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL"); + bool HasBuiltinAlias = R->getValueAsBit("HasBuiltinAlias"); StringRef ManualCodegen = R->getValueAsString("ManualCodegen"); - StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask"); + StringRef MaskedManualCodegen = R->getValueAsString("MaskedManualCodegen"); std::vector<int64_t> IntrinsicTypes = R->getValueAsListOfInts("IntrinsicTypes"); std::vector<StringRef> RequiredFeatures = R->getValueAsListOfStrings("RequiredFeatures"); StringRef IRName = R->getValueAsString("IRName"); - StringRef IRNameMask = R->getValueAsString("IRNameMask"); + StringRef MaskedIRName =
R->getValueAsString("MaskedIRName"); unsigned NF = R->getValueAsInt("NF"); - StringRef HeaderCodeStr = R->getValueAsString("HeaderCode"); - bool HasAutoDef = HeaderCodeStr.empty(); - if (!HeaderCodeStr.empty()) { - HeaderCode += HeaderCodeStr.str(); - } // Parse prototype and create a list of primitive type with transformers // (operand) in ProtoSeq. ProtoSeq[0] is output operand. SmallVector<std::string> ProtoSeq; @@ -1201,7 +1190,7 @@ // Compute Builtin types SmallVector<std::string> ProtoMaskSeq = ProtoSeq; - if (HasMask) { + if (HasMasked) { // If HasMaskedOffOperand, insert result type as first input operand. if (HasMaskedOffOperand) { if (NF == 1) { @@ -1225,7 +1214,7 @@ // ...) ProtoMaskSeq.insert(ProtoMaskSeq.begin() + NF + 1, "m"); } else { - // If HasMask, insert 'm' as first input operand. + // If HasMasked, insert 'm' as first input operand. ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m"); } } @@ -1245,29 +1234,29 @@ auto SuffixStr = getSuffixStr(I, Log2LMUL, SuffixProto); auto MangledSuffixStr = getSuffixStr(I, Log2LMUL, MangledSuffixProto); - // Create a non-mask intrinsic + // Create an unmasked intrinsic Out.push_back(std::make_unique<RVVIntrinsic>( Name, SuffixStr, MangledName, MangledSuffixStr, IRName, - /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy, - NoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, - Types.getValue(), IntrinsicTypes, RequiredFeatures, NF)); - if (HasMask) { - // Create a mask intrinsic + /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL, + UnMaskedPolicy, HasUnMaskedOverloaded, HasBuiltinAlias, + ManualCodegen, Types.getValue(), IntrinsicTypes, RequiredFeatures, + NF)); + if (HasMasked) { + // Create a masked intrinsic Optional<RVVTypes> MaskTypes = computeTypes(I, Log2LMUL, NF, ProtoMaskSeq); Out.push_back(std::make_unique<RVVIntrinsic>( - Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, - /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy, - NoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef, - ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes, - RequiredFeatures, NF)); + Name, SuffixStr, MangledName, MangledSuffixStr, MaskedIRName, + /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicy, + HasUnMaskedOverloaded, HasBuiltinAlias, MaskedManualCodegen, + MaskTypes.getValue(), IntrinsicTypes, RequiredFeatures, NF)); } } // end for Log2LMULList } // end for TypeRange } } -void RVVEmitter::createRVVHeaders(raw_ostream &OS) { +void RVVEmitter::printHeaderCode(raw_ostream &OS) { std::vector<Record *> RVVHeaders = Records.getAllDerivedDefinitions("RVVHeader"); for (auto *R : RVVHeaders) { @@ -1329,7 +1318,7 @@ NeedEndif = emitMacroRestrictionStr(CurMacros, OS); PrevMacros = CurMacros; } - if (Def->hasAutoDef()) + if (Def->hasBuiltinAlias()) PrintBody(OS, *Def); } if (NeedEndif) diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -220,7 +220,7 @@ } // For unit stride load with mask // Input: (maskedoff, pointer, mask, vl, policy) - class RISCVUSLoadMask + class RISCVUSLoadMasked : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>, @@ -235,7 +235,7 @@ // Output: (data, vl) // NOTE: We model this with default memory properties since we model writing // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
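(Editorial sketch, not part of the patch: why the fault-only-first intrinsic is modeled as also returning vl, shown at the C level. The helper name load_first_fault is hypothetical; the intrinsic and vsetvl names assume the unprefixed API this header generates.)

#include <riscv_vector.h>

/* Fault-only-first loads only the elements up to the first faulting one
 * and reports how many were actually loaded through *new_vl, which is
 * why the IR intrinsic returns (data, vl). */
vint32m1_t load_first_fault(const int32_t *base, size_t *new_vl, size_t avl) {
  size_t vl = vsetvl_e32m1(avl);
  return vle32ff_v_i32m1(base, new_vl, vl);
}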
- class RISCVUSLoadFFMask + class RISCVUSLoadFFMasked : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty], [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>, @@ -256,7 +256,7 @@ } // For strided load with mask // Input: (maskedoff, pointer, stride, mask, vl, policy) - class RISCVSLoadMask + class RISCVSLoadMasked : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty, @@ -278,7 +278,7 @@ } // For indexed load with mask // Input: (maskedoff, pointer, index, mask, vl, policy) - class RISCVILoadMask + class RISCVILoadMasked : Intrinsic<[llvm_anyvector_ty ], [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, @@ -300,7 +300,7 @@ } // For unit stride store with mask // Input: (vector_in, pointer, mask, vl) - class RISCVUSStoreMask + class RISCVUSStoreMasked : Intrinsic<[], [llvm_anyvector_ty, LLVMPointerType<LLVMMatchType<0>>, @@ -321,7 +321,7 @@ } // For stride store with mask // Input: (vector_in, pointer, stirde, mask, vl) - class RISCVSStoreMask + class RISCVSStoreMasked : Intrinsic<[], [llvm_anyvector_ty, LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty, @@ -341,7 +341,7 @@ } // For indexed store with mask // Input: (vector_in, pointer, index, mask, vl) - class RISCVIStoreMask + class RISCVIStoreMasked : Intrinsic<[], [llvm_anyvector_ty, LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, @@ -351,7 +351,7 @@ } // For destination vector type is the same as source vector. // Input: (passthru, vector_in, vl) - class RISCVUnaryAANoMask + class RISCVUnaryAAUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -359,7 +359,7 @@ } // For destination vector type is the same as first source vector (with mask). // Input: (vector_in, vector_in, mask, vl, policy) - class RISCVUnaryAAMask + class RISCVUnaryAAMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -377,7 +377,7 @@ } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, vl) - class RISCVBinaryAAANoMask + class RISCVBinaryAAAUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -385,7 +385,7 @@ } // For destination vector type is the same as first and second source vector.
// Input: (vector_in, vector_in, int16_vector_in, vl, policy) - class RISCVRGatherEI16VVMask + class RISCVRGatherEI16VVMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, @@ -425,7 +425,7 @@ // For destination vector type is the same as first source vector, and the // second operand is XLen. // Input: (passthru, vector_in, xlen_in, vl) - class RISCVGatherVXNoMask + class RISCVGatherVXUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], @@ -435,7 +435,7 @@ // For destination vector type is the same as first source vector (with mask). // Second operand is XLen. // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy) - class RISCVGatherVXMask + class RISCVGatherVXMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, @@ -445,7 +445,7 @@ } // For destination vector type is the same as first source vector. // Input: (passthru, vector_in, vector_in/scalar_in, vl) - class RISCVBinaryAAXNoMask + class RISCVBinaryAAXUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], @@ -455,7 +455,7 @@ } // For destination vector type is the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVBinaryAAXMask + class RISCVBinaryAAXMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -467,7 +467,7 @@ // For destination vector type is the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. // Input: (passthru, vector_in, vector_in/scalar_in, vl) - class RISCVBinaryAAShiftNoMask + class RISCVBinaryAAShiftUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], @@ -477,7 +477,7 @@ // For destination vector type is the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVBinaryAAShiftMask + class RISCVBinaryAAShiftMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -487,7 +487,7 @@ } // For destination vector type is NOT the same as first source vector. // Input: (passthru, vector_in, vector_in/scalar_in, vl) - class RISCVBinaryABXNoMask + class RISCVBinaryABXUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], @@ -497,7 +497,7 @@ } // For destination vector type is NOT the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVBinaryABXMask + class RISCVBinaryABXMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -509,7 +509,7 @@ // For destination vector type is NOT the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. 
// Input: (passthru, vector_in, vector_in/scalar_in, vl) - class RISCVBinaryABShiftNoMask + class RISCVBinaryABShiftUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], @@ -519,7 +519,7 @@ // For destination vector type is NOT the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVBinaryABShiftMask + class RISCVBinaryABShiftMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -563,7 +563,7 @@ // For binary operations with mask type output without mask. // Output: (mask type output) // Input: (vector_in, vector_in/scalar_in, vl) - class RISCVCompareNoMask + class RISCVCompareUnMasked : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -573,7 +573,7 @@ // For binary operations with mask type output with mask. // Output: (mask type output) // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVCompareMask + class RISCVCompareMasked : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyvector_ty, llvm_any_ty, @@ -586,7 +586,7 @@ // For FP classify operations. // Output: (bit mask type output) // Input: (passthru, vector_in, vl) - class RISCVClassifyNoMask + class RISCVClassifyUnMasked : Intrinsic<[LLVMVectorOfBitcastsToInt<0>], [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty, llvm_anyint_ty], @@ -596,7 +596,7 @@ // For FP classify operations with mask. // Output: (bit mask type output) // Input: (maskedoff, vector_in, mask, vl, policy) - class RISCVClassifyMask + class RISCVClassifyMasked : Intrinsic<[LLVMVectorOfBitcastsToInt<0>], [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, @@ -607,7 +607,7 @@ // For Saturating binary operations. // The destination vector type is the same as first source vector. // Input: (passthru, vector_in, vector_in/scalar_in, vl) - class RISCVSaturatingBinaryAAXNoMask + class RISCVSaturatingBinaryAAXUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], @@ -618,7 +618,7 @@ // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVSaturatingBinaryAAXMask + class RISCVSaturatingBinaryAAXMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -631,7 +631,7 @@ // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. // Input: (passthru, vector_in, vector_in/scalar_in, vl) - class RISCVSaturatingBinaryAAShiftNoMask + class RISCVSaturatingBinaryAAShiftUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], @@ -642,7 +642,7 @@ // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. 
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVSaturatingBinaryAAShiftMask + class RISCVSaturatingBinaryAAShiftMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -654,7 +654,7 @@ // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. // Input: (passthru, vector_in, vector_in/scalar_in, vl) - class RISCVSaturatingBinaryABShiftNoMask + class RISCVSaturatingBinaryABShiftUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], @@ -665,7 +665,7 @@ // The destination vector type is NOT the same as first source vector (with mask). // The second source operand matches the destination type or is an XLen scalar. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVSaturatingBinaryABShiftMask + class RISCVSaturatingBinaryABShiftMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, @@ -674,7 +674,7 @@ let VLOperand = 4; } // Input: (vector_in, vector_in, vector_in/scalar_in, vl) - class RISCVTernaryAAAXNoMask + class RISCVTernaryAAAXUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], @@ -682,7 +682,7 @@ let VLOperand = 3; } // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy) - class RISCVTernaryAAAXMask + class RISCVTernaryAAAXMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, @@ -690,9 +690,9 @@ [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 4; } - // NoMask Vector Multiply-Add operations, its first operand can not be undef. + // UnMasked Vector Multiply-Add operations, its first operand can not be undef. // Input: (vector_in, vector_in/scalar, vector_in, vl, policy) - class RISCVTernaryAAXANoMask + class RISCVTernaryAAXAUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<2>], @@ -700,9 +700,9 @@ let ScalarOperand = 1; let VLOperand = 3; } - // Mask Vector Multiply-Add operations, its first operand can not be undef. + // Masked Vector Multiply-Add operations, its first operand can not be undef. // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy - class RISCVTernaryAAXAMask + class RISCVTernaryAAXAMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, @@ -711,9 +711,9 @@ let ScalarOperand = 1; let VLOperand = 4; } - // NoMask Widening Vector Multiply-Add operations, its first operand can not be undef. + // UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef. // Input: (vector_in, vector_in/scalar, vector_in, vl, policy) - class RISCVTernaryWideNoMask + class RISCVTernaryWideUnMasked : Intrinsic< [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, llvm_anyint_ty, LLVMMatchType<3>], @@ -721,9 +721,9 @@ let ScalarOperand = 1; let VLOperand = 3; } - // Mask Widening Vector Multiply-Add operations, its first operand can not be undef. + // Masked Widening Vector Multiply-Add operations, its first operand can not be undef. 
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy - class RISCVTernaryWideMask + class RISCVTernaryWideMasked : Intrinsic< [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, @@ -735,7 +735,7 @@ // For Reduction ternary operations. // For destination vector type is the same as first and third source vector. // Input: (vector_in, vector_in, vector_in, vl) - class RISCVReductionNoMask + class RISCVReductionUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty], @@ -746,7 +746,7 @@ // For destination vector type is the same as first and third source vector. // The mask type come from second source vector. // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl) - class RISCVReductionMask + class RISCVReductionMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty], @@ -756,7 +756,7 @@ // For unary operations with scalar type output without mask // Output: (scalar type) // Input: (vector_in, vl) - class RISCVMaskUnarySOutNoMask + class RISCVMaskedUnarySOutUnMasked : Intrinsic<[LLVMMatchType<1>], [llvm_anyvector_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -765,7 +765,7 @@ // For unary operations with scalar type output with mask // Output: (scalar type) // Input: (vector_in, mask, vl) - class RISCVMaskUnarySOutMask + class RISCVMaskedUnarySOutMasked : Intrinsic<[LLVMMatchType<1>], [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -773,7 +773,7 @@ } // For destination vector type is NOT the same as source vector. // Input: (passthru, vector_in, vl) - class RISCVUnaryABNoMask + class RISCVUnaryABUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -781,7 +781,7 @@ } // For destination vector type is NOT the same as source vector (with mask). // Input: (maskedoff, vector_in, mask, vl, policy) - class RISCVUnaryABMask + class RISCVUnaryABMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, @@ -792,7 +792,7 @@ // For unary operations with the same vector type in/out without mask // Output: (vector) // Input: (vector_in, vl) - class RISCVUnaryNoMask + class RISCVUnaryUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -801,7 +801,7 @@ // For mask unary operations with mask type in/out with mask // Output: (mask type output) // Input: (mask type maskedoff, mask type vector_in, mask, vl, policy) - class RISCVMaskUnaryMOutMask + class RISCVMaskedUnaryMOutMasked : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], @@ -825,7 +825,7 @@ } // For Conversion unary operations. // Input: (passthru, vector_in, vl) - class RISCVConversionNoMask + class RISCVConversionUnMasked : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { @@ -833,7 +833,7 @@ } // For Conversion unary operations with mask. 
   // For Conversion unary operations.
   // Input: (passthru, vector_in, vl)
-  class RISCVConversionNoMask
+  class RISCVConversionUnMasked
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic {
@@ -833,7 +833,7 @@
   }
   // For Conversion unary operations with mask.
   // Input: (maskedoff, vector_in, mask, vl, policy)
-  class RISCVConversionMask
+  class RISCVConversionMasked
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
@@ -853,7 +853,7 @@
   }
   // For unit stride segment load with mask
   // Input: (maskedoff, pointer, mask, vl, policy)
-  class RISCVUSSegLoadMask<int nf>
+  class RISCVUSSegLoadMasked<int nf>
         : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                 !add(nf, -1))),
                     !listconcat(!listsplat(LLVMMatchType<0>, nf),
@@ -882,7 +882,7 @@
   // Output: (data, vl)
   // NOTE: We model this with default memory properties since we model writing
   // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
-  class RISCVUSSegLoadFFMask<int nf>
+  class RISCVUSSegLoadFFMasked<int nf>
         : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                 !add(nf, -1)), [llvm_anyint_ty]),
                     !listconcat(!listsplat(LLVMMatchType<0>, nf),
@@ -905,7 +905,7 @@
   }
   // For stride segment load with mask
   // Input: (maskedoff, pointer, offset, mask, vl, policy)
-  class RISCVSSegLoadMask<int nf>
+  class RISCVSSegLoadMasked<int nf>
         : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                 !add(nf, -1))),
                     !listconcat(!listsplat(LLVMMatchType<0>, nf),
@@ -929,7 +929,7 @@
   }
   // For indexed segment load with mask
   // Input: (maskedoff, pointer, index, mask, vl, policy)
-  class RISCVISegLoadMask<int nf>
+  class RISCVISegLoadMasked<int nf>
         : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                 !add(nf, -1))),
                     !listconcat(!listsplat(LLVMMatchType<0>, nf),
@@ -954,7 +954,7 @@
   }
   // For unit stride segment store with mask
   // Input: (value, pointer, mask, vl)
-  class RISCVUSSegStoreMask<int nf>
+  class RISCVUSSegStoreMasked<int nf>
         : Intrinsic<[],
                     !listconcat([llvm_anyvector_ty],
                                 !listsplat(LLVMMatchType<0>, !add(nf, -1)),
@@ -978,7 +978,7 @@
   }
   // For stride segment store with mask
   // Input: (value, pointer, offset, mask, vl)
-  class RISCVSSegStoreMask<int nf>
+  class RISCVSSegStoreMasked<int nf>
         : Intrinsic<[],
                     !listconcat([llvm_anyvector_ty],
                                 !listsplat(LLVMMatchType<0>, !add(nf, -1)),
@@ -1002,7 +1002,7 @@
   }
   // For indexed segment store with mask
   // Input: (value, pointer, offset, mask, vl)
-  class RISCVISegStoreMask<int nf>
+  class RISCVISegStoreMasked<int nf>
         : Intrinsic<[],
                     !listconcat([llvm_anyvector_ty],
                                 !listsplat(LLVMMatchType<0>, !add(nf, -1)),
@@ -1015,76 +1015,76 @@
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
-    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
   }
   multiclass RISCVUSLoadFF {
     def "int_riscv_" # NAME : RISCVUSLoadFF;
-    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
   }
   multiclass RISCVSLoad {
     def "int_riscv_" # NAME : RISCVSLoad;
-    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
   }
   multiclass RISCVILoad {
     def "int_riscv_" # NAME : RISCVILoad;
-    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
   }
   multiclass RISCVUSStore {
     def "int_riscv_" # NAME : RISCVUSStore;
-    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
   }
   multiclass RISCVSStore {
     def "int_riscv_" # NAME : RISCVSStore;
-    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
   }
   multiclass RISCVIStore {
     def "int_riscv_" # NAME : RISCVIStore;
-    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
   }
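Each of these multiclasses stamps out an unmasked/masked intrinsic pair under a common name. For example, the `defm vle : RISCVUSLoad;` instantiation further down in this file expands, roughly, to:

  // Rough expansion sketch of: defm vle : RISCVUSLoad;
  def int_riscv_vle      : RISCVUSLoad;       // unmasked form
  def int_riscv_vle_mask : RISCVUSLoadMasked; // masked form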
# NAME # "_mask" : RISCVUnaryAAMasked; } multiclass RISCVUnaryAB { - def "int_riscv_" # NAME : RISCVUnaryABNoMask; - def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask; + def "int_riscv_" # NAME : RISCVUnaryABUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked; } // AAX means the destination type(A) is the same as the first source // type(A). X means any type for the second source operand. multiclass RISCVBinaryAAX { - def "int_riscv_" # NAME : RISCVBinaryAAXNoMask; - def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask; + def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked; } // Like RISCVBinaryAAX, but the second operand is used a shift amount so it // must be a vector or an XLen scalar. multiclass RISCVBinaryAAShift { - def "int_riscv_" # NAME : RISCVBinaryAAShiftNoMask; - def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMask; + def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked; } multiclass RISCVRGatherVV { - def "int_riscv_" # NAME : RISCVRGatherVVNoMask; - def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask; + def "int_riscv_" # NAME : RISCVRGatherVVUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked; } multiclass RISCVRGatherVX { - def "int_riscv_" # NAME : RISCVGatherVXNoMask; - def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask; + def "int_riscv_" # NAME : RISCVGatherVXUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked; } multiclass RISCVRGatherEI16VV { - def "int_riscv_" # NAME : RISCVRGatherEI16VVNoMask; - def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMask; + def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked; } // ABX means the destination type(A) is different from the first source // type(B). X means any type for the second source operand. multiclass RISCVBinaryABX { - def "int_riscv_" # NAME : RISCVBinaryABXNoMask; - def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask; + def "int_riscv_" # NAME : RISCVBinaryABXUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked; } // Like RISCVBinaryABX, but the second operand is used a shift amount so it // must be a vector or an XLen scalar. 
   // Like RISCVBinaryABX, but the second operand is used as a shift amount so it
   // must be a vector or an XLen scalar.
   multiclass RISCVBinaryABShift {
-    def "int_riscv_" # NAME : RISCVBinaryABShiftNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMask;
+    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
   }
   multiclass RISCVBinaryWithV0 {
     def "int_riscv_" # NAME : RISCVBinaryWithV0;
@@ -1096,80 +1096,80 @@
     def "int_riscv_" # NAME : RISCVBinaryMOut;
   }
   multiclass RISCVSaturatingBinaryAAX {
-    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
   }
   multiclass RISCVSaturatingBinaryAAShift {
-    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMask;
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked;
   }
   multiclass RISCVSaturatingBinaryABShift {
-    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMask;
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
   }
   multiclass RISCVTernaryAAAX {
-    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
+    def "int_riscv_" # NAME : RISCVTernaryAAAXUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMasked;
   }
   multiclass RISCVTernaryAAXA {
-    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
+    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
   }
   multiclass RISCVCompare {
-    def "int_riscv_" # NAME : RISCVCompareNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
+    def "int_riscv_" # NAME : RISCVCompareUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
   }
   multiclass RISCVClassify {
-    def "int_riscv_" # NAME : RISCVClassifyNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
+    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
   }
   multiclass RISCVTernaryWide {
-    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
+    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
   }
   multiclass RISCVReduction {
-    def "int_riscv_" # NAME : RISCVReductionNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
+    def "int_riscv_" # NAME : RISCVReductionUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
   }
-  multiclass RISCVMaskUnarySOut {
-    def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
+  multiclass RISCVMaskedUnarySOut {
+    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
   }
-  multiclass RISCVMaskUnaryMOut {
-    def "int_riscv_" # NAME : RISCVUnaryNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
+  multiclass RISCVMaskedUnaryMOut {
+    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
   }
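Note the asymmetry in RISCVMaskedUnaryMOut above: the unmasked def reuses the generic RISCVUnaryUnMasked class, and only the masked def needs the specialized mask-output class. A sketch with a hypothetical name:

  // Sketch: a vmsbf-style mask-to-mask unary operation.
  defm vmsbf_sketch : RISCVMaskedUnaryMOut;
  // -> int_riscv_vmsbf_sketch       (RISCVUnaryUnMasked)
  // -> int_riscv_vmsbf_sketch_mask  (RISCVMaskedUnaryMOutMasked)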
- def "int_riscv_" # NAME # "_mask" : RISCVConversionMask; + def "int_riscv_" #NAME :RISCVConversionUnMasked; + def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked; } multiclass RISCVUSSegLoad { def "int_riscv_" # NAME : RISCVUSSegLoad; - def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask; + def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked; } multiclass RISCVUSSegLoadFF { def "int_riscv_" # NAME : RISCVUSSegLoadFF; - def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask; + def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked; } multiclass RISCVSSegLoad { def "int_riscv_" # NAME : RISCVSSegLoad; - def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask; + def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked; } multiclass RISCVISegLoad { def "int_riscv_" # NAME : RISCVISegLoad; - def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask; + def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked; } multiclass RISCVUSSegStore { def "int_riscv_" # NAME : RISCVUSSegStore; - def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask; + def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked; } multiclass RISCVSSegStore { def "int_riscv_" # NAME : RISCVSSegStore; - def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask; + def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked; } multiclass RISCVISegStore { def "int_riscv_" # NAME : RISCVISegStore; - def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask; + def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked; } defm vle : RISCVUSLoad; @@ -1410,22 +1410,22 @@ defm vfwredusum : RISCVReduction; defm vfwredosum : RISCVReduction; - def int_riscv_vmand: RISCVBinaryAAANoMask; - def int_riscv_vmnand: RISCVBinaryAAANoMask; - def int_riscv_vmandn: RISCVBinaryAAANoMask; - def int_riscv_vmxor: RISCVBinaryAAANoMask; - def int_riscv_vmor: RISCVBinaryAAANoMask; - def int_riscv_vmnor: RISCVBinaryAAANoMask; - def int_riscv_vmorn: RISCVBinaryAAANoMask; - def int_riscv_vmxnor: RISCVBinaryAAANoMask; + def int_riscv_vmand: RISCVBinaryAAAUnMasked; + def int_riscv_vmnand: RISCVBinaryAAAUnMasked; + def int_riscv_vmandn: RISCVBinaryAAAUnMasked; + def int_riscv_vmxor: RISCVBinaryAAAUnMasked; + def int_riscv_vmor: RISCVBinaryAAAUnMasked; + def int_riscv_vmnor: RISCVBinaryAAAUnMasked; + def int_riscv_vmorn: RISCVBinaryAAAUnMasked; + def int_riscv_vmxnor: RISCVBinaryAAAUnMasked; def int_riscv_vmclr : RISCVNullaryIntrinsic; def int_riscv_vmset : RISCVNullaryIntrinsic; - defm vcpop : RISCVMaskUnarySOut; - defm vfirst : RISCVMaskUnarySOut; - defm vmsbf : RISCVMaskUnaryMOut; - defm vmsof : RISCVMaskUnaryMOut; - defm vmsif : RISCVMaskUnaryMOut; + defm vcpop : RISCVMaskedUnarySOut; + defm vfirst : RISCVMaskedUnarySOut; + defm vmsbf : RISCVMaskedUnaryMOut; + defm vmsof : RISCVMaskedUnaryMOut; + defm vmsif : RISCVMaskedUnaryMOut; defm vfcvt_xu_f_v : RISCVConversion; defm vfcvt_x_f_v : RISCVConversion;