diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -557,6 +557,1075 @@
   return nullptr;
 }
 
+static Value *simplifyTernarylogic(const IntrinsicInst &II,
+                                   InstCombiner::BuilderTy &Builder) {
+
+  auto *ArgImm = dyn_cast<ConstantInt>(II.getArgOperand(3));
+  if (!ArgImm || ArgImm->getValue().uge(256))
+    return nullptr;
+
+  Value *ArgA = II.getArgOperand(0);
+  Value *ArgB = II.getArgOperand(1);
+  Value *ArgC = II.getArgOperand(2);
+
+  Type *Ty = II.getType();
+
+  auto Or = [&](auto Lhs, auto Rhs) -> std::pair<Value *, uint8_t> {
+    return {Builder.CreateOr(Lhs.first, Rhs.first), Lhs.second | Rhs.second};
+  };
+  auto Xor = [&](auto Lhs, auto Rhs) -> std::pair<Value *, uint8_t> {
+    return {Builder.CreateXor(Lhs.first, Rhs.first), Lhs.second ^ Rhs.second};
+  };
+  auto And = [&](auto Lhs, auto Rhs) -> std::pair<Value *, uint8_t> {
+    return {Builder.CreateAnd(Lhs.first, Rhs.first), Lhs.second & Rhs.second};
+  };
+  auto Not = [&](auto V) -> std::pair<Value *, uint8_t> {
+    return {Builder.CreateNot(V.first), ~V.second};
+  };
+  auto Nor = [&](auto Lhs, auto Rhs) { return Not(Or(Lhs, Rhs)); };
+  auto Xnor = [&](auto Lhs, auto Rhs) { return Not(Xor(Lhs, Rhs)); };
+  auto Nand = [&](auto Lhs, auto Rhs) { return Not(And(Lhs, Rhs)); };
+
+  bool AIsConst = match(ArgA, PatternMatch::m_ImmConstant());
+  bool BIsConst = match(ArgB, PatternMatch::m_ImmConstant());
+  bool CIsConst = match(ArgC, PatternMatch::m_ImmConstant());
+
+  bool ABIsConst = AIsConst && BIsConst;
+  bool ACIsConst = AIsConst && CIsConst;
+  bool BCIsConst = BIsConst && CIsConst;
+  bool ABCIsConst = AIsConst && BIsConst && CIsConst;
+
+  // Used for verification. It's a big table. It's difficult to go from Imm to
+  // logic ops, but easy to verify that a set of logic ops is correct. We
+  // track the logic ops through the second value in the pair. At the end it
+  // should equal Imm.
+  std::pair<Value *, uint8_t> A = {ArgA, 0xf0};
+  std::pair<Value *, uint8_t> B = {ArgB, 0xcc};
+  std::pair<Value *, uint8_t> C = {ArgC, 0xaa};
+  std::pair<Value *, uint8_t> Res = {nullptr, 0};
+
+  // Currently we only handle cases that convert directly to another
+  // instruction or cases where all of the operands are constant. This is
+  // because we don't properly handle creating ternary ops in the backend, so
+  // splitting them here may cause regressions. As the backend improves,
+  // uncomment more cases.
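+  //
+  // Illustrative check of the encoding (not part of the lookup itself): the
+  // bytes 0xf0, 0xcc and 0xaa are the truth tables of A, B and C over the
+  // eight input combinations, so for, e.g., Imm = 0x96 (A ^ B ^ C) the
+  // tracked byte works out to 0xf0 ^ 0xcc ^ 0xaa == 0x96, reproducing the
+  // immediate exactly as the assert below expects.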
+
+  uint8_t Imm = ArgImm->getValue().getZExtValue();
+  switch (Imm) {
+  case 0x0:
+    Res = {Constant::getNullValue(Ty), 0};
+    break;
+  case 0x1:
+    if (ABCIsConst)
+      Res = Nor(Or(A, B), C);
+    break;
+  case 0x2:
+    if (ABCIsConst)
+      Res = And(Nor(A, B), C);
+    break;
+  case 0x3:
+    if (ABIsConst)
+      Res = Nor(A, B);
+    break;
+  case 0x4:
+    if (ABCIsConst)
+      Res = And(Nor(A, C), B);
+    break;
+  case 0x5:
+    if (ACIsConst)
+      Res = Nor(A, C);
+    break;
+  case 0x6:
+    if (ABCIsConst)
+      Res = Nor(A, Xnor(B, C));
+    break;
+  case 0x7:
+    if (ABCIsConst)
+      Res = Nor(A, And(B, C));
+    break;
+  case 0x8:
+    if (ABCIsConst)
+      Res = Nor(A, Nand(B, C));
+    break;
+  case 0x9:
+    if (ABCIsConst)
+      Res = Nor(A, Xor(B, C));
+    break;
+  case 0xa:
+    if (ACIsConst)
+      Res = Nor(A, Not(C));
+    break;
+  case 0xb:
+    if (ABCIsConst)
+      Res = Nor(A, Nor(C, Not(B)));
+    break;
+  case 0xc:
+    if (ABIsConst)
+      Res = Nor(A, Not(B));
+    break;
+  case 0xd:
+    if (ABCIsConst)
+      Res = Nor(A, Nor(B, Not(C)));
+    break;
+  case 0xe:
+    if (ABCIsConst)
+      Res = Nor(A, Nor(B, C));
+    break;
+  case 0xf:
+    Res = Not(A);
+    break;
+  case 0x10:
+    if (ABCIsConst)
+      Res = And(A, Nor(B, C));
+    break;
+  case 0x11:
+    if (BCIsConst)
+      Res = Nor(B, C);
+    break;
+  case 0x12:
+    if (ABCIsConst)
+      Res = Nor(Xnor(A, C), B);
+    break;
+  case 0x13:
+    if (ABCIsConst)
+      Res = Nor(And(A, C), B);
+    break;
+  case 0x14:
+    if (ABCIsConst)
+      Res = Nor(Xnor(A, B), C);
+    break;
+  case 0x15:
+    if (ABCIsConst)
+      Res = Nor(And(A, B), C);
+    break;
+  case 0x16:
+    if (ABCIsConst)
+      Res = Xor(Xor(A, B), And(Nand(A, B), C));
+    break;
+  case 0x17:
+    if (ABCIsConst)
+      Res = Xor(Or(A, B), Or(Xnor(A, B), C));
+    break;
+  case 0x18:
+    if (ABCIsConst)
+      Res = Nor(Xnor(A, B), Xnor(A, C));
+    break;
+  case 0x19:
+    if (ABCIsConst)
+      Res = And(Nand(A, B), Xnor(B, C));
+    break;
+  case 0x1a:
+    if (ABCIsConst)
+      Res = Xor(A, Or(And(A, B), C));
+    break;
+  case 0x1b:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Xnor(A, B), C));
+    break;
+  case 0x1c:
+    if (ABCIsConst)
+      Res = Xor(A, Or(And(A, C), B));
+    break;
+  case 0x1d:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Xnor(A, C), B));
+    break;
+  case 0x1e:
+    if (ABCIsConst)
+      Res = Xor(A, Or(B, C));
+    break;
+  case 0x1f:
+    if (ABCIsConst)
+      Res = Nand(A, Or(B, C));
+    break;
+  case 0x20:
+    if (ABCIsConst)
+      Res = Nor(Nand(A, C), B);
+    break;
+  case 0x21:
+    if (ABCIsConst)
+      Res = Nor(Xor(A, C), B);
+    break;
+  case 0x22:
+    if (BCIsConst)
+      Res = Nor(B, Not(C));
+    break;
+  case 0x23:
+    if (ABCIsConst)
+      Res = Nor(B, Nor(C, Not(A)));
+    break;
+  case 0x24:
+    if (ABCIsConst)
+      Res = Nor(Xnor(A, B), Xor(A, C));
+    break;
+  case 0x25:
+    if (ABCIsConst)
+      Res = Xor(A, Nand(Nand(A, B), C));
+    break;
+  case 0x26:
+    if (ABCIsConst)
+      Res = And(Nand(A, B), Xor(B, C));
+    break;
+  case 0x27:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(A, B), C), B);
+    break;
+  case 0x28:
+    if (ABCIsConst)
+      Res = And(Xor(A, B), C);
+    break;
+  case 0x29:
+    if (ABCIsConst)
+      Res = Xor(Xor(A, B), Nor(And(A, B), C));
+    break;
+  case 0x2a:
+    if (ABCIsConst)
+      Res = And(Nand(A, B), C);
+    break;
+  case 0x2b:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(A, B), Xor(A, C)), A);
+    break;
+  case 0x2c:
+    if (ABCIsConst)
+      Res = Nor(Xnor(A, B), Nor(B, C));
+    break;
+  case 0x2d:
+    if (ABCIsConst)
+      Res = Xor(A, Or(B, Not(C)));
+    break;
+  case 0x2e:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Xor(A, C), B));
+    break;
+  case 0x2f:
+    if (ABCIsConst)
+      Res = Nand(A, Or(B, Not(C)));
+    break;
+  case 0x30:
+    if (ABIsConst)
+      Res = Nor(B, Not(A));
+    break;
+  case 0x31:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, Not(C)), B);
+    break;
+  case 0x32:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, C), B);
+    break;
+  case 0x33:
+    Res = Not(B);
+    break;
+  case 0x34:
+    if (ABCIsConst)
+      Res = And(Xor(A, B), Nand(B, C));
+    break;
+  case 0x35:
+    if (ABCIsConst)
+      Res = Xor(B, Or(A, Xnor(B, C)));
+    break;
+  case 0x36:
+    if (ABCIsConst)
+      Res = Xor(Or(A, C), B);
+    break;
+  case 0x37:
+    if (ABCIsConst)
+      Res = Nand(Or(A, C), B);
+    break;
+  case 0x38:
+    if (ABCIsConst)
+      Res = Nor(Xnor(A, B), Nor(A, C));
+    break;
+  case 0x39:
+    if (ABCIsConst)
+      Res = Xor(Or(A, Not(C)), B);
+    break;
+  case 0x3a:
+    if (ABCIsConst)
+      Res = Xor(B, Or(A, Xor(B, C)));
+    break;
+  case 0x3b:
+    if (ABCIsConst)
+      Res = Nand(Or(A, Not(C)), B);
+    break;
+  case 0x3c:
+    Res = Xor(A, B);
+    break;
+  case 0x3d:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Nor(A, C), B));
+    break;
+  case 0x3e:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Nor(A, Not(C)), B));
+    break;
+  case 0x3f:
+    if (ABIsConst)
+      Res = Nand(A, B);
+    break;
+  case 0x40:
+    if (ABCIsConst)
+      Res = Nor(Nand(A, B), C);
+    break;
+  case 0x41:
+    if (ABCIsConst)
+      Res = Nor(Xor(A, B), C);
+    break;
+  case 0x42:
+    if (ABCIsConst)
+      Res = Nor(Xor(A, B), Xnor(A, C));
+    break;
+  case 0x43:
+    if (ABCIsConst)
+      Res = Xor(A, Nand(Nand(A, C), B));
+    break;
+  case 0x44:
+    if (BCIsConst)
+      Res = Nor(C, Not(B));
+    break;
+  case 0x45:
+    if (ABCIsConst)
+      Res = Nor(Nor(B, Not(A)), C);
+    break;
+  case 0x46:
+    if (ABCIsConst)
+      Res = Xor(Or(And(A, C), B), C);
+    break;
+  case 0x47:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(A, C), B), C);
+    break;
+  case 0x48:
+    if (ABCIsConst)
+      Res = And(Xor(A, C), B);
+    break;
+  case 0x49:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(A, B), And(A, C)), C);
+    break;
+  case 0x4a:
+    if (ABCIsConst)
+      Res = Nor(Xnor(A, C), Nor(B, C));
+    break;
+  case 0x4b:
+    if (ABCIsConst)
+      Res = Xor(A, Or(C, Not(B)));
+    break;
+  case 0x4c:
+    if (ABCIsConst)
+      Res = And(Nand(A, C), B);
+    break;
+  case 0x4d:
+    if (ABCIsConst)
+      Res = Xor(Or(Xor(A, B), Xnor(A, C)), A);
+    break;
+  case 0x4e:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Xor(A, B), C));
+    break;
+  case 0x4f:
+    if (ABCIsConst)
+      Res = Nand(A, Nand(B, Not(C)));
+    break;
+  case 0x50:
+    if (ACIsConst)
+      Res = Nor(C, Not(A));
+    break;
+  case 0x51:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, Not(B)), C);
+    break;
+  case 0x52:
+    if (ABCIsConst)
+      Res = And(Xor(A, C), Nand(B, C));
+    break;
+  case 0x53:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(B, C), A), C);
+    break;
+  case 0x54:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, B), C);
+    break;
+  case 0x55:
+    Res = Not(C);
+    break;
+  case 0x56:
+    if (ABCIsConst)
+      Res = Xor(Or(A, B), C);
+    break;
+  case 0x57:
+    if (ABCIsConst)
+      Res = Nand(Or(A, B), C);
+    break;
+  case 0x58:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, B), Xnor(A, C));
+    break;
+  case 0x59:
+    if (ABCIsConst)
+      Res = Xor(Or(A, Not(B)), C);
+    break;
+  case 0x5a:
+    Res = Xor(A, C);
+    break;
+  case 0x5b:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Nor(A, B), C));
+    break;
+  case 0x5c:
+    if (ABCIsConst)
+      Res = Xor(Or(Xor(B, C), A), C);
+    break;
+  case 0x5d:
+    if (ABCIsConst)
+      Res = Nand(Or(A, Not(B)), C);
+    break;
+  case 0x5e:
+    if (ABCIsConst)
+      Res = Xor(A, Or(Nor(A, Not(B)), C));
+    break;
+  case 0x5f:
+    if (ACIsConst)
+      Res = Nand(A, C);
+    break;
+  case 0x60:
+    if (ABCIsConst)
+      Res = And(A, Xor(B, C));
+    break;
+  case 0x61:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(A, B), And(B, C)), C);
+    break;
+  case 0x62:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, C), Xnor(B, C));
+    break;
+  case 0x63:
+    if (ABCIsConst)
+      Res = Xor(B, Or(C, Not(A)));
+    break;
+  case 0x64:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, B), Xnor(B, C));
+    break;
+  case 0x65:
+    if (ABCIsConst)
+      Res = Xor(Or(B, Not(A)), C);
+    break;
+  case 0x66:
+    Res = Xor(B, C);
+    break;
+  case 0x67:
+    if (ABCIsConst)
+      Res = Or(Nor(A, B), Xor(B, C));
+    break;
+  case 0x68:
+    if (ABCIsConst)
+      Res = Xor(Xor(A, B), Nor(Nor(A, B), C));
+    break;
+  case 0x69:
+    if (ABCIsConst)
+      Res = Xor(Xnor(A, B), C);
+    break;
+  case 0x6a:
+    if (ABCIsConst)
+      Res = Xor(And(A, B), C);
+    break;
+  case 0x6b:
+    if (ABCIsConst)
+      Res = Or(Nor(A, B), Xor(Xnor(A, B), C));
+    break;
+  case 0x6c:
+    if (ABCIsConst)
+      Res = Xor(And(A, C), B);
+    break;
+  case 0x6d:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(A, B), Nor(A, C)), C);
+    break;
+  case 0x6e:
+    if (ABCIsConst)
+      Res = Or(Nor(A, Not(B)), Xor(B, C));
+    break;
+  case 0x6f:
+    if (ABCIsConst)
+      Res = Nand(A, Xnor(B, C));
+    break;
+  case 0x70:
+    if (ABCIsConst)
+      Res = And(A, Nand(B, C));
+    break;
+  case 0x71:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xor(A, B), Xor(A, C)), A);
+    break;
+  case 0x72:
+    if (ABCIsConst)
+      Res = Xor(Or(Xor(A, B), C), B);
+    break;
+  case 0x73:
+    if (ABCIsConst)
+      Res = Nand(Nand(A, Not(C)), B);
+    break;
+  case 0x74:
+    if (ABCIsConst)
+      Res = Xor(Or(Xor(A, C), B), C);
+    break;
+  case 0x75:
+    if (ABCIsConst)
+      Res = Nand(Nand(A, Not(B)), C);
+    break;
+  case 0x76:
+    if (ABCIsConst)
+      Res = Xor(B, Or(Nor(B, Not(A)), C));
+    break;
+  case 0x77:
+    if (BCIsConst)
+      Res = Nand(B, C);
+    break;
+  case 0x78:
+    if (ABCIsConst)
+      Res = Xor(A, And(B, C));
+    break;
+  case 0x79:
+    if (ABCIsConst)
+      Res = Xor(Or(Xnor(A, B), Nor(B, C)), C);
+    break;
+  case 0x7a:
+    if (ABCIsConst)
+      Res = Or(Xor(A, C), Nor(B, Not(A)));
+    break;
+  case 0x7b:
+    if (ABCIsConst)
+      Res = Nand(Xnor(A, C), B);
+    break;
+  case 0x7c:
+    if (ABCIsConst)
+      Res = Or(Xor(A, B), Nor(C, Not(A)));
+    break;
+  case 0x7d:
+    if (ABCIsConst)
+      Res = Nand(Xnor(A, B), C);
+    break;
+  case 0x7e:
+    if (ABCIsConst)
+      Res = Or(Xor(A, B), Xor(A, C));
+    break;
+  case 0x7f:
+    if (ABCIsConst)
+      Res = Nand(And(A, B), C);
+    break;
+  case 0x80:
+    if (ABCIsConst)
+      Res = And(And(A, B), C);
+    break;
+  case 0x81:
+    if (ABCIsConst)
+      Res = Nor(Xor(A, B), Xor(A, C));
+    break;
+  case 0x82:
+    if (ABCIsConst)
+      Res = And(Xnor(A, B), C);
+    break;
+  case 0x83:
+    if (ABCIsConst)
+      Res = Nor(Xor(A, B), Nor(C, Not(A)));
+    break;
+  case 0x84:
+    if (ABCIsConst)
+      Res = And(Xnor(A, C), B);
+    break;
+  case 0x85:
+    if (ABCIsConst)
+      Res = Nor(Xor(A, C), Nor(B, Not(A)));
+    break;
+  case 0x86:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xnor(A, B), Nor(B, C)), C);
+    break;
+  case 0x87:
+    if (ABCIsConst)
+      Res = Xor(A, Nand(B, C));
+    break;
+  case 0x88:
+    Res = And(B, C);
+    break;
+  case 0x89:
+    if (ABCIsConst)
+      Res = Xor(B, Nor(Nor(B, Not(A)), C));
+    break;
+  case 0x8a:
+    if (ABCIsConst)
+      Res = And(Nand(A, Not(B)), C);
+    break;
+  case 0x8b:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xor(A, C), B), C);
+    break;
+  case 0x8c:
+    if (ABCIsConst)
+      Res = And(Nand(A, Not(C)), B);
+    break;
+  case 0x8d:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xor(A, B), C), B);
+    break;
+  case 0x8e:
+    if (ABCIsConst)
+      Res = Xor(Or(Xor(A, B), Xor(A, C)), A);
+    break;
+  case 0x8f:
+    if (ABCIsConst)
+      Res = Nand(A, Nand(B, C));
+    break;
+  case 0x90:
+    if (ABCIsConst)
+      Res = And(A, Xnor(B, C));
+    break;
+  case 0x91:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, Not(B)), Xor(B, C));
+    break;
+  case 0x92:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xnor(A, B), Nor(A, C)), C);
+    break;
+  case 0x93:
+    if (ABCIsConst)
+      Res = Xor(Nand(A, C), B);
+    break;
+  case 0x94:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, B), Xor(Xnor(A, B), C));
+    break;
+  case 0x95:
+    if (ABCIsConst)
+      Res = Xor(Nand(A, B), C);
+    break;
+  case 0x96:
+    if (ABCIsConst)
+      Res = Xor(Xor(A, B), C);
+    break;
+  case 0x97:
+    if (ABCIsConst)
+      Res = Xor(Xor(A, B), Or(Nor(A, B), C));
+    break;
+  case 0x98:
+    if (ABCIsConst)
+      Res = Nor(Nor(A, B), Xor(B, C));
+    break;
+  case 0x99:
+    if (BCIsConst)
+      Res = Xnor(B, C);
+    break;
+  case 0x9a:
+    if (ABCIsConst)
+      Res = Xor(Nor(B, Not(A)), C);
+    break;
+  case 0x9b:
+    if (ABCIsConst)
+      Res = Or(Nor(A, B), Xnor(B, C));
+    break;
+  case 0x9c:
+    if (ABCIsConst)
+      Res = Xor(B, Nor(C, Not(A)));
+    break;
+  case 0x9d:
+    if (ABCIsConst)
+      Res = Or(Nor(A, C), Xnor(B, C));
+    break;
+  case 0x9e:
+    if (ABCIsConst)
+      Res = Xor(And(Xor(A, B), Nand(B, C)), C);
+    break;
+  case 0x9f:
+    if (ABCIsConst)
+      Res = Nand(A, Xor(B, C));
+    break;
+  case 0xa0:
+    Res = And(A, C);
+    break;
+  case 0xa1:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Nor(A, Not(B)), C));
+    break;
+  case 0xa2:
+    if (ABCIsConst)
+      Res = And(Or(A, Not(B)), C);
+    break;
+  case 0xa3:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xor(B, C), A), C);
+    break;
+  case 0xa4:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Nor(A, B), C));
+    break;
+  case 0xa5:
+    if (ACIsConst)
+      Res = Xnor(A, C);
+    break;
+  case 0xa6:
+    if (ABCIsConst)
+      Res = Xor(Nor(A, Not(B)), C);
+    break;
+  case 0xa7:
+    if (ABCIsConst)
+      Res = Or(Nor(A, B), Xnor(A, C));
+    break;
+  case 0xa8:
+    if (ABCIsConst)
+      Res = And(Or(A, B), C);
+    break;
+  case 0xa9:
+    if (ABCIsConst)
+      Res = Xor(Nor(A, B), C);
+    break;
+  case 0xaa:
+    Res = C;
+    break;
+  case 0xab:
+    if (ABCIsConst)
+      Res = Or(Nor(A, B), C);
+    break;
+  case 0xac:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xnor(B, C), A), C);
+    break;
+  case 0xad:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, C), And(B, C));
+    break;
+  case 0xae:
+    if (ABCIsConst)
+      Res = Or(Nor(A, Not(B)), C);
+    break;
+  case 0xaf:
+    if (ACIsConst)
+      Res = Or(C, Not(A));
+    break;
+  case 0xb0:
+    if (ABCIsConst)
+      Res = And(A, Nand(B, Not(C)));
+    break;
+  case 0xb1:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Xor(A, B), C));
+    break;
+  case 0xb2:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xor(A, B), Xnor(A, C)), A);
+    break;
+  case 0xb3:
+    if (ABCIsConst)
+      Res = Nand(Nand(A, C), B);
+    break;
+  case 0xb4:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(C, Not(B)));
+    break;
+  case 0xb5:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, C), Nor(B, C));
+    break;
+  case 0xb6:
+    if (ABCIsConst)
+      Res = Xor(And(Xor(A, B), Nand(A, C)), C);
+    break;
+  case 0xb7:
+    if (ABCIsConst)
+      Res = Nand(Xor(A, C), B);
+    break;
+  case 0xb8:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xnor(A, C), B), C);
+    break;
+  case 0xb9:
+    if (ABCIsConst)
+      Res = Xor(Nor(And(A, C), B), C);
+    break;
+  case 0xba:
+    if (ABCIsConst)
+      Res = Or(Nor(B, Not(A)), C);
+    break;
+  case 0xbb:
+    if (BCIsConst)
+      Res = Or(C, Not(B));
+    break;
+  case 0xbc:
+    if (ABCIsConst)
+      Res = Xor(A, And(Nand(A, C), B));
+    break;
+  case 0xbd:
+    if (ABCIsConst)
+      Res = Or(Xor(A, B), Xnor(A, C));
+    break;
+  case 0xbe:
+    if (ABCIsConst)
+      Res = Or(Xor(A, B), C);
+    break;
+  case 0xbf:
+    if (ABCIsConst)
+      Res = Or(Nand(A, B), C);
+    break;
+  case 0xc0:
+    Res = And(A, B);
+    break;
+  case 0xc1:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Nor(A, Not(C)), B));
+    break;
+  case 0xc2:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Nor(A, C), B));
+    break;
+  case 0xc3:
+    if (ABIsConst)
+      Res = Xnor(A, B);
+    break;
+  case 0xc4:
+    if (ABCIsConst)
+      Res = And(Or(A, Not(C)), B);
+    break;
+  case 0xc5:
+    if (ABCIsConst)
+      Res = Xor(B, Nor(A, Xor(B, C)));
+    break;
+  case 0xc6:
+    if (ABCIsConst)
+      Res = Xor(Nor(A, Not(C)), B);
+    break;
+  case 0xc7:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, B), Nor(A, C));
+    break;
+  case 0xc8:
+    if (ABCIsConst)
+      Res = And(Or(A, C), B);
+    break;
+  case 0xc9:
+    if (ABCIsConst)
+      Res = Xor(Nor(A, C), B);
+    break;
+  case 0xca:
+    if (ABCIsConst)
+      Res = Xor(B, Nor(A, Xnor(B, C)));
+    break;
+  case 0xcb:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, B), And(B, C));
+    break;
+  case 0xcc:
+    Res = B;
+    break;
+  case 0xcd:
+    if (ABCIsConst)
+      Res = Or(Nor(A, C), B);
+    break;
+  case 0xce:
+    if (ABCIsConst)
+      Res = Or(Nor(A, Not(C)), B);
+    break;
+  case 0xcf:
+    if (ABIsConst)
+      Res = Or(B, Not(A));
+    break;
+  case 0xd0:
+    if (ABCIsConst)
+      Res = And(A, Or(B, Not(C)));
+    break;
+  case 0xd1:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Xor(A, C), B));
+    break;
+  case 0xd2:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(B, Not(C)));
+    break;
+  case 0xd3:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, B), Nor(B, C));
+    break;
+  case 0xd4:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xnor(A, B), Xor(A, C)), A);
+    break;
+  case 0xd5:
+    if (ABCIsConst)
+      Res = Nand(Nand(A, B), C);
+    break;
+  case 0xd6:
+    if (ABCIsConst)
+      Res = Xor(Xor(A, B), Or(And(A, B), C));
+    break;
+  case 0xd7:
+    if (ABCIsConst)
+      Res = Nand(Xor(A, B), C);
+    break;
+  case 0xd8:
+    if (ABCIsConst)
+      Res = Xor(Nor(Xnor(A, B), C), B);
+    break;
+  case 0xd9:
+    if (ABCIsConst)
+      Res = Or(And(A, B), Xnor(B, C));
+    break;
+  case 0xda:
+    if (ABCIsConst)
+      Res = Xor(A, And(Nand(A, B), C));
+    break;
+  case 0xdb:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, B), Xor(A, C));
+    break;
+  case 0xdc:
+    if (ABCIsConst)
+      Res = Or(B, Nor(C, Not(A)));
+    break;
+  case 0xdd:
+    if (BCIsConst)
+      Res = Or(B, Not(C));
+    break;
+  case 0xde:
+    if (ABCIsConst)
+      Res = Or(Xor(A, C), B);
+    break;
+  case 0xdf:
+    if (ABCIsConst)
+      Res = Or(Nand(A, C), B);
+    break;
+  case 0xe0:
+    if (ABCIsConst)
+      Res = And(A, Or(B, C));
+    break;
+  case 0xe1:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(B, C));
+    break;
+  case 0xe2:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Xnor(A, C), B));
+    break;
+  case 0xe3:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(And(A, C), B));
+    break;
+  case 0xe4:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(Xnor(A, B), C));
+    break;
+  case 0xe5:
+    if (ABCIsConst)
+      Res = Xor(A, Nor(And(A, B), C));
+    break;
+  case 0xe6:
+    if (ABCIsConst)
+      Res = Or(And(A, B), Xor(B, C));
+    break;
+  case 0xe7:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, B), Xnor(A, C));
+    break;
+  case 0xe8:
+    if (ABCIsConst)
+      Res = Xor(Or(A, B), Nor(Xnor(A, B), C));
+    break;
+  case 0xe9:
+    if (ABCIsConst)
+      Res = Xor(Xor(A, B), Nand(Nand(A, B), C));
+    break;
+  case 0xea:
+    if (ABCIsConst)
+      Res = Or(And(A, B), C);
+    break;
+  case 0xeb:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, B), C);
+    break;
+  case 0xec:
+    if (ABCIsConst)
+      Res = Or(And(A, C), B);
+    break;
+  case 0xed:
+    if (ABCIsConst)
+      Res = Or(Xnor(A, C), B);
+    break;
+  case 0xee:
+    Res = Or(B, C);
+    break;
+  case 0xef:
+    if (ABCIsConst)
+      Res = Nand(A, Nor(B, C));
+    break;
+  case 0xf0:
+    Res = A;
+    break;
+  case 0xf1:
+    if (ABCIsConst)
+      Res = Or(A, Nor(B, C));
+    break;
+  case 0xf2:
+    if (ABCIsConst)
+      Res = Or(A, Nor(B, Not(C)));
+    break;
+  case 0xf3:
+    if (ABIsConst)
+      Res = Or(A, Not(B));
+    break;
+  case 0xf4:
+    if (ABCIsConst)
+      Res = Or(A, Nor(C, Not(B)));
+    break;
+  case 0xf5:
+    if (ACIsConst)
+      Res = Or(A, Not(C));
+    break;
+  case 0xf6:
+    if (ABCIsConst)
+      Res = Or(A, Xor(B, C));
+    break;
+  case 0xf7:
+    if (ABCIsConst)
+      Res = Or(A, Nand(B, C));
+    break;
+  case 0xf8:
+    if (ABCIsConst)
+      Res = Or(A, And(B, C));
+    break;
+  case 0xf9:
+    if (ABCIsConst)
+      Res = Or(A, Xnor(B, C));
+    break;
+  case 0xfa:
+    Res = Or(A, C);
+    break;
+  case 0xfb:
+    if (ABCIsConst)
+      Res = Nand(Nor(A, C), B);
+    break;
+  case 0xfc:
+    Res = Or(A, B);
+    break;
+  case 0xfd:
+    if (ABCIsConst)
+      Res = Nand(Nor(A, B), C);
+    break;
+  case 0xfe:
+    if (ABCIsConst)
+      Res = Or(Or(A, B), C);
+    break;
+  case 0xff:
+    Res = {Constant::getAllOnesValue(Ty), 0xff};
+    break;
+  }
+
+  assert((Res.first == nullptr || Res.second == Imm) &&
+         "Simplification of ternary logic does not verify!");
+  return Res.first;
+}
+
 static Value *simplifyX86insertps(const IntrinsicInst &II,
                                   InstCombiner::BuilderTy &Builder) {
   auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
@@ -1728,6 +2797,16 @@
     }
     break;
 
+  case Intrinsic::x86_avx512_pternlog_d_128:
+  case Intrinsic::x86_avx512_pternlog_d_256:
+  case Intrinsic::x86_avx512_pternlog_d_512:
+  case Intrinsic::x86_avx512_pternlog_q_128:
+  case Intrinsic::x86_avx512_pternlog_q_256:
+  case Intrinsic::x86_avx512_pternlog_q_512:
+    if (Value *V = simplifyTernarylogic(II, IC.Builder)) {
+      return IC.replaceInstUsesWith(II, V);
+    }
+    break;
   default:
     break;
   }
diff --git a/llvm/test/CodeGen/X86/ternlog.ll b/llvm/test/CodeGen/X86/ternlog.ll
--- a/llvm/test/CodeGen/X86/ternlog.ll
+++ b/llvm/test/CodeGen/X86/ternlog.ll
@@ -16,7 +16,7 @@
 define <16 x i32> @vpternlog_d_v512_imm0(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $0, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 0)
   ret <16 x i32> %r
@@ -151,7 +151,7 @@
 define <8 x i64> @vpternlog_q_v512_imm15(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_q_v512_imm15:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogq $15, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpternlogq $51, %zmm0, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2, i32 15)
   ret <8 x i64> %r
@@ -476,7 +476,8 @@
 define <8 x i64> @vpternlog_q_v512_imm51(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_q_v512_imm51:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogq $51, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT:    vpternlogq $15, %zmm1, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2, i32 51)
   ret <8 x i64> %r
@@ -557,7 +558,7 @@
 define <16 x i32> @vpternlog_d_v512_imm60(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm60:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $60, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpxord %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 60)
   ret <16 x i32> %r
@@ -782,7 +783,8 @@
 define <2 x i64> @vpternlog_q_v128_imm85(<2 x i64> %v0, <2 x i64> %v1, <2 x i64> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_q_v128_imm85:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogq $85, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
+; CHECK-NEXT:    vpternlogq $15, %xmm2, %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> %v0, <2 x i64> %v1, <2 x i64> %v2, i32 85)
   ret <2 x i64> %r
@@ -827,7 +829,7 @@
 define <16 x i32> @vpternlog_d_v512_imm90(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm90:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $90, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpxord %zmm2, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 90)
   ret <16 x i32> %r
@@ -935,7 +937,7 @@
 define <16 x i32> @vpternlog_d_v512_imm102(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm102:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $102, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpxord %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 102)
   ret <16 x i32> %r
@@ -1242,7 +1244,7 @@
 define <4 x i32> @vpternlog_d_v128_imm136(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v128_imm136:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $136, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vandps %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 136)
   ret <4 x i32> %r
@@ -1458,7 +1460,7 @@
 define <4 x i32> @vpternlog_d_v128_imm160(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v128_imm160:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $160, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 160)
   ret <4 x i32> %r
@@ -1548,7 +1550,7 @@
 define <8 x i32> @vpternlog_d_v256_imm170(<8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v256_imm170:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $170, %ymm2, %ymm1, %ymm0
+; CHECK-NEXT:    vmovaps %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2, i32 170)
   ret <8 x i32> %r
@@ -1746,7 +1748,7 @@
 define <16 x i32> @vpternlog_d_v512_imm192(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm192:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $192, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpandd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 192)
   ret <16 x i32> %r
@@ -1854,7 +1856,7 @@
 define <16 x i32> @vpternlog_d_v512_imm204(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm204:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $204, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 204)
   ret <16 x i32> %r
@@ -2160,7 +2162,7 @@
 define <4 x i32> @vpternlog_d_v128_imm238(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v128_imm238:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $238, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vorps %xmm2, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 238)
   ret <4 x i32> %r
@@ -2178,7 +2180,6 @@
 define <16 x i32> @vpternlog_d_v512_imm240(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm240:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $240, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 240)
   ret <16 x i32> %r
@@ -2268,7 +2269,7 @@
 define <4 x i32> @vpternlog_d_v128_imm250(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v128_imm250:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $250, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vorps %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 250)
   ret <4 x i32> %r
@@ -2286,7 +2287,7 @@
 define <16 x i32> @vpternlog_d_v512_imm252(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_d_v512_imm252:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $252, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpord %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 252)
   ret <16 x i32> %r
@@ -2314,7 +2315,7 @@
 define <8 x i64> @vpternlog_q_v512_imm255(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2) nounwind {
 ; CHECK-LABEL: vpternlog_q_v512_imm255:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogq $255, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2, i32 255)
   ret <8 x i64> %r
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-ternlog.ll b/llvm/test/Transforms/InstCombine/X86/x86-ternlog.ll
--- a/llvm/test/Transforms/InstCombine/X86/x86-ternlog.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-ternlog.ll
@@ -10,8 +10,7 @@
 define <16 x i32> @vpternlog_d_v512_imm0(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm0(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 0)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32> zeroinitializer
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 0)
   ret <16 x i32> %r
 }
@@ -145,7 +144,7 @@
 define <8 x i64> @vpternlog_q_v512_imm15(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2) {
 ; CHECK-LABEL: @vpternlog_q_v512_imm15(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> [[V0:%.*]], <8 x i64> [[V1:%.*]], <8 x i64> [[V2:%.*]], i32 15)
+; CHECK-NEXT:    [[R:%.*]] = xor <8 x i64> [[V0:%.*]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
 ; CHECK-NEXT:    ret <8 x i64> [[R]]
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2, i32 15)
   ret <8 x i64> %r
@@ -469,7 +468,7 @@
 define <8 x i64> @vpternlog_q_v512_imm51(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2) {
 ; CHECK-LABEL: @vpternlog_q_v512_imm51(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> [[V0:%.*]], <8 x i64> [[V1:%.*]], <8 x i64> [[V2:%.*]], i32 51)
+; CHECK-NEXT:    [[R:%.*]] = xor <8 x i64> [[V1:%.*]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
 ; CHECK-NEXT:    ret <8 x i64> [[R]]
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2, i32 51)
   ret <8 x i64> %r
@@ -550,7 +549,7 @@
 define <16 x i32> @vpternlog_d_v512_imm60(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm60(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 60)
+; CHECK-NEXT:    [[R:%.*]] = xor <16 x i32> [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    ret <16 x i32> [[R]]
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 60)
   ret <16 x i32> %r
@@ -775,7 +774,7 @@
 define <2 x i64> @vpternlog_q_v128_imm85(<2 x i64> %v0, <2 x i64> %v1, <2 x i64> %v2) {
 ; CHECK-LABEL: @vpternlog_q_v128_imm85(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> [[V0:%.*]], <2 x i64> [[V1:%.*]], <2 x i64> [[V2:%.*]], i32 85)
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i64> [[V2:%.*]], <i64 -1, i64 -1>
 ; CHECK-NEXT:    ret <2 x i64> [[R]]
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> %v0, <2 x i64> %v1, <2 x i64> %v2, i32 85)
   ret <2 x i64> %r
@@ -820,7 +819,7 @@
 define <16 x i32> @vpternlog_d_v512_imm90(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm90(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 90)
+; CHECK-NEXT:    [[R:%.*]] = xor <16 x i32> [[V0:%.*]], [[V2:%.*]]
 ; CHECK-NEXT:    ret <16 x i32> [[R]]
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 90)
   ret <16 x i32> %r
@@ -928,7 +927,7 @@
 define <16 x i32> @vpternlog_d_v512_imm102(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm102(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 102)
+; CHECK-NEXT:    [[R:%.*]] = xor <16 x i32> [[V1:%.*]], [[V2:%.*]]
 ; CHECK-NEXT:    ret <16 x i32> [[R]]
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 102)
   ret <16 x i32> %r
@@ -1234,7 +1233,7 @@
 define <4 x i32> @vpternlog_d_v128_imm136(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v128_imm136(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> [[V0:%.*]], <4 x i32> [[V1:%.*]], <4 x i32> [[V2:%.*]], i32 136)
+; CHECK-NEXT:    [[R:%.*]] = and <4 x i32> [[V1:%.*]], [[V2:%.*]]
 ; CHECK-NEXT:    ret <4 x i32> [[R]]
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 136)
   ret <4 x i32> %r
@@ -1450,7 +1449,7 @@
 define <4 x i32> @vpternlog_d_v128_imm160(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v128_imm160(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> [[V0:%.*]], <4 x i32> [[V1:%.*]], <4 x i32> [[V2:%.*]], i32 160)
+; CHECK-NEXT:    [[R:%.*]] = and <4 x i32> [[V0:%.*]], [[V2:%.*]]
 ; CHECK-NEXT:    ret <4 x i32> [[R]]
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 160)
   ret <4 x i32> %r
@@ -1540,8 +1539,7 @@
 define <8 x i32> @vpternlog_d_v256_imm170(<8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v256_imm170(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> [[V0:%.*]], <8 x i32> [[V1:%.*]], <8 x i32> [[V2:%.*]], i32 170)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32> [[V2:%.*]]
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> %v0, <8 x i32> %v1, <8 x i32> %v2, i32 170)
   ret <8 x i32> %r
 }
@@ -1738,7 +1736,7 @@
 define <16 x i32> @vpternlog_d_v512_imm192(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm192(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 192)
+; CHECK-NEXT:    [[R:%.*]] = and <16 x i32> [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    ret <16 x i32> [[R]]
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 192)
   ret <16 x i32> %r
@@ -1846,8 +1844,7 @@
 define <16 x i32> @vpternlog_d_v512_imm204(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm204(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 204)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32> [[V1:%.*]]
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 204)
   ret <16 x i32> %r
 }
@@ -2152,7 +2149,7 @@
 define <4 x i32> @vpternlog_d_v128_imm238(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v128_imm238(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> [[V0:%.*]], <4 x i32> [[V1:%.*]], <4 x i32> [[V2:%.*]], i32 238)
+; CHECK-NEXT:    [[R:%.*]] = or <4 x i32> [[V1:%.*]], [[V2:%.*]]
 ; CHECK-NEXT:    ret <4 x i32> [[R]]
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 238)
   ret <4 x i32> %r
@@ -2170,8 +2167,7 @@
 define <16 x i32> @vpternlog_d_v512_imm240(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm240(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 240)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32> [[V0:%.*]]
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 240)
   ret <16 x i32> %r
 }
@@ -2260,7 +2256,7 @@
 define <4 x i32> @vpternlog_d_v128_imm250(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v128_imm250(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> [[V0:%.*]], <4 x i32> [[V1:%.*]], <4 x i32> [[V2:%.*]], i32 250)
+; CHECK-NEXT:    [[R:%.*]] = or <4 x i32> [[V0:%.*]], [[V2:%.*]]
 ; CHECK-NEXT:    ret <4 x i32> [[R]]
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, i32 250)
   ret <4 x i32> %r
@@ -2278,7 +2274,7 @@
 define <16 x i32> @vpternlog_d_v512_imm252(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2) {
 ; CHECK-LABEL: @vpternlog_d_v512_imm252(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> [[V0:%.*]], <16 x i32> [[V1:%.*]], <16 x i32> [[V2:%.*]], i32 252)
+; CHECK-NEXT:    [[R:%.*]] = or <16 x i32> [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    ret <16 x i32> [[R]]
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %v0, <16 x i32> %v1, <16 x i32> %v2, i32 252)
   ret <16 x i32> %r
@@ -2305,8 +2301,7 @@
 define <8 x i64> @vpternlog_q_v512_imm255(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2) {
 ; CHECK-LABEL: @vpternlog_q_v512_imm255(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> [[V0:%.*]], <8 x i64> [[V1:%.*]], <8 x i64> [[V2:%.*]], i32 255)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %v0, <8 x i64> %v1, <8 x i64> %v2, i32 255)
   ret <8 x i64> %r
 }
@@ -2314,8 +2309,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm0() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm0(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 0)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32> zeroinitializer
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 0)
   ret <16 x i32> %r
 }
@@ -2323,8 +2317,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm1() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm1(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 1)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 1)
   ret <2 x i64> %r
 }
@@ -2332,8 +2325,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm2() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm2(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 2)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32> zeroinitializer
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 2)
   ret <8 x i32> %r
 }
@@ -2341,8 +2333,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm3() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm3(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 3)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 3)
   ret <8 x i64> %r
 }
@@ -2350,8 +2341,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm4() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm4(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 4)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 4)
   ret <4 x i32> %r
 }
@@ -2359,8 +2349,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm5() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm5(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 5)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 5)
   ret <4 x i64> %r
 }
@@ -2368,8 +2357,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm6() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm6(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 6)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 6)
   ret <16 x i32> %r
 }
@@ -2377,8 +2365,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm7() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm7(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 7)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 7)
   ret <2 x i64> %r
 }
@@ -2386,8 +2373,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm8() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm8(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 8)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 8)
   ret <8 x i32> %r
 }
@@ -2395,8 +2381,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm9() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm9(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 9)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 9)
   ret <8 x i64> %r
 }
@@ -2404,8 +2389,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm10() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm10(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 10)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 10)
   ret <4 x i32> %r
 }
@@ -2413,8 +2397,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm11() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm11(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 11)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 11)
   ret <4 x i64> %r
 }
@@ -2422,8 +2405,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm12() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm12(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 12)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 12)
   ret <16 x i32> %r
 }
@@ -2431,8 +2413,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm13() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm13(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 13)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 13)
   ret <2 x i64> %r
 }
@@ -2440,8 +2421,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm14() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm14(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 14)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 14)
   ret <8 x i32> %r
 }
@@ -2449,8 +2429,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm15() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm15(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 15)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 15)
   ret <8 x i64> %r
 }
@@ -2458,8 +2437,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm16() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm16(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 16)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 16)
   ret <4 x i32> %r
 }
@@ -2467,8 +2445,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm17() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm17(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 17)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 17)
   ret <4 x i64> %r
 }
@@ -2476,8 +2453,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm18() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm18(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 18)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 18)
   ret <16 x i32> %r
 }
@@ -2485,8 +2461,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm19() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm19(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 19)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 19)
   ret <2 x i64> %r
 }
@@ -2494,8 +2469,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm20() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm20(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 20)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 20)
   ret <8 x i32> %r
 }
@@ -2503,8 +2477,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm21() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm21(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 21)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 21)
   ret <8 x i64> %r
 }
@@ -2512,8 +2485,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm22() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm22(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 22)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 22)
   ret <4 x i32> %r
 }
@@ -2521,8 +2493,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm23() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm23(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 23)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 23)
   ret <4 x i64> %r
 }
@@ -2530,8 +2501,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm24() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm24(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 24)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 24)
   ret <16 x i32> %r
 }
@@ -2539,8 +2509,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm25() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm25(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 25)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 25)
   ret <2 x i64> %r
 }
@@ -2548,8 +2517,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm26() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm26(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 26)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 26)
   ret <8 x i32> %r
 }
@@ -2557,8 +2525,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm27() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm27(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 27)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 27)
   ret <8 x i64> %r
 }
@@ -2566,8 +2533,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm28() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm28(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 28)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 28)
   ret <4 x i32> %r
 }
@@ -2575,8 +2541,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm29() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm29(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 29)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 29)
   ret <4 x i64> %r
 }
@@ -2584,8 +2549,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm30() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm30(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 30)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 30)
   ret <16 x i32> %r
 }
@@ -2593,8 +2557,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm31() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm31(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 31)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 31)
   ret <2 x i64> %r
 }
@@ -2602,8 +2565,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm32() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm32(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 32)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 32)
   ret <8 x i32> %r
 }
@@ -2611,8 +2573,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm33() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm33(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 33)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 33)
   ret <8 x i64> %r
 }
@@ -2620,8 +2581,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm34() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm34(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32>
@llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 34)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 34)
   ret <4 x i32> %r
 }
@@ -2629,8 +2589,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm35() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm35(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 35)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 35)
   ret <4 x i64> %r
 }
@@ -2638,8 +2597,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm36() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm36(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 36)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 36)
   ret <16 x i32> %r
 }
@@ -2647,8 +2605,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm37() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm37(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 37)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 37)
   ret <2 x i64> %r
 }
@@ -2656,8 +2613,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm38() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm38(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 38)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 38)
   ret <8 x i32> %r
 }
@@ -2665,8 +2621,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm39() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm39(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 39)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 39)
   ret <8 x i64> %r
 }
@@ -2674,8 +2629,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm40() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm40(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 40)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 40)
   ret <4 x i32> %r
 }
@@ -2683,8 +2637,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm41() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm41(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 41)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 41)
   ret <4 x i64> %r
 }
@@ -2692,8 +2645,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm42() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm42(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 42)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 42)
   ret <16 x i32> %r
 }
@@ -2701,8 +2653,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm43() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm43(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 43)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 43)
   ret <2 x i64> %r
 }
@@ -2710,8 +2661,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm44() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm44(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 44)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 44)
   ret <8 x i32> %r
 }
@@ -2719,8 +2669,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm45() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm45(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 45)
-; CHECK-NEXT:    ret <8 x i64> [[R]]
+; CHECK-NEXT:    ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 45)
   ret <8 x i64> %r
 }
@@ -2728,8 +2677,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm46() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm46(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 46)
-; CHECK-NEXT:    ret <4 x i32> [[R]]
+; CHECK-NEXT:    ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 46)
   ret <4 x i32> %r
 }
@@ -2737,8 +2685,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm47() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm47(
-; CHECK-NEXT:    [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 47)
-; CHECK-NEXT:    ret <4 x i64> [[R]]
+; CHECK-NEXT:    ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 47)
   ret <4 x i64> %r
 }
@@ -2746,8 +2693,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm48() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm48(
-; CHECK-NEXT:    [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 48)
-; CHECK-NEXT:    ret <16 x i32> [[R]]
+; CHECK-NEXT:    ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 48)
   ret <16 x i32> %r
 }
@@ -2755,8 +2701,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm49() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm49(
-; CHECK-NEXT:    [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 49)
-; CHECK-NEXT:    ret <2 x i64> [[R]]
+; CHECK-NEXT:    ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 49)
   ret <2 x i64> %r
 }
@@ -2764,8 +2709,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm50() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm50(
-; CHECK-NEXT:    [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 50)
-; CHECK-NEXT:    ret <8 x i32> [[R]]
+; CHECK-NEXT:    ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 50)
   ret <8 x i32> %r
 }
@@ -2773,8 +2717,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm51() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm51(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 51)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 51)
 ret <8 x i64> %r
@@ -2782,8 +2725,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm52() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm52(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 52)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 52)
 ret <4 x i32> %r
@@ -2791,8 +2733,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm53() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm53(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 53)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 53)
 ret <4 x i64> %r
@@ -2800,8 +2741,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm54() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm54(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 54)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 54)
 ret <16 x i32> %r
@@ -2809,8 +2749,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm55() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm55(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 55)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 55)
 ret <2 x i64> %r
@@ -2818,8 +2757,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm56() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm56(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 56)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 56)
 ret <8 x i32> %r
@@ -2827,8 +2765,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm57() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm57(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 57)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 57)
 ret <8 x i64> %r
@@ -2836,8 +2773,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm58() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm58(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 58)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 58)
 ret <4 x i32> %r
@@ -2845,8 +2781,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm59() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm59(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 59)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 59)
 ret <4 x i64> %r
@@ -2854,8 +2789,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm60() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm60(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 60)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 60)
 ret <16 x i32> %r
@@ -2863,8 +2797,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm61() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm61(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 61)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 61)
 ret <2 x i64> %r
@@ -2872,8 +2805,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm62() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm62(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 62)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 62)
 ret <8 x i32> %r
@@ -2881,8 +2813,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm63() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm63(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 63)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 63)
 ret <8 x i64> %r
@@ -2890,8 +2821,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm64() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm64(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 64)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 64)
 ret <4 x i32> %r
@@ -2899,8 +2829,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm65() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm65(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 65)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 65)
 ret <4 x i64> %r
@@ -2908,8 +2837,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm66() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm66(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 66)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 66)
 ret <16 x i32> %r
@@ -2917,8 +2845,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm67() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm67(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 67)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 67)
 ret <2 x i64> %r
@@ -2926,8 +2853,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm68() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm68(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 68)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 68)
 ret <8 x i32> %r
@@ -2935,8 +2861,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm69() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm69(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 69)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 69)
 ret <8 x i64> %r
@@ -2944,8 +2869,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm70() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm70(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 70)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 70)
 ret <4 x i32> %r
@@ -2953,8 +2877,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm71() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm71(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 71)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 71)
 ret <4 x i64> %r
@@ -2962,8 +2885,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm72() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm72(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 72)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 72)
 ret <16 x i32> %r
@@ -2971,8 +2893,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm73() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm73(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 73)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 73)
 ret <2 x i64> %r
@@ -2980,8 +2901,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm74() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm74(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 74)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 74)
 ret <8 x i32> %r
@@ -2989,8 +2909,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm75() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm75(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 75)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 75)
 ret <8 x i64> %r
@@ -2998,8 +2917,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm76() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm76(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 76)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 76)
 ret <4 x i32> %r
@@ -3007,8 +2925,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm77() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm77(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 77)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 77)
 ret <4 x i64> %r
@@ -3016,8 +2933,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm78() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm78(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 78)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 78)
 ret <16 x i32> %r
@@ -3025,8 +2941,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm79() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm79(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 79)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 79)
 ret <2 x i64> %r
@@ -3034,8 +2949,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm80() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm80(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 80)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 80)
 ret <8 x i32> %r
@@ -3043,8 +2957,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm81() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm81(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 81)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 81)
 ret <8 x i64> %r
@@ -3052,8 +2965,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm82() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm82(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 82)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 82)
 ret <4 x i32> %r
@@ -3061,8 +2973,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm83() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm83(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 83)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 83)
 ret <4 x i64> %r
@@ -3070,8 +2981,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm84() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm84(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 84)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 84)
 ret <16 x i32> %r
@@ -3079,8 +2989,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm85() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm85(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 85)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 85)
 ret <2 x i64> %r
@@ -3088,8 +2997,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm86() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm86(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 86)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 86)
 ret <8 x i32> %r
@@ -3097,8 +3005,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm87() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm87(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 87)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 87)
 ret <8 x i64> %r
@@ -3106,8 +3013,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm88() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm88(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 88)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 88)
 ret <4 x i32> %r
@@ -3115,8 +3021,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm89() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm89(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 89)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 89)
 ret <4 x i64> %r
@@ -3124,8 +3029,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm90() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm90(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 90)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 90)
 ret <16 x i32> %r
@@ -3133,8 +3037,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm91() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm91(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 91)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 91)
 ret <2 x i64> %r
@@ -3142,8 +3045,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm92() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm92(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 92)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 92)
 ret <8 x i32> %r
@@ -3151,8 +3053,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm93() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm93(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 93)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 93)
 ret <8 x i64> %r
@@ -3160,8 +3061,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm94() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm94(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 94)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 94)
 ret <4 x i32> %r
@@ -3169,8 +3069,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm95() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm95(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 95)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 95)
 ret <4 x i64> %r
@@ -3178,8 +3077,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm96() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm96(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 96)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 96)
 ret <16 x i32> %r
@@ -3187,8 +3085,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm97() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm97(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 97)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 97)
 ret <2 x i64> %r
@@ -3196,8 +3093,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm98() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm98(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 98)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 98)
 ret <8 x i32> %r
@@ -3205,8 +3101,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm99() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm99(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 99)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 99)
 ret <8 x i64> %r
@@ -3214,8 +3109,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm100() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm100(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 100)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 100)
 ret <4 x i32> %r
@@ -3223,8 +3117,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm101() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm101(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 101)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 101)
 ret <4 x i64> %r
@@ -3232,8 +3125,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm102() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm102(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 102)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 102)
 ret <16 x i32> %r
@@ -3241,8 +3133,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm103() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm103(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 103)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 103)
 ret <2 x i64> %r
@@ -3250,8 +3141,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm104() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm104(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 104)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 104)
 ret <8 x i32> %r
@@ -3259,8 +3149,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm105() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm105(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 105)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 105)
 ret <8 x i64> %r
@@ -3268,8 +3157,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm106() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm106(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 106)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 106)
 ret <4 x i32> %r
@@ -3277,8 +3165,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm107() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm107(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 107)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 107)
 ret <4 x i64> %r
@@ -3286,8 +3173,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm108() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm108(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 108)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 108)
 ret <16 x i32> %r
@@ -3295,8 +3181,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm109() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm109(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 109)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 109)
 ret <2 x i64> %r
@@ -3304,8 +3189,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm110() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm110(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 110)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 110)
 ret <8 x i32> %r
@@ -3313,8 +3197,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm111() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm111(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 111)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 111)
 ret <8 x i64> %r
@@ -3322,8 +3205,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm112() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm112(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 112)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 112)
 ret <4 x i32> %r
@@ -3331,8 +3213,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm113() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm113(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 113)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 113)
 ret <4 x i64> %r
@@ -3340,8 +3221,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm114() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm114(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 114)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 114)
 ret <16 x i32> %r
@@ -3349,8 +3229,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm115() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm115(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 115)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 115)
 ret <2 x i64> %r
@@ -3358,8 +3237,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm116() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm116(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 116)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 116)
 ret <8 x i32> %r
@@ -3367,8 +3245,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm117() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm117(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 117)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 117)
 ret <8 x i64> %r
@@ -3376,8 +3253,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm118() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm118(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 118)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 118)
 ret <4 x i32> %r
@@ -3385,8 +3261,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm119() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm119(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 119)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 119)
 ret <4 x i64> %r
@@ -3394,8 +3269,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm120() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm120(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 120)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 120)
 ret <16 x i32> %r
@@ -3403,8 +3277,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm121() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm121(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 121)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 121)
 ret <2 x i64> %r
@@ -3412,8 +3285,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm122() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm122(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 122)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 122)
 ret <8 x i32> %r
@@ -3421,8 +3293,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm123() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm123(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 123)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 123)
 ret <8 x i64> %r
@@ -3430,8 +3301,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm124() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm124(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 124)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 124)
 ret <4 x i32> %r
@@ -3439,8 +3309,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm125() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm125(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 125)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 125)
 ret <4 x i64> %r
@@ -3448,8 +3317,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm126() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm126(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 126)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 126)
 ret <16 x i32> %r
@@ -3457,8 +3325,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm127() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm127(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 127)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 127)
 ret <2 x i64> %r
@@ -3466,8 +3333,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm128() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm128(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 128)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 128)
 ret <8 x i32> %r
@@ -3475,8 +3341,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm129() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm129(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 129)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 129)
 ret <8 x i64> %r
@@ -3484,8 +3349,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm130() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm130(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 130)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 130)
 ret <4 x i32> %r
@@ -3493,8 +3357,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm131() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm131(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 131)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 131)
 ret <4 x i64> %r
@@ -3502,8 +3365,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm132() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm132(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 132)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 132)
 ret <16 x i32> %r
@@ -3511,8 +3373,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm133() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm133(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 133)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 133)
 ret <2 x i64> %r
@@ -3520,8 +3381,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm134() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm134(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 134)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 134)
 ret <8 x i32> %r
@@ -3529,8 +3389,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm135() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm135(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 135)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 135)
 ret <8 x i64> %r
@@ -3538,8 +3397,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm136() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm136(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 136)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 136)
 ret <4 x i32> %r
@@ -3547,8 +3405,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm137() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm137(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 137)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 137)
 ret <4 x i64> %r
@@ -3556,8 +3413,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm138() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm138(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 138)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 138)
 ret <16 x i32> %r
@@ -3565,8 +3421,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm139() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm139(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 139)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 139)
 ret <2 x i64> %r
@@ -3574,8 +3429,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm140() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm140(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 140)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 140)
 ret <8 x i32> %r
@@ -3583,8 +3437,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm141() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm141(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 141)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 141)
 ret <8 x i64> %r
@@ -3592,8 +3445,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm142() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm142(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 142)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 142)
 ret <4 x i32> %r
@@ -3601,8 +3453,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm143() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm143(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 143)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 143)
 ret <4 x i64> %r
@@ -3610,8 +3461,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm144() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm144(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 144)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 144)
 ret <16 x i32> %r
@@ -3619,8 +3469,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm145() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm145(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 145)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 145)
 ret <2 x i64> %r
@@ -3628,8 +3477,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm146() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm146(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 146)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 146)
 ret <8 x i32> %r
@@ -3637,8 +3485,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm147() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm147(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 147)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 147)
 ret <8 x i64> %r
@@ -3646,8 +3493,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm148() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm148(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 148)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 148)
 ret <4 x i32> %r
@@ -3655,8 +3501,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm149() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm149(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 149)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 149)
 ret <4 x i64> %r
@@ -3664,8 +3509,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm150() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm150(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 150)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 150)
 ret <16 x i32> %r
@@ -3673,8 +3517,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm151() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm151(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 151)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 151)
 ret <2 x i64> %r
@@ -3682,8 +3525,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm152() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm152(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 152)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 152)
 ret <8 x i32> %r
@@ -3691,8 +3533,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm153() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm153(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 153)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 153)
 ret <8 x i64> %r
@@ -3700,8 +3541,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm154() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm154(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 154)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 154)
 ret <4 x i32> %r
@@ -3709,8 +3549,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm155() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm155(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 155)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 155)
 ret <4 x i64> %r
@@ -3718,8 +3557,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm156() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm156(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 156)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 156)
 ret <16 x i32> %r
@@ -3727,8 +3565,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm157() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm157(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 157)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 157)
 ret <2 x i64> %r
@@ -3736,8 +3573,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm158() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm158(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 158)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 158)
 ret <8 x i32> %r
@@ -3745,8 +3581,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm159() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm159(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 159)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 159)
 ret <8 x i64> %r
@@ -3754,8 +3589,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm160() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm160(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 160)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 160)
 ret <4 x i32> %r
@@ -3763,8 +3597,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm161() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm161(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 161)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 161)
 ret <4 x i64> %r
@@ -3772,8 +3605,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm162() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm162(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 162)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 162)
 ret <16 x i32> %r
@@ -3781,8 +3613,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm163() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm163(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 163)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 163)
 ret <2 x i64> %r
@@ -3790,8 +3621,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm164() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm164(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 164)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 164)
 ret <8 x i32> %r
@@ -3799,8 +3629,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm165() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm165(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 165)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 165)
 ret <8 x i64> %r
@@ -3808,8 +3637,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm166() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm166(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 166)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 166)
 ret <4 x i32> %r
@@ -3817,8 +3645,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm167() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm167(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 167)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 167)
 ret <4 x i64> %r
@@ -3826,8 +3653,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm168() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm168(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 168)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 168)
 ret <16 x i32> %r
@@ -3835,8 +3661,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm169() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm169(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 169)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 169)
 ret <2 x i64> %r
@@ -3844,8 +3669,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm170() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm170(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 170)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 170)
 ret <8 x i32> %r
@@ -3853,8 +3677,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm171() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm171(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 171)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 171)
 ret <8 x i64> %r
@@ -3862,8 +3685,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm172() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm172(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 172)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 172)
 ret <4 x i32> %r
@@ -3871,8 +3693,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm173() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm173(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 173)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 173)
 ret <4 x i64> %r
@@ -3880,8 +3701,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm174() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm174(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 174)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 174)
 ret <16 x i32> %r
@@ -3889,8 +3709,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm175() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm175(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 175)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 175)
 ret <2 x i64> %r
@@ -3898,8 +3717,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm176() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm176(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 176)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 176)
 ret <8 x i32> %r
@@ -3907,8 +3725,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm177() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm177(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 177)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 177)
 ret <8 x i64> %r
@@ -3916,8 +3733,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm178() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm178(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 178)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 178)
 ret <4 x i32> %r
@@ -3925,8 +3741,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm179() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm179(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 179)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 179)
 ret <4 x i64> %r
@@ -3934,8 +3749,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm180() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm180(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 180)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 180)
 ret <16 x i32> %r
@@ -3943,8 +3757,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm181() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm181(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 181)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 181)
 ret <2 x i64> %r
@@ -3952,8 +3765,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm182() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm182(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 182)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 182)
 ret <8 x i32> %r
@@ -3961,8 +3773,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm183() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm183(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 183)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 183)
 ret <8 x i64> %r
@@ -3970,8 +3781,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm184() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm184(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 184)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 184)
 ret <4 x i32> %r
@@ -3979,8 +3789,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm185() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm185(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 185)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 185)
 ret <4 x i64> %r
@@ -3988,8 +3797,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm186() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm186(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 186)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 186)
 ret <16 x i32> %r
@@ -3997,8 +3805,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm187() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm187(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 187)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 187)
 ret <2 x i64> %r
@@ -4006,8 +3813,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm188() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm188(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 188)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 188)
 ret <8 x i32> %r
@@ -4015,8 +3821,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm189() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm189(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 189)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 189)
 ret <8 x i64> %r
@@ -4024,8 +3829,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm190() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm190(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 190)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 190)
 ret <4 x i32> %r
@@ -4033,8 +3837,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm191() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm191(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 191)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 191)
 ret <4 x i64> %r
@@ -4042,8 +3845,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm192() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm192(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 192)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 192)
 ret <16 x i32> %r
@@ -4051,8 +3853,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm193() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm193(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 193)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 193)
 ret <2 x i64> %r
@@ -4060,8 +3861,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm194() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm194(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 194)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 194)
 ret <8 x i32> %r
@@ -4069,8 +3869,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm195() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm195(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 195)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 195)
 ret <8 x i64> %r
@@ -4078,8 +3877,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm196() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm196(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 196)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 196)
 ret <4 x i32> %r
@@ -4087,8 +3885,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm197() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm197(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 197)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 197)
 ret <4 x i64> %r
@@ -4096,8 +3893,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm198() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm198(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 198)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 198)
 ret <16 x i32> %r
@@ -4105,8 +3901,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm199() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm199(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 199)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 199)
 ret <2 x i64> %r
@@ -4114,8 +3909,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm200() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm200(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 200)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 200)
 ret <8 x i32> %r
@@ -4123,8 +3917,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm201() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm201(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 201)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 201)
 ret <8 x i64> %r
@@ -4132,8 +3925,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm202() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm202(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 202)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
 %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 202)
 ret <4 x i32> %r
@@ -4141,8 +3933,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm203() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm203(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 203)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
 %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 203)
 ret <4 x i64> %r
@@ -4150,8 +3941,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm204() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm204(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 204)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
 %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 204)
 ret <16 x i32> %r
@@ -4159,8 +3949,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm205() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm205(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 205)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
 %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 205)
 ret <2 x i64> %r
@@ -4168,8 +3957,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm206() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm206(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 206)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
 %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 206)
 ret <8 x i32> %r
@@ -4177,8 +3965,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm207() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm207(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 207)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
 %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 207)
 ret <8 x i64> %r
@@ -4186,8 +3973,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm208() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm208(
-;
CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 208) -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: ret <4 x i32> ; %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 208) ret <4 x i32> %r @@ -4195,8 +3981,7 @@ define <4 x i64> @vpternlog_q_constv256_imm209() { ; CHECK-LABEL: @vpternlog_q_constv256_imm209( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 209) -; CHECK-NEXT: ret <4 x i64> [[R]] +; CHECK-NEXT: ret <4 x i64> ; %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 209) ret <4 x i64> %r @@ -4204,8 +3989,7 @@ define <16 x i32> @vpternlog_d_constv512_imm210() { ; CHECK-LABEL: @vpternlog_d_constv512_imm210( -; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 210) -; CHECK-NEXT: ret <16 x i32> [[R]] +; CHECK-NEXT: ret <16 x i32> ; %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 210) ret <16 x i32> %r @@ -4213,8 +3997,7 @@ define <2 x i64> @vpternlog_q_constv128_imm211() { ; CHECK-LABEL: @vpternlog_q_constv128_imm211( -; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 211) -; CHECK-NEXT: ret <2 x i64> [[R]] +; CHECK-NEXT: ret <2 x i64> ; %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 211) ret <2 x i64> %r @@ -4222,8 +4005,7 @@ define <8 x i32> @vpternlog_d_constv256_imm212() { ; CHECK-LABEL: @vpternlog_d_constv256_imm212( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 212) -; CHECK-NEXT: ret <8 x i32> [[R]] +; CHECK-NEXT: ret <8 x i32> ; %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 212) ret <8 x i32> %r @@ -4231,8 +4013,7 @@ define <8 x i64> @vpternlog_q_constv512_imm213() { ; CHECK-LABEL: @vpternlog_q_constv512_imm213( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 213) -; CHECK-NEXT: ret <8 x i64> [[R]] +; CHECK-NEXT: ret <8 x i64> ; %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 213) ret <8 x i64> %r @@ -4240,8 +4021,7 @@ define <4 x i32> @vpternlog_d_constv128_imm214() { ; CHECK-LABEL: @vpternlog_d_constv128_imm214( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 214) -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: ret <4 x i32> ; %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 214) ret <4 x i32> %r @@ -4249,8 +4029,7 @@ define <4 x i64> @vpternlog_q_constv256_imm215() { ; CHECK-LABEL: @vpternlog_q_constv256_imm215( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 215) -; CHECK-NEXT: ret <4 x i64> [[R]] +; CHECK-NEXT: ret <4 x i64> ; %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 215) ret <4 x i64> %r @@ -4258,8 +4037,7 @@ define <16 x i32> @vpternlog_d_constv512_imm216() { ; CHECK-LABEL: @vpternlog_d_constv512_imm216( -; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , 
i32 216) -; CHECK-NEXT: ret <16 x i32> [[R]] +; CHECK-NEXT: ret <16 x i32> ; %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 216) ret <16 x i32> %r @@ -4267,8 +4045,7 @@ define <2 x i64> @vpternlog_q_constv128_imm217() { ; CHECK-LABEL: @vpternlog_q_constv128_imm217( -; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 217) -; CHECK-NEXT: ret <2 x i64> [[R]] +; CHECK-NEXT: ret <2 x i64> ; %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 217) ret <2 x i64> %r @@ -4276,8 +4053,7 @@ define <8 x i32> @vpternlog_d_constv256_imm218() { ; CHECK-LABEL: @vpternlog_d_constv256_imm218( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 218) -; CHECK-NEXT: ret <8 x i32> [[R]] +; CHECK-NEXT: ret <8 x i32> ; %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 218) ret <8 x i32> %r @@ -4285,8 +4061,7 @@ define <8 x i64> @vpternlog_q_constv512_imm219() { ; CHECK-LABEL: @vpternlog_q_constv512_imm219( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 219) -; CHECK-NEXT: ret <8 x i64> [[R]] +; CHECK-NEXT: ret <8 x i64> ; %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 219) ret <8 x i64> %r @@ -4294,8 +4069,7 @@ define <4 x i32> @vpternlog_d_constv128_imm220() { ; CHECK-LABEL: @vpternlog_d_constv128_imm220( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 220) -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: ret <4 x i32> ; %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 220) ret <4 x i32> %r @@ -4303,8 +4077,7 @@ define <4 x i64> @vpternlog_q_constv256_imm221() { ; CHECK-LABEL: @vpternlog_q_constv256_imm221( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 221) -; CHECK-NEXT: ret <4 x i64> [[R]] +; CHECK-NEXT: ret <4 x i64> ; %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 221) ret <4 x i64> %r @@ -4312,8 +4085,7 @@ define <16 x i32> @vpternlog_d_constv512_imm222() { ; CHECK-LABEL: @vpternlog_d_constv512_imm222( -; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 222) -; CHECK-NEXT: ret <16 x i32> [[R]] +; CHECK-NEXT: ret <16 x i32> ; %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 222) ret <16 x i32> %r @@ -4321,8 +4093,7 @@ define <2 x i64> @vpternlog_q_constv128_imm223() { ; CHECK-LABEL: @vpternlog_q_constv128_imm223( -; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 223) -; CHECK-NEXT: ret <2 x i64> [[R]] +; CHECK-NEXT: ret <2 x i64> ; %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 223) ret <2 x i64> %r @@ -4330,8 +4101,7 @@ define <8 x i32> @vpternlog_d_constv256_imm224() { ; CHECK-LABEL: @vpternlog_d_constv256_imm224( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 224) -; CHECK-NEXT: ret <8 x i32> [[R]] +; CHECK-NEXT: ret <8 x i32> ; %r = tail call <8 x i32> 
@llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 224) ret <8 x i32> %r @@ -4339,8 +4109,7 @@ define <8 x i64> @vpternlog_q_constv512_imm225() { ; CHECK-LABEL: @vpternlog_q_constv512_imm225( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 225) -; CHECK-NEXT: ret <8 x i64> [[R]] +; CHECK-NEXT: ret <8 x i64> ; %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 225) ret <8 x i64> %r @@ -4348,8 +4117,7 @@ define <4 x i32> @vpternlog_d_constv128_imm226() { ; CHECK-LABEL: @vpternlog_d_constv128_imm226( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 226) -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: ret <4 x i32> ; %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 226) ret <4 x i32> %r @@ -4357,8 +4125,7 @@ define <4 x i64> @vpternlog_q_constv256_imm227() { ; CHECK-LABEL: @vpternlog_q_constv256_imm227( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 227) -; CHECK-NEXT: ret <4 x i64> [[R]] +; CHECK-NEXT: ret <4 x i64> ; %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 227) ret <4 x i64> %r @@ -4366,8 +4133,7 @@ define <16 x i32> @vpternlog_d_constv512_imm228() { ; CHECK-LABEL: @vpternlog_d_constv512_imm228( -; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 228) -; CHECK-NEXT: ret <16 x i32> [[R]] +; CHECK-NEXT: ret <16 x i32> ; %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 228) ret <16 x i32> %r @@ -4375,8 +4141,7 @@ define <2 x i64> @vpternlog_q_constv128_imm229() { ; CHECK-LABEL: @vpternlog_q_constv128_imm229( -; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> zeroinitializer, i32 229) -; CHECK-NEXT: ret <2 x i64> [[R]] +; CHECK-NEXT: ret <2 x i64> ; %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 229) ret <2 x i64> %r @@ -4384,8 +4149,7 @@ define <8 x i32> @vpternlog_d_constv256_imm230() { ; CHECK-LABEL: @vpternlog_d_constv256_imm230( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 230) -; CHECK-NEXT: ret <8 x i32> [[R]] +; CHECK-NEXT: ret <8 x i32> ; %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 230) ret <8 x i32> %r @@ -4393,8 +4157,7 @@ define <8 x i64> @vpternlog_q_constv512_imm231() { ; CHECK-LABEL: @vpternlog_q_constv512_imm231( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 231) -; CHECK-NEXT: ret <8 x i64> [[R]] +; CHECK-NEXT: ret <8 x i64> ; %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 231) ret <8 x i64> %r @@ -4402,8 +4165,7 @@ define <4 x i32> @vpternlog_d_constv128_imm232() { ; CHECK-LABEL: @vpternlog_d_constv128_imm232( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 232) -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: ret <4 x i32> ; %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 232) ret <4 x i32> %r @@ 
-4411,8 +4173,7 @@ define <4 x i64> @vpternlog_q_constv256_imm233() { ; CHECK-LABEL: @vpternlog_q_constv256_imm233( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 233) -; CHECK-NEXT: ret <4 x i64> [[R]] +; CHECK-NEXT: ret <4 x i64> ; %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 233) ret <4 x i64> %r @@ -4420,8 +4181,7 @@ define <16 x i32> @vpternlog_d_constv512_imm234() { ; CHECK-LABEL: @vpternlog_d_constv512_imm234( -; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 234) -; CHECK-NEXT: ret <16 x i32> [[R]] +; CHECK-NEXT: ret <16 x i32> ; %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 234) ret <16 x i32> %r @@ -4429,8 +4189,7 @@ define <2 x i64> @vpternlog_q_constv128_imm235() { ; CHECK-LABEL: @vpternlog_q_constv128_imm235( -; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 235) -; CHECK-NEXT: ret <2 x i64> [[R]] +; CHECK-NEXT: ret <2 x i64> ; %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 235) ret <2 x i64> %r @@ -4438,8 +4197,7 @@ define <8 x i32> @vpternlog_d_constv256_imm236() { ; CHECK-LABEL: @vpternlog_d_constv256_imm236( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 236) -; CHECK-NEXT: ret <8 x i32> [[R]] +; CHECK-NEXT: ret <8 x i32> ; %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 236) ret <8 x i32> %r @@ -4447,8 +4205,7 @@ define <8 x i64> @vpternlog_q_constv512_imm237() { ; CHECK-LABEL: @vpternlog_q_constv512_imm237( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 237) -; CHECK-NEXT: ret <8 x i64> [[R]] +; CHECK-NEXT: ret <8 x i64> ; %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 237) ret <8 x i64> %r @@ -4456,8 +4213,7 @@ define <4 x i32> @vpternlog_d_constv128_imm238() { ; CHECK-LABEL: @vpternlog_d_constv128_imm238( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 238) -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: ret <4 x i32> ; %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 238) ret <4 x i32> %r @@ -4465,8 +4221,7 @@ define <4 x i64> @vpternlog_q_constv256_imm239() { ; CHECK-LABEL: @vpternlog_q_constv256_imm239( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 239) -; CHECK-NEXT: ret <4 x i64> [[R]] +; CHECK-NEXT: ret <4 x i64> ; %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 239) ret <4 x i64> %r @@ -4474,8 +4229,7 @@ define <16 x i32> @vpternlog_d_constv512_imm240() { ; CHECK-LABEL: @vpternlog_d_constv512_imm240( -; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 240) -; CHECK-NEXT: ret <16 x i32> [[R]] +; CHECK-NEXT: ret <16 x i32> ; %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 240) ret <16 x i32> %r @@ -4483,8 +4237,7 @@ define <2 x i64> @vpternlog_q_constv128_imm241() { ; CHECK-LABEL: 
@vpternlog_q_constv128_imm241( -; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 241) -; CHECK-NEXT: ret <2 x i64> [[R]] +; CHECK-NEXT: ret <2 x i64> ; %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 241) ret <2 x i64> %r @@ -4492,8 +4245,7 @@ define <8 x i32> @vpternlog_d_constv256_imm242() { ; CHECK-LABEL: @vpternlog_d_constv256_imm242( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 242) -; CHECK-NEXT: ret <8 x i32> [[R]] +; CHECK-NEXT: ret <8 x i32> ; %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 242) ret <8 x i32> %r @@ -4501,8 +4253,7 @@ define <8 x i64> @vpternlog_q_constv512_imm243() { ; CHECK-LABEL: @vpternlog_q_constv512_imm243( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 243) -; CHECK-NEXT: ret <8 x i64> [[R]] +; CHECK-NEXT: ret <8 x i64> ; %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 243) ret <8 x i64> %r @@ -4510,8 +4261,7 @@ define <4 x i32> @vpternlog_d_constv128_imm244() { ; CHECK-LABEL: @vpternlog_d_constv128_imm244( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 244) -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: ret <4 x i32> ; %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 244) ret <4 x i32> %r @@ -4519,8 +4269,7 @@ define <4 x i64> @vpternlog_q_constv256_imm245() { ; CHECK-LABEL: @vpternlog_q_constv256_imm245( -; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 245) -; CHECK-NEXT: ret <4 x i64> [[R]] +; CHECK-NEXT: ret <4 x i64> ; %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 245) ret <4 x i64> %r @@ -4528,8 +4277,7 @@ define <16 x i32> @vpternlog_d_constv512_imm246() { ; CHECK-LABEL: @vpternlog_d_constv512_imm246( -; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 246) -; CHECK-NEXT: ret <16 x i32> [[R]] +; CHECK-NEXT: ret <16 x i32> ; %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 246) ret <16 x i32> %r @@ -4537,8 +4285,7 @@ define <2 x i64> @vpternlog_q_constv128_imm247() { ; CHECK-LABEL: @vpternlog_q_constv128_imm247( -; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 247) -; CHECK-NEXT: ret <2 x i64> [[R]] +; CHECK-NEXT: ret <2 x i64> ; %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 247) ret <2 x i64> %r @@ -4546,8 +4293,7 @@ define <8 x i32> @vpternlog_d_constv256_imm248() { ; CHECK-LABEL: @vpternlog_d_constv256_imm248( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 248) -; CHECK-NEXT: ret <8 x i32> [[R]] +; CHECK-NEXT: ret <8 x i32> ; %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 248) ret <8 x i32> %r @@ -4555,8 +4301,7 @@ define <8 x i64> @vpternlog_q_constv512_imm249() { ; CHECK-LABEL: @vpternlog_q_constv512_imm249( -; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x 
i64> , <8 x i64> , <8 x i64> , i32 249)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 249)
   ret <8 x i64> %r
@@ -4564,8 +4309,7 @@
 define <4 x i32> @vpternlog_d_constv128_imm250() {
 ; CHECK-LABEL: @vpternlog_d_constv128_imm250(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 250)
-; CHECK-NEXT: ret <4 x i32> [[R]]
+; CHECK-NEXT: ret <4 x i32>
 ;
   %r = tail call <4 x i32> @llvm.x86.avx512.pternlog.d.128(<4 x i32> , <4 x i32> , <4 x i32> , i32 250)
   ret <4 x i32> %r
@@ -4573,8 +4317,7 @@
 define <4 x i64> @vpternlog_q_constv256_imm251() {
 ; CHECK-LABEL: @vpternlog_q_constv256_imm251(
-; CHECK-NEXT: [[R:%.*]] = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 251)
-; CHECK-NEXT: ret <4 x i64> [[R]]
+; CHECK-NEXT: ret <4 x i64>
 ;
   %r = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> , <4 x i64> , <4 x i64> , i32 251)
   ret <4 x i64> %r
@@ -4582,8 +4325,7 @@
 define <16 x i32> @vpternlog_d_constv512_imm252() {
 ; CHECK-LABEL: @vpternlog_d_constv512_imm252(
-; CHECK-NEXT: [[R:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 252)
-; CHECK-NEXT: ret <16 x i32> [[R]]
+; CHECK-NEXT: ret <16 x i32>
 ;
   %r = tail call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> , <16 x i32> , <16 x i32> , i32 252)
   ret <16 x i32> %r
@@ -4591,8 +4333,7 @@
 define <2 x i64> @vpternlog_q_constv128_imm253() {
 ; CHECK-LABEL: @vpternlog_q_constv128_imm253(
-; CHECK-NEXT: [[R:%.*]] = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 253)
-; CHECK-NEXT: ret <2 x i64> [[R]]
+; CHECK-NEXT: ret <2 x i64>
 ;
   %r = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> , <2 x i64> , <2 x i64> , i32 253)
   ret <2 x i64> %r
@@ -4600,8 +4341,7 @@
 define <8 x i32> @vpternlog_d_constv256_imm254() {
 ; CHECK-LABEL: @vpternlog_d_constv256_imm254(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 254)
-; CHECK-NEXT: ret <8 x i32> [[R]]
+; CHECK-NEXT: ret <8 x i32>
 ;
   %r = tail call <8 x i32> @llvm.x86.avx512.pternlog.d.256(<8 x i32> , <8 x i32> , <8 x i32> , i32 254)
   ret <8 x i32> %r
@@ -4609,8 +4349,7 @@
 define <8 x i64> @vpternlog_q_constv512_imm255() {
 ; CHECK-LABEL: @vpternlog_q_constv512_imm255(
-; CHECK-NEXT: [[R:%.*]] = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 255)
-; CHECK-NEXT: ret <8 x i64> [[R]]
+; CHECK-NEXT: ret <8 x i64>
 ;
   %r = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> , <8 x i64> , <8 x i64> , i32 255)
   ret <8 x i64> %r
diff --git a/llvm/unittests/Target/X86/CMakeLists.txt b/llvm/unittests/Target/X86/CMakeLists.txt
--- a/llvm/unittests/Target/X86/CMakeLists.txt
+++ b/llvm/unittests/Target/X86/CMakeLists.txt
@@ -5,8 +5,11 @@
 set(LLVM_LINK_COMPONENTS
   Analysis
+  AsmParser
   CodeGen
   Core
+  Passes
+  InstCombine
   MC
   MIRParser
   Support
@@ -19,6 +22,7 @@
 add_llvm_unittest(X86Tests
   MachineSizeOptsTest.cpp
+  TernlogTest.cpp
   )
 
 set_property(TARGET X86Tests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/X86/TernlogTest.cpp b/llvm/unittests/Target/X86/TernlogTest.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/unittests/Target/X86/TernlogTest.cpp
@@ -0,0 +1,201 @@
+//===- TernlogTest.cpp - X86 ternlog constant-folding unit tests ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/InstCombine/InstCombine.h"
+
+#include "gtest/gtest.h"
+
+#include <random>
+
+namespace llvm {
+static std::unique_ptr<LLVMTargetMachine> initTM() {
+  LLVMInitializeX86TargetInfo();
+  LLVMInitializeX86Target();
+  LLVMInitializeX86TargetMC();
+
+  auto TT(Triple::normalize("x86_64--"));
+  std::string Error;
+  const Target *TheTarget = TargetRegistry::lookupTarget(TT, Error);
+  return std::unique_ptr<LLVMTargetMachine>(static_cast<LLVMTargetMachine *>(
+      TheTarget->createTargetMachine(TT, "", "", TargetOptions(), std::nullopt,
+                                     std::nullopt, CodeGenOpt::Default)));
+}
+
+// Builds pternlog calls with random constant vector arguments, runs
+// InstCombine on them, and verifies the folded result against the truth
+// table encoded in the immediate.
+struct TernTester {
+  unsigned NElem;
+  unsigned ElemWidth;
+  std::mt19937_64 Rng;
+  unsigned ImmVal;
+  SmallVector<uint64_t, 16> VecElems[3];
+
+  void updateImm(uint8_t NewImmVal) { ImmVal = NewImmVal; }
+  void updateNElem(unsigned NewNElem) {
+    NElem = NewNElem;
+    for (unsigned I = 0; I < 3; ++I) {
+      VecElems[I].resize(NElem);
+    }
+  }
+  void updateElemWidth(unsigned NewElemWidth) {
+    ElemWidth = NewElemWidth;
+    assert(ElemWidth == 32 || ElemWidth == 64);
+  }
+
+  // Mask with the ElemWidth low bits set.
+  uint64_t getElemMask() const {
+    return (~uint64_t(0)) >> ((64 - ElemWidth) % 64);
+  }
+
+  void RandomizeVecArgs() {
+    uint64_t ElemMask = getElemMask();
+    for (unsigned I = 0; I < 3; ++I) {
+      for (unsigned J = 0; J < NElem; ++J) {
+        VecElems[I][J] = Rng() & ElemMask;
+      }
+    }
+  }
+
+  std::pair<std::string, std::string> getScalarInfo() const {
+    switch (ElemWidth) {
+    case 32:
+      return {"i32", "d"};
+    case 64:
+      return {"i64", "q"};
+    default:
+      llvm_unreachable("Invalid ElemWidth");
+    }
+  }
+  std::string getScalarType() const { return getScalarInfo().first; }
+  std::string getScalarExt() const { return getScalarInfo().second; }
+  std::string getVecType() const {
+    return "<" + Twine(NElem).str() + " x " + getScalarType() + ">";
+  }
+
+  std::string getVecWidth() const { return Twine(NElem * ElemWidth).str(); }
+  std::string getFunctionName() const {
+    return "@llvm.x86.avx512.pternlog." + getScalarExt() + "." + getVecWidth();
+  }
+  std::string getFunctionDecl() const {
+    return "declare " + getVecType() + getFunctionName() + "(" + getVecType() +
+           ", " + getVecType() + ", " + getVecType() + ", " + "i32 immarg)";
+  }
+
+  std::string getVecN(unsigned N) const {
+    assert(N < 3);
+    std::string VecStr = getVecType() + " <";
+    for (unsigned I = 0; I < VecElems[N].size(); ++I) {
+      if (I != 0)
+        VecStr += ", ";
+      VecStr += getScalarType() + " " + Twine(VecElems[N][I]).str();
+    }
+    return VecStr + ">";
+  }
+  std::string getFunctionCall() const {
+    return "tail call " + getVecType() + " " + getFunctionName() + "(" +
+           getVecN(0) + ", " + getVecN(1) + ", " + getVecN(2) + ", " + "i32 " +
+           Twine(ImmVal).str() + ")";
+  }
+
+  std::string getTestText() const {
+    return getFunctionDecl() + "\ndefine " + getVecType() +
+           "@foo() {\n%r = " + getFunctionCall() + "\nret " + getVecType() +
+           " %r\n}\n";
+  }
+
+  void checkResult(const Value *V) {
+    auto GetValElem = [&](unsigned Idx) -> uint64_t {
+      if (auto *CV = dyn_cast<ConstantDataVector>(V))
+        return CV->getElementAsInteger(Idx);
+
+      auto *C = dyn_cast<Constant>(V);
+      assert(C);
+      if (C->isNullValue())
+        return 0;
+      if (C->isAllOnesValue())
+        return ((~uint64_t(0)) >> ((64 - ElemWidth) % 64));
+      if (C->isOneValue())
+        return 1;
+
+      llvm_unreachable("Unknown constant type");
+    };
+
+    // Bit triple (A, B, C) selects bit ((A << 2) | (B << 1) | C) of the
+    // immediate.
+    auto ComputeBit = [&](uint64_t A, uint64_t B, uint64_t C) -> uint64_t {
+      unsigned BitIdx = ((A & 1) << 2) | ((B & 1) << 1) | (C & 1);
+      return (ImmVal >> BitIdx) & 1;
+    };
+
+    for (unsigned I = 0; I < NElem; ++I) {
+      uint64_t Expec = 0;
+      uint64_t AEle = VecElems[0][I];
+      uint64_t BEle = VecElems[1][I];
+      uint64_t CEle = VecElems[2][I];
+      for (unsigned J = 0; J < ElemWidth; ++J) {
+        Expec |= ComputeBit(AEle >> J, BEle >> J, CEle >> J) << J;
+      }
+
+      ASSERT_EQ(Expec, GetValElem(I));
+    }
+  }
+
+  // Expects InstCombine to fold the call + ret down to a single ret of a
+  // constant, then checks that constant element by element.
+  void check(LLVMContext &Ctx, FunctionPassManager &FPM,
+             FunctionAnalysisManager &FAM) {
+    SMDiagnostic Error;
+    std::unique_ptr<Module> M = parseAssemblyString(getTestText(), Error, Ctx);
+    ASSERT_TRUE(M);
+    Function *F = M->getFunction("foo");
+    ASSERT_TRUE(F);
+    ASSERT_EQ(F->getInstructionCount(), 2u);
+    FPM.run(*F, FAM);
+    ASSERT_EQ(F->getInstructionCount(), 1u);
+    ASSERT_EQ(F->size(), 1u);
+    const Instruction *I = F->begin()->getTerminator();
+    ASSERT_TRUE(I);
+    ASSERT_EQ(I->getNumOperands(), 1u);
+    checkResult(I->getOperand(0));
+  }
+};
+
+TEST(TernlogTest, TestConstantFolding) {
+  LLVMContext Ctx;
+  FunctionAnalysisManager FAM;
+  FunctionPassManager FPM;
+  PassBuilder PB;
+  LoopAnalysisManager LAM;
+  CGSCCAnalysisManager CGAM;
+  ModuleAnalysisManager MAM;
+  TargetIRAnalysis TIRA = TargetIRAnalysis(
+      [&](const Function &F) { return initTM()->getTargetTransformInfo(F); });
+
+  FAM.registerPass([&] { return TIRA; });
+  PB.registerModuleAnalyses(MAM);
+  PB.registerCGSCCAnalyses(CGAM);
+  PB.registerFunctionAnalyses(FAM);
+  PB.registerLoopAnalyses(LAM);
+  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+
+  FPM.addPass(InstCombinePass());
+  TernTester TT;
+  for (unsigned NElem = 2; NElem <= 16; NElem += NElem) {
+    TT.updateNElem(NElem);
+    for (unsigned ElemWidth = 32; ElemWidth <= 64; ElemWidth += ElemWidth) {
+      if (ElemWidth * NElem > 512 || ElemWidth * NElem < 128)
+        continue;
+      TT.updateElemWidth(ElemWidth);
+      TT.RandomizeVecArgs();
+      for (unsigned Imm = 0; Imm < 256; ++Imm) {
+        TT.updateImm(Imm);
+        TT.check(Ctx, FPM, FAM);
+      }
+    }
+  }
+}
+} // namespace llvm
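
A note on the semantics the harness above verifies: VPTERNLOG's 8-bit immediate is the truth table itself. For each bit position, the corresponding bits of the three sources form a 3-bit index (first source is the high bit), and the indexed bit of the immediate is the result bit, which is exactly what ComputeBit computes. Below is a minimal standalone C++ sketch of those semantics; ternlogScalar is an illustrative helper written for this note, not something the patch adds.

#include <cassert>
#include <cstdint>

// Reference scalar model: result bit J = Imm bit ((A_J << 2) | (B_J << 1) | C_J).
static uint64_t ternlogScalar(uint64_t A, uint64_t B, uint64_t C, uint8_t Imm,
                              unsigned ElemWidth) {
  uint64_t R = 0;
  for (unsigned J = 0; J < ElemWidth; ++J) {
    unsigned BitIdx =
        (((A >> J) & 1) << 2) | (((B >> J) & 1) << 1) | ((C >> J) & 1);
    R |= uint64_t((Imm >> BitIdx) & 1) << J;
  }
  return R;
}

int main() {
  // 0xe8 encodes majority(A, B, C): immediate bits 3, 5, 6 and 7 are set,
  // i.e. the indices where at least two of the three input bits are 1.
  assert(ternlogScalar(0xff, 0xff, 0x00, 0xe8, 8) == 0xff);
  assert(ternlogScalar(0xff, 0x00, 0x00, 0xe8, 8) == 0x00);
  // With the canonical argument patterns A=0xf0, B=0xcc, C=0xaa, bit J's
  // index is J itself, so the result reproduces the immediate for any Imm.
  assert(ternlogScalar(0xf0, 0xcc, 0xaa, 0xe8, 8) == 0xe8);
  return 0;
}

That last identity is why randomized inputs plus an exhaustive sweep of all 256 immediates, as in TEST(TernlogTest, TestConstantFolding), give strong coverage: any mix-up of the truth-table indexing shows up as a mismatch for some immediate.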