Index: include/llvm/IR/IntrinsicsX86.td =================================================================== --- include/llvm/IR/IntrinsicsX86.td +++ include/llvm/IR/IntrinsicsX86.td @@ -6421,3 +6421,37 @@ def int_x86_clzero : GCCBuiltin<"__builtin_ia32_clzero">, Intrinsic<[], [llvm_ptr_ty], []>; } + +//===----------------------------------------------------------------------===// +// Platform Configuration +let TargetPrefix = "x86" in { + def int_x86_pconfig_32 : + Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + def int_x86_pconfig_64 : + Intrinsic<[llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], + [llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], []>; +} + +//===----------------------------------------------------------------------===// +// SGX Intrinsics +let TargetPrefix = "x86" in { + def int_x86_encls_32 : + Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + def int_x86_encls_64 : + Intrinsic<[llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], + [llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], []>; + def int_x86_enclu_32 : + Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + def int_x86_enclu_64 : + Intrinsic<[llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], + [llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], []>; + def int_x86_enclv_32 : + Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + def int_x86_enclv_64 : + Intrinsic<[llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], + [llvm_i32_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty], []>; +} Index: lib/Support/Host.cpp =================================================================== --- lib/Support/Host.cpp +++ lib/Support/Host.cpp @@ -1257,6 +1257,18 @@ Features["ibt"] = HasLeaf7 && 
((EDX >> 20) & 1); + // There are two CPUID leaves which contain information associated with the pconfig + // instruction: + // EAX=0x7, ECX=0x0 indicates the availability of the instruction (via the 18th + // bit of EDX), while the EAX=0x1b leaf returns information on the + // availability of specific pconfig leaves. + // The target feature here only refers to the first of these two. + // Users might need to check for the availability of specific pconfig + // leaves using cpuid, since that information is ignored while + // detecting features using the "-march=native" flag. + // For more info, see X86 ISA docs. + Features["pconfig"] = HasLeaf7 && ((EDX >> 18) & 1); + bool HasLeafD = MaxLevel >= 0xd && !getX86CpuIDAndInfoEx(0xd, 0x1, &EAX, &EBX, &ECX, &EDX); Index: lib/Target/X86/X86.td =================================================================== --- lib/Target/X86/X86.td +++ lib/Target/X86/X86.td @@ -274,6 +274,8 @@ def FeatureLZCNTFalseDeps : SubtargetFeature<"false-deps-lzcnt-tzcnt", "HasLZCNTFalseDeps", "true", "LZCNT/TZCNT have a false dependency on dest register">; +def FeaturePCONFIG : SubtargetFeature<"pconfig", "HasPCONFIG", "true", + "platform configuration instruction">; // On recent X86 (port bound) processors, its preferable to combine to a single shuffle // using a variable mask over multiple fixed shuffles. def FeatureFastVariableShuffle Index: lib/Target/X86/X86ISelLowering.h =================================================================== --- lib/Target/X86/X86ISelLowering.h +++ lib/Target/X86/X86ISelLowering.h @@ -564,6 +564,14 @@ // Get a random integer and indicate whether it is valid in CF. RDRAND, + // Platform Configuration + PCONFIG, + + // SGX Intrinsics + ENCLS, + ENCLU, + ENCLV, + + // Get a NIST SP800-90B & C compliant random integer and // indicate whether it is valid in CF. 
RDSEED, Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -21142,6 +21142,32 @@ return SDValue(Res, 0); } +static SDValue getInstrWFourImplicitOps(SDValue Op, SelectionDAG &DAG, + const X86Subtarget &Subtarget, + unsigned Opcode) { + SDLoc dl(Op); + SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); + SDValue Chain = Op.getOperand(0); + unsigned ArgReg1 = Subtarget.is64Bit() ? X86::RBX : X86::EBX; + unsigned ArgReg2 = Subtarget.is64Bit() ? X86::RCX : X86::ECX; + unsigned ArgReg3 = Subtarget.is64Bit() ? X86::RDX : X86::EDX; + auto ArgVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32; + + Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Op->getOperand(2)); + Chain = DAG.getCopyToReg(Chain, dl, ArgReg1, Op->getOperand(3)); + Chain = DAG.getCopyToReg(Chain, dl, ArgReg2, Op->getOperand(4)); + Chain = DAG.getCopyToReg(Chain, dl, ArgReg3, Op->getOperand(5)); + + SDNode *Res = DAG.getMachineNode(Opcode, dl, Tys, Chain); + + SDValue a = DAG.getCopyFromReg(SDValue(Res, 0), dl, X86::EAX, MVT::i32, SDValue(Res, 1)); + SDValue b = DAG.getCopyFromReg(a.getValue(1), dl, ArgReg1, ArgVT, a.getValue(2)); + SDValue c = DAG.getCopyFromReg(b.getValue(1), dl, ArgReg2, ArgVT, b.getValue(2)); + SDValue d = DAG.getCopyFromReg(c.getValue(1), dl, ArgReg3, ArgVT, c.getValue(2)); + + return DAG.getMergeValues({a, b, c, d, SDValue(Res, 0)}, dl); +} + /// Handles the lowering of builtin intrinsic that return the value /// of the extended control register. 
static void getExtendedControlRegister(SDNode *N, const SDLoc &DL, @@ -21421,6 +21447,17 @@ return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, SDValue(Result.getNode(), 2)); } + + case PCONFIG: + return getInstrWFourImplicitOps(Op, DAG, Subtarget, X86::PCONFIG); + + case ENCLS: + return getInstrWFourImplicitOps(Op, DAG, Subtarget, X86::ENCLS); + case ENCLU: + return getInstrWFourImplicitOps(Op, DAG, Subtarget, X86::ENCLU); + case ENCLV: + return getInstrWFourImplicitOps(Op, DAG, Subtarget, X86::ENCLV); + case GATHER_AVX2: { SDValue Chain = Op.getOperand(0); SDValue Src = Op.getOperand(2); @@ -25520,6 +25557,10 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { switch ((X86ISD::NodeType)Opcode) { case X86ISD::FIRST_NUMBER: break; + case X86ISD::ENCLS: return "X86ISD::ENCLS"; + case X86ISD::ENCLU: return "X86ISD::ENCLU"; + case X86ISD::ENCLV: return "X86ISD::ENCLV"; + case X86ISD::PCONFIG: return "X86ISD::PCONFIG"; case X86ISD::BSF: return "X86ISD::BSF"; case X86ISD::BSR: return "X86ISD::BSR"; case X86ISD::SHLD: return "X86ISD::SHLD"; Index: lib/Target/X86/X86InstrInfo.td =================================================================== --- lib/Target/X86/X86InstrInfo.td +++ lib/Target/X86/X86InstrInfo.td @@ -864,6 +864,7 @@ def HasRTM : Predicate<"Subtarget->hasRTM()">; def HasADX : Predicate<"Subtarget->hasADX()">; def HasSHA : Predicate<"Subtarget->hasSHA()">; +def HasSGX : Predicate<"Subtarget->hasSGX()">; def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">; def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">; def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">; @@ -882,6 +883,7 @@ def HasCLWB : Predicate<"Subtarget->hasCLWB()">; def HasRDPID : Predicate<"Subtarget->hasRDPID()">; def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">; +def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">; def Not64BitMode : Predicate<"!Subtarget->is64Bit()">, AssemblerPredicate<"!Mode64Bit", "Not 64-bit mode">; 
def In64BitMode : Predicate<"Subtarget->is64Bit()">, Index: lib/Target/X86/X86InstrSGX.td =================================================================== --- lib/Target/X86/X86InstrSGX.td +++ lib/Target/X86/X86InstrSGX.td @@ -15,7 +15,7 @@ //===----------------------------------------------------------------------===// // SGX instructions -let SchedRW = [WriteSystem] in { +let SchedRW = [WriteSystem], Predicates = [HasSGX] in { // ENCLS - Execute an Enclave System Function of Specified Leaf Number def ENCLS : I<0x01, MRM_CF, (outs), (ins), "encls", []>, TB; @@ -23,4 +23,8 @@ // ENCLU - Execute an Enclave User Function of Specified Leaf Number def ENCLU : I<0x01, MRM_D7, (outs), (ins), "enclu", []>, TB; + +// ENCLV - Execute an Enclave VMM Function of Specified Leaf Number +def ENCLV : I<0x01, MRM_C0, (outs), (ins), + "enclv", []>, TB; } // SchedRW Index: lib/Target/X86/X86InstrSystem.td =================================================================== --- lib/Target/X86/X86InstrSystem.td +++ lib/Target/X86/X86InstrSystem.td @@ -734,3 +734,20 @@ "ptwrite{q}\t$dst", [], IIC_PTWRITE>, XS, Requires<[In64BitMode]>; } // SchedRW + +//===----------------------------------------------------------------------===// +// Platform Configuration instruction + +// From ISA docs: +// "This instruction is used to execute functions for configuring platform +// features. +// EAX: Leaf function to be invoked. +// RBX/RCX/RDX: Leaf-specific purpose." +// "Successful execution of the leaf clears RAX (set to zero) and ZF, CF, PF, +// AF, OF, and SF are cleared. In case of failure, the failure reason is +// indicated in RAX with ZF set to 1 and CF, PF, AF, OF, and SF are cleared." +// Thus all these mentioned registers are considered clobbered. 
+ +let Uses = [RAX, RBX, RCX, RDX], Defs = [RAX, RBX, RCX, RDX, EFLAGS] in + def PCONFIG : I<0x01, MRM_C5, (outs), (ins), "pconfig", []>, TB, + Requires<[HasPCONFIG]>; Index: lib/Target/X86/X86IntrinsicsInfo.h =================================================================== --- lib/Target/X86/X86IntrinsicsInfo.h +++ lib/Target/X86/X86IntrinsicsInfo.h @@ -38,7 +38,8 @@ EXPAND_FROM_MEM, TERLOG_OP_MASK, TERLOG_OP_MASKZ, FIXUPIMM, FIXUPIMM_MASKZ, FIXUPIMMS, FIXUPIMMS_MASKZ, GATHER_AVX2, - ROUNDP, ROUNDS + ROUNDP, ROUNDS, + PCONFIG, ENCLS, ENCLU, ENCLV }; struct IntrinsicData { @@ -333,6 +334,14 @@ X86_INTRINSIC_DATA(avx512_scattersiv4_si, SCATTER, X86::VPSCATTERDDZ128mr, 0), X86_INTRINSIC_DATA(avx512_scattersiv8_sf, SCATTER, X86::VSCATTERDPSZ256mr, 0), X86_INTRINSIC_DATA(avx512_scattersiv8_si, SCATTER, X86::VPSCATTERDDZ256mr, 0), + X86_INTRINSIC_DATA(encls_32, ENCLS, X86ISD::ENCLS, 0), + X86_INTRINSIC_DATA(encls_64, ENCLS, X86ISD::ENCLS, 0), + X86_INTRINSIC_DATA(enclu_32, ENCLU, X86ISD::ENCLU, 0), + X86_INTRINSIC_DATA(enclu_64, ENCLU, X86ISD::ENCLU, 0), + X86_INTRINSIC_DATA(enclv_32, ENCLV, X86ISD::ENCLV, 0), + X86_INTRINSIC_DATA(enclv_64, ENCLV, X86ISD::ENCLV, 0), + X86_INTRINSIC_DATA(pconfig_32, PCONFIG, X86ISD::PCONFIG, 0), + X86_INTRINSIC_DATA(pconfig_64, PCONFIG, X86ISD::PCONFIG, 0), X86_INTRINSIC_DATA(rdpmc, RDPMC, X86ISD::RDPMC_DAG, 0), X86_INTRINSIC_DATA(rdrand_16, RDRAND, X86ISD::RDRAND, 0), X86_INTRINSIC_DATA(rdrand_32, RDRAND, X86ISD::RDRAND, 0), Index: lib/Target/X86/X86Subtarget.h =================================================================== --- lib/Target/X86/X86Subtarget.h +++ lib/Target/X86/X86Subtarget.h @@ -362,6 +362,9 @@ /// Processor support RDPID instruction bool HasRDPID; + /// Processor supports PCONFIG instruction + bool HasPCONFIG; + /// Use a retpoline thunk rather than indirect calls to block speculative /// execution. 
bool UseRetpoline; @@ -621,6 +624,8 @@ bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; } bool hasCLWB() const { return HasCLWB; } bool hasRDPID() const { return HasRDPID; } + bool hasPCONFIG() const { return HasPCONFIG; } + bool hasSGX() const { return HasSGX; } bool useRetpoline() const { return UseRetpoline; } bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; } Index: lib/Target/X86/X86Subtarget.cpp =================================================================== --- lib/Target/X86/X86Subtarget.cpp +++ lib/Target/X86/X86Subtarget.cpp @@ -322,6 +322,7 @@ HasSHSTK = false; HasIBT = false; HasSGX = false; + HasPCONFIG = false; HasCLFLUSHOPT = false; HasCLWB = false; HasRDPID = false; Index: test/CodeGen/X86/SGX32-intrinsics.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/SGX32-intrinsics.ll @@ -0,0 +1,125 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sgx | FileCheck %s + +; Function Attrs: nounwind +define i32 @test_encls(i32 %leaf, i32* nocapture %d) local_unnamed_addr #0 { +; CHECK-LABEL: test_encls: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushl %ebx +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: pushl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 12 +; CHECK-NEXT: .cfi_offset %esi, -12 +; CHECK-NEXT: .cfi_offset %ebx, -8 +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi +; CHECK-NEXT: movl (%esi), %ebx +; CHECK-NEXT: movl 4(%esi), %ecx +; CHECK-NEXT: movl 8(%esi), %edx +; CHECK-NEXT: encls +; CHECK-NEXT: movl %ebx, (%esi) +; CHECK-NEXT: movl %ecx, 4(%esi) +; CHECK-NEXT: movl %edx, 8(%esi) +; CHECK-NEXT: popl %esi +; CHECK-NEXT: popl %ebx +; CHECK-NEXT: retl +entry: + %add.ptr.i = getelementptr inbounds i32, i32* %d, i32 1 + %add.ptr1.i = getelementptr inbounds i32, i32* %d, i32 2 + %0 = load i32, i32* %d, align 4 + %1 = load i32, i32* %add.ptr.i, 
align 4 + %2 = load i32, i32* %add.ptr1.i, align 4 + %3 = tail call { i32, i32, i32, i32 } @llvm.x86.encls.32(i32 %leaf, i32 %0, i32 %1, i32 %2) #1 + %4 = extractvalue { i32, i32, i32, i32 } %3, 1 + store i32 %4, i32* %d, align 4 + %5 = extractvalue { i32, i32, i32, i32 } %3, 2 + store i32 %5, i32* %add.ptr.i, align 4 + %6 = extractvalue { i32, i32, i32, i32 } %3, 3 + store i32 %6, i32* %add.ptr1.i, align 4 + %7 = extractvalue { i32, i32, i32, i32 } %3, 0 + ret i32 %7 +} + +declare { i32, i32, i32, i32 } @llvm.x86.encls.32(i32, i32, i32, i32) #1 + +; Function Attrs: nounwind +define i32 @test_enclu(i32 %leaf, i32* nocapture %d) local_unnamed_addr #0 { +; CHECK-LABEL: test_enclu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushl %ebx +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: pushl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 12 +; CHECK-NEXT: .cfi_offset %esi, -12 +; CHECK-NEXT: .cfi_offset %ebx, -8 +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi +; CHECK-NEXT: movl (%esi), %ebx +; CHECK-NEXT: movl 4(%esi), %ecx +; CHECK-NEXT: movl 8(%esi), %edx +; CHECK-NEXT: enclu +; CHECK-NEXT: movl %ebx, (%esi) +; CHECK-NEXT: movl %ecx, 4(%esi) +; CHECK-NEXT: movl %edx, 8(%esi) +; CHECK-NEXT: popl %esi +; CHECK-NEXT: popl %ebx +; CHECK-NEXT: retl +entry: + %add.ptr.i = getelementptr inbounds i32, i32* %d, i32 1 + %add.ptr1.i = getelementptr inbounds i32, i32* %d, i32 2 + %0 = load i32, i32* %d, align 4 + %1 = load i32, i32* %add.ptr.i, align 4 + %2 = load i32, i32* %add.ptr1.i, align 4 + %3 = tail call { i32, i32, i32, i32 } @llvm.x86.enclu.32(i32 %leaf, i32 %0, i32 %1, i32 %2) #1 + %4 = extractvalue { i32, i32, i32, i32 } %3, 1 + store i32 %4, i32* %d, align 4 + %5 = extractvalue { i32, i32, i32, i32 } %3, 2 + store i32 %5, i32* %add.ptr.i, align 4 + %6 = extractvalue { i32, i32, i32, i32 } %3, 3 + store i32 %6, i32* %add.ptr1.i, align 4 + %7 = extractvalue { i32, i32, i32, i32 } %3, 0 + ret i32 %7 +} + +declare { i32, i32, i32, i32 } 
@llvm.x86.enclu.32(i32, i32, i32, i32) #1 + +; Function Attrs: nounwind +define i32 @test_enclv(i32 %leaf, i32* nocapture %d) local_unnamed_addr #0 { +; CHECK-LABEL: test_enclv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushl %ebx +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: pushl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 12 +; CHECK-NEXT: .cfi_offset %esi, -12 +; CHECK-NEXT: .cfi_offset %ebx, -8 +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi +; CHECK-NEXT: movl (%esi), %ebx +; CHECK-NEXT: movl 4(%esi), %ecx +; CHECK-NEXT: movl 8(%esi), %edx +; CHECK-NEXT: enclv +; CHECK-NEXT: movl %ebx, (%esi) +; CHECK-NEXT: movl %ecx, 4(%esi) +; CHECK-NEXT: movl %edx, 8(%esi) +; CHECK-NEXT: popl %esi +; CHECK-NEXT: popl %ebx +; CHECK-NEXT: retl +entry: + %add.ptr.i = getelementptr inbounds i32, i32* %d, i32 1 + %add.ptr1.i = getelementptr inbounds i32, i32* %d, i32 2 + %0 = load i32, i32* %d, align 4 + %1 = load i32, i32* %add.ptr.i, align 4 + %2 = load i32, i32* %add.ptr1.i, align 4 + %3 = tail call { i32, i32, i32, i32 } @llvm.x86.enclv.32(i32 %leaf, i32 %0, i32 %1, i32 %2) #1 + %4 = extractvalue { i32, i32, i32, i32 } %3, 1 + store i32 %4, i32* %d, align 4 + %5 = extractvalue { i32, i32, i32, i32 } %3, 2 + store i32 %5, i32* %add.ptr.i, align 4 + %6 = extractvalue { i32, i32, i32, i32 } %3, 3 + store i32 %6, i32* %add.ptr1.i, align 4 + %7 = extractvalue { i32, i32, i32, i32 } %3, 0 + ret i32 %7 +} + +declare { i32, i32, i32, i32 } @llvm.x86.enclv.32(i32, i32, i32, i32) #1 Index: test/CodeGen/X86/SGX64-intrinsics.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/SGX64-intrinsics.ll @@ -0,0 +1,106 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sgx | FileCheck %s + +define i32 @test_encls(i32 %leaf, i64* nocapture %arguments) local_unnamed_addr #0 { +; CHECK-LABEL: test_encls: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbx, -16 +; CHECK-NEXT: movq (%rsi), %rbx +; CHECK-NEXT: movq 8(%rsi), %rcx +; CHECK-NEXT: movq 16(%rsi), %rdx +; CHECK-NEXT: encls +; CHECK-NEXT: movq %rbx, (%rsi) +; CHECK-NEXT: movq %rcx, 8(%rsi) +; CHECK-NEXT: movq %rdx, 16(%rsi) +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq +entry: + %add.ptr.i = getelementptr inbounds i64, i64* %arguments, i64 1 + %add.ptr1.i = getelementptr inbounds i64, i64* %arguments, i64 2 + %0 = load i64, i64* %arguments, align 8 + %1 = load i64, i64* %add.ptr.i, align 8 + %2 = load i64, i64* %add.ptr1.i, align 8 + %3 = tail call { i32, i64, i64, i64 } @llvm.x86.encls.64(i32 %leaf, i64 %0, i64 %1, i64 %2) #1 + %4 = extractvalue { i32, i64, i64, i64 } %3, 1 + store i64 %4, i64* %arguments, align 8 + %5 = extractvalue { i32, i64, i64, i64 } %3, 2 + store i64 %5, i64* %add.ptr.i, align 8 + %6 = extractvalue { i32, i64, i64, i64 } %3, 3 + store i64 %6, i64* %add.ptr1.i, align 8 + %7 = extractvalue { i32, i64, i64, i64 } %3, 0 + ret i32 %7 +} + +; Function Attrs: nounwind +define i32 @test_enclu(i32 %leaf, i64* nocapture %arguments) local_unnamed_addr #0 { +; CHECK-LABEL: test_enclu: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbx, -16 +; CHECK-NEXT: movq (%rsi), %rbx +; CHECK-NEXT: movq 8(%rsi), %rcx +; CHECK-NEXT: movq 16(%rsi), %rdx +; CHECK-NEXT: enclu +; CHECK-NEXT: movq %rbx, (%rsi) +; CHECK-NEXT: movq %rcx, 8(%rsi) +; CHECK-NEXT: movq %rdx, 16(%rsi) +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq +entry: + %add.ptr.i = getelementptr inbounds i64, i64* %arguments, i64 1 + %add.ptr1.i = getelementptr inbounds i64, i64* %arguments, i64 2 + %0 = load i64, i64* %arguments, align 8 + %1 = load i64, i64* %add.ptr.i, align 8 + %2 = load i64, i64* %add.ptr1.i, align 8 + %3 = tail call { i32, i64, i64, i64 
} @llvm.x86.enclu.64(i32 %leaf, i64 %0, i64 %1, i64 %2) #1 + %4 = extractvalue { i32, i64, i64, i64 } %3, 1 + store i64 %4, i64* %arguments, align 8 + %5 = extractvalue { i32, i64, i64, i64 } %3, 2 + store i64 %5, i64* %add.ptr.i, align 8 + %6 = extractvalue { i32, i64, i64, i64 } %3, 3 + store i64 %6, i64* %add.ptr1.i, align 8 + %7 = extractvalue { i32, i64, i64, i64 } %3, 0 + ret i32 %7 +} + +define i32 @test_enclv(i32 %leaf, i64* nocapture %arguments) local_unnamed_addr #0 { +; CHECK-LABEL: test_enclv: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbx, -16 +; CHECK-NEXT: movq (%rsi), %rbx +; CHECK-NEXT: movq 8(%rsi), %rcx +; CHECK-NEXT: movq 16(%rsi), %rdx +; CHECK-NEXT: enclv +; CHECK-NEXT: movq %rbx, (%rsi) +; CHECK-NEXT: movq %rcx, 8(%rsi) +; CHECK-NEXT: movq %rdx, 16(%rsi) +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq +entry: + %add.ptr.i = getelementptr inbounds i64, i64* %arguments, i64 1 + %add.ptr1.i = getelementptr inbounds i64, i64* %arguments, i64 2 + %0 = load i64, i64* %arguments, align 8 + %1 = load i64, i64* %add.ptr.i, align 8 + %2 = load i64, i64* %add.ptr1.i, align 8 + %3 = tail call { i32, i64, i64, i64 } @llvm.x86.enclv.64(i32 %leaf, i64 %0, i64 %1, i64 %2) #1 + %4 = extractvalue { i32, i64, i64, i64 } %3, 1 + store i64 %4, i64* %arguments, align 8 + %5 = extractvalue { i32, i64, i64, i64 } %3, 2 + store i64 %5, i64* %add.ptr.i, align 8 + %6 = extractvalue { i32, i64, i64, i64 } %3, 3 + store i64 %6, i64* %add.ptr1.i, align 8 + %7 = extractvalue { i32, i64, i64, i64 } %3, 0 + ret i32 %7 +} + +declare { i32, i64, i64, i64 } @llvm.x86.encls.64(i32, i64, i64, i64) #1 +declare { i32, i64, i64, i64 } @llvm.x86.enclu.64(i32, i64, i64, i64) #1 +declare { i32, i64, i64, i64 } @llvm.x86.enclv.64(i32, i64, i64, i64) #1 Index: test/CodeGen/X86/pconfig32-intrinsic.ll =================================================================== --- /dev/null +++ 
test/CodeGen/X86/pconfig32-intrinsic.ll @@ -0,0 +1,48 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+pconfig | FileCheck %s + +; Function Attrs: nounwind +define i32 @test_pconfig(i32 %leaf, i32* nocapture %d) local_unnamed_addr #0 { +; CHECK-LABEL: test_pconfig: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushl %ebx +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: pushl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 12 +; CHECK-NEXT: .cfi_offset %esi, -12 +; CHECK-NEXT: .cfi_offset %ebx, -8 +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi +; CHECK-NEXT: movl (%esi), %ebx +; CHECK-NEXT: movl 4(%esi), %ecx +; CHECK-NEXT: movl 8(%esi), %edx +; CHECK-NEXT: # kill: def %eax killed %eax def %rax +; CHECK-NEXT: # kill: def %ebx killed %ebx def %rbx +; CHECK-NEXT: # kill: def %ecx killed %ecx def %rcx +; CHECK-NEXT: # kill: def %edx killed %edx def %rdx +; CHECK-NEXT: pconfig +; CHECK-NEXT: movl %ebx, (%esi) +; CHECK-NEXT: movl %ecx, 4(%esi) +; CHECK-NEXT: movl %edx, 8(%esi) +; CHECK-NEXT: popl %esi +; CHECK-NEXT: popl %ebx +; CHECK-NEXT: retl +entry: + %add.ptr.i = getelementptr inbounds i32, i32* %d, i32 1 + %add.ptr1.i = getelementptr inbounds i32, i32* %d, i32 2 + %0 = load i32, i32* %d, align 4 + %1 = load i32, i32* %add.ptr.i, align 4 + %2 = load i32, i32* %add.ptr1.i, align 4 + %3 = tail call { i32, i32, i32, i32 } @llvm.x86.pconfig.32(i32 %leaf, i32 %0, i32 %1, i32 %2) #1 + %4 = extractvalue { i32, i32, i32, i32 } %3, 1 + store i32 %4, i32* %d, align 4 + %5 = extractvalue { i32, i32, i32, i32 } %3, 2 + store i32 %5, i32* %add.ptr.i, align 4 + %6 = extractvalue { i32, i32, i32, i32 } %3, 3 + store i32 %6, i32* %add.ptr1.i, align 4 + %7 = extractvalue { i32, i32, i32, i32 } %3, 0 + ret i32 %7 +} + +; Function Attrs: nounwind +declare { i32, i32, i32, i32 } @llvm.x86.pconfig.32(i32, i32, i32, i32) #1 Index: test/CodeGen/X86/pconfig64-intrinsic.ll 
=================================================================== --- /dev/null +++ test/CodeGen/X86/pconfig64-intrinsic.ll @@ -0,0 +1,39 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+pconfig | FileCheck %s + +; Function Attrs: nounwind +define i32 @test_pconfig(i32 %leaf, i64* nocapture %d) local_unnamed_addr #0 { +; CHECK-LABEL: test_pconfig: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbx, -16 +; CHECK-NEXT: movq (%rsi), %rbx +; CHECK-NEXT: movq 8(%rsi), %rcx +; CHECK-NEXT: movq 16(%rsi), %rdx +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: pconfig +; CHECK-NEXT: movq %rbx, (%rsi) +; CHECK-NEXT: movq %rcx, 8(%rsi) +; CHECK-NEXT: movq %rdx, 16(%rsi) +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq +entry: + %add.ptr.i = getelementptr inbounds i64, i64* %d, i64 1 + %add.ptr1.i = getelementptr inbounds i64, i64* %d, i64 2 + %0 = load i64, i64* %d, align 8 + %1 = load i64, i64* %add.ptr.i, align 8 + %2 = load i64, i64* %add.ptr1.i, align 8 + %3 = tail call { i32, i64, i64, i64 } @llvm.x86.pconfig.64(i32 %leaf, i64 %0, i64 %1, i64 %2) #1 + %4 = extractvalue { i32, i64, i64, i64 } %3, 1 + store i64 %4, i64* %d, align 8 + %5 = extractvalue { i32, i64, i64, i64 } %3, 2 + store i64 %5, i64* %add.ptr.i, align 8 + %6 = extractvalue { i32, i64, i64, i64 } %3, 3 + store i64 %6, i64* %add.ptr1.i, align 8 + %7 = extractvalue { i32, i64, i64, i64 } %3, 0 + ret i32 %7 +} + +; Function Attrs: nounwind +declare { i32, i64, i64, i64 } @llvm.x86.pconfig.64(i32, i64, i64, i64) #1 Index: test/MC/X86/sgx-encoding.s =================================================================== --- test/MC/X86/sgx-encoding.s +++ test/MC/X86/sgx-encoding.s @@ -7,3 +7,7 @@ // CHECK: enclu // CHECK: encoding: [0x0f,0x01,0xd7] enclu + +// CHECK: enclv +// CHECK: encoding: [0x0f,0x01,0xc0] + enclv Index: test/MC/X86/x86-32-coverage.s 
=================================================================== --- test/MC/X86/x86-32-coverage.s +++ test/MC/X86/x86-32-coverage.s @@ -10729,3 +10729,6 @@ // CHECK: encoding: [0xf0,0x01,0x37] lock add %esi, (%edi) +// CHECK: pconfig +// CHECK: encoding: [0x0f,0x01,0xc5] + pconfig Index: test/MC/X86/x86-64.s =================================================================== --- test/MC/X86/x86-64.s +++ test/MC/X86/x86-64.s @@ -1559,6 +1559,10 @@ // CHECK: encoding: [0xf3,0x48,0x0f,0xae,0xe0] ptwriteq %rax +// CHECK: pconfig +// CHECK: encoding: [0x0f,0x01,0xc5] +pconfig + // __asm __volatile( // "pushf \n\t" // "popf \n\t" Index: utils/TableGen/X86RecognizableInstr.cpp =================================================================== --- utils/TableGen/X86RecognizableInstr.cpp +++ utils/TableGen/X86RecognizableInstr.cpp @@ -678,23 +678,23 @@ } break; case X86Local::MRM_C0: case X86Local::MRM_C1: case X86Local::MRM_C2: - case X86Local::MRM_C3: case X86Local::MRM_C4: case X86Local::MRM_C8: - case X86Local::MRM_C9: case X86Local::MRM_CA: case X86Local::MRM_CB: - case X86Local::MRM_CF: case X86Local::MRM_D0: case X86Local::MRM_D1: - case X86Local::MRM_D4: case X86Local::MRM_D5: case X86Local::MRM_D6: - case X86Local::MRM_D7: case X86Local::MRM_D8: case X86Local::MRM_D9: - case X86Local::MRM_DA: case X86Local::MRM_DB: case X86Local::MRM_DC: - case X86Local::MRM_DD: case X86Local::MRM_DE: case X86Local::MRM_DF: - case X86Local::MRM_E0: case X86Local::MRM_E1: case X86Local::MRM_E2: - case X86Local::MRM_E3: case X86Local::MRM_E4: case X86Local::MRM_E5: - case X86Local::MRM_E8: case X86Local::MRM_E9: case X86Local::MRM_EA: - case X86Local::MRM_EB: case X86Local::MRM_EC: case X86Local::MRM_ED: - case X86Local::MRM_EE: case X86Local::MRM_EF: case X86Local::MRM_F0: - case X86Local::MRM_F1: case X86Local::MRM_F2: case X86Local::MRM_F3: - case X86Local::MRM_F4: case X86Local::MRM_F5: case X86Local::MRM_F6: - case X86Local::MRM_F7: case X86Local::MRM_F9: case 
X86Local::MRM_FA: - case X86Local::MRM_FB: case X86Local::MRM_FC: case X86Local::MRM_FD: - case X86Local::MRM_FE: case X86Local::MRM_FF: + case X86Local::MRM_C3: case X86Local::MRM_C4: case X86Local::MRM_C5: + case X86Local::MRM_C8: case X86Local::MRM_C9: case X86Local::MRM_CA: + case X86Local::MRM_CB: case X86Local::MRM_CF: case X86Local::MRM_D0: + case X86Local::MRM_D1: case X86Local::MRM_D4: case X86Local::MRM_D5: + case X86Local::MRM_D6: case X86Local::MRM_D7: case X86Local::MRM_D8: + case X86Local::MRM_D9: case X86Local::MRM_DA: case X86Local::MRM_DB: + case X86Local::MRM_DC: case X86Local::MRM_DD: case X86Local::MRM_DE: + case X86Local::MRM_DF: case X86Local::MRM_E0: case X86Local::MRM_E1: + case X86Local::MRM_E2: case X86Local::MRM_E3: case X86Local::MRM_E4: + case X86Local::MRM_E5: case X86Local::MRM_E8: case X86Local::MRM_E9: + case X86Local::MRM_EA: case X86Local::MRM_EB: case X86Local::MRM_EC: + case X86Local::MRM_ED: case X86Local::MRM_EE: case X86Local::MRM_EF: + case X86Local::MRM_F0: case X86Local::MRM_F1: case X86Local::MRM_F2: + case X86Local::MRM_F3: case X86Local::MRM_F4: case X86Local::MRM_F5: + case X86Local::MRM_F6: case X86Local::MRM_F7: case X86Local::MRM_F9: + case X86Local::MRM_FA: case X86Local::MRM_FB: case X86Local::MRM_FC: + case X86Local::MRM_FD: case X86Local::MRM_FE: case X86Local::MRM_FF: // Ignored. break; }