Index: llvm/lib/Target/ARM/ARMInstrThumb2.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -4551,10 +4551,10 @@
   let Unpredictable{5-0} = 0b111111;
 }
 
-def t2TT : T2TT<0b00, "tt", []>, Requires<[IsThumb,Has8MSecExt]>;
-def t2TTT : T2TT<0b01, "ttt", []>, Requires<[IsThumb,Has8MSecExt]>;
-def t2TTA : T2TT<0b10, "tta", []>, Requires<[IsThumb,Has8MSecExt]>;
-def t2TTAT : T2TT<0b11, "ttat", []>, Requires<[IsThumb,Has8MSecExt]>;
+def t2TT : T2TT<0b00, "tt", [(set rGPR:$Rt, (int_arm_cmse_tt GPRnopc:$Rn))]>, Requires<[IsThumb,Has8MSecExt]>;
+def t2TTT : T2TT<0b01, "ttt", [(set rGPR:$Rt, (int_arm_cmse_ttt GPRnopc:$Rn))]>, Requires<[IsThumb,Has8MSecExt]>;
+def t2TTA : T2TT<0b10, "tta", [(set rGPR:$Rt, (int_arm_cmse_tta GPRnopc:$Rn))]>, Requires<[IsThumb,Has8MSecExt]>;
+def t2TTAT : T2TT<0b11, "ttat", [(set rGPR:$Rt, (int_arm_cmse_ttat GPRnopc:$Rn))]>, Requires<[IsThumb,Has8MSecExt]>;
 
 //===----------------------------------------------------------------------===//
 // Non-Instruction Patterns
Index: llvm/test/CodeGen/ARM/intrinsics-cmse.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/ARM/intrinsics-cmse.ll
@@ -0,0 +1,67 @@
+; RUN: llc < %s -mtriple=thumbv8m.base -mcpu=cortex-m23 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbebv8m.base -mcpu=cortex-m23 | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv8-arm-none-eabi"
+
+define i32 @check_tt(i8* %p) #0 {
+entry:
+; CHECK: check_tt:
+  %p.addr = alloca i8*, align 4
+  %tmp = alloca i32, align 4
+  store i8* %p, i8** %p.addr, align 4
+  %0 = load i8*, i8** %p.addr, align 4
+  %1 = call i32 @llvm.arm.cmse.tt(i8* %0)
+; CHECK: tt r{{[0-9]+}}, r{{[0-9]+}}
+  store i32 %1, i32* %tmp, align 4
+  %2 = load i32, i32* %tmp, align 4
+  ret i32 %2
+}
+
+define i32 @check_ttt(i8* %p) #0 {
+entry:
+; CHECK: check_ttt:
+  %p.addr = alloca i8*, align 4
+  %tmp = alloca i32, align 4
+  store i8* %p, i8** %p.addr, align 4
+  %0 = load i8*, i8** %p.addr, align 4
+  %1 = call i32 @llvm.arm.cmse.ttt(i8* %0)
+; CHECK: ttt r{{[0-9]+}}, r{{[0-9]+}}
+  store i32 %1, i32* %tmp, align 4
+  %2 = load i32, i32* %tmp, align 4
+  ret i32 %2
+}
+
+define i32 @check_tta(i8* %p) #0 {
+entry:
+; CHECK: check_tta:
+  %p.addr = alloca i8*, align 4
+  %tmp = alloca i32, align 4
+  store i8* %p, i8** %p.addr, align 4
+  %0 = load i8*, i8** %p.addr, align 4
+  %1 = call i32 @llvm.arm.cmse.tta(i8* %0)
+; CHECK: tta r{{[0-9]+}}, r{{[0-9]+}}
+  store i32 %1, i32* %tmp, align 4
+  %2 = load i32, i32* %tmp, align 4
+  ret i32 %2
+}
+
+define i32 @check_ttat(i8* %p) #0 {
+entry:
+; CHECK: check_ttat:
+  %p.addr = alloca i8*, align 4
+  %tmp = alloca i32, align 4
+  store i8* %p, i8** %p.addr, align 4
+  %0 = load i8*, i8** %p.addr, align 4
+  %1 = call i32 @llvm.arm.cmse.ttat(i8* %0)
+; CHECK: ttat r{{[0-9]+}}, r{{[0-9]+}}
+  store i32 %1, i32* %tmp, align 4
+  %2 = load i32, i32* %tmp, align 4
+  ret i32 %2
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.arm.cmse.tt(i8*) #1
+declare i32 @llvm.arm.cmse.ttt(i8*) #1
+declare i32 @llvm.arm.cmse.tta(i8*) #1
+declare i32 @llvm.arm.cmse.ttat(i8*) #1
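
Usage note (not part of the patch): with the selection patterns above, each call to llvm.arm.cmse.tt/ttt/tta/ttat should lower to a single TT-family instruction, as the new test checks. The sketch below shows how such calls are typically produced from C, assuming the ACLE CMSE interface in clang's <arm_cmse.h> (cmse_TT and the underlying __builtin_arm_cmse_TT builtins); the builtin and field names are quoted from ACLE/clang as understood here and are illustrative only, not something this patch adds.

/* Illustrative sketch only; assumes a CMSE-enabled build, e.g.
 *   clang --target=arm-none-eabi -mcpu=cortex-m23 -mcmse -O2 -c tt_demo.c */
#include <arm_cmse.h>

/* Raw TT result word for an address; expected to select the "tt" instruction
 * via the pattern added for t2TT. */
unsigned tt_raw(void *p) {
  return __builtin_arm_cmse_TT(p);
}

/* ACLE wrapper: query the security attribution of an address and test the
 * "secure" flag (only meaningful when compiling for the secure state, -mcmse). */
int addr_is_secure(void *p) {
  cmse_address_info_t info = cmse_TT(p);
  return info.flags.secure;
}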