mlir/test/Target/LLVMIR/Import/intrinsic.ll
; [224 lines elided]
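; NOTE: the elided header presumably carries the test's RUN line; for import
; tests it is typically of the form (assumed, not shown above):
;   RUN: mlir-translate -import-llvm %s | FileCheck %s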
; CHECK-LABEL: llvm.func @umin_test
define void @umin_test(i32 %0, i32 %1, <8 x i32> %2, <8 x i32> %3) {
; CHECK: "llvm.intr.umin"(%{{.*}}, %{{.*}}) : (i32, i32) -> i32
  %5 = call i32 @llvm.umin.i32(i32 %0, i32 %1)
; CHECK: "llvm.intr.umin"(%{{.*}}, %{{.*}}) : (vector<8xi32>, vector<8xi32>) -> vector<8xi32>
  %6 = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %2, <8 x i32> %3)
  ret void
}

; CHECK-LABEL: llvm.func @vector_reductions
define void @vector_reductions(float %0, <8 x float> %1, <8 x i32> %2) {
; CHECK: "llvm.intr.vector.reduce.add"(%{{.*}}) : (vector<8xi32>) -> i32
  %4 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.and"(%{{.*}}) : (vector<8xi32>) -> i32
  %5 = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.fmax"(%{{.*}}) : (vector<8xf32>) -> f32
  %6 = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.fmin"(%{{.*}}) : (vector<8xf32>) -> f32
  %7 = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.mul"(%{{.*}}) : (vector<8xi32>) -> i32
  %8 = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.or"(%{{.*}}) : (vector<8xi32>) -> i32
  %9 = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.smax"(%{{.*}}) : (vector<8xi32>) -> i32
  %10 = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.smin"(%{{.*}}) : (vector<8xi32>) -> i32
  %11 = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.umax"(%{{.*}}) : (vector<8xi32>) -> i32
  %12 = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.umin"(%{{.*}}) : (vector<8xi32>) -> i32
  %13 = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %2)
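; Unlike the integer reductions above, fadd and fmul take a scalar start value
; as an extra operand, and the 'reassoc' fast-math flag on the call determines
; the 'reassoc' attribute on the imported op.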
; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) {reassoc = false} : (f32, vector<8xf32>) -> f32
  %14 = call float @llvm.vector.reduce.fadd.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) {reassoc = false} : (f32, vector<8xf32>) -> f32
  %15 = call float @llvm.vector.reduce.fmul.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) {reassoc = true} : (f32, vector<8xf32>) -> f32
  %16 = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) {reassoc = true} : (f32, vector<8xf32>) -> f32
  %17 = call reassoc float @llvm.vector.reduce.fmul.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.xor"(%{{.*}}) : (vector<8xi32>) -> i32
  %18 = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %2)
  ret void
}
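
; The matrix intrinsics encode their dimensions as immediate i32 arguments; the
; import re-expresses them as rows/columns attributes on the MLIR ops.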
; CHECK-LABEL: @matrix_intrinsics
; CHECK-SAME: %[[VEC1:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[VEC2:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[PTR:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[STRIDE:[a-zA-Z0-9]+]]
define void @matrix_intrinsics(<64 x float> %vec1, <48 x float> %vec2, float* %ptr, i64 %stride) {
; CHECK: llvm.intr.matrix.multiply %[[VEC1]], %[[VEC2]]
; CHECK-SAME: {lhs_columns = 16 : i32, lhs_rows = 4 : i32, rhs_columns = 3 : i32}
  %1 = call <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float> %vec1, <48 x float> %vec2, i32 4, i32 16, i32 3)
; CHECK: llvm.intr.matrix.transpose %[[VEC2]]
; CHECK-SAME: {columns = 16 : i32, rows = 3 : i32}
  %2 = call <48 x float> @llvm.matrix.transpose.v48f32(<48 x float> %vec2, i32 3, i32 16)
; CHECK: %[[VAL1:.+]] = llvm.intr.matrix.column.major.load %[[PTR]], <stride = %[[STRIDE]]>
; CHECK-SAME: {columns = 16 : i32, isVolatile = false, rows = 3 : i32}
  %3 = call <48 x float> @llvm.matrix.column.major.load.v48f32.i64(float* align 4 %ptr, i64 %stride, i1 false, i32 3, i32 16)
; CHECK: llvm.intr.matrix.column.major.store %[[VAL1]], %[[PTR]], <stride = %[[STRIDE]]>
; CHECK-SAME: {columns = 16 : i32, isVolatile = true, rows = 3 : i32}
  call void @llvm.matrix.column.major.store.v48f32.i64(<48 x float> %3, float* align 4 %ptr, i64 %stride, i1 true, i32 3, i32 16)
  ret void
}

; CHECK-LABEL: llvm.func @get_active_lane_mask
define <7 x i1> @get_active_lane_mask(i64 %0, i64 %1) {
; CHECK: llvm.intr.get.active.lane.mask %{{.*}}, %{{.*}} : i64, i64 to vector<7xi1>
  %3 = call <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64 %0, i64 %1)
  ret <7 x i1> %3
}

; [128 lines elided]
; CHECK: llvm.intr.vastart %{{.*}}
  call void @llvm.va_start(i8* %0)
; CHECK: llvm.intr.vacopy %{{.*}} to %{{.*}}
  call void @llvm.va_copy(i8* %1, i8* %0)
; CHECK: llvm.intr.vaend %{{.*}}
  call void @llvm.va_end(i8* %0)
  ret void
}

; CHECK-LABEL: @assume
; CHECK-SAME: %[[TRUE:[a-zA-Z0-9]+]]
define void @assume(i1 %true) {
; CHECK: "llvm.intr.assume"(%[[TRUE]]) : (i1) -> ()
  call void @llvm.assume(i1 %true)
  ret void
}

; CHECK-LABEL: llvm.func @coro_id
define void @coro_id(i32 %0, i8* %1) {
; CHECK: llvm.intr.coro.id %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.token
  %3 = call token @llvm.coro.id(i32 %0, i8* %1, i8* %1, i8* null)
  ret void
}

; CHECK-LABEL: llvm.func @coro_begin
; [347 lines elided]
declare <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double>, <8 x i1>, i32)
declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.fptoui.v8i64.v8f64(<8 x double>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.fptosi.v8i64.v8f64(<8 x double>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0i32(<8 x i32*>, <8 x i1>, i32)
declare <8 x i32*> @llvm.vp.inttoptr.v8p0i32.v8i64(<8 x i64>, <8 x i1>, i32)
declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
declare void @llvm.assume(i1 %cond) |
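
; To see the imported MLIR that the CHECK lines above match against, the file
; can be run through the importer directly (a sketch of the workflow; in-tree
; the test is driven by lit and FileCheck):
;   mlir-translate -import-llvm mlir/test/Target/LLVMIR/Import/intrinsic.ll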