Index: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
@@ -2642,9 +2642,9 @@
   switch (IntNo) {
   default: return SDValue();    // Don't custom lower most intrinsics.
   case Intrinsic::arm_rbit: {
-    assert(Op.getOperand(0).getValueType() == MVT::i32 &&
+    assert(Op.getOperand(1).getValueType() == MVT::i32 &&
            "RBIT intrinsic must have i32 type!");
-    return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(0));
+    return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
   }
   case Intrinsic::arm_thread_pointer: {
     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
Index: llvm/trunk/test/CodeGen/AArch64/rbit.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/rbit.ll
+++ llvm/trunk/test/CodeGen/AArch64/rbit.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s
+
+; CHECK-LABEL: rbit32
+; CHECK: rbit w0, w0
+define i32 @rbit32(i32 %t) {
+entry:
+  %rbit.i = call i32 @llvm.aarch64.rbit.i32(i32 %t)
+  ret i32 %rbit.i
+}
+
+; CHECK-LABEL: rbit64
+; CHECK: rbit x0, x0
+define i64 @rbit64(i64 %t) {
+entry:
+  %rbit.i = call i64 @llvm.aarch64.rbit.i64(i64 %t)
+  ret i64 %rbit.i
+}
+
+declare i64 @llvm.aarch64.rbit.i64(i64)
+declare i32 @llvm.aarch64.rbit.i32(i32)
Index: llvm/trunk/test/CodeGen/ARM/rbit.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/rbit.ll
+++ llvm/trunk/test/CodeGen/ARM/rbit.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=armv8-eabi %s -o - | FileCheck %s
+
+; CHECK-LABEL: rbit
+; CHECK: rbit r0, r0
+define i32 @rbit(i32 %t) {
+entry:
+  %rbit = call i32 @llvm.arm.rbit(i32 %t)
+  ret i32 %rbit
+}
+
+; CHECK-LABEL: rbit_constant
+; CHECK: mov r0, #0
+; CHECK: rbit r0, r0
+define i32 @rbit_constant() {
+entry:
+  %rbit.i = call i32 @llvm.arm.rbit(i32 0)
+  ret i32 %rbit.i
+}
+
+declare i32 @llvm.arm.rbit(i32)