Index: llvm/lib/Target/RISCV/RISCVISelLowering.h =================================================================== --- llvm/lib/Target/RISCV/RISCVISelLowering.h +++ llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -507,6 +507,8 @@ bool shouldExtendTypeInLibCall(EVT Type) const override; bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override; + bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, + Align &PrefAlign) const override; /// Returns the register with the specified architectural or ABI name. This /// method is necessary to lower the llvm.read_register.* and Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp =================================================================== --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -12978,6 +12978,16 @@ return IsSigned; } +bool RISCVTargetLowering::shouldAlignPointerArgs(CallInst *CI, + unsigned &MinSize, + Align &PrefAlign) const { + if (!isa<MemIntrinsic>(CI)) + return false; + MinSize = 8; + PrefAlign = Subtarget.is64Bit() ? Align(8) : Align(4); + return true; +} + bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const { // Check integral scalar types. 
Index: llvm/test/CodeGen/RISCV/memcpy-align.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/memcpy-align.ll @@ -0,0 +1,22 @@ +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV32 +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV64 + +@.str = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, SOME STRING\00", align 1 +@dst = internal global [31 x i8] zeroinitializer, align 1 + +define void @foo() { +; RV32-LABEL: foo: +; RV32: .p2align 2 +; RV32-NEXT: .L.str: + +; RV64-LABEL: foo: +; RV64: .p2align 3 +; RV64-NEXT: .L.str: +entry: + tail call void @llvm.memcpy.p0i8.p0i8.i32(ptr noundef nonnull align 1 dereferenceable(31) @dst, ptr noundef nonnull align 1 dereferenceable(31) @.str, i32 31, i1 false) + ret void +} + +declare void @llvm.memcpy.p0i8.p0i8.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind Index: llvm/test/CodeGen/RISCV/memcpy-inline.ll =================================================================== --- llvm/test/CodeGen/RISCV/memcpy-inline.ll +++ llvm/test/CodeGen/RISCV/memcpy-inline.ll @@ -295,50 +295,35 @@ } define void @t6() nounwind { -; RV32ALIGNED-LABEL: t6: -; RV32ALIGNED: # %bb.0: # %entry -; RV32ALIGNED-NEXT: addi sp, sp, -16 -; RV32ALIGNED-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32ALIGNED-NEXT: lui a0, %hi(spool.splbuf) -; RV32ALIGNED-NEXT: addi a0, a0, %lo(spool.splbuf) -; RV32ALIGNED-NEXT: lui a1, %hi(.L.str6) -; RV32ALIGNED-NEXT: addi a1, a1, %lo(.L.str6) -; RV32ALIGNED-NEXT: li a2, 14 -; RV32ALIGNED-NEXT: call memcpy@plt -; RV32ALIGNED-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32ALIGNED-NEXT: addi sp, sp, 16 -; RV32ALIGNED-NEXT: ret +; RV32-LABEL: t6: +; RV32: # %bb.0: # %entry +; RV32-NEXT: lui a0, %hi(spool.splbuf) +; RV32-NEXT: li a1, 88 +; RV32-NEXT: sh a1, %lo(spool.splbuf+12)(a0) +; RV32-NEXT: lui a1, 361862 +; RV32-NEXT: addi a1, a1, -1960 +; 
RV32-NEXT: sw a1, %lo(spool.splbuf+8)(a0) +; RV32-NEXT: lui a1, 362199 +; RV32-NEXT: addi a1, a1, 559 +; RV32-NEXT: sw a1, %lo(spool.splbuf+4)(a0) +; RV32-NEXT: lui a1, 460503 +; RV32-NEXT: addi a1, a1, 1071 +; RV32-NEXT: sw a1, %lo(spool.splbuf)(a0) +; RV32-NEXT: ret ; ; RV64ALIGNED-LABEL: t6: ; RV64ALIGNED: # %bb.0: # %entry -; RV64ALIGNED-NEXT: addi sp, sp, -16 -; RV64ALIGNED-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ALIGNED-NEXT: lui a0, %hi(spool.splbuf) -; RV64ALIGNED-NEXT: addi a0, a0, %lo(spool.splbuf) -; RV64ALIGNED-NEXT: lui a1, %hi(.L.str6) -; RV64ALIGNED-NEXT: addi a1, a1, %lo(.L.str6) -; RV64ALIGNED-NEXT: li a2, 14 -; RV64ALIGNED-NEXT: call memcpy@plt -; RV64ALIGNED-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64ALIGNED-NEXT: addi sp, sp, 16 +; RV64ALIGNED-NEXT: li a1, 88 +; RV64ALIGNED-NEXT: sh a1, %lo(spool.splbuf+12)(a0) +; RV64ALIGNED-NEXT: lui a1, %hi(.LCPI6_0) +; RV64ALIGNED-NEXT: ld a1, %lo(.LCPI6_0)(a1) +; RV64ALIGNED-NEXT: lui a2, 361862 +; RV64ALIGNED-NEXT: addiw a2, a2, -1960 +; RV64ALIGNED-NEXT: sw a2, %lo(spool.splbuf+8)(a0) +; RV64ALIGNED-NEXT: sd a1, %lo(spool.splbuf)(a0) ; RV64ALIGNED-NEXT: ret ; -; RV32UNALIGNED-LABEL: t6: -; RV32UNALIGNED: # %bb.0: # %entry -; RV32UNALIGNED-NEXT: lui a0, %hi(spool.splbuf) -; RV32UNALIGNED-NEXT: li a1, 88 -; RV32UNALIGNED-NEXT: sh a1, %lo(spool.splbuf+12)(a0) -; RV32UNALIGNED-NEXT: lui a1, 361862 -; RV32UNALIGNED-NEXT: addi a1, a1, -1960 -; RV32UNALIGNED-NEXT: sw a1, %lo(spool.splbuf+8)(a0) -; RV32UNALIGNED-NEXT: lui a1, 362199 -; RV32UNALIGNED-NEXT: addi a1, a1, 559 -; RV32UNALIGNED-NEXT: sw a1, %lo(spool.splbuf+4)(a0) -; RV32UNALIGNED-NEXT: lui a1, 460503 -; RV32UNALIGNED-NEXT: addi a1, a1, 1071 -; RV32UNALIGNED-NEXT: sw a1, %lo(spool.splbuf)(a0) -; RV32UNALIGNED-NEXT: ret -; ; RV64UNALIGNED-LABEL: t6: ; RV64UNALIGNED: # %bb.0: # %entry ; RV64UNALIGNED-NEXT: lui a0, %hi(.L.str6)