Index: llvm/lib/Target/Mips/MipsISelLowering.cpp =================================================================== --- llvm/lib/Target/Mips/MipsISelLowering.cpp +++ llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -493,6 +493,10 @@ setLibcallName(RTLIB::MULO_I128, nullptr); } + // Don't change 32 to 0. Otherwise, no NOP will be added for the load delay slot. + if (!Subtarget.hasMips2()) + setMaxAtomicSizeInBitsSupported(32); + setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4)); // The arguments on the stack are defined in terms of 4-byte slots on O32 Index: llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_atomic-mips1.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_atomic-mips1.ll @@ -0,0 +1,914 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=mipsel-linux-gnu --mcpu=mips1 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 + +; -------------------------------------------------------------------- +; unordered +; -------------------------------------------------------------------- + +define i8 @atomic_load_unordered_i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $2, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i8, ptr %ptr unordered, align 1 + ret i8 %load +} + +define i32 @atomic_load_unordered_i8_sext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i8_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr unordered, align 1 + %sext = sext i8 %load to i32 + ret i32 %sext +} + +define i16 @atomic_load_unordered_i8_sext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i8_sext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; 
MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr unordered, align 1 + %sext = sext i8 %load to i16 + ret i16 %sext +} + +define i64 @atomic_load_unordered_i8_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i8_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i8, ptr %ptr unordered, align 1 + %sext = sext i8 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_unordered_i8_zext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i8_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr unordered, align 1 + %zext = zext i8 %load to i32 + ret i32 %zext +} + +define i16 @atomic_load_unordered_i8_zext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i8_zext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr unordered, align 1 + %zext = zext i8 %load to i16 + ret i16 %zext +} + +define i64 @atomic_load_unordered_i8_zext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i8_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: andi $2, $1, 255 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load atomic i8, ptr %ptr unordered, align 1 + %zext = zext i8 %load to i64 + ret i64 %zext +} + +define i16 @atomic_load_unordered_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $2, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i16, ptr %ptr unordered, align 2 + ret i16 %load +} + +define i32 @atomic_load_unordered_i16_sext_i32(ptr %ptr) { +; MIPS32-LABEL: 
atomic_load_unordered_i16_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 16 + %load = load atomic i16, ptr %ptr unordered, align 2 + %sext = sext i16 %load to i32 + ret i32 %sext +} + +define i64 @atomic_load_unordered_i16_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i16_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i16, ptr %ptr unordered, align 2 + %sext = sext i16 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_unordered_i16_zext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i16_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 65535 + %load = load atomic i16, ptr %ptr unordered, align 2 + %zext = zext i16 %load to i32 + ret i32 %zext +} + +define i64 @atomic_load_unordered_i16_zext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i16_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: andi $2, $1, 65535 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load atomic i16, ptr %ptr unordered, align 2 + %zext = zext i16 %load to i64 + ret i64 %zext +} + +define i32 @atomic_load_unordered_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i32, ptr %ptr unordered, align 4 + ret i32 %load +} + +define i64 @atomic_load_unordered_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: jal 
__atomic_load_8 +; MIPS32-NEXT: ori $5, $zero, 0 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %load = load atomic i64, ptr %ptr unordered, align 8 + ret i64 %load +} + +define float @atomic_load_unordered_f32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_f32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f0, 64($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds float, ptr %ptr, i32 16 + %load = load atomic float, ptr %gep unordered, align 4 + ret float %load +} + +define double @atomic_load_unordered_f64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_f64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: ori $5, $zero, 0 +; MIPS32-NEXT: ori $1, $zero, 128 +; MIPS32-NEXT: jal __atomic_load_8 +; MIPS32-NEXT: addu $4, $4, $1 +; MIPS32-NEXT: mtc1 $2, $f0 +; MIPS32-NEXT: nop +; MIPS32-NEXT: mtc1 $3, $f1 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %gep = getelementptr inbounds double, ptr %ptr, i32 16 + %load = load atomic double, ptr %gep unordered, align 8 + ret double %load +} + +define ptr @atomic_load_unordered_p0i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_unordered_p0i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 64($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds ptr, ptr %ptr, i32 16 + %load = load atomic ptr, ptr %gep unordered, align 4 + ret ptr %load +} + +; -------------------------------------------------------------------- +; monotonic +; -------------------------------------------------------------------- + +define i8 @atomic_load_monotonic_i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $2, 0($4) +; 
MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i8, ptr %ptr monotonic, align 1 + ret i8 %load +} + +define i32 @atomic_load_monotonic_i8_sext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr monotonic, align 1 + %sext = sext i8 %load to i32 + ret i32 %sext +} + +define i16 @atomic_load_monotonic_i8_sext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr monotonic, align 1 + %sext = sext i8 %load to i16 + ret i16 %sext +} + +define i64 @atomic_load_monotonic_i8_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i8, ptr %ptr monotonic, align 1 + %sext = sext i8 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_monotonic_i8_zext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr monotonic, align 1 + %zext = zext i8 %load to i32 + ret i32 %zext +} + +define i16 @atomic_load_monotonic_i8_zext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr monotonic, align 1 + %zext = zext i8 %load to i16 + ret i16 %zext +} + +define i64 @atomic_load_monotonic_i8_zext_i64(ptr %ptr) { +; 
MIPS32-LABEL: atomic_load_monotonic_i8_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: andi $2, $1, 255 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load atomic i8, ptr %ptr monotonic, align 1 + %zext = zext i8 %load to i64 + ret i64 %zext +} + +define i16 @atomic_load_monotonic_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $2, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i16, ptr %ptr monotonic, align 2 + ret i16 %load +} + +define i32 @atomic_load_monotonic_i16_sext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i16_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 16 + %load = load atomic i16, ptr %ptr monotonic, align 2 + %sext = sext i16 %load to i32 + ret i32 %sext +} + +define i64 @atomic_load_monotonic_i16_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i16_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i16, ptr %ptr monotonic, align 2 + %sext = sext i16 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_monotonic_i16_zext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i16_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 65535 + %load = load atomic i16, ptr %ptr monotonic, align 2 + %zext = zext i16 %load to i32 + ret i32 %zext +} + +define i64 @atomic_load_monotonic_i16_zext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i16_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: andi $2, $1, 65535 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load 
atomic i16, ptr %ptr monotonic, align 2 + %zext = zext i16 %load to i64 + ret i64 %zext +} + +define i32 @atomic_load_monotonic_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 0($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i32, ptr %ptr monotonic, align 4 + ret i32 %load +} + +define i64 @atomic_load_monotonic_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: jal __atomic_load_8 +; MIPS32-NEXT: ori $5, $zero, 0 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %load = load atomic i64, ptr %ptr monotonic, align 8 + ret i64 %load +} + +define float @atomic_load_monotonic_f32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_f32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f0, 64($4) +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds float, ptr %ptr, i32 16 + %load = load atomic float, ptr %gep monotonic, align 4 + ret float %load +} + +define double @atomic_load_monotonic_f64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_f64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: ori $5, $zero, 0 +; MIPS32-NEXT: ori $1, $zero, 128 +; MIPS32-NEXT: jal __atomic_load_8 +; MIPS32-NEXT: addu $4, $4, $1 +; MIPS32-NEXT: mtc1 $2, $f0 +; MIPS32-NEXT: nop +; MIPS32-NEXT: mtc1 $3, $f1 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %gep = getelementptr inbounds double, ptr %ptr, i32 16 + %load = load atomic double, ptr %gep monotonic, 
align 8 + ret double %load +} + +define ptr @atomic_load_monotonic_p0i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_monotonic_p0i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 64($4) +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds ptr, ptr %ptr, i32 16 + %load = load atomic ptr, ptr %gep monotonic, align 4 + ret ptr %load +} + +; -------------------------------------------------------------------- +; acquire +; -------------------------------------------------------------------- + +define i8 @atomic_load_acquire_i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $2, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i8, ptr %ptr acquire, align 1 + ret i8 %load +} + +define i32 @atomic_load_acquire_i8_sext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i8_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr acquire, align 1 + %sext = sext i8 %load to i32 + ret i32 %sext +} + +define i16 @atomic_load_acquire_i8_sext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i8_sext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr acquire, align 1 + %sext = sext i8 %load to i16 + ret i16 %sext +} + +define i64 @atomic_load_acquire_i8_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i8_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i8, ptr %ptr acquire, align 1 + %sext = sext i8 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_acquire_i8_zext_i32(ptr %ptr) { +; 
MIPS32-LABEL: atomic_load_acquire_i8_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr acquire, align 1 + %zext = zext i8 %load to i32 + ret i32 %zext +} + +define i16 @atomic_load_acquire_i8_zext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i8_zext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr acquire, align 1 + %zext = zext i8 %load to i16 + ret i16 %zext +} + +define i64 @atomic_load_acquire_i8_zext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i8_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: andi $2, $1, 255 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load atomic i8, ptr %ptr acquire, align 1 + %zext = zext i8 %load to i64 + ret i64 %zext +} + +define i16 @atomic_load_acquire_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $2, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i16, ptr %ptr acquire, align 2 + ret i16 %load +} + +define i32 @atomic_load_acquire_i16_sext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i16_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 16 + %load = load atomic i16, ptr %ptr acquire, align 2 + %sext = sext i16 %load to i32 + ret i32 %sext +} + +define i64 @atomic_load_acquire_i16_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i16_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i16, ptr %ptr acquire, align 2 + %sext = sext 
i16 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_acquire_i16_zext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i16_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 65535 + %load = load atomic i16, ptr %ptr acquire, align 2 + %zext = zext i16 %load to i32 + ret i32 %zext +} + +define i64 @atomic_load_acquire_i16_zext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i16_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: andi $2, $1, 65535 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load atomic i16, ptr %ptr acquire, align 2 + %zext = zext i16 %load to i64 + ret i64 %zext +} + +define i32 @atomic_load_acquire_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i32, ptr %ptr acquire, align 4 + ret i32 %load +} + +define i64 @atomic_load_acquire_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: jal __atomic_load_8 +; MIPS32-NEXT: ori $5, $zero, 2 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %load = load atomic i64, ptr %ptr acquire, align 8 + ret i64 %load +} + +define float @atomic_load_acquire_f32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_f32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f0, 64($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds float, ptr %ptr, i32 16 + %load = load atomic float, ptr %gep acquire, align 4 + ret float %load +} + +define double @atomic_load_acquire_f64(ptr %ptr) { +; MIPS32-LABEL: 
atomic_load_acquire_f64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: ori $5, $zero, 2 +; MIPS32-NEXT: ori $1, $zero, 128 +; MIPS32-NEXT: jal __atomic_load_8 +; MIPS32-NEXT: addu $4, $4, $1 +; MIPS32-NEXT: mtc1 $2, $f0 +; MIPS32-NEXT: nop +; MIPS32-NEXT: mtc1 $3, $f1 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %gep = getelementptr inbounds double, ptr %ptr, i32 16 + %load = load atomic double, ptr %gep acquire, align 8 + ret double %load +} + +define ptr @atomic_load_acquire_p0i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_acquire_p0i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 64($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds ptr, ptr %ptr, i32 16 + %load = load atomic ptr, ptr %gep acquire, align 4 + ret ptr %load +} + +; -------------------------------------------------------------------- +; seq_cst +; -------------------------------------------------------------------- + +define i8 @atomic_load_seq_cst_i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $2, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i8, ptr %ptr seq_cst, align 1 + ret i8 %load +} + +define i32 @atomic_load_seq_cst_i8_sext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr seq_cst, align 1 + %sext = sext i8 %load to i32 + ret i32 %sext +} + +define i16 @atomic_load_seq_cst_i8_sext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: 
sync +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 24 + %load = load atomic i8, ptr %ptr seq_cst, align 1 + %sext = sext i8 %load to i16 + ret i16 %sext +} + +define i64 @atomic_load_seq_cst_i8_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i8, ptr %ptr seq_cst, align 1 + %sext = sext i8 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_seq_cst_i8_zext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr seq_cst, align 1 + %zext = zext i8 %load to i32 + ret i32 %zext +} + +define i16 @atomic_load_seq_cst_i8_zext_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 255 + %load = load atomic i8, ptr %ptr seq_cst, align 1 + %zext = zext i8 %load to i16 + ret i16 %zext +} + +define i64 @atomic_load_seq_cst_i8_zext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lbu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: andi $2, $1, 255 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load atomic i8, ptr %ptr seq_cst, align 1 + %zext = zext i8 %load to i64 + ret i64 %zext +} + +define i16 @atomic_load_seq_cst_i16(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i16: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $2, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i16, ptr %ptr seq_cst, align 2 + ret i16 %load +} + +define i32 @atomic_load_seq_cst_i16_sext_i32(ptr %ptr) { +; MIPS32-LABEL: 
atomic_load_seq_cst_i16_sext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $2, $1, 16 + %load = load atomic i16, ptr %ptr seq_cst, align 2 + %sext = sext i16 %load to i32 + ret i32 %sext +} + +define i64 @atomic_load_seq_cst_i16_sext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i16_sext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: sra $3, $1, 31 + %load = load atomic i16, ptr %ptr seq_cst, align 2 + %sext = sext i16 %load to i64 + ret i64 %sext +} + +define i32 @atomic_load_seq_cst_i16_zext_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i16_zext_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $2, $1, 65535 + %load = load atomic i16, ptr %ptr seq_cst, align 2 + %zext = zext i16 %load to i32 + ret i32 %zext +} + +define i64 @atomic_load_seq_cst_i16_zext_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i16_zext_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lhu $1, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: andi $2, $1, 65535 +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: andi $3, $1, 0 + %load = load atomic i16, ptr %ptr seq_cst, align 2 + %zext = zext i16 %load to i64 + ret i64 %zext +} + +define i32 @atomic_load_seq_cst_i32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 0($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %load = load atomic i32, ptr %ptr seq_cst, align 4 + ret i32 %load +} + +define i64 @atomic_load_seq_cst_i64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_i64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: jal 
__atomic_load_8 +; MIPS32-NEXT: ori $5, $zero, 5 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %load = load atomic i64, ptr %ptr seq_cst, align 8 + ret i64 %load +} + +define float @atomic_load_seq_cst_f32(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_f32: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f0, 64($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds float, ptr %ptr, i32 16 + %load = load atomic float, ptr %gep seq_cst, align 4 + ret float %load +} + +define double @atomic_load_seq_cst_f64(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_f64: +; MIPS32: # %bb.0: +; MIPS32-NEXT: addiu $sp, $sp, -24 +; MIPS32-NEXT: .cfi_def_cfa_offset 24 +; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; MIPS32-NEXT: .cfi_offset 31, -4 +; MIPS32-NEXT: ori $5, $zero, 5 +; MIPS32-NEXT: ori $1, $zero, 128 +; MIPS32-NEXT: jal __atomic_load_8 +; MIPS32-NEXT: addu $4, $4, $1 +; MIPS32-NEXT: mtc1 $2, $f0 +; MIPS32-NEXT: nop +; MIPS32-NEXT: mtc1 $3, $f1 +; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; MIPS32-NEXT: nop +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: addiu $sp, $sp, 24 + %gep = getelementptr inbounds double, ptr %ptr, i32 16 + %load = load atomic double, ptr %gep seq_cst, align 8 + ret double %load +} + +define ptr @atomic_load_seq_cst_p0i8(ptr %ptr) { +; MIPS32-LABEL: atomic_load_seq_cst_p0i8: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 64($4) +; MIPS32-NEXT: sync +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop + %gep = getelementptr inbounds ptr, ptr %ptr, i32 16 + %load = load atomic ptr, ptr %gep seq_cst, align 4 + ret ptr %load +}