Index: llvm/lib/Target/RISCV/CMakeLists.txt
===================================================================
--- llvm/lib/Target/RISCV/CMakeLists.txt
+++ llvm/lib/Target/RISCV/CMakeLists.txt
@@ -21,6 +21,7 @@
 add_llvm_target(RISCVCodeGen
   RISCVAsmPrinter.cpp
   RISCVCallLowering.cpp
+  RISCVCleanupVSETVLI.cpp
   RISCVExpandAtomicPseudoInsts.cpp
   RISCVExpandPseudoInsts.cpp
   RISCVFrameLowering.cpp
Index: llvm/lib/Target/RISCV/RISCV.h
===================================================================
--- llvm/lib/Target/RISCV/RISCV.h
+++ llvm/lib/Target/RISCV/RISCV.h
@@ -46,6 +46,9 @@
 FunctionPass *createRISCVExpandAtomicPseudoPass();
 void initializeRISCVExpandAtomicPseudoPass(PassRegistry &);
 
+FunctionPass *createRISCVCleanupVSETVLIPass();
+void initializeRISCVCleanupVSETVLIPass(PassRegistry &);
+
 InstructionSelector *createRISCVInstructionSelector(const RISCVTargetMachine &,
                                                     RISCVSubtarget &,
                                                     RISCVRegisterBankInfo &);
Index: llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
@@ -0,0 +1,131 @@
+//===- RISCVCleanupVSETVLI.cpp - Cleanup unneeded VSETVLI instructions ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a function pass that removes duplicate vsetvli
+// instructions within a basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-cleanup-vsetvli"
+#define RISCV_CLEANUP_VSETVLI_NAME "RISCV Cleanup VSETVLI pass"
+
+namespace {
+
+class RISCVCleanupVSETVLI : public MachineFunctionPass {
+public:
+  static char ID;
+
+  RISCVCleanupVSETVLI() : MachineFunctionPass(ID) {
+    initializeRISCVCleanupVSETVLIPass(*PassRegistry::getPassRegistry());
+  }
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::IsSSA);
+  }
+
+  // This pass modifies the program, but does not modify the CFG.
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override { return RISCV_CLEANUP_VSETVLI_NAME; }
+};
+
+} // end anonymous namespace
+
+char RISCVCleanupVSETVLI::ID = 0;
+
+INITIALIZE_PASS(RISCVCleanupVSETVLI, DEBUG_TYPE,
+                RISCV_CLEANUP_VSETVLI_NAME, false, false)
+
+bool RISCVCleanupVSETVLI::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
+  bool Changed = false;
+  MachineInstr *PrevVSETVLI = nullptr;
+
+  for (auto MII = MBB.begin(), MIE = MBB.end(); MII != MIE;) {
+    MachineInstr &MI = *MII++;
+
+    if (MI.getOpcode() != RISCV::PseudoVSETVLI) {
+      if (PrevVSETVLI &&
+          (MI.isCall() || MI.modifiesRegister(RISCV::VL) ||
+           MI.modifiesRegister(RISCV::VTYPE))) {
+        // Old VL/VTYPE is overwritten.
+        PrevVSETVLI = nullptr;
+      }
+      continue;
+    }
+
+    // If we don't have a previous VSETVLI or the VL output isn't dead, we
+    // can't remove this VSETVLI.
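+    // Operand 0 of PseudoVSETVLI is its GPR VL result; a dead def means
+    // nothing reads the VL this instruction produces.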
+    if (!PrevVSETVLI || !MI.getOperand(0).isDead()) {
+      PrevVSETVLI = &MI;
+      continue;
+    }
+
+    Register PrevAVLReg = PrevVSETVLI->getOperand(1).getReg();
+    Register AVLReg = MI.getOperand(1).getReg();
+    int64_t PrevVTYPEImm = PrevVSETVLI->getOperand(2).getImm();
+    int64_t VTYPEImm = MI.getOperand(2).getImm();
+
+    // Does this VSETVLI use the same AVL register and VTYPE immediate?
+    if (PrevAVLReg != AVLReg || PrevVTYPEImm != VTYPEImm) {
+      PrevVSETVLI = &MI;
+      continue;
+    }
+
+    // If the AVLReg is X0 we need to look at the output VL of both VSETVLIs:
+    // with AVL=X0, a non-X0 destination sets VL to VLMAX, while an X0
+    // destination leaves VL unchanged.
+    if (AVLReg == RISCV::X0) {
+      Register PrevOutVL = PrevVSETVLI->getOperand(0).getReg();
+      Register OutVL = MI.getOperand(0).getReg();
+      // We can't remove this VSETVLI if the previous one left VL unchanged
+      // and the current instruction is setting it to VLMAX. Without knowing
+      // the VL before the previous instruction we don't know if this is a
+      // change.
+      if (PrevOutVL == RISCV::X0 && OutVL != RISCV::X0) {
+        PrevVSETVLI = &MI;
+        continue;
+      }
+    }
+
+    // This VSETVLI is redundant; remove it.
+    MI.eraseFromParent();
+    Changed = true;
+  }
+
+  return Changed;
+}
+
+bool RISCVCleanupVSETVLI::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(MF.getFunction()))
+    return false;
+
+  // Skip if the vector extension is not enabled.
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  if (!ST.hasStdExtV())
+    return false;
+
+  bool Changed = false;
+
+  for (MachineBasicBlock &MBB : MF)
+    Changed |= runOnMachineBasicBlock(MBB);
+
+  return Changed;
+}
+
+/// Returns an instance of the Cleanup VSETVLI pass.
+FunctionPass *llvm::createRISCVCleanupVSETVLIPass() {
+  return new RISCVCleanupVSETVLI();
+}
Index: llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -39,6 +39,7 @@
   initializeGlobalISel(*PR);
   initializeRISCVMergeBaseOffsetOptPass(*PR);
   initializeRISCVExpandPseudoPass(*PR);
+  initializeRISCVCleanupVSETVLIPass(*PR);
 }
 
 static StringRef computeDataLayout(const Triple &TT) {
@@ -183,6 +184,8 @@
 }
 
 void RISCVPassConfig::addPreRegAlloc() {
-  if (TM->getOptLevel() != CodeGenOpt::None)
+  if (TM->getOptLevel() != CodeGenOpt::None) {
     addPass(createRISCVMergeBaseOffsetOptPass());
+    addPass(createRISCVCleanupVSETVLIPass());
+  }
 }
Index: llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
+++ llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -51,12 +51,9 @@
 # POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: PseudoVSE64_V_M1 killed %9, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
-# CODEGEN: vsetvli a4, a3, e64,m1,tu,mu
+# CODEGEN: vsetvli a3, a3, e64,m1,tu,mu
 # CODEGEN-NEXT: vle64.v v25, (a1)
-# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
 # CODEGEN-NEXT: vle64.v v26, (a2)
-# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
 # CODEGEN-NEXT: vadd.vv v25, v25, v26
-# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
 # CODEGEN-NEXT: vse64.v v25, (a0)
 # CODEGEN-NEXT: ret
Index: llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
+++ llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
@@ -9,11 +9,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
@@ -28,11 +25,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
 ; CHECK-NEXT:    vse16.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
@@ -47,11 +41,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
 ; CHECK-NEXT:    vse16.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
@@ -66,11 +57,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
@@ -85,11 +73,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pa
@@ -104,11 +89,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa
Index: llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
+++ llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
@@ -9,11 +9,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
@@ -28,11 +25,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
 ; CHECK-NEXT:    vse32.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
@@ -47,11 +41,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
 ; CHECK-NEXT:    vse32.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
@@ -66,11 +57,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
@@ -85,11 +73,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa
Index: llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
+++ llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
@@ -9,11 +9,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
@@ -28,11 +25,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
 ; CHECK-NEXT:    vse64.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
@@ -47,11 +41,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
@@ -66,11 +57,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa
Index: llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
+++ llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
@@ -9,11 +9,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m1,tu,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
@@ -28,11 +25,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m2,tu,mu
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
 ; CHECK-NEXT:    vle8.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
 ; CHECK-NEXT:    vse8.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
@@ -47,11 +41,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,tu,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
 ; CHECK-NEXT:    vse8.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
@@ -66,11 +57,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
 ; CHECK-NEXT:    vle8.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
@@ -85,11 +73,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,tu,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pa
@@ -104,11 +89,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,tu,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pa
@@ -123,11 +105,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa
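
Note: INITIALIZE_PASS registers the pass under the argument "riscv-cleanup-vsetvli" (its DEBUG_TYPE), so the cleanup can be exercised in isolation with llc's -run-pass on MIR input. A sketch, assuming a build with the draft V extension enabled ("+experimental-v" at the time of this patch); input.mir is a placeholder for a test such as add-vsetvli-gpr.mir:

  llc -mtriple=riscv64 -mattr=+experimental-v \
      -run-pass=riscv-cleanup-vsetvli input.mir -o -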