[X86][AVX-512] Masked compress-to-memory must merge the masked-off lanes
with the existing destination memory contents, not with undef: lower
COMPRESS_TO_MEM by first loading the destination and using that value as
the pass-through operand of the masked compress, and change the EVEX_K
memory-store pattern in the .td file to select against (load addr:$dst)
instead of ImmAllZerosV. Also adds a -O0 regression test checking that the
zero-masked register form still selects vpcompressd {%k1} {z}.

Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -18871,9 +18871,11 @@
       return DAG.getStore(Chain, dl, DataToCompress, Addr,
                           MemIntr->getMemOperand());
 
+    SDValue LoadAddress = DAG.getLoad(VT, SDLoc(Op), MemIntr->getChain(), Addr,
+                                      MemIntr->getPointerInfo(), MemIntr->getAlignment());
     SDValue Compressed =
       getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress),
-                           Mask, DAG.getUNDEF(VT), Subtarget, DAG);
+                           Mask, LoadAddress, Subtarget, DAG);
     return DAG.getStore(Chain, dl, Compressed, Addr,
                         MemIntr->getMemOperand());
   }
Index: lib/Target/X86/X86InstrAVX512.td
===================================================================
--- lib/Target/X86/X86InstrAVX512.td
+++ lib/Target/X86/X86InstrAVX512.td
@@ -7219,7 +7219,7 @@
               (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
               OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
               [(store (_.VT (vselect _.KRCWM:$mask,
-                             (_.VT (X86compress _.RC:$src)), _.ImmAllZerosV)),
+                             (_.VT (X86compress _.RC:$src)), (_.VT (load addr:$dst)))),
                 addr:$dst)]>,
               EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
 }
Index: test/CodeGen/X86/compress-maskz.ll
===================================================================
--- test/CodeGen/X86/compress-maskz.ll
+++ test/CodeGen/X86/compress-maskz.ll
@@ -0,0 +1,22 @@
+;RUN: llc < %s -O0 -mattr=+avx512vl -mtriple=x86_64-unknown | FileCheck %s --check-prefix=CHECK
+
+@xmm = common global <4 x i32> zeroinitializer, align 16
+@k8 = common global i8 0, align 1
+@res = common global <4 x i32> zeroinitializer, align 16
+
+define i32 @main() {
+; CHECK-LABEL: main:
+; CHECK: vmovdqa32 {{.*}}(%rip), %xmm0
+; CHECK: vpcompressd %xmm0, %xmm0 {%k1} {z}
+entry:
+  %.compoundliteral = alloca <2 x i64>, align 16
+  %res = alloca <4 x i32>, align 16
+  %a0 = load <4 x i32>, <4 x i32>* @xmm, align 16
+  %a2 = load i8, i8* @k8, align 1
+  %a21 = call <4 x i32> @llvm.x86.avx512.mask.compress.d.128(<4 x i32> %a0, <4 x i32> zeroinitializer, i8 %a2) #2
+  store volatile <4 x i32> %a21, <4 x i32>* %res, align 16
+  store <2 x i64> zeroinitializer, <2 x i64>* %.compoundliteral, align 16
+  ret i32 0
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.compress.d.128(<4 x i32>, <4 x i32>, i8)