@@ -381,19 +381,26 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
 ; X32-NEXT:    subl $16, %esp
+; X32-NEXT:    vmovdqa {{.*#+}} xmm3 = [33,0,63,0]
+; X32-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
+; X32-NEXT:    vpsrlq %xmm3, %xmm4, %xmm5
+; X32-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,3,0,1]
+; X32-NEXT:    vpsrlq %xmm6, %xmm4, %xmm4
+; X32-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; X32-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; X32-NEXT:    vpsrlq %xmm6, %xmm5, %xmm7
+; X32-NEXT:    vpsrlq %xmm3, %xmm5, %xmm5
+; X32-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT:    vpsrlq %xmm6, %xmm2, %xmm6
+; X32-NEXT:    vpsrlq %xmm3, %xmm2, %xmm2
+; X32-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
 ; X32-NEXT:    vpmovsxdq 16(%ebp), %xmm3
+; X32-NEXT:    vpxor %xmm4, %xmm5, %xmm5
+; X32-NEXT:    vpsubq %xmm4, %xmm5, %xmm5
+; X32-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; X32-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
 ; X32-NEXT:    vpmovsxdq 8(%ebp), %xmm4
-; X32-NEXT:    vmovdqa {{.*#+}} xmm5 = [33,0,63,0]
-; X32-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,2147483648,0,2147483648]
-; X32-NEXT:    vpsrlq %xmm5, %xmm6, %xmm6
-; X32-NEXT:    vextractf128 $1, %ymm2, %xmm7
-; X32-NEXT:    vpsrlq %xmm5, %xmm7, %xmm7
-; X32-NEXT:    vpxor %xmm6, %xmm7, %xmm7
-; X32-NEXT:    vpsubq %xmm6, %xmm7, %xmm7
-; X32-NEXT:    vpsrlq %xmm5, %xmm2, %xmm2
-; X32-NEXT:    vpxor %xmm6, %xmm2, %xmm2
-; X32-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
-; X32-NEXT:    vinsertf128 $1, %xmm7, %ymm2, %ymm2
+; X32-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
 ; X32-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; X32-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm5
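For context: the X32 lines in this hunk are the AVX1 expansion of a per-lane variable arithmetic right shift of 64-bit elements (a native vpsraq only exists in AVX-512). vpsrlq shifts logically, and its xmm-count form applies a single count to both lanes, so the code shifts twice (counts 33 and 63 from the [33,0,63,0] constant, the second count moved into place by vpshufd) and merges the halves with vpblendw. The sign is then restored by vpxor/vpsubq against the identically shifted sign-bit mask ([0,2147483648,0,2147483648] is 0x8000000000000000 in each 64-bit lane). A minimal C sketch of the scalar identity behind those vpxor/vpsubq pairs; the helper name ashr_via_lshr is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* With only a logical 64-bit shift (vpsrlq) available, an arithmetic
 * shift is rebuilt as
 *   ashr(x, n) == (lshr(x, n) ^ m) - m,  m = 0x8000000000000000 >> n.
 * lshr leaves the original sign bit at position 63-n, exactly where m
 * has its single set bit; the xor/subtract pair (vpxor/vpsubq in the
 * diff) propagates that bit through all higher positions. */
static int64_t ashr_via_lshr(int64_t x, unsigned n) {
    uint64_t m = 0x8000000000000000ULL >> n; /* shifted sign-bit mask */
    uint64_t l = (uint64_t)x >> n;           /* logical shift (vpsrlq) */
    return (int64_t)((l ^ m) - m);           /* vpxor + vpsubq */
}

int main(void) {
    /* 33 and 63 match the per-lane counts in the [33,0,63,0] constant */
    printf("%lld\n", (long long)ashr_via_lshr(-1000000000000LL, 33)); /* -117 */
    printf("%lld\n", (long long)ashr_via_lshr(-1LL, 63));             /* -1 */
    return 0;
}

If the sign bit is clear, l ^ m sets bit 63-n and the subtraction removes it again, so positive values pass through unchanged; if it is set, the subtraction borrows through every bit above 63-n, filling them with ones exactly as an arithmetic shift would.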