; RUN: llc < %s -march=x86-64 | FileCheck %s

; Full strength reduction wouldn't reduce register pressure, so LSR should
; stick with indexing here.

; CHECK: movaps (%rsi,%rax,4), %xmm3
; CHECK: movaps %xmm3, (%rdi,%rax,4)
; CHECK: cmpl %eax, (%rdx)
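
; The loop below is, roughly, the following C (an illustrative sketch, not
; part of the original test; the vector floor body itself is elided):
;
;   void vvfloorf(float *y, float *x, int *n) {
;     if (*n <= 0) return;
;     long i = 0;
;     do {
;       /* y[i..i+3] = floorf(x[i..i+3]), one <4 x float> at a time */
;       i += 4;
;     } while (*n > (int)i);   /* *n is reloaded on every trip */
;   }
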
define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
entry:
  %0 = load i32* %n, align 4
  %1 = icmp sgt i32 %0, 0
  br i1 %1, label %bb, label %return

bb:
  %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
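  ; %tmp = 4 * %indvar is the scalar element index; keeping this indexed
  ; (base + index*4) addressing, rather than strength-reducing each pointer
  ; separately, is what the movaps CHECK patterns above verify.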
  %tmp = shl i64 %indvar, 2
  %scevgep = getelementptr float* %y, i64 %tmp
  %scevgep9 = bitcast float* %scevgep to <4 x float>*
  %scevgep10 = getelementptr float* %x, i64 %tmp
  %scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
  %2 = load <4 x float>* %scevgep1011, align 16
  %3 = bitcast <4 x float> %2 to <4 x i32>
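  ; 0x7fffffff clears the sign bit (giving |x|); 0x80000000 below extracts
  ; just the sign bits so they can be reattached at the end.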
  %4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %5 = bitcast <4 x i32> %4 to <4 x float>
  %6 = and <4 x i32> %3, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
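  ; The classic SSE 2^23 trick: predicate 5 is CMPNLTPS, so %7 is all-ones
  ; in lanes where |x| >= 2^23 (8388608.0) and the value is already
  ; integral. 0x4b000000 is the bit pattern of 2^23 as a float, so %10 is
  ; a bias of copysign(2^23, x) in the remaining lanes.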
  %7 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %5, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) nounwind
  %tmp.i4 = bitcast <4 x float> %7 to <4 x i32>
  %8 = xor <4 x i32> %tmp.i4, <i32 -1, i32 -1, i32 -1, i32 -1>
  %9 = and <4 x i32> %8, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
  %10 = or <4 x i32> %9, %6
  %11 = bitcast <4 x i32> %10 to <4 x float>
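  ; Adding and then subtracting the bias rounds x to the nearest integer
  ; (ties to even) in lanes where the bias is nonzero; elsewhere x passes
  ; through unchanged.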
  %12 = fadd <4 x float> %2, %11
  %13 = fsub <4 x float> %12, %11
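  ; Predicate 1 is CMPLTPS: lanes where x < round(x) were rounded up, so
  ; the all-ones mask, reinterpreted as i32 and converted via cvtdq2ps,
  ; contributes exactly -1.0 to turn round-to-nearest into floor.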
  %14 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %2, <4 x float> %13, i8 1) nounwind
  %15 = bitcast <4 x float> %14 to <4 x i32>
  %16 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %15) nounwind readnone
  %17 = fadd <4 x float> %13, %16
  %tmp.i = bitcast <4 x float> %17 to <4 x i32>
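  ; Reattach the original sign bits, so e.g. floor(-0.0) yields -0.0
  ; rather than +0.0 (the or is a no-op in lanes already negative).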
  %18 = or <4 x i32> %tmp.i, %6
  %19 = bitcast <4 x i32> %18 to <4 x float>
  store <4 x float> %19, <4 x float>* %scevgep9, align 16
  %tmp12 = add i64 %tmp, 4
  %tmp13 = trunc i64 %tmp12 to i32
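  ; %n may alias the store through %scevgep9 (no noalias attributes), so
  ; the trip count is reloaded each iteration; this reload is the (%rdx)
  ; operand in the cmpl CHECK line above.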
  %20 = load i32* %n, align 4
  %21 = icmp sgt i32 %20, %tmp13
  %indvar.next = add i64 %indvar, 1
  br i1 %21, label %bb, label %return

return:
  ret void
}

declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone

declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone