; RUN: llc -mtriple=arm-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=ARM %s
; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=THUMB %s
; RUN: llc -mtriple=thumb-eabi -arm-atomic-cfg-tidy=0 -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - \
; RUN: | FileCheck -check-prefix=T2 %s
; RUN: llc -mtriple=thumbv8-eabi -arm-atomic-cfg-tidy=0 %s -o - | FileCheck -check-prefix=V8 %s

; FIXME: The -march=thumb test doesn't change if -disable-peephole is specified.
; Single-pointer struct used by @foo below; iterated like a linked list of slots.
%struct.Foo = type { i8* }
; Tail-recursive walk that exercises the AND/TST peephole: the `and i32 %0, 3`
; feeding both the icmp and the switch should fold into a flag-setting ANDS so
; no separate TST/CMP is emitted (except on Thumb1, which has no ANDS-immediate).
; NOTE(review): extraction dropped several original lines (entry block, %and,
; switch cases, terminators); they are reconstructed below — verify vs history.
define %struct.Foo* @foo(%struct.Foo* %this, i32 %acc) nounwind readonly align 2 {
entry:
  %scevgep = getelementptr %struct.Foo, %struct.Foo* %this, i32 1
  br label %tailrecurse

tailrecurse:                                      ; preds = %sw.bb, %entry
  %lsr.iv2 = phi %struct.Foo* [ %scevgep3, %sw.bb ], [ %scevgep, %entry ]
  %lsr.iv = phi i32 [ %lsr.iv.next, %sw.bb ], [ 1, %entry ]
  %acc.tr = phi i32 [ %or, %sw.bb ], [ %acc, %entry ]
  %lsr.iv24 = bitcast %struct.Foo* %lsr.iv2 to i8**
  %scevgep5 = getelementptr i8*, i8** %lsr.iv24, i32 -1
  %tmp2 = load i8*, i8** %scevgep5
  %0 = ptrtoint i8* %tmp2 to i32

; ARM: ands {{r[0-9]+}}, {{r[0-9]+}}, #3
; ARM-NEXT: beq

; THUMB: movs r[[R0:[0-9]+]], #3
; THUMB-NEXT: ands r[[R0]], r
; THUMB-NEXT: cmp r[[R0]], #0
; THUMB-NEXT: beq

; T2: ands {{r[0-9]+}}, {{r[0-9]+}}, #3
; T2-NEXT: beq

  %and = and i32 %0, 3
  %tst = icmp eq i32 %and, 0
  br i1 %tst, label %sw.bb, label %tailrecurse.switch

tailrecurse.switch:                               ; preds = %tailrecurse
; V8-LABEL: %tailrecurse.switch
; V8: cmp
; V8-NEXT: bne
; V8-NEXT: %tailrecurse.switch
; V8: cmp
; V8-NEXT: bne
; V8-NEXT: %tailrecurse.switch
; V8: cmp
; V8-NEXT: beq
; V8-NEXT: b 
; The trailing space in the last line checks that the branch is unconditional
  switch i32 %and, label %sw.epilog [
    i32 1, label %sw.bb
    i32 3, label %sw.bb6
    i32 2, label %sw.bb8
  ], !prof !1

sw.bb:                                            ; preds = %tailrecurse.switch, %tailrecurse
  %shl = shl i32 %acc.tr, 1
  %or = or i32 %and, %shl
  %lsr.iv.next = add i32 %lsr.iv, 1
  %scevgep3 = getelementptr %struct.Foo, %struct.Foo* %lsr.iv2, i32 1
  br label %tailrecurse

sw.bb6:                                           ; preds = %tailrecurse.switch
  ret %struct.Foo* %lsr.iv2

sw.bb8:                                           ; preds = %tailrecurse.switch
  %tmp1 = add i32 %acc.tr, %lsr.iv
  %add.ptr11 = getelementptr inbounds %struct.Foo, %struct.Foo* %this, i32 %tmp1
  ret %struct.Foo* %add.ptr11

sw.epilog:                                        ; preds = %tailrecurse.switch
  ret %struct.Foo* undef
}
; Another test that exercises the AND/TST peephole optimization and also
; generates a predicated ANDS instruction. Check that the predicate is printed
; after the "S" modifier on the instruction.
; Struct with a function pointer and a one-byte flags field (the byte @bar tests).
%struct.S = type { i8* (i8*)*, [1 x i8] }
; Tests the masked flag bytes of %x and %y; the second AND is emitted under a
; predicate, so this checks the predicate prints after the "S" (ANDS) modifier.
; NOTE(review): extraction dropped lines here (block labels, the %3/%8 AND
; definitions, the CHECK lines, and the return block's phi/ret). The mask value
; and phi incomings below are reconstructed from upstream history — verify.
define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
entry:
  %0 = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1, i32 0
  %1 = load i8, i8* %0, align 1
  %2 = zext i8 %1 to i32
  %3 = and i32 %2, 112
  %4 = icmp eq i32 %3, 0
  br i1 %4, label %return, label %bb

bb:                                               ; preds = %entry
  %5 = getelementptr inbounds %struct.S, %struct.S* %y, i32 0, i32 1, i32 0
  %6 = load i8, i8* %5, align 1
  %7 = zext i8 %6 to i32
  %8 = and i32 %7, 112
  %9 = icmp eq i32 %8, 0
  br i1 %9, label %return, label %bb2

bb2:                                              ; preds = %bb
  %10 = icmp eq i32 %3, 16
  %11 = icmp eq i32 %8, 16
  %or.cond = or i1 %10, %11
  br i1 %or.cond, label %bb4, label %return

bb4:                                              ; preds = %bb2
  %12 = ptrtoint %struct.S* %x to i32
  %phitmp = trunc i32 %12 to i8
  ret i8 %phitmp

return:                                           ; preds = %bb2, %bb, %entry
  %retval.0 = phi i8 [ 0, %bb2 ], [ 1, %bb ], [ 1, %entry ]
  ret i8 %retval.0
}
; We were looking through multiple COPY instructions to find an AND we might
; fold into a TST, but in doing so we changed the register being tested allowing
; folding of unrelated tests (in this case, a TST against r1 was eliminated in
; favour of an AND of r0).

; ARM-LABEL: test_tst_assessment:
; THUMB-LABEL: test_tst_assessment:
; T2-LABEL: test_tst_assessment:
; V8-LABEL: test_tst_assessment:
; Regression test: the TST against r1 must not be folded into the AND of r0
; (both operands are zext'd i1s, but they are distinct registers).
; NOTE(review): extraction dropped the ARM/T2/V8 CHECK lines and the function
; terminator; reconstructed below — verify against upstream history.
define i32 @test_tst_assessment(i1 %lhs, i1 %rhs) {
  %lhs32 = zext i1 %lhs to i32
  %rhs32 = zext i1 %rhs to i32
  %diff = sub nsw i32 %lhs32, %rhs32
; ARM: tst r1, #1
; THUMB: movs [[RTMP:r[0-9]+]], #1
; THUMB: tst r1, [[RTMP]]
; T2: tst.w r1, #1
; V8: tst.w r1, #1
  ret i32 %diff
}
; Branch weights for @foo's switch: first weight is the default destination,
; then the cases in order.
!1 = !{!"branch_weights", i32 1, i32 1, i32 3, i32 2 }