; NOTE(review): this chunk appears extraction-garbled — every IR line is
; interleaved with a bare source-line-number line ("1", "2", ...). Those
; numeric lines are not valid LLVM assembly and presumably come from a bad
; extraction; they are preserved untouched here — TODO confirm against the
; original test (legacy-syntax X86 Atomics.ll) and strip them.
1
; RUN: llc < %s -march=x86 > %t
2
;; Note the 64-bit variants are not supported yet (in 32-bit mode).
3
; ModuleID = 'Atomics.c'
4
; 32-bit target: the datalayout declares 32-bit pointers, so C "long" appears
; as i32 in the globals below.
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
5
target triple = "i386-apple-darwin8"
6
; Globals used as the atomic operands throughout the test. The names
; presumably mirror the C types in Atomics.c (sc/uc = signed/unsigned char,
; ss/us = short, si/ui = int, sl/ul = long) — judging by the names and the
; i8/i16/i32 widths; confirm against the original C source.
@sc = common global i8 0 ; <i8*> [#uses=52]
7
@uc = common global i8 0 ; <i8*> [#uses=100]
8
@ss = common global i16 0 ; <i16*> [#uses=15]
9
@us = common global i16 0 ; <i16*> [#uses=15]
10
@si = common global i32 0 ; <i32*> [#uses=15]
11
@ui = common global i32 0 ; <i32*> [#uses=23]
12
@sl = common global i32 0 ; <i32*> [#uses=15]
13
@ul = common global i32 0 ; <i32*> [#uses=15]
15
define void @test_op_ignore() nounwind {
17
call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
18
call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
19
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
20
call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
21
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
22
call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
23
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
24
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
25
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
26
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
27
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
28
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=0]
29
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
30
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=0]
31
call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0]
32
call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0]
33
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
34
call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0]
35
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
36
call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0]
37
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
38
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0]
39
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
40
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0]
41
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
42
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
43
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
44
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0]
45
call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0]
46
call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0]
47
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
48
call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0]
49
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
50
call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0]
51
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
52
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0]
53
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
54
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0]
55
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
56
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 ) ; <i32>:39 [#uses=0]
57
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
58
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 ) ; <i32>:41 [#uses=0]
59
call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0]
60
call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0]
61
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
62
call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0]
63
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
64
call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0]
65
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
66
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0]
67
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
68
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0]
69
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
70
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 ) ; <i32>:53 [#uses=0]
71
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
72
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 ) ; <i32>:55 [#uses=0]
73
call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0]
74
call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0]
75
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
76
call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
77
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
78
call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0]
79
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
80
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
81
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
82
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0]
83
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
84
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 ) ; <i32>:67 [#uses=0]
85
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
86
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 ) ; <i32>:69 [#uses=0]
87
call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0]
88
call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0]
89
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
90
call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0]
91
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
92
call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
93
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
94
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0]
95
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
96
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
97
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
98
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0]
99
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
100
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 ) ; <i32>:83 [#uses=0]
103
return: ; preds = %entry
107
; Declarations of the legacy (pre-LLVM-3.0) atomic read-modify-write
; intrinsics exercised by the functions above. Each presumably returns the
; value at the pointer BEFORE the operation (the op-and-fetch callers above
; re-apply the operation to the call result before storing, which matches
; that contract) — modern IR expresses these with the `atomicrmw`
; instruction instead. The bare numeric lines are extraction garbling; see
; the note at the top of the file.
declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
109
declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
111
declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
113
declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
115
declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
117
declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
119
declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
121
declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
123
declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
125
declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
127
declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
129
declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
131
declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
133
declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
135
declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
137
declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
139
declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
141
declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
143
define void @test_fetch_and_op() nounwind {
145
call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
146
store i8 %0, i8* @sc, align 1
147
call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
148
store i8 %1, i8* @uc, align 1
149
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
150
call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
151
store i16 %3, i16* @ss, align 2
152
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
153
call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
154
store i16 %5, i16* @us, align 2
155
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
156
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
157
store i32 %7, i32* @si, align 4
158
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
159
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
160
store i32 %9, i32* @ui, align 4
161
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
162
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 ) ; <i32>:11 [#uses=1]
163
store i32 %11, i32* @sl, align 4
164
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
165
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 ) ; <i32>:13 [#uses=1]
166
store i32 %13, i32* @ul, align 4
167
call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1]
168
store i8 %14, i8* @sc, align 1
169
call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1]
170
store i8 %15, i8* @uc, align 1
171
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
172
call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1]
173
store i16 %17, i16* @ss, align 2
174
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
175
call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1]
176
store i16 %19, i16* @us, align 2
177
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
178
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1]
179
store i32 %21, i32* @si, align 4
180
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
181
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1]
182
store i32 %23, i32* @ui, align 4
183
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
184
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1]
185
store i32 %25, i32* @sl, align 4
186
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
187
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1]
188
store i32 %27, i32* @ul, align 4
189
call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1]
190
store i8 %28, i8* @sc, align 1
191
call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1]
192
store i8 %29, i8* @uc, align 1
193
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
194
call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1]
195
store i16 %31, i16* @ss, align 2
196
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
197
call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1]
198
store i16 %33, i16* @us, align 2
199
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
200
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1]
201
store i32 %35, i32* @si, align 4
202
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
203
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1]
204
store i32 %37, i32* @ui, align 4
205
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
206
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 ) ; <i32>:39 [#uses=1]
207
store i32 %39, i32* @sl, align 4
208
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
209
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 ) ; <i32>:41 [#uses=1]
210
store i32 %41, i32* @ul, align 4
211
call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1]
212
store i8 %42, i8* @sc, align 1
213
call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1]
214
store i8 %43, i8* @uc, align 1
215
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
216
call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1]
217
store i16 %45, i16* @ss, align 2
218
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
219
call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1]
220
store i16 %47, i16* @us, align 2
221
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
222
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1]
223
store i32 %49, i32* @si, align 4
224
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
225
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1]
226
store i32 %51, i32* @ui, align 4
227
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
228
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 ) ; <i32>:53 [#uses=1]
229
store i32 %53, i32* @sl, align 4
230
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
231
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 ) ; <i32>:55 [#uses=1]
232
store i32 %55, i32* @ul, align 4
233
call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1]
234
store i8 %56, i8* @sc, align 1
235
call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1]
236
store i8 %57, i8* @uc, align 1
237
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
238
call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
239
store i16 %59, i16* @ss, align 2
240
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
241
call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1]
242
store i16 %61, i16* @us, align 2
243
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
244
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
245
store i32 %63, i32* @si, align 4
246
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
247
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1]
248
store i32 %65, i32* @ui, align 4
249
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
250
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 ) ; <i32>:67 [#uses=1]
251
store i32 %67, i32* @sl, align 4
252
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
253
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 ) ; <i32>:69 [#uses=1]
254
store i32 %69, i32* @ul, align 4
255
call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1]
256
store i8 %70, i8* @sc, align 1
257
call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1]
258
store i8 %71, i8* @uc, align 1
259
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
260
call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1]
261
store i16 %73, i16* @ss, align 2
262
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
263
call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
264
store i16 %75, i16* @us, align 2
265
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
266
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1]
267
store i32 %77, i32* @si, align 4
268
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
269
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
270
store i32 %79, i32* @ui, align 4
271
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
272
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1]
273
store i32 %81, i32* @sl, align 4
274
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
275
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 ) ; <i32>:83 [#uses=1]
276
store i32 %83, i32* @ul, align 4
279
return: ; preds = %entry
283
define void @test_op_and_fetch() nounwind {
285
load i8* @uc, align 1 ; <i8>:0 [#uses=1]
286
zext i8 %0 to i32 ; <i32>:1 [#uses=1]
287
trunc i32 %1 to i8 ; <i8>:2 [#uses=2]
288
call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; <i8>:3 [#uses=1]
289
add i8 %3, %2 ; <i8>:4 [#uses=1]
290
store i8 %4, i8* @sc, align 1
291
load i8* @uc, align 1 ; <i8>:5 [#uses=1]
292
zext i8 %5 to i32 ; <i32>:6 [#uses=1]
293
trunc i32 %6 to i8 ; <i8>:7 [#uses=2]
294
call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; <i8>:8 [#uses=1]
295
add i8 %8, %7 ; <i8>:9 [#uses=1]
296
store i8 %9, i8* @uc, align 1
297
load i8* @uc, align 1 ; <i8>:10 [#uses=1]
298
zext i8 %10 to i32 ; <i32>:11 [#uses=1]
299
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:12 [#uses=1]
300
trunc i32 %11 to i16 ; <i16>:13 [#uses=2]
301
call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; <i16>:14 [#uses=1]
302
add i16 %14, %13 ; <i16>:15 [#uses=1]
303
store i16 %15, i16* @ss, align 2
304
load i8* @uc, align 1 ; <i8>:16 [#uses=1]
305
zext i8 %16 to i32 ; <i32>:17 [#uses=1]
306
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
307
trunc i32 %17 to i16 ; <i16>:19 [#uses=2]
308
call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; <i16>:20 [#uses=1]
309
add i16 %20, %19 ; <i16>:21 [#uses=1]
310
store i16 %21, i16* @us, align 2
311
load i8* @uc, align 1 ; <i8>:22 [#uses=1]
312
zext i8 %22 to i32 ; <i32>:23 [#uses=2]
313
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
314
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; <i32>:25 [#uses=1]
315
add i32 %25, %23 ; <i32>:26 [#uses=1]
316
store i32 %26, i32* @si, align 4
317
load i8* @uc, align 1 ; <i8>:27 [#uses=1]
318
zext i8 %27 to i32 ; <i32>:28 [#uses=2]
319
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:29 [#uses=1]
320
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; <i32>:30 [#uses=1]
321
add i32 %30, %28 ; <i32>:31 [#uses=1]
322
store i32 %31, i32* @ui, align 4
323
load i8* @uc, align 1 ; <i8>:32 [#uses=1]
324
zext i8 %32 to i32 ; <i32>:33 [#uses=2]
325
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:34 [#uses=1]
326
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %34, i32 %33 ) ; <i32>:35 [#uses=1]
327
add i32 %35, %33 ; <i32>:36 [#uses=1]
328
store i32 %36, i32* @sl, align 4
329
load i8* @uc, align 1 ; <i8>:37 [#uses=1]
330
zext i8 %37 to i32 ; <i32>:38 [#uses=2]
331
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:39 [#uses=1]
332
call i32 @llvm.atomic.load.add.i32.p0i32( i32* %39, i32 %38 ) ; <i32>:40 [#uses=1]
333
add i32 %40, %38 ; <i32>:41 [#uses=1]
334
store i32 %41, i32* @ul, align 4
335
load i8* @uc, align 1 ; <i8>:42 [#uses=1]
336
zext i8 %42 to i32 ; <i32>:43 [#uses=1]
337
trunc i32 %43 to i8 ; <i8>:44 [#uses=2]
338
call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %44 ) ; <i8>:45 [#uses=1]
339
sub i8 %45, %44 ; <i8>:46 [#uses=1]
340
store i8 %46, i8* @sc, align 1
341
load i8* @uc, align 1 ; <i8>:47 [#uses=1]
342
zext i8 %47 to i32 ; <i32>:48 [#uses=1]
343
trunc i32 %48 to i8 ; <i8>:49 [#uses=2]
344
call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %49 ) ; <i8>:50 [#uses=1]
345
sub i8 %50, %49 ; <i8>:51 [#uses=1]
346
store i8 %51, i8* @uc, align 1
347
load i8* @uc, align 1 ; <i8>:52 [#uses=1]
348
zext i8 %52 to i32 ; <i32>:53 [#uses=1]
349
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:54 [#uses=1]
350
trunc i32 %53 to i16 ; <i16>:55 [#uses=2]
351
call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %54, i16 %55 ) ; <i16>:56 [#uses=1]
352
sub i16 %56, %55 ; <i16>:57 [#uses=1]
353
store i16 %57, i16* @ss, align 2
354
load i8* @uc, align 1 ; <i8>:58 [#uses=1]
355
zext i8 %58 to i32 ; <i32>:59 [#uses=1]
356
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
357
trunc i32 %59 to i16 ; <i16>:61 [#uses=2]
358
call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %60, i16 %61 ) ; <i16>:62 [#uses=1]
359
sub i16 %62, %61 ; <i16>:63 [#uses=1]
360
store i16 %63, i16* @us, align 2
361
load i8* @uc, align 1 ; <i8>:64 [#uses=1]
362
zext i8 %64 to i32 ; <i32>:65 [#uses=2]
363
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:66 [#uses=1]
364
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %66, i32 %65 ) ; <i32>:67 [#uses=1]
365
sub i32 %67, %65 ; <i32>:68 [#uses=1]
366
store i32 %68, i32* @si, align 4
367
load i8* @uc, align 1 ; <i8>:69 [#uses=1]
368
zext i8 %69 to i32 ; <i32>:70 [#uses=2]
369
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:71 [#uses=1]
370
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %71, i32 %70 ) ; <i32>:72 [#uses=1]
371
sub i32 %72, %70 ; <i32>:73 [#uses=1]
372
store i32 %73, i32* @ui, align 4
373
load i8* @uc, align 1 ; <i8>:74 [#uses=1]
374
zext i8 %74 to i32 ; <i32>:75 [#uses=2]
375
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:76 [#uses=1]
376
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; <i32>:77 [#uses=1]
377
sub i32 %77, %75 ; <i32>:78 [#uses=1]
378
store i32 %78, i32* @sl, align 4
379
load i8* @uc, align 1 ; <i8>:79 [#uses=1]
380
zext i8 %79 to i32 ; <i32>:80 [#uses=2]
381
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:81 [#uses=1]
382
call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; <i32>:82 [#uses=1]
383
sub i32 %82, %80 ; <i32>:83 [#uses=1]
384
store i32 %83, i32* @ul, align 4
385
load i8* @uc, align 1 ; <i8>:84 [#uses=1]
386
zext i8 %84 to i32 ; <i32>:85 [#uses=1]
387
trunc i32 %85 to i8 ; <i8>:86 [#uses=2]
388
call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %86 ) ; <i8>:87 [#uses=1]
389
or i8 %87, %86 ; <i8>:88 [#uses=1]
390
store i8 %88, i8* @sc, align 1
391
load i8* @uc, align 1 ; <i8>:89 [#uses=1]
392
zext i8 %89 to i32 ; <i32>:90 [#uses=1]
393
trunc i32 %90 to i8 ; <i8>:91 [#uses=2]
394
call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %91 ) ; <i8>:92 [#uses=1]
395
or i8 %92, %91 ; <i8>:93 [#uses=1]
396
store i8 %93, i8* @uc, align 1
397
load i8* @uc, align 1 ; <i8>:94 [#uses=1]
398
zext i8 %94 to i32 ; <i32>:95 [#uses=1]
399
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:96 [#uses=1]
400
trunc i32 %95 to i16 ; <i16>:97 [#uses=2]
401
call i16 @llvm.atomic.load.or.i16.p0i16( i16* %96, i16 %97 ) ; <i16>:98 [#uses=1]
402
or i16 %98, %97 ; <i16>:99 [#uses=1]
403
store i16 %99, i16* @ss, align 2
404
load i8* @uc, align 1 ; <i8>:100 [#uses=1]
405
zext i8 %100 to i32 ; <i32>:101 [#uses=1]
406
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:102 [#uses=1]
407
trunc i32 %101 to i16 ; <i16>:103 [#uses=2]
408
call i16 @llvm.atomic.load.or.i16.p0i16( i16* %102, i16 %103 ) ; <i16>:104 [#uses=1]
409
or i16 %104, %103 ; <i16>:105 [#uses=1]
410
store i16 %105, i16* @us, align 2
411
load i8* @uc, align 1 ; <i8>:106 [#uses=1]
412
zext i8 %106 to i32 ; <i32>:107 [#uses=2]
413
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:108 [#uses=1]
414
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %108, i32 %107 ) ; <i32>:109 [#uses=1]
415
or i32 %109, %107 ; <i32>:110 [#uses=1]
416
store i32 %110, i32* @si, align 4
417
load i8* @uc, align 1 ; <i8>:111 [#uses=1]
418
zext i8 %111 to i32 ; <i32>:112 [#uses=2]
419
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:113 [#uses=1]
420
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %113, i32 %112 ) ; <i32>:114 [#uses=1]
421
or i32 %114, %112 ; <i32>:115 [#uses=1]
422
store i32 %115, i32* @ui, align 4
423
load i8* @uc, align 1 ; <i8>:116 [#uses=1]
424
zext i8 %116 to i32 ; <i32>:117 [#uses=2]
425
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:118 [#uses=1]
426
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %118, i32 %117 ) ; <i32>:119 [#uses=1]
427
or i32 %119, %117 ; <i32>:120 [#uses=1]
428
store i32 %120, i32* @sl, align 4
429
load i8* @uc, align 1 ; <i8>:121 [#uses=1]
430
zext i8 %121 to i32 ; <i32>:122 [#uses=2]
431
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:123 [#uses=1]
432
call i32 @llvm.atomic.load.or.i32.p0i32( i32* %123, i32 %122 ) ; <i32>:124 [#uses=1]
433
or i32 %124, %122 ; <i32>:125 [#uses=1]
434
store i32 %125, i32* @ul, align 4
435
load i8* @uc, align 1 ; <i8>:126 [#uses=1]
436
zext i8 %126 to i32 ; <i32>:127 [#uses=1]
437
trunc i32 %127 to i8 ; <i8>:128 [#uses=2]
438
call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %128 ) ; <i8>:129 [#uses=1]
439
xor i8 %129, %128 ; <i8>:130 [#uses=1]
440
store i8 %130, i8* @sc, align 1
441
load i8* @uc, align 1 ; <i8>:131 [#uses=1]
442
zext i8 %131 to i32 ; <i32>:132 [#uses=1]
443
trunc i32 %132 to i8 ; <i8>:133 [#uses=2]
444
call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %133 ) ; <i8>:134 [#uses=1]
445
xor i8 %134, %133 ; <i8>:135 [#uses=1]
446
store i8 %135, i8* @uc, align 1
447
load i8* @uc, align 1 ; <i8>:136 [#uses=1]
448
zext i8 %136 to i32 ; <i32>:137 [#uses=1]
449
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:138 [#uses=1]
450
trunc i32 %137 to i16 ; <i16>:139 [#uses=2]
451
call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %138, i16 %139 ) ; <i16>:140 [#uses=1]
452
xor i16 %140, %139 ; <i16>:141 [#uses=1]
453
store i16 %141, i16* @ss, align 2
454
load i8* @uc, align 1 ; <i8>:142 [#uses=1]
455
zext i8 %142 to i32 ; <i32>:143 [#uses=1]
456
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:144 [#uses=1]
457
trunc i32 %143 to i16 ; <i16>:145 [#uses=2]
458
call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %144, i16 %145 ) ; <i16>:146 [#uses=1]
459
xor i16 %146, %145 ; <i16>:147 [#uses=1]
460
store i16 %147, i16* @us, align 2
461
load i8* @uc, align 1 ; <i8>:148 [#uses=1]
462
zext i8 %148 to i32 ; <i32>:149 [#uses=2]
463
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:150 [#uses=1]
464
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %150, i32 %149 ) ; <i32>:151 [#uses=1]
465
xor i32 %151, %149 ; <i32>:152 [#uses=1]
466
store i32 %152, i32* @si, align 4
467
load i8* @uc, align 1 ; <i8>:153 [#uses=1]
468
zext i8 %153 to i32 ; <i32>:154 [#uses=2]
469
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:155 [#uses=1]
470
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %155, i32 %154 ) ; <i32>:156 [#uses=1]
471
xor i32 %156, %154 ; <i32>:157 [#uses=1]
472
store i32 %157, i32* @ui, align 4
473
load i8* @uc, align 1 ; <i8>:158 [#uses=1]
474
zext i8 %158 to i32 ; <i32>:159 [#uses=2]
475
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:160 [#uses=1]
476
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %160, i32 %159 ) ; <i32>:161 [#uses=1]
477
xor i32 %161, %159 ; <i32>:162 [#uses=1]
478
store i32 %162, i32* @sl, align 4
479
load i8* @uc, align 1 ; <i8>:163 [#uses=1]
480
zext i8 %163 to i32 ; <i32>:164 [#uses=2]
481
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:165 [#uses=1]
482
call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %165, i32 %164 ) ; <i32>:166 [#uses=1]
483
xor i32 %166, %164 ; <i32>:167 [#uses=1]
484
store i32 %167, i32* @ul, align 4
485
load i8* @uc, align 1 ; <i8>:168 [#uses=1]
486
zext i8 %168 to i32 ; <i32>:169 [#uses=1]
487
trunc i32 %169 to i8 ; <i8>:170 [#uses=2]
488
call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %170 ) ; <i8>:171 [#uses=1]
489
and i8 %171, %170 ; <i8>:172 [#uses=1]
490
store i8 %172, i8* @sc, align 1
491
load i8* @uc, align 1 ; <i8>:173 [#uses=1]
492
zext i8 %173 to i32 ; <i32>:174 [#uses=1]
493
trunc i32 %174 to i8 ; <i8>:175 [#uses=2]
494
call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %175 ) ; <i8>:176 [#uses=1]
495
and i8 %176, %175 ; <i8>:177 [#uses=1]
496
store i8 %177, i8* @uc, align 1
497
load i8* @uc, align 1 ; <i8>:178 [#uses=1]
498
zext i8 %178 to i32 ; <i32>:179 [#uses=1]
499
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:180 [#uses=1]
500
trunc i32 %179 to i16 ; <i16>:181 [#uses=2]
501
call i16 @llvm.atomic.load.and.i16.p0i16( i16* %180, i16 %181 ) ; <i16>:182 [#uses=1]
502
and i16 %182, %181 ; <i16>:183 [#uses=1]
503
store i16 %183, i16* @ss, align 2
504
load i8* @uc, align 1 ; <i8>:184 [#uses=1]
505
zext i8 %184 to i32 ; <i32>:185 [#uses=1]
506
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:186 [#uses=1]
507
trunc i32 %185 to i16 ; <i16>:187 [#uses=2]
508
call i16 @llvm.atomic.load.and.i16.p0i16( i16* %186, i16 %187 ) ; <i16>:188 [#uses=1]
509
and i16 %188, %187 ; <i16>:189 [#uses=1]
510
store i16 %189, i16* @us, align 2
511
load i8* @uc, align 1 ; <i8>:190 [#uses=1]
512
zext i8 %190 to i32 ; <i32>:191 [#uses=2]
513
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:192 [#uses=1]
514
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %192, i32 %191 ) ; <i32>:193 [#uses=1]
515
and i32 %193, %191 ; <i32>:194 [#uses=1]
516
store i32 %194, i32* @si, align 4
517
load i8* @uc, align 1 ; <i8>:195 [#uses=1]
518
zext i8 %195 to i32 ; <i32>:196 [#uses=2]
519
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:197 [#uses=1]
520
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %197, i32 %196 ) ; <i32>:198 [#uses=1]
521
and i32 %198, %196 ; <i32>:199 [#uses=1]
522
store i32 %199, i32* @ui, align 4
523
load i8* @uc, align 1 ; <i8>:200 [#uses=1]
524
zext i8 %200 to i32 ; <i32>:201 [#uses=2]
525
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:202 [#uses=1]
526
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1]
527
and i32 %203, %201 ; <i32>:204 [#uses=1]
528
store i32 %204, i32* @sl, align 4
529
load i8* @uc, align 1 ; <i8>:205 [#uses=1]
530
zext i8 %205 to i32 ; <i32>:206 [#uses=2]
531
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:207 [#uses=1]
532
call i32 @llvm.atomic.load.and.i32.p0i32( i32* %207, i32 %206 ) ; <i32>:208 [#uses=1]
533
and i32 %208, %206 ; <i32>:209 [#uses=1]
534
store i32 %209, i32* @ul, align 4
535
load i8* @uc, align 1 ; <i8>:210 [#uses=1]
536
zext i8 %210 to i32 ; <i32>:211 [#uses=1]
537
trunc i32 %211 to i8 ; <i8>:212 [#uses=2]
538
call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %212 ) ; <i8>:213 [#uses=1]
539
xor i8 %213, -1 ; <i8>:214 [#uses=1]
540
and i8 %214, %212 ; <i8>:215 [#uses=1]
541
store i8 %215, i8* @sc, align 1
542
load i8* @uc, align 1 ; <i8>:216 [#uses=1]
543
zext i8 %216 to i32 ; <i32>:217 [#uses=1]
544
trunc i32 %217 to i8 ; <i8>:218 [#uses=2]
545
call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %218 ) ; <i8>:219 [#uses=1]
546
xor i8 %219, -1 ; <i8>:220 [#uses=1]
547
and i8 %220, %218 ; <i8>:221 [#uses=1]
548
store i8 %221, i8* @uc, align 1
549
load i8* @uc, align 1 ; <i8>:222 [#uses=1]
550
zext i8 %222 to i32 ; <i32>:223 [#uses=1]
551
bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:224 [#uses=1]
552
trunc i32 %223 to i16 ; <i16>:225 [#uses=2]
553
call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %224, i16 %225 ) ; <i16>:226 [#uses=1]
554
xor i16 %226, -1 ; <i16>:227 [#uses=1]
555
and i16 %227, %225 ; <i16>:228 [#uses=1]
556
store i16 %228, i16* @ss, align 2
557
load i8* @uc, align 1 ; <i8>:229 [#uses=1]
558
zext i8 %229 to i32 ; <i32>:230 [#uses=1]
559
bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:231 [#uses=1]
560
trunc i32 %230 to i16 ; <i16>:232 [#uses=2]
561
call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %231, i16 %232 ) ; <i16>:233 [#uses=1]
562
xor i16 %233, -1 ; <i16>:234 [#uses=1]
563
and i16 %234, %232 ; <i16>:235 [#uses=1]
564
store i16 %235, i16* @us, align 2
565
load i8* @uc, align 1 ; <i8>:236 [#uses=1]
566
zext i8 %236 to i32 ; <i32>:237 [#uses=2]
567
bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:238 [#uses=1]
568
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %238, i32 %237 ) ; <i32>:239 [#uses=1]
569
xor i32 %239, -1 ; <i32>:240 [#uses=1]
570
and i32 %240, %237 ; <i32>:241 [#uses=1]
571
store i32 %241, i32* @si, align 4
572
load i8* @uc, align 1 ; <i8>:242 [#uses=1]
573
zext i8 %242 to i32 ; <i32>:243 [#uses=2]
574
bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:244 [#uses=1]
575
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %244, i32 %243 ) ; <i32>:245 [#uses=1]
576
xor i32 %245, -1 ; <i32>:246 [#uses=1]
577
and i32 %246, %243 ; <i32>:247 [#uses=1]
578
store i32 %247, i32* @ui, align 4
579
load i8* @uc, align 1 ; <i8>:248 [#uses=1]
580
zext i8 %248 to i32 ; <i32>:249 [#uses=2]
581
bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:250 [#uses=1]
582
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %250, i32 %249 ) ; <i32>:251 [#uses=1]
583
xor i32 %251, -1 ; <i32>:252 [#uses=1]
584
and i32 %252, %249 ; <i32>:253 [#uses=1]
585
store i32 %253, i32* @sl, align 4
586
load i8* @uc, align 1 ; <i8>:254 [#uses=1]
587
zext i8 %254 to i32 ; <i32>:255 [#uses=2]
588
bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:256 [#uses=1]
589
call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %256, i32 %255 ) ; <i32>:257 [#uses=1]
590
xor i32 %257, -1 ; <i32>:258 [#uses=1]
591
and i32 %258, %255 ; <i32>:259 [#uses=1]
592
store i32 %259, i32* @ul, align 4
595
return: ; preds = %entry
599
; Exercise __sync_val_compare_and_swap (result stored back) and
; __sync_bool_compare_and_swap (result compared, i1 widened into @ui)
; for i8/i16/i32 widths using the pre-2.9 llvm.atomic.cmp.swap intrinsics.
define void @test_compare_and_swap() nounwind {
entry:
  ; val_compare_and_swap: swap result is stored back to the operand global.
  load i8* @sc, align 1 ; <i8>:0 [#uses=1]
  zext i8 %0 to i32 ; <i32>:1 [#uses=1]
  load i8* @uc, align 1 ; <i8>:2 [#uses=1]
  zext i8 %2 to i32 ; <i32>:3 [#uses=1]
  trunc i32 %3 to i8 ; <i8>:4 [#uses=1]
  trunc i32 %1 to i8 ; <i8>:5 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; <i8>:6 [#uses=1]
  store i8 %6, i8* @sc, align 1
  load i8* @sc, align 1 ; <i8>:7 [#uses=1]
  zext i8 %7 to i32 ; <i32>:8 [#uses=1]
  load i8* @uc, align 1 ; <i8>:9 [#uses=1]
  zext i8 %9 to i32 ; <i32>:10 [#uses=1]
  trunc i32 %10 to i8 ; <i8>:11 [#uses=1]
  trunc i32 %8 to i8 ; <i8>:12 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; <i8>:13 [#uses=1]
  store i8 %13, i8* @uc, align 1
  load i8* @sc, align 1 ; <i8>:14 [#uses=1]
  sext i8 %14 to i16 ; <i16>:15 [#uses=1]
  zext i16 %15 to i32 ; <i32>:16 [#uses=1]
  load i8* @uc, align 1 ; <i8>:17 [#uses=1]
  zext i8 %17 to i32 ; <i32>:18 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:19 [#uses=1]
  trunc i32 %18 to i16 ; <i16>:20 [#uses=1]
  trunc i32 %16 to i16 ; <i16>:21 [#uses=1]
  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; <i16>:22 [#uses=1]
  store i16 %22, i16* @ss, align 2
  load i8* @sc, align 1 ; <i8>:23 [#uses=1]
  sext i8 %23 to i16 ; <i16>:24 [#uses=1]
  zext i16 %24 to i32 ; <i32>:25 [#uses=1]
  load i8* @uc, align 1 ; <i8>:26 [#uses=1]
  zext i8 %26 to i32 ; <i32>:27 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:28 [#uses=1]
  trunc i32 %27 to i16 ; <i16>:29 [#uses=1]
  trunc i32 %25 to i16 ; <i16>:30 [#uses=1]
  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; <i16>:31 [#uses=1]
  store i16 %31, i16* @us, align 2
  load i8* @sc, align 1 ; <i8>:32 [#uses=1]
  sext i8 %32 to i32 ; <i32>:33 [#uses=1]
  load i8* @uc, align 1 ; <i8>:34 [#uses=1]
  zext i8 %34 to i32 ; <i32>:35 [#uses=1]
  bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:36 [#uses=1]
  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; <i32>:37 [#uses=1]
  store i32 %37, i32* @si, align 4
  load i8* @sc, align 1 ; <i8>:38 [#uses=1]
  sext i8 %38 to i32 ; <i32>:39 [#uses=1]
  load i8* @uc, align 1 ; <i8>:40 [#uses=1]
  zext i8 %40 to i32 ; <i32>:41 [#uses=1]
  bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:42 [#uses=1]
  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; <i32>:43 [#uses=1]
  store i32 %43, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:44 [#uses=1]
  sext i8 %44 to i32 ; <i32>:45 [#uses=1]
  load i8* @uc, align 1 ; <i8>:46 [#uses=1]
  zext i8 %46 to i32 ; <i32>:47 [#uses=1]
  bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:48 [#uses=1]
  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %48, i32 %47, i32 %45 ) ; <i32>:49 [#uses=1]
  store i32 %49, i32* @sl, align 4
  load i8* @sc, align 1 ; <i8>:50 [#uses=1]
  sext i8 %50 to i32 ; <i32>:51 [#uses=1]
  load i8* @uc, align 1 ; <i8>:52 [#uses=1]
  zext i8 %52 to i32 ; <i32>:53 [#uses=1]
  bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %54, i32 %53, i32 %51 ) ; <i32>:55 [#uses=1]
  store i32 %55, i32* @ul, align 4
  ; bool_compare_and_swap: compare the returned old value against the
  ; expected value and store the widened i1 success flag into @ui.
  load i8* @sc, align 1 ; <i8>:56 [#uses=1]
  zext i8 %56 to i32 ; <i32>:57 [#uses=1]
  load i8* @uc, align 1 ; <i8>:58 [#uses=1]
  zext i8 %58 to i32 ; <i32>:59 [#uses=1]
  trunc i32 %59 to i8 ; <i8>:60 [#uses=2]
  trunc i32 %57 to i8 ; <i8>:61 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %60, i8 %61 ) ; <i8>:62 [#uses=1]
  icmp eq i8 %62, %60 ; <i1>:63 [#uses=1]
  zext i1 %63 to i8 ; <i8>:64 [#uses=1]
  zext i8 %64 to i32 ; <i32>:65 [#uses=1]
  store i32 %65, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:66 [#uses=1]
  zext i8 %66 to i32 ; <i32>:67 [#uses=1]
  load i8* @uc, align 1 ; <i8>:68 [#uses=1]
  zext i8 %68 to i32 ; <i32>:69 [#uses=1]
  trunc i32 %69 to i8 ; <i8>:70 [#uses=2]
  trunc i32 %67 to i8 ; <i8>:71 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %70, i8 %71 ) ; <i8>:72 [#uses=1]
  icmp eq i8 %72, %70 ; <i1>:73 [#uses=1]
  zext i1 %73 to i8 ; <i8>:74 [#uses=1]
  zext i8 %74 to i32 ; <i32>:75 [#uses=1]
  store i32 %75, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:76 [#uses=1]
  sext i8 %76 to i16 ; <i16>:77 [#uses=1]
  zext i16 %77 to i32 ; <i32>:78 [#uses=1]
  load i8* @uc, align 1 ; <i8>:79 [#uses=1]
  zext i8 %79 to i32 ; <i32>:80 [#uses=1]
  trunc i32 %80 to i8 ; <i8>:81 [#uses=2]
  trunc i32 %78 to i8 ; <i8>:82 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %81, i8 %82 ) ; <i8>:83 [#uses=1]
  icmp eq i8 %83, %81 ; <i1>:84 [#uses=1]
  zext i1 %84 to i8 ; <i8>:85 [#uses=1]
  zext i8 %85 to i32 ; <i32>:86 [#uses=1]
  store i32 %86, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:87 [#uses=1]
  sext i8 %87 to i16 ; <i16>:88 [#uses=1]
  zext i16 %88 to i32 ; <i32>:89 [#uses=1]
  load i8* @uc, align 1 ; <i8>:90 [#uses=1]
  zext i8 %90 to i32 ; <i32>:91 [#uses=1]
  trunc i32 %91 to i8 ; <i8>:92 [#uses=2]
  trunc i32 %89 to i8 ; <i8>:93 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %92, i8 %93 ) ; <i8>:94 [#uses=1]
  icmp eq i8 %94, %92 ; <i1>:95 [#uses=1]
  zext i1 %95 to i8 ; <i8>:96 [#uses=1]
  zext i8 %96 to i32 ; <i32>:97 [#uses=1]
  store i32 %97, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:98 [#uses=1]
  sext i8 %98 to i32 ; <i32>:99 [#uses=1]
  load i8* @uc, align 1 ; <i8>:100 [#uses=1]
  zext i8 %100 to i32 ; <i32>:101 [#uses=1]
  trunc i32 %101 to i8 ; <i8>:102 [#uses=2]
  trunc i32 %99 to i8 ; <i8>:103 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %102, i8 %103 ) ; <i8>:104 [#uses=1]
  icmp eq i8 %104, %102 ; <i1>:105 [#uses=1]
  zext i1 %105 to i8 ; <i8>:106 [#uses=1]
  zext i8 %106 to i32 ; <i32>:107 [#uses=1]
  store i32 %107, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:108 [#uses=1]
  sext i8 %108 to i32 ; <i32>:109 [#uses=1]
  load i8* @uc, align 1 ; <i8>:110 [#uses=1]
  zext i8 %110 to i32 ; <i32>:111 [#uses=1]
  trunc i32 %111 to i8 ; <i8>:112 [#uses=2]
  trunc i32 %109 to i8 ; <i8>:113 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %112, i8 %113 ) ; <i8>:114 [#uses=1]
  icmp eq i8 %114, %112 ; <i1>:115 [#uses=1]
  zext i1 %115 to i8 ; <i8>:116 [#uses=1]
  zext i8 %116 to i32 ; <i32>:117 [#uses=1]
  store i32 %117, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:118 [#uses=1]
  sext i8 %118 to i32 ; <i32>:119 [#uses=1]
  load i8* @uc, align 1 ; <i8>:120 [#uses=1]
  zext i8 %120 to i32 ; <i32>:121 [#uses=1]
  trunc i32 %121 to i8 ; <i8>:122 [#uses=2]
  trunc i32 %119 to i8 ; <i8>:123 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @sl to i8*), i8 %122, i8 %123 ) ; <i8>:124 [#uses=1]
  icmp eq i8 %124, %122 ; <i1>:125 [#uses=1]
  zext i1 %125 to i8 ; <i8>:126 [#uses=1]
  zext i8 %126 to i32 ; <i32>:127 [#uses=1]
  store i32 %127, i32* @ui, align 4
  load i8* @sc, align 1 ; <i8>:128 [#uses=1]
  sext i8 %128 to i32 ; <i32>:129 [#uses=1]
  load i8* @uc, align 1 ; <i8>:130 [#uses=1]
  zext i8 %130 to i32 ; <i32>:131 [#uses=1]
  trunc i32 %131 to i8 ; <i8>:132 [#uses=2]
  trunc i32 %129 to i8 ; <i8>:133 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ul to i8*), i8 %132, i8 %133 ) ; <i8>:134 [#uses=1]
  icmp eq i8 %134, %132 ; <i1>:135 [#uses=1]
  zext i1 %135 to i8 ; <i8>:136 [#uses=1]
  zext i8 %136 to i32 ; <i32>:137 [#uses=1]
  store i32 %137, i32* @ui, align 4
  br label %return

return: ; preds = %entry
  ret void
}
; Pre-2.9 atomic compare-and-swap intrinsics (one per operand width).
declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind

declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind

declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
; Exchange each global with 1 via atomic swap (lock_test_and_set pattern),
; then a full memory barrier followed by volatile zero stores (lock_release).
define void @test_lock() nounwind {
entry:
  call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
  store i8 %0, i8* @sc, align 1
  call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
  store i8 %1, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
  call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
  store i16 %3, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
  call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
  store i16 %5, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
  call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
  store i32 %7, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
  call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
  store i32 %9, i32* @ui, align 4
  bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
  call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=1]
  store i32 %11, i32* @sl, align 4
  bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
  call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=1]
  store i32 %13, i32* @ul, align 4
  call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
  volatile store i8 0, i8* @sc, align 1
  volatile store i8 0, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1]
  volatile store i16 0, i16* %14, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1]
  volatile store i16 0, i16* %15, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1]
  volatile store i32 0, i32* %16, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1]
  volatile store i32 0, i32* %17, align 4
  bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:18 [#uses=1]
  volatile store i32 0, i32* %18, align 4
  bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:19 [#uses=1]
  volatile store i32 0, i32* %19, align 4
  br label %return

return: ; preds = %entry
  ret void
}
; Pre-2.9 atomic swap intrinsics and the five-flag memory barrier
; (load-load, load-store, store-load, store-store, device).
declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind

declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind