/*
 * iwMMXt micro operations for XScale.
 *
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 * Copyright (c) 2008 CodeSourcery
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* QEMU headers providing CPUState, the HELPER() and glue() macros and
   the ARM_IWMMXT_* control register indices used below.  */
#include "cpu.h"
#include "helpers.h"

/* iwMMXt macros extracted from GNU gdb. */

/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations.  */
#define SIMD8_SET(v, n, b)  ((v != 0) << ((((b) + 1) * 4) + (n)))
#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
#define SIMD64_SET(v, n)    ((v != 0) << (32 + (n)))
/* Flags to pass as "n" above.  */
#define SIMD_NBIT -1
#define SIMD_ZBIT -2
#define SIMD_CBIT -3
#define SIMD_VBIT -4
/* Various status bit macros.  */
#define NBIT8(x)  ((x) & 0x80)
#define NBIT16(x) ((x) & 0x8000)
#define NBIT32(x) ((x) & 0x80000000)
#define NBIT64(x) ((x) & 0x8000000000000000ULL)
#define ZBIT8(x)  (((x) & 0xff) == 0)
#define ZBIT16(x) (((x) & 0xffff) == 0)
#define ZBIT32(x) (((x) & 0xffffffff) == 0)
#define ZBIT64(x) ((x) == 0)
/* Sign extension macros.  */
#define EXTEND8H(a)  ((uint16_t) (int8_t) (a))
#define EXTEND8(a)   ((uint32_t) (int8_t) (a))
#define EXTEND16(a)  ((uint32_t) (int16_t) (a))
#define EXTEND16S(a) ((int32_t) (int16_t) (a))
#define EXTEND32(a)  ((uint64_t) (int32_t) (a))
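
/*
 * Example of how the flag macros compose: wCASF holds one 4-bit
 * N/Z/C/V nibble per SIMD field, so SIMD8_SET(v, SIMD_NBIT, 3)
 * deposits a non-zero v at bit ((3 + 1) * 4) - 1 = 15, the N flag of
 * byte field 3, and SIMD8_SET(v, SIMD_ZBIT, 3) lands one bit lower.
 */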

uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b)
{
    a = ((
            EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) +
            EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)
        ) & 0xffffffff) | ((uint64_t) (
            EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) +
            EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff)
        ) << 32);
    return a;
}

uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b)
{
    a = ((
            ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) +
            ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)
        ) & 0xffffffff) | ((
            ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
            ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)
        ) << 32);
    return a;
}
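
/*
 * Both helpers above multiply the four 16-bit fields of a and b
 * pairwise and add each pair of products into one 32-bit half of the
 * result: fields 0 and 1 feed the low word, fields 2 and 3 the high
 * word.  maddsq treats the fields as signed, madduq as unsigned.
 */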

uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b)
{
#define abs(x) (((x) >= 0) ? x : -x)
#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff))
    return
        SADB(0) + SADB(8) + SADB(16) + SADB(24) +
        SADB(32) + SADB(40) + SADB(48) + SADB(56);
#undef SADB
}

uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b)
{
#define SADW(SHR) \
    abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff))
    return SADW(0) + SADW(16) + SADW(32) + SADW(48);
#undef SADW
}
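
/*
 * The local abs() macro above shadows the C library function for the
 * remainder of this file.  SADB and SADW accumulate the absolute
 * differences of all byte (respectively halfword) fields into a
 * single scalar sum.
 */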

uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b)
{
#define MULS(SHR) ((uint64_t) ((( \
        EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
    ) >> 0) & 0xffff) << SHR)
    return MULS(0) | MULS(16) | MULS(32) | MULS(48);
#undef MULS
}

uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b)
{
#define MULS(SHR) ((uint64_t) ((( \
        EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
    ) >> 16) & 0xffff) << SHR)
    return MULS(0) | MULS(16) | MULS(32) | MULS(48);
#undef MULS
}

uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b)
{
#define MULU(SHR) ((uint64_t) ((( \
        ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
    ) >> 0) & 0xffff) << SHR)
    return MULU(0) | MULU(16) | MULU(32) | MULU(48);
#undef MULU
}

uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b)
{
#define MULU(SHR) ((uint64_t) ((( \
        ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
    ) >> 16) & 0xffff) << SHR)
    return MULU(0) | MULU(16) | MULU(32) | MULU(48);
#undef MULU
}
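
/*
 * The four multiply helpers above form packed 16 x 16 products and
 * keep either the low half (">> 0": mulslw, mululw) or the high half
 * (">> 16": mulshw, muluhw) of each product in the corresponding
 * 16-bit field of the result.
 */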

uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b)
{
#define MACS(SHR) ( \
        EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff))
    return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48));
#undef MACS
}

uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b)
{
#define MACU(SHR) ( \
        (uint32_t) ((a >> SHR) & 0xffff) * \
        (uint32_t) ((b >> SHR) & 0xffff))
    return MACU(0) + MACU(16) + MACU(32) + MACU(48);
#undef MACU
}

#define NZBIT8(x, i) \
    SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \
    SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i)
#define NZBIT16(x, i) \
    SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \
    SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i)
#define NZBIT32(x, i) \
    SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \
    SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i)
#define NZBIT64(x) \
    SIMD64_SET(NBIT64(x), SIMD_NBIT) | \
    SIMD64_SET(ZBIT64(x), SIMD_ZBIT)
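
/*
 * Typical use when writing back wCASF: NZBIT16(x >> 16, 1) records
 * the negative and zero flags of halfword 1 of x in the flag nibble
 * that the SIMD16_SET layout reserves for that field.
 */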

#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUState *env, \
                                                 uint64_t a, uint64_t b) \
{ \
    a = \
        (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
        (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \
        (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \
        (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUState *env, \
                                                 uint64_t a, uint64_t b) \
{ \
    a = \
        (((a >> SH0) & 0xffff) << 0) | \
        (((b >> SH0) & 0xffff) << 16) | \
        (((a >> SH2) & 0xffff) << 32) | \
        (((b >> SH2) & 0xffff) << 48); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUState *env, \
                                                 uint64_t a, uint64_t b) \
{ \
    a = \
        (((a >> SH0) & 0xffffffff) << 0) | \
        (((b >> SH0) & 0xffffffff) << 32); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUState *env, \
                                                  uint64_t x) \
{ \
    x = \
        (((x >> SH0) & 0xff) << 0) | \
        (((x >> SH1) & 0xff) << 16) | \
        (((x >> SH2) & 0xff) << 32) | \
        (((x >> SH3) & 0xff) << 48); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUState *env, \
                                                  uint64_t x) \
{ \
    x = \
        (((x >> SH0) & 0xffff) << 0) | \
        (((x >> SH2) & 0xffff) << 32); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUState *env, \
                                                  uint64_t x) \
{ \
    x = (((x >> SH0) & 0xffffffff) << 0); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUState *env, \
                                                  uint64_t x) \
{ \
    x = \
        ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
        ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \
        ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \
        ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUState *env, \
                                                  uint64_t x) \
{ \
    x = \
        ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
        ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUState *env, \
                                                  uint64_t x) \
{ \
    x = EXTEND32((x >> SH0) & 0xffffffff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
    return x; \
}
IWMMXT_OP_UNPACK(l, 0, 8, 16, 24)
IWMMXT_OP_UNPACK(h, 32, 40, 48, 56)
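
/*
 * The two instantiations above generate the iwmmxt_unpackl* helpers,
 * which interleave or extend the low halves of their operands
 * (shifts 0..24), and the iwmmxt_unpackh* helpers, which do the same
 * for the high halves (shifts 32..56).
 */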

#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUState *env, \
                                              uint64_t a, uint64_t b) \
{ \
    a = \
        CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
        CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \
        CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \
        CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUState *env, \
                                              uint64_t a, uint64_t b) \
{ \
    a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
        CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUState *env, \
                                              uint64_t a, uint64_t b) \
{ \
    a = CMP(0, Tl, O, 0xffffffff) | \
        CMP(32, Tl, O, 0xffffffff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
    return a; \
}

#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
            (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR)
IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==)
IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >)
IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >)
#undef CMP

#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
            (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR))
IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <)
IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <)
IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >)
IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >)
#undef CMP

#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
            OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -)
IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +)
#undef CMP

/* TODO Signed- and Unsigned-Saturation */
#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
            OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -)
IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +)
IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -)
IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +)
#undef CMP
#undef IWMMXT_OP_CMP
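
/*
 * CMP is redefined before each group of instantiations so that the
 * single IWMMXT_OP_CMP skeleton yields element-wise compares, min/max
 * selections and add/sub arithmetic.  In the cmpeq helpers, for
 * example, CMP(SHR, uint8_t, ==, 0xff) expands to "all ones in that
 * byte if the two bytes are equal, zero otherwise".
 */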

#define AVGB(SHR) ((( \
        ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR)
#define IWMMXT_OP_AVGB(r) \
uint64_t HELPER(iwmmxt_avgb##r)(CPUState *env, uint64_t a, uint64_t b) \
{ \
    const int round = r; \
    a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \
        AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \
        SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \
        SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \
        SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \
        SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \
        SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \
        SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \
        SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \
    return a; \
}
IWMMXT_OP_AVGB(0)
IWMMXT_OP_AVGB(1)
#undef IWMMXT_OP_AVGB
#undef AVGB

#define AVGW(SHR) ((( \
        ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR)
#define IWMMXT_OP_AVGW(r) \
uint64_t HELPER(iwmmxt_avgw##r)(CPUState *env, uint64_t a, uint64_t b) \
{ \
    const int round = r; \
    a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \
        SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \
        SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \
        SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \
    return a; \
}
IWMMXT_OP_AVGW(0)
IWMMXT_OP_AVGW(1)
#undef IWMMXT_OP_AVGW
#undef AVGW
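
/*
 * In the averaging helpers above, r (captured as "round") selects a
 * truncating average (r = 0) or a rounding average (r = 1), since it
 * is added to each sum before the shift right by one.
 */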

uint64_t HELPER(iwmmxt_msadb)(uint64_t a, uint64_t b)
{
    a = ((((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) +
          ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)) & 0xffffffff) |
        ((((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
          ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)) << 32);
    return a;
}

uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n)
{
    a >>= n << 3;
    a |= b << (64 - (n << 3));
    return a;
}

uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n)
{
    x &= ~((uint64_t) b << n);
    x |= (uint64_t) (a & b) << n;
    return x;
}

uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x)
{
    return SIMD64_SET((x == 0), SIMD_ZBIT) |
           SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT);
}
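
/*
 * iwmmxt_align extracts eight bytes starting at byte offset n from
 * the 16-byte concatenation b:a; note that the b << (64 - (n << 3))
 * term is only well-defined for n != 0, since shifting a 64-bit value
 * by 64 is undefined in C.  iwmmxt_insr inserts the field a, already
 * masked by b, at bit offset n of x.
 */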

uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg)
{
    arg &= 0xff;
    return
        ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) |
        ((uint64_t) arg << 16) | ((uint64_t) arg << 24) |
        ((uint64_t) arg << 32) | ((uint64_t) arg << 40) |
        ((uint64_t) arg << 48) | ((uint64_t) arg << 56);
}

uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg)
{
    arg &= 0xffff;
    return
        ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) |
        ((uint64_t) arg << 32) | ((uint64_t) arg << 48);
}

uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg)
{
    return arg | ((uint64_t) arg << 32);
}

uint64_t HELPER(iwmmxt_addcb)(uint64_t x)
{
    return
        ((x >> 0) & 0xff) + ((x >> 8) & 0xff) +
        ((x >> 16) & 0xff) + ((x >> 24) & 0xff) +
        ((x >> 32) & 0xff) + ((x >> 40) & 0xff) +
        ((x >> 48) & 0xff) + ((x >> 56) & 0xff);
}

uint64_t HELPER(iwmmxt_addcw)(uint64_t x)
{
    return
        ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) +
        ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff);
}

uint64_t HELPER(iwmmxt_addcl)(uint64_t x)
{
    return (x & 0xffffffff) + (x >> 32);
}

uint32_t HELPER(iwmmxt_msbb)(uint64_t x)
{
    return
        ((x >> 7) & 0x01) | ((x >> 14) & 0x02) |
        ((x >> 21) & 0x04) | ((x >> 28) & 0x08) |
        ((x >> 35) & 0x10) | ((x >> 42) & 0x20) |
        ((x >> 49) & 0x40) | ((x >> 56) & 0x80);
}

uint32_t HELPER(iwmmxt_msbw)(uint64_t x)
{
    return
        ((x >> 15) & 0x01) | ((x >> 30) & 0x02) |
        ((x >> 45) & 0x04) | ((x >> 60) & 0x08);
}

uint32_t HELPER(iwmmxt_msbl)(uint64_t x)
{
    return ((x >> 31) & 0x01) | ((x >> 62) & 0x02);
}
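
/*
 * msbb, msbw and msbl gather the most significant bit of every byte,
 * halfword or word field of x into a compact mask, one result bit per
 * field.
 */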

/* FIXME: Split wCASF setting into a separate op to avoid env use. */
uint64_t HELPER(iwmmxt_srlw)(CPUState *env, uint64_t x, uint32_t n)
{
    x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) |
        (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) |
        (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) |
        (((x & (0xffffll << 48)) >> n) & (0xffffll << 48));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_srll)(CPUState *env, uint64_t x, uint32_t n)
{
    x = ((x & (0xffffffffll << 0)) >> n) |
        ((x >> n) & (0xffffffffll << 32));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_srlq)(CPUState *env, uint64_t x, uint32_t n)
{
    x >>= n;
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}

uint64_t HELPER(iwmmxt_sllw)(CPUState *env, uint64_t x, uint32_t n)
{
    x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) |
        (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) |
        (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) |
        (((x & (0xffffll << 48)) << n) & (0xffffll << 48));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_slll)(CPUState *env, uint64_t x, uint32_t n)
{
    x = ((x << n) & (0xffffffffll << 0)) |
        ((x & (0xffffffffll << 32)) << n);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_sllq)(CPUState *env, uint64_t x, uint32_t n)
{
    x <<= n;
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}

uint64_t HELPER(iwmmxt_sraw)(CPUState *env, uint64_t x, uint32_t n)
{
    x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) |
        ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) |
        ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) |
        ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_sral)(CPUState *env, uint64_t x, uint32_t n)
{
    x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) |
        (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_sraq)(CPUState *env, uint64_t x, uint32_t n)
{
    x = (int64_t) x >> n;
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}
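
/*
 * The word and halfword arithmetic shifts above first widen each
 * field with EXTEND16 or EXTEND32, so the ones produced by sign
 * extension supply the fill bits when the widened value is shifted
 * right and masked back down to the field width.
 */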

uint64_t HELPER(iwmmxt_rorw)(CPUState *env, uint64_t x, uint32_t n)
{
    x = ((((x & (0xffffll << 0)) >> n) |
          ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) |
        ((((x & (0xffffll << 16)) >> n) |
          ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) |
        ((((x & (0xffffll << 32)) >> n) |
          ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) |
        ((((x & (0xffffll << 48)) >> n) |
          ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_rorl)(CPUState *env, uint64_t x, uint32_t n)
{
    x = ((x & (0xffffffffll << 0)) >> n) |
        ((x >> n) & (0xffffffffll << 32)) |
        ((x << (32 - n)) & (0xffffffffll << 0)) |
        ((x & (0xffffffffll << 32)) << (32 - n));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_rorq)(CPUState *env, uint64_t x, uint32_t n)
{
    x = (x >> n) | (x << (64 - n));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}

uint64_t HELPER(iwmmxt_shufh)(CPUState *env, uint64_t x, uint32_t n)
{
    x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) |
        (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) |
        (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) |
        (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}
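
/*
 * In iwmmxt_shufh, n is an 8-bit immediate holding four 2-bit source
 * selectors, one per destination halfword.  For destination halfword
 * 0, for instance, (n << 4) & 0x30 turns bits [1:0] of n into a
 * source shift of 0, 16, 32 or 48.
 */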

/* TODO: Unsigned-Saturation */
uint64_t HELPER(iwmmxt_packuw)(CPUState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
        (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
        (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
        (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
    return a;
}

uint64_t HELPER(iwmmxt_packul)(CPUState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
        (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
    return a;
}

uint64_t HELPER(iwmmxt_packuq)(CPUState *env, uint64_t a, uint64_t b)
{
    a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
    return a;
}

/* TODO: Signed-Saturation */
uint64_t HELPER(iwmmxt_packsw)(CPUState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
        (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
        (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
        (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
    return a;
}

uint64_t HELPER(iwmmxt_packsl)(CPUState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
        (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
    return a;
}

uint64_t HELPER(iwmmxt_packsq)(CPUState *env, uint64_t a, uint64_t b)
{
    a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
    return a;
}
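
/*
 * The pack helpers above narrow every field of a and b to half its
 * width and concatenate the results, with a supplying the low half of
 * the destination and b the high half.  As the TODO comments note,
 * the unsigned/signed saturation the operations call for is not
 * applied yet.
 */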

uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b)
{
    return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b));
}

uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b)
{
    c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) *
                  EXTEND16S((b >> 0) & 0xffff));
    c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) *
                  EXTEND16S((b >> 16) & 0xffff));
    return c;
}

uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b)
{
    return c + (EXTEND32(EXTEND16S(a & 0xffff) *
                         EXTEND16S(b & 0xffff)));
}