/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* TODO list:
 * - See TODO comments in code.
 */

/* Marker for missing code. */
#define TODO() \
    do { \
        fprintf(stderr, "TODO %s:%u: %s()\n", \
                __FILE__, __LINE__, __func__); \
        tcg_abort(); \
    } while (0)

/* Single bit n. */
#define BIT(n) (1 << (n))

/* Bitfield n...m (in 32 bit value). */
#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m)
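/* Example: BITS(7, 4) == 0xf0, i.e. bits 7 down to 4 set. */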

/* Used for function call generation. */
#define TCG_REG_CALL_STACK TCG_REG_R4
#define TCG_TARGET_STACK_ALIGN 16
#define TCG_TARGET_CALL_STACK_OFFSET 0

/* TODO: documentation. */
static uint8_t *tb_ret_addr;

/* Macros used in tcg_target_op_defs. */
#define R "r"
#define RI "ri"
#if TCG_TARGET_REG_BITS == 32
# define R64 "r", "r"
#else
# define R64 "r"
#endif
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
# define L "L", "L"
# define S "S", "S"
#else
# define L "L"
# define S "S"
#endif

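/* Note on the constraint strings used below (standard TCG operand-constraint
   letters): "r" accepts any host register, "ri" a register or immediate
   constant; "L" and "S" are the qemu_ld/qemu_st address constraints, listed
   twice when a guest address occupies two host registers; "0" and "1" tie an
   input to output operand 0 or 1. */
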
/* TODO: documentation. */
static const TCGTargetOpDef tcg_target_op_defs[] = {
    { INDEX_op_exit_tb, { NULL } },
    { INDEX_op_goto_tb, { NULL } },
    { INDEX_op_call, { RI } },
    { INDEX_op_br, { NULL } },

    { INDEX_op_mov_i32, { R, R } },
    { INDEX_op_movi_i32, { R } },

    { INDEX_op_ld8u_i32, { R, R } },
    { INDEX_op_ld8s_i32, { R, R } },
    { INDEX_op_ld16u_i32, { R, R } },
    { INDEX_op_ld16s_i32, { R, R } },
    { INDEX_op_ld_i32, { R, R } },
    { INDEX_op_st8_i32, { R, R } },
    { INDEX_op_st16_i32, { R, R } },
    { INDEX_op_st_i32, { R, R } },

    { INDEX_op_add_i32, { R, RI, RI } },
    { INDEX_op_sub_i32, { R, RI, RI } },
    { INDEX_op_mul_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { R, R, R } },
    { INDEX_op_divu_i32, { R, R, R } },
    { INDEX_op_rem_i32, { R, R, R } },
    { INDEX_op_remu_i32, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i32
    { INDEX_op_div2_i32, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i32, { R, R, "0", "1", R } },
#endif
    /* TODO: Does R, RI, RI result in faster code than R, R, RI?
       If both operands are constants, we can optimize. */
    { INDEX_op_and_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i32
    { INDEX_op_andc_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i32
    { INDEX_op_eqv_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i32
    { INDEX_op_nand_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i32
    { INDEX_op_nor_i32, { R, RI, RI } },
#endif
    { INDEX_op_or_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i32
    { INDEX_op_orc_i32, { R, RI, RI } },
#endif
    { INDEX_op_xor_i32, { R, RI, RI } },
    { INDEX_op_shl_i32, { R, RI, RI } },
    { INDEX_op_shr_i32, { R, RI, RI } },
    { INDEX_op_sar_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i32
    { INDEX_op_rotl_i32, { R, RI, RI } },
    { INDEX_op_rotr_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i32
    { INDEX_op_deposit_i32, { R, "0", R } },
#endif

    { INDEX_op_brcond_i32, { R, RI } },

    { INDEX_op_setcond_i32, { R, R, RI } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_setcond_i64, { R, R, RI } },
#endif /* TCG_TARGET_REG_BITS == 64 */

#if TCG_TARGET_REG_BITS == 32
    /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
    { INDEX_op_add2_i32, { R, R, R, R, R, R } },
    { INDEX_op_sub2_i32, { R, R, R, R, R, R } },
    { INDEX_op_brcond2_i32, { R, R, RI, RI } },
    { INDEX_op_mulu2_i32, { R, R, R, R } },
    { INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
#endif

#if TCG_TARGET_HAS_not_i32
    { INDEX_op_not_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i32
    { INDEX_op_neg_i32, { R, R } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { R, R } },
    { INDEX_op_movi_i64, { R } },

    { INDEX_op_ld8u_i64, { R, R } },
    { INDEX_op_ld8s_i64, { R, R } },
    { INDEX_op_ld16u_i64, { R, R } },
    { INDEX_op_ld16s_i64, { R, R } },
    { INDEX_op_ld32u_i64, { R, R } },
    { INDEX_op_ld32s_i64, { R, R } },
    { INDEX_op_ld_i64, { R, R } },

    { INDEX_op_st8_i64, { R, R } },
    { INDEX_op_st16_i64, { R, R } },
    { INDEX_op_st32_i64, { R, R } },
    { INDEX_op_st_i64, { R, R } },

    { INDEX_op_add_i64, { R, RI, RI } },
    { INDEX_op_sub_i64, { R, RI, RI } },
    { INDEX_op_mul_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i64
    { INDEX_op_div_i64, { R, R, R } },
    { INDEX_op_divu_i64, { R, R, R } },
    { INDEX_op_rem_i64, { R, R, R } },
    { INDEX_op_remu_i64, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i64
    { INDEX_op_div2_i64, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i64, { R, R, "0", "1", R } },
#endif
    { INDEX_op_and_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i64
    { INDEX_op_andc_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i64
    { INDEX_op_eqv_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i64
    { INDEX_op_nand_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i64
    { INDEX_op_nor_i64, { R, RI, RI } },
#endif
    { INDEX_op_or_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i64
    { INDEX_op_orc_i64, { R, RI, RI } },
#endif
    { INDEX_op_xor_i64, { R, RI, RI } },
    { INDEX_op_shl_i64, { R, RI, RI } },
    { INDEX_op_shr_i64, { R, RI, RI } },
    { INDEX_op_sar_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i64
    { INDEX_op_rotl_i64, { R, RI, RI } },
    { INDEX_op_rotr_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i64
    { INDEX_op_deposit_i64, { R, "0", R } },
#endif
    { INDEX_op_brcond_i64, { R, RI } },

#if TCG_TARGET_HAS_ext8s_i64
    { INDEX_op_ext8s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i64
    { INDEX_op_ext16s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32s_i64
    { INDEX_op_ext32s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i64
    { INDEX_op_ext8u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i64
    { INDEX_op_ext16u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32u_i64
    { INDEX_op_ext32u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap16_i64
    { INDEX_op_bswap16_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i64
    { INDEX_op_bswap32_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap64_i64
    { INDEX_op_bswap64_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_not_i64
    { INDEX_op_not_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i64
    { INDEX_op_neg_i64, { R, R } },
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

    { INDEX_op_qemu_ld8u, { R, L } },
    { INDEX_op_qemu_ld8s, { R, L } },
    { INDEX_op_qemu_ld16u, { R, L } },
    { INDEX_op_qemu_ld16s, { R, L } },
    { INDEX_op_qemu_ld32, { R, L } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld32u, { R, L } },
    { INDEX_op_qemu_ld32s, { R, L } },
#endif
    { INDEX_op_qemu_ld64, { R64, L } },

    { INDEX_op_qemu_st8, { R, S } },
    { INDEX_op_qemu_st16, { R, S } },
    { INDEX_op_qemu_st32, { R, S } },
    { INDEX_op_qemu_st64, { R64, S } },

#if TCG_TARGET_HAS_ext8s_i32
    { INDEX_op_ext8s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i32
    { INDEX_op_ext16s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i32
    { INDEX_op_ext8u_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i32
    { INDEX_op_ext16u_i32, { R, R } },
#endif

#if TCG_TARGET_HAS_bswap16_i32
    { INDEX_op_bswap16_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i32
    { INDEX_op_bswap32_i32, { R, R } },
#endif

    { -1 },
};

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
#endif
};

#if MAX_OPC_PARAM_IARGS != 5
# error Fix needed, number of supported input arguments changed!
#endif

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
#if TCG_TARGET_REG_BITS == 32
    /* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
#else
# error Too few input registers available
#endif
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R0,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_R1
#endif
};

#ifndef NDEBUG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
#if TCG_TARGET_NB_REGS >= 16
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
#if TCG_TARGET_NB_REGS >= 32
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
#endif
#endif
};
#endif

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    /* tcg_out_reloc always uses the same type, addend. */
    assert(type == sizeof(tcg_target_long));
    assert(addend == 0);
    assert(value != 0);
    *(tcg_target_long *)code_ptr = value;
}

/* Parse target specific constraints. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
    case 'L':                   /* qemu_ld constraint */
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, BIT(TCG_TARGET_NB_REGS) - 1);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
/* Show current bytecode. Used by tcg interpreter. */
void tci_disas(uint8_t opc)
{
    const TCGOpDef *def = &tcg_op_defs[opc];
    fprintf(stderr, "TCG %s %u, %u, %u\n",
            def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs);
}
#endif

/* Write value (native size). */
static void tcg_out_i(TCGContext *s, tcg_target_ulong v)
{
    *(tcg_target_ulong *)s->code_ptr = v;
    s->code_ptr += sizeof(tcg_target_ulong);
}

/* Write 64 bit value. */
static void tcg_out64(TCGContext *s, uint64_t v)
{
    *(uint64_t *)s->code_ptr = v;
    s->code_ptr += sizeof(v);
}

/* Write opcode. */
static void tcg_out_op_t(TCGContext *s, TCGOpcode op)
{
    tcg_out8(s, op);
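    /* Length placeholder; each emitter patches it afterwards via old_code_ptr[1]. */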
    tcg_out8(s, 0);
}

/* Write register. */
static void tcg_out_r(TCGContext *s, TCGArg t0)
{
    assert(t0 < TCG_TARGET_NB_REGS);
    tcg_out8(s, t0);
}

/* Write register or constant (native size). */
static void tcg_out_ri(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out_i(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

/* Write register or constant (32 bit). */
static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out32(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

#if TCG_TARGET_REG_BITS == 64
/* Write register or constant (64 bit). */
static void tcg_out_ri64(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out64(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}
#endif

/* Write label. */
static void tci_out_label(TCGContext *s, TCGArg arg)
{
    TCGLabel *label = &s->labels[arg];
    if (label->has_value) {
        tcg_out_i(s, label->u.value);
        assert(label->u.value);
    } else {
        tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), arg, 0);
        s->code_ptr += sizeof(tcg_target_ulong);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       tcg_target_long arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_ld_i32);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_ld_i64);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        assert(arg2 == (uint32_t)arg2);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    assert(ret != arg);
#if TCG_TARGET_REG_BITS == 32
    tcg_out_op_t(s, INDEX_op_mov_i32);
#else
    tcg_out_op_t(s, INDEX_op_mov_i64);
#endif
    tcg_out_r(s, ret);
    tcg_out_r(s, arg);
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg t0, tcg_target_long arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    uint32_t arg32 = arg;
    if (type == TCG_TYPE_I32 || arg == arg32) {
        tcg_out_op_t(s, INDEX_op_movi_i32);
        tcg_out_r(s, t0);
        tcg_out32(s, arg32);
    } else {
        assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_movi_i64);
        tcg_out_r(s, t0);
        tcg_out64(s, arg);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    uint8_t *old_code_ptr = s->code_ptr;

    tcg_out_op_t(s, opc);

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out64(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method. */
            assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* Indirect jump method. */
            TODO();
        }
        assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_br:
        tci_out_label(s, args[0]);
        break;
    case INDEX_op_call:
        tcg_out_ri(s, const_args[0], args[0]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_setcond2_i32:
        /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out_ri32(s, const_args[4], args[4]);
        tcg_out8(s, args[5]);   /* condition */
        break;
#elif TCG_TARGET_REG_BITS == 64
    case INDEX_op_setcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#endif
    case INDEX_op_movi_i32:
        TODO(); /* Handled by tcg_out_movi? */
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        assert(args[2] == (uint32_t)args[2]);
        tcg_out32(s, args[2]);
        break;
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:     /* Optional (TCG_TARGET_HAS_andc_i32). */
    case INDEX_op_eqv_i32:      /* Optional (TCG_TARGET_HAS_eqv_i32). */
    case INDEX_op_nand_i32:     /* Optional (TCG_TARGET_HAS_nand_i32). */
    case INDEX_op_nor_i32:      /* Optional (TCG_TARGET_HAS_nor_i32). */
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:      /* Optional (TCG_TARGET_HAS_orc_i32). */
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
    case INDEX_op_rotr_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i32:  /* Optional (TCG_TARGET_HAS_deposit_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
        TODO();
        break;
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:     /* Optional (TCG_TARGET_HAS_andc_i64). */
    case INDEX_op_eqv_i64:      /* Optional (TCG_TARGET_HAS_eqv_i64). */
    case INDEX_op_nand_i64:     /* Optional (TCG_TARGET_HAS_nand_i64). */
    case INDEX_op_nor_i64:      /* Optional (TCG_TARGET_HAS_nor_i64). */
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:      /* Optional (TCG_TARGET_HAS_orc_i64). */
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        /* TODO: Implementation of rotl_i64, rotr_i64 missing in tci.c. */
    case INDEX_op_rotl_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
    case INDEX_op_rotr_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i64:  /* Optional (TCG_TARGET_HAS_deposit_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;
    case INDEX_op_div_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_divu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_rem_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
        TODO();
        break;
    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
        TODO();
        break;
    case INDEX_op_brcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, args[3]);
        break;
    case INDEX_op_bswap16_i64:  /* Optional (TCG_TARGET_HAS_bswap16_i64). */
    case INDEX_op_bswap32_i64:  /* Optional (TCG_TARGET_HAS_bswap32_i64). */
    case INDEX_op_bswap64_i64:  /* Optional (TCG_TARGET_HAS_bswap64_i64). */
    case INDEX_op_not_i64:      /* Optional (TCG_TARGET_HAS_not_i64). */
    case INDEX_op_neg_i64:      /* Optional (TCG_TARGET_HAS_neg_i64). */
    case INDEX_op_ext8s_i64:    /* Optional (TCG_TARGET_HAS_ext8s_i64). */
    case INDEX_op_ext8u_i64:    /* Optional (TCG_TARGET_HAS_ext8u_i64). */
    case INDEX_op_ext16s_i64:   /* Optional (TCG_TARGET_HAS_ext16s_i64). */
    case INDEX_op_ext16u_i64:   /* Optional (TCG_TARGET_HAS_ext16u_i64). */
    case INDEX_op_ext32s_i64:   /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    case INDEX_op_ext32u_i64:   /* Optional (TCG_TARGET_HAS_ext32u_i64). */
#endif /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_neg_i32:      /* Optional (TCG_TARGET_HAS_neg_i32). */
    case INDEX_op_not_i32:      /* Optional (TCG_TARGET_HAS_not_i32). */
    case INDEX_op_ext8s_i32:    /* Optional (TCG_TARGET_HAS_ext8s_i32). */
    case INDEX_op_ext16s_i32:   /* Optional (TCG_TARGET_HAS_ext16s_i32). */
    case INDEX_op_ext8u_i32:    /* Optional (TCG_TARGET_HAS_ext8u_i32). */
    case INDEX_op_ext16u_i32:   /* Optional (TCG_TARGET_HAS_ext16u_i32). */
    case INDEX_op_bswap16_i32:  /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap32_i32:  /* Optional (TCG_TARGET_HAS_bswap32_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        break;
    case INDEX_op_div_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_divu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_rem_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_remu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
        TODO();
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        tcg_out_r(s, args[4]);
        tcg_out_r(s, args[5]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out8(s, args[4]);   /* condition */
        tci_out_label(s, args[5]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        break;
#endif
    case INDEX_op_brcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, args[3]);
        break;
    case INDEX_op_qemu_ld8u:
    case INDEX_op_qemu_ld8s:
    case INDEX_op_qemu_ld16u:
    case INDEX_op_qemu_ld16s:
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st8:
    case INDEX_op_qemu_st16:
    case INDEX_op_qemu_st32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_end:
        TODO();
        break;
    default:
        fprintf(stderr, "Missing: %s\n", tcg_op_defs[opc].name);
        tcg_abort();
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       tcg_target_long arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_st_i32);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_st_i64);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

/* Test if a constant matches the constraint. */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    /* No need to return 0 or 1, 0 or != 0 is good enough. */
    return arg_ct->ct & TCG_CT_CONST;
}

static void tcg_target_init(TCGContext *s)
{
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
    const char *envval = getenv("DEBUG_TCG");
    if (envval) {
        cpu_set_log(strtol(envval, NULL, 0));
    }
#endif

    /* The current code uses uint8_t for tcg operations. */
    assert(ARRAY_SIZE(tcg_op_defs) <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* Registers available for 64 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* TODO: Which registers should be set here? */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_add_target_add_op_defs(tcg_target_op_defs);
    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}

/* Generate global QEMU prologue and epilogue code. */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    tb_ret_addr = s->code_ptr;
}