/* This file is generated by the genmloop script.  DO NOT EDIT! */

/* Enable switch() support in cgen headers.  */
#define SEM_IN_SWITCH

#define WANT_CPU sh64
#define WANT_CPU_SH64

#include "sim-main.h"
#include "bfd.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "sim-assert.h"

/* Fill in the administrative ARGBUF fields required by all insns,
   virtual and real.  */

static INLINE void
sh64_compact_fill_argbuf (const SIM_CPU *cpu, ARGBUF *abuf, const IDESC *idesc,
                          PCADDR pc, int fast_p)
{
#if WITH_SCACHE
  SEM_SET_CODE (abuf, idesc, fast_p);
  ARGBUF_ADDR (abuf) = pc;
#endif
  ARGBUF_IDESC (abuf) = idesc;
}

/* Fill in tracing/profiling fields of an ARGBUF.  */

static INLINE void
sh64_compact_fill_argbuf_tp (const SIM_CPU *cpu, ARGBUF *abuf,
                             int trace_p, int profile_p)
{
  ARGBUF_TRACE_P (abuf) = trace_p;
  ARGBUF_PROFILE_P (abuf) = profile_p;
}

#if WITH_SCACHE_PBB

/* Emit the "x-before" handler.
   x-before is emitted before each insn (serial or parallel).
   This is as opposed to x-after which is only emitted at the end of a group
   of parallel insns.  */

static INLINE void
sh64_compact_emit_before (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc, int first_p)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEFORE];

  abuf->fields.before.first_p = first_p;
  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}

/* Emit the "x-after" handler.
   x-after is emitted after a serial insn or at the end of a group of
   parallel insns.  */

static INLINE void
sh64_compact_emit_after (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_AFTER];

  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}

#endif /* WITH_SCACHE_PBB */
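
/* Illustrative only (a sketch, not code emitted elsewhere in this file):
   when compiling a traced (non-fast) group, the pbb compiler interleaves
   these virtual handlers with the real insns, e.g.

     sh64_compact_emit_before (current_cpu, sc, pc, 1);   -- first_p = 1
     ++sc;
     ... extract the real insn into sc->argbuf, ++sc ...
     sh64_compact_emit_after (current_cpu, sc, pc);
     ++sc;

   so that sh64_compact_pbb_before/pbb_after below can do per-insn
   trace and profile bookkeeping.  In fast mode no handlers are emitted.  */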

/* Decode the insn at PC and fill in the administrative and trace/profile
   fields of ABUF.  */

static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
         int fast_p)
{
  const IDESC *id = sh64_compact_decode (current_cpu, pc, insn, insn, abuf);

  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  if (! fast_p)
    {
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      sh64_compact_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}

/* Execute the insn in scache entry SC, returning the vpc of the next
   entry to execute.  */

static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;

  if (fast_p)
    {
#if ! WITH_SEM_SWITCH_FAST
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, &sc->argbuf);
#endif
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FAST */
    }
  else
    {
#if ! WITH_SEM_SWITCH_FULL
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
#if WITH_SCACHE_PBB
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif

      if (! virtual_p)
        {
          /* FIXME: call x-before */
          if (ARGBUF_PROFILE_P (abuf))
            PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
          /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            sh64_compact_model_insn_before (current_cpu, 1 /*first_p*/);
          TRACE_INSN_INIT (current_cpu, abuf, 1);
          TRACE_INSN (current_cpu, idesc->idata,
                      (const struct argbuf *) abuf, abuf->addr);
        }
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
        {
          /* FIXME: call x-after */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            {
              int cycles;

              cycles = (*idesc->timing->model_fn) (current_cpu, sc);
              sh64_compact_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
            }
          TRACE_INSN_FINI (current_cpu, abuf, 1);
        }
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FULL */
    }

  return vpc;
}

/* Record address of cti terminating a pbb.  */
#define SET_CTI_VPC(sc) do { _cti_sc = (sc); } while (0)
/* Record number of [real] insns in pbb.  */
#define SET_INSN_COUNT(n) do { _insn_count = (n); } while (0)
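
/* Note: both macros expand to assignments to locals (_cti_sc, _insn_count)
   of sh64_compact_pbb_begin below, so they are only meaningful inside its
   extract-pbb block.  Their two uses there look like:

     if (IDESC_CTI_P (idesc))
       SET_CTI_VPC (sc - 1);       -- records &sc[-1] in _cti_sc
     ...
     SET_INSN_COUNT (icount);      -- records icount in _insn_count
*/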

/* Fetch and extract a pseudo-basic-block.
   FAST_P is non-zero if no tracing/profiling/etc. is wanted.  */

INLINE SEM_PC
sh64_compact_pbb_begin (SIM_CPU *current_cpu, int FAST_P)
{
  SEM_PC new_vpc;
  PCADDR pc;
  SCACHE *sc;
  int max_insns = CPU_SCACHE_MAX_CHAIN_LENGTH (current_cpu);

  pc = GET_H_PC ();

  new_vpc = scache_lookup_or_alloc (current_cpu, pc, max_insns, &sc);
  if (! new_vpc)
    {
      /* Leading '_' to avoid collision with mainloop.in.  */
      int _insn_count = 0;
      SCACHE *orig_sc = sc;
      SCACHE *_cti_sc = NULL;
      int slice_insns = CPU_MAX_SLICE_INSNS (current_cpu);

      /* First figure out how many instructions to compile.
         MAX_INSNS is the size of the allocated buffer, which includes space
         for before/after handlers if they're being used.
         SLICE_INSNS is the maximum number of real insns that can be
         executed.  Zero means "as many as we want".  */
      /* ??? max_insns is serving two incompatible roles.
         1) Number of slots available in scache buffer.
         2) Number of real insns to execute.
         They're incompatible because there are virtual insns emitted too
         (chain,cti-chain,before,after handlers).  */
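
      /* Illustrative numbers only (not from this build): with
         MAX_CHAIN_LENGTH == 64, FAST_P == 0 and MAX_PARALLEL_INSNS == 1,
         the adjustment below leaves max_insns = 64 - (1 + 2 + 0) = 61
         slots for real insns plus their virtual companions.  */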

      if (slice_insns == 1)
        {
          /* No need to worry about extra slots required for virtual insns
             and parallel exec support because MAX_CHAIN_LENGTH is
             guaranteed to be big enough to execute at least 1 insn!  */
          max_insns = 1;
        }
      else
        {
          /* Allow enough slop so that while compiling insns, if max_insns > 0
             then there's guaranteed to be enough space to emit one real insn.
             MAX_CHAIN_LENGTH is typically much longer than
             the normal number of insns between cti's anyway.  */
          max_insns -= (1 /* one for the trailing chain insn */
                        + (FAST_P
                           ? 0
                           : (1 + MAX_PARALLEL_INSNS) /* before+after */)
                        + (MAX_PARALLEL_INSNS > 1
                           ? (MAX_PARALLEL_INSNS * 2)
                           : 0));

          /* Account for before/after handlers.  */
          if (! FAST_P)
            slice_insns *= 3;

          if (slice_insns > 0
              && slice_insns < max_insns)
            max_insns = slice_insns;
        }

      new_vpc = sc;

      /* SC,PC must be updated to point past the last entry used.
         SET_CTI_VPC must be called if pbb is terminated by a cti.
         SET_INSN_COUNT must be called to record number of real insns in
         pbb [could be computed by us of course, extra cpu but perhaps
         negligible enough].  */

      /* begin extract-pbb */
      {
        const IDESC *idesc;
        int icount = 0;

        while (max_insns > 0)
          {
            UHI insn = GETIMEMUHI (current_cpu, pc);

            idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
            SEM_SKIP_COMPILE (current_cpu, sc, 1);
            ++sc;
            --max_insns;
            ++icount;
            pc += idesc->length;

            if (IDESC_CTI_P (idesc))
              {
                SET_CTI_VPC (sc - 1);

                if (CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_DELAY_SLOT))
                  {
                    USI insn = GETIMEMUHI (current_cpu, pc);
                    idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);

                    if (IDESC_CTI_P (idesc) ||
                        CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_ILLSLOT))
                      {
                        SIM_DESC sd = CPU_STATE (current_cpu);
                        sim_io_eprintf (CPU_STATE (current_cpu),
                                        "malformed program, `%s' insn in delay slot\n",
                                        CGEN_INSN_NAME (idesc->idata));
                        sim_engine_halt (sd, current_cpu, NULL, pc,
                                         sim_stopped, SIM_SIGILL);
                      }
                    else
                      {
                        ++sc;
                        --max_insns;
                        ++icount;
                        pc += idesc->length;
                      }
                  }

                break;
              }
          }

        SET_INSN_COUNT (icount);
      }
      /* end extract-pbb */
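
      /* Illustrative layout of one compiled chain at this point
         (a sketch only; the terminator is appended just below):

           orig_sc[0] .. orig_sc[n-1]   real insns (icount == n)
           orig_sc[n]                   x-chain, or x-cti-chain if the
                                        pbb ended in a cti

         with argbuf.fields.chain recording insn_count and the initially
         unresolved next/branch_target vpc pointers.  */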

      /* The last one is a pseudo-insn to link to the next chain.
         It is also used to record the insn count for this chain.  */
      {
        const IDESC *id;

        /* Was pbb terminated by a cti?  */
        if (_cti_sc)
          {
            id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CTI_CHAIN];
          }
        else
          {
            id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CHAIN];
          }
        SEM_SET_CODE (&sc->argbuf, id, FAST_P);
        sc->argbuf.idesc = id;
        sc->argbuf.addr = pc;
        sc->argbuf.fields.chain.insn_count = _insn_count;
        sc->argbuf.fields.chain.next = 0;
        sc->argbuf.fields.chain.branch_target = 0;
        ++sc;
      }

      /* Update the pointer to the next free entry, may not have used as
         many entries as was asked for.  */
      CPU_SCACHE_NEXT_FREE (current_cpu) = sc;
      /* Record length of chain if profiling.
         This includes virtual insns since they count against
         max_insns too.  */
      if (! FAST_P)
        PROFILE_COUNT_SCACHE_CHAIN_LENGTH (current_cpu, sc - orig_sc);
    }

  return new_vpc;
}

/* Chain to the next block from a non-cti terminated previous block.  */

SEM_PC
sh64_compact_pbb_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg)
{
  ARGBUF *abuf = SEM_ARGBUF (sem_arg);

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  SET_H_PC (abuf->addr);

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* If chained to next block, go straight to it.  */
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;
  /* See if next block has already been compiled.  */
  abuf->fields.chain.next = scache_lookup (current_cpu, abuf->addr);
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;

  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}

/* Chain to the next block from a cti terminated previous block.
   BR_TYPE indicates whether the branch was taken and whether we can cache
   the vpc of the branch target.
   NEW_PC is the target's branch address, and is only valid if
   BR_TYPE != SEM_BRANCH_UNTAKEN.  */
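
/* Illustrative caller (a sketch; the real cti semantics are generated
   into sem-compact.c / sem-compact-switch.c and may differ):

     SEM_BRANCH_TYPE br_type = taken_p ? SEM_BRANCH_CACHEABLE
                                       : SEM_BRANCH_UNTAKEN;
     vpc = sh64_compact_pbb_cti_chain (current_cpu, sem_arg, br_type, new_pc);

   SEM_BRANCH_UNCACHEABLE is used for targets (e.g. register-indirect
   jumps) whose address can change between executions.  */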

SEM_PC
sh64_compact_pbb_cti_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg,
                            SEM_BRANCH_TYPE br_type, PCADDR new_pc)
{
  SEM_PC *new_vpc_ptr;

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  /* If we have switched ISAs, exit back to main loop.
     Set idesc to 0 to cause the engine to point to the right insn table.  */
  if (new_pc & 1)
    {
      /* Switch to SHmedia.  */
      CPU_IDESC_SEM_INIT_P (current_cpu) = 0;
      CPU_RUNNING_P (current_cpu) = 0;
    }

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* Restart compiler if we branched to an uncacheable address
     (e.g. "j reg").  */
  if (br_type == SEM_BRANCH_UNCACHEABLE)
    {
      SET_H_PC (new_pc);
      return CPU_SCACHE_PBB_BEGIN (current_cpu);
    }

  /* If branch wasn't taken, update the pc and set BR_ADDR_PTR to our
     next chain ptr.  */
  if (br_type == SEM_BRANCH_UNTAKEN)
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      PCADDR pc = abuf->addr;
      SET_H_PC (pc);
      new_vpc_ptr = &abuf->fields.chain.next;
    }
  else
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.branch_target;
    }

  /* If chained to next block, go straight to it.  */
  if (*new_vpc_ptr)
    return *new_vpc_ptr;
  /* See if next block has already been compiled.  */
  *new_vpc_ptr = scache_lookup (current_cpu, new_pc);
  if (*new_vpc_ptr)
    return *new_vpc_ptr;

  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}

/* x-before handler.
   This is called before each insn.  */

void
sh64_compact_pbb_before (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  int first_p = abuf->fields.before.first_p;
  const ARGBUF *cur_abuf = SEM_ARGBUF (sc + 1);
  const IDESC *cur_idesc = cur_abuf->idesc;
  PCADDR pc = cur_abuf->addr;

  if (ARGBUF_PROFILE_P (cur_abuf))
    PROFILE_COUNT_INSN (current_cpu, pc, cur_idesc->num);

  /* If this isn't the first insn, finish up the previous one.  */

  if (! first_p)
    {
      if (PROFILE_MODEL_P (current_cpu))
        {
          const SEM_ARG prev_sem_arg = sc - 1;
          const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);
          const IDESC *prev_idesc = prev_abuf->idesc;
          int cycles;

          /* ??? May want to measure all insns if doing insn tracing.  */
          if (ARGBUF_PROFILE_P (prev_abuf))
            {
              cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
              sh64_compact_model_insn_after (current_cpu, 0 /*last_p*/, cycles);
            }
        }

      TRACE_INSN_FINI (current_cpu, cur_abuf, 0 /*last_p*/);
    }

  /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (cur_abuf))
    sh64_compact_model_insn_before (current_cpu, first_p);

  TRACE_INSN_INIT (current_cpu, cur_abuf, first_p);
  TRACE_INSN (current_cpu, cur_idesc->idata, cur_abuf, pc);
}

/* x-after handler.
   This is called after a serial insn or at the end of a group of parallel
   insns.  */

void
sh64_compact_pbb_after (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  const SEM_ARG prev_sem_arg = sc - 1;
  const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);

  /* ??? May want to measure all insns if doing insn tracing.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (prev_abuf))
    {
      const IDESC *prev_idesc = prev_abuf->idesc;
      int cycles;

      cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
      sh64_compact_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
    }
  TRACE_INSN_FINI (current_cpu, prev_abuf, 1 /*last_p*/);
}

#define FAST_P 0

void
sh64_compact_engine_run_full (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FULL
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FULL
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_sem_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FULL_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);

  do
    {
      /* begin full-exec-pbb */
      {
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
        vpc = execute (current_cpu, vpc, FAST_P);
#endif
      }
      /* end full-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P
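
/* Illustrative only: the common cgen run loop (see sim/common/cgen-run.c)
   selects between the full engine above and the fast engine below roughly
   like this (a sketch, not the exact code):

     ENGINE_FN *fn = STATE_RUN_FAST_P (sd)
                     ? CPU_FAST_ENGINE_FN (cpu)
                     : CPU_FULL_ENGINE_FN (cpu);
     (*fn) (cpu);

   so the fast variant is only entered when tracing and profiling are
   disabled.  */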

#define FAST_P 1

void
sh64_compact_engine_run_fast (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FAST
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FAST
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_semf_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FAST_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);

  do
    {
      /* begin fast-exec-pbb */
      {
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
        vpc = execute (current_cpu, vpc, FAST_P);
#endif
      }
      /* end fast-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P