1
/* libunwind - a platform-independent unwind library
2
Copyright (C) 2001-2004 Hewlett-Packard Co
3
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
5
This file is part of libunwind.
7
Permission is hereby granted, free of charge, to any person obtaining
8
a copy of this software and associated documentation files (the
9
"Software"), to deal in the Software without restriction, including
10
without limitation the rights to use, copy, modify, merge, publish,
11
distribute, sublicense, and/or sell copies of the Software, and to
12
permit persons to whom the Software is furnished to do so, subject to
13
the following conditions:
15
The above copyright notice and this permission notice shall be
16
included in all copies or substantial portions of the Software.
18
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
22
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
30
/* Opcodes of the pre-compiled "unwind script" interpreter (see run_script
   below).  Each script instruction pairs an opcode with a destination
   slot `dst' and an operand `val'; the per-opcode semantics are given on
   each enumerator.  `s' denotes the cursor state being updated.
   NOTE(review): the enum's braces are missing from this extract; the
   enumerator list itself is preserved verbatim.  */
enum ia64_script_insn_opcode
32
IA64_INSN_INC_PSP, /* psp += val */
33
IA64_INSN_LOAD_PSP, /* psp = *psp_loc */
34
IA64_INSN_ADD_PSP, /* s[dst] = (s.psp + val) */
35
IA64_INSN_ADD_PSP_NAT, /* like above, but with NaT info */
36
IA64_INSN_ADD_SP, /* s[dst] = (s.sp + val) */
37
IA64_INSN_ADD_SP_NAT, /* like above, but with NaT info */
38
IA64_INSN_MOVE, /* s[dst] = s[val] */
39
IA64_INSN_MOVE_NAT, /* like above, but with NaT info */
40
IA64_INSN_MOVE_NO_NAT, /* like above, but clear NaT info */
41
IA64_INSN_MOVE_STACKED, /* s[dst] = ia64_rse_skip(*s.bsp_loc, val) */
42
IA64_INSN_MOVE_STACKED_NAT, /* like above, but with NaT info */
43
IA64_INSN_MOVE_SCRATCH, /* s[dst] = scratch reg "val" */
44
IA64_INSN_MOVE_SCRATCH_NAT, /* like above, but with NaT info */
45
IA64_INSN_MOVE_SCRATCH_NO_NAT /* like above, but clear NaT info */
49
/* Per-thread script cache, selected when the address space's caching
   policy is UNW_CACHE_PER_THREAD (see get_script_cache).  Mutual
   exclusion is provided either by the atomic `busy' flag (when
   HAVE_ATOMIC_OPS_H is defined) or by a pthread mutex `lock'.
   NOTE(review): initializer braces are missing from this extract.  */
static __thread struct ia64_script_cache ia64_per_thread_cache =
51
#ifdef HAVE_ATOMIC_OPS_H
52
.busy = AO_TS_INITIALIZER
54
.lock = PTHREAD_MUTEX_INITIALIZER
59
/* Map an instruction pointer to a script-cache hash-table index.
   Multiplicative (Fibonacci-style) hashing: IP is shifted right by 4
   (ia64 bundles are 16 bytes, so the low 4 bits carry no entropy),
   multiplied by the golden-ratio constant, and the top
   IA64_LOG_UNW_HASH_SIZE bits are kept.  */
static inline unw_hash_index_t
62
/* based on (sqrt(5)/2-1)*2^64 */
63
# define magic ((unw_word_t) 0x9e3779b97f4a7c16ULL)
65
return (ip >> 4) * magic >> (64 - IA64_LOG_UNW_HASH_SIZE);
69
/* Test whether cached SCRIPT applies to the current frame: the IP must
   match exactly and the predicate registers PR must agree with the
   script's recorded values on all bits in pr_mask.
   NOTE(review): the return statements are missing from this extract.  */
cache_match (struct ia64_script *script, unw_word_t ip, unw_word_t pr)
71
if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
77
/* Reset CACHE to an empty state: rebuild the LRU chain over all
   IA64_UNW_CACHE_SIZE buckets, clear every bucket's collision chain and
   IP, and (in the second loop, whose body is missing from this extract)
   presumably reset the hash table — TODO confirm against upstream.  */
flush_script_cache (struct ia64_script_cache *cache)
81
cache->lru_head = IA64_UNW_CACHE_SIZE - 1;
84
for (i = 0; i < IA64_UNW_CACHE_SIZE; ++i)
87
cache->buckets[i].lru_chain = (i - 1);
88
cache->buckets[i].coll_chain = -1;
89
cache->buckets[i].ip = 0;
91
for (i = 0; i<IA64_UNW_HASH_SIZE; ++i)
95
/* Acquire the script cache for address space AS and return it locked.
   Selects the global cache or the per-thread cache according to the
   caching policy; returns early when caching is disabled (the early
   return itself is missing from this extract).  With atomic ops, a busy
   cache is detected via test-and-set (caller then falls back to the
   uncached path); otherwise signals are masked (old mask saved into
   *SAVED_SIGMASKP) and the mutex is taken for the global cache.
   Flushes the cache if AS's cache generation has changed.
   NOTE(review): `Debug (16, "%s: acquiring lock\n")' passes no argument
   for %s — verify whether the Debug macro supplies __FUNCTION__
   implicitly; if not, this is a format-string bug.  */
static inline struct ia64_script_cache *
96
get_script_cache (unw_addr_space_t as, sigset_t *saved_sigmaskp)
98
struct ia64_script_cache *cache = &as->global_cache;
99
unw_caching_policy_t caching = as->caching_policy;
101
if (caching == UNW_CACHE_NONE)
105
if (as->caching_policy == UNW_CACHE_PER_THREAD)
106
cache = &ia64_per_thread_cache;
109
#ifdef HAVE_ATOMIC_OPS_H
110
if (AO_test_and_set (&cache->busy) == AO_TS_SET)
113
sigprocmask (SIG_SETMASK, &unwi_full_sigmask, saved_sigmaskp);
114
if (likely (caching == UNW_CACHE_GLOBAL))
116
Debug (16, "%s: acquiring lock\n");
117
mutex_lock (&cache->lock);
121
if (as->cache_generation != cache->generation)
123
flush_script_cache (cache);
124
cache->generation = as->cache_generation;
130
/* Release CACHE, undoing what get_script_cache did: clear the atomic
   busy flag (HAVE_ATOMIC_OPS_H build) or unlock the mutex (global
   cache) and restore the signal mask saved in *SAVED_SIGMASKP.  Must
   not be called when caching is disabled (asserted).  */
put_script_cache (unw_addr_space_t as, struct ia64_script_cache *cache,
131
sigset_t *saved_sigmaskp)
133
assert (as->caching_policy != UNW_CACHE_NONE);
135
Debug (16, "unmasking signals/releasing lock\n");
136
#ifdef HAVE_ATOMIC_OPS_H
137
AO_CLEAR (&cache->busy);
139
if (likely (as->caching_policy == UNW_CACHE_GLOBAL))
140
mutex_unlock (&cache->lock);
141
sigprocmask (SIG_SETMASK, saved_sigmaskp, NULL);
145
/* Look up a cached unwind script for the frame described by cursor C.
   Fast path: try the hint bucket first; otherwise walk the hash bucket's
   collision chain.  On a hit via the chain, the hint of the previous
   script (and the cursor's hint) are updated so the next lookup for the
   same call pattern hits directly.  Indices >= IA64_UNW_CACHE_SIZE act
   as "no entry" sentinels.  Returns NULL on a miss (return statements
   are missing from this extract — TODO confirm).  */
static struct ia64_script *
146
script_lookup (struct ia64_script_cache *cache, struct cursor *c)
148
struct ia64_script *script = cache->buckets + c->hint;
149
unsigned short index;
155
if (cache_match (script, ip, pr))
158
index = cache->hash[hash (ip)];
159
if (index >= IA64_UNW_CACHE_SIZE)
162
script = cache->buckets + index;
165
if (cache_match (script, ip, pr))
167
/* update hint; no locking needed: single-word writes are atomic */
168
c->hint = cache->buckets[c->prev_script].hint =
169
(script - cache->buckets);
172
if (script->coll_chain >= IA64_UNW_HASH_SIZE)
174
script = cache->buckets + script->coll_chain;
179
/* Fill in C's proc-info from the script cache only, without building a
   new script.  Returns 0 on a cache hit, -UNW_ENOINFO when the cache is
   busy or the lookup misses.  Acquires and releases the cache around
   the lookup.  */
ia64_get_cached_proc_info (struct cursor *c)
181
struct ia64_script_cache *cache;
182
struct ia64_script *script;
183
sigset_t saved_sigmask;
185
cache = get_script_cache (c->as, &saved_sigmask);
187
return -UNW_ENOINFO; /* cache is busy */
189
script = script_lookup (cache, c);
193
put_script_cache (c->as, cache, &saved_sigmask);
194
return script ? 0 : -UNW_ENOINFO;
198
/* Initialize SCRIPT for instruction pointer IP: clears the ABI marker
   (other field initializations are missing from this extract).  */
script_init (struct ia64_script *script, unw_word_t ip)
203
script->abi_marker = 0;
206
/* Allocate a script-cache bucket for IP by evicting the least-recently
   used entry: pop the LRU head, re-link it at the LRU tail, unlink the
   evicted script from its hash collision chain (if present), then insert
   the new script at the head of IP's hash chain and initialize it.
   Indices >= IA64_UNW_CACHE_SIZE mean "end of chain".
   NOTE(review): the chain-walk loop framing is missing from this
   extract; the statements shown are preserved verbatim.  */
static inline struct ia64_script *
207
script_new (struct ia64_script_cache *cache, unw_word_t ip)
209
struct ia64_script *script, *prev, *tmp;
210
unw_hash_index_t index;
213
head = cache->lru_head;
214
script = cache->buckets + head;
215
cache->lru_head = script->lru_chain;
217
/* re-insert script at the tail of the LRU chain: */
218
cache->buckets[cache->lru_tail].lru_chain = head;
219
cache->lru_tail = head;
221
/* remove the old script from the hash table (if it's there): */
224
index = hash (script->ip);
225
tmp = cache->buckets + cache->hash[index];
232
prev->coll_chain = tmp->coll_chain;
234
cache->hash[index] = tmp->coll_chain;
239
if (tmp->coll_chain >= IA64_UNW_CACHE_SIZE)
240
/* old script wasn't in the hash-table */
242
tmp = cache->buckets + tmp->coll_chain;
246
/* enter new script in the hash table */
248
script->coll_chain = cache->hash[index];
249
cache->hash[index] = script - cache->buckets;
251
script_init (script, ip);
256
/* Finish building SCRIPT: record the predicate-register mask and values
   from state record SR so cache_match can later decide whether the
   script applies to a frame.  */
script_finalize (struct ia64_script *script, struct cursor *c,
257
struct ia64_state_record *sr)
259
script->pr_mask = sr->pr_mask;
260
script->pr_val = sr->pr_val;
265
/* Append INSN to SCRIPT.  If the script is already at
   IA64_MAX_SCRIPT_LEN instructions, complain (and presumably drop the
   instruction — the early return is missing from this extract).  */
script_emit (struct ia64_script *script, struct ia64_script_insn insn)
267
if (script->count >= IA64_MAX_SCRIPT_LEN)
269
dprintf ("%s: script exceeds maximum size of %u instructions!\n",
270
__FUNCTION__, IA64_MAX_SCRIPT_LEN);
273
script->insn[script->count++] = insn;
277
/* Translate the save location of preserved register I (described by R,
   from state record SR) into one script instruction and emit it.  The
   `where' value selects the instruction: saved in another GR (stacked,
   preserved r4-r7, or scratch), in an FR or BR, or at an SP/PSP-relative
   memory address.  For general registers the NaT-tracking opcode
   variants are chosen when I is one of the preserved GRs r4-r7.  When I
   is IA64_REG_PSP, a trailing LOAD_PSP is emitted so the cursor ends up
   with the *value* of the previous sp rather than its save location.
   NOTE(review): the switch/case framing and several assignments are
   missing from this extract; statements shown are preserved verbatim.  */
compile_reg (struct ia64_state_record *sr, int i, struct ia64_reg_info *r,
278
struct ia64_script *script)
280
enum ia64_script_insn_opcode opc;
281
unsigned long val, rval;
282
struct ia64_script_insn insn;
283
long is_preserved_gr;
285
/* Registers not saved (or saved after the target instruction) need no
   update — the early return is missing from this extract.  */
if (r->where == IA64_WHERE_NONE || r->when >= sr->when_target)
288
opc = IA64_INSN_MOVE;
290
is_preserved_gr = (i >= IA64_REG_R4 && i <= IA64_REG_R7);
292
if (r->where == IA64_WHERE_GR)
294
/* Handle most common case first... */
297
/* register got spilled to a stacked register */
299
opc = IA64_INSN_MOVE_STACKED_NAT;
301
opc = IA64_INSN_MOVE_STACKED;
304
else if (rval >= 4 && rval <= 7)
306
/* register got spilled to a preserved register */
307
val = IA64_REG_R4 + (rval - 4);
309
opc = IA64_INSN_MOVE_NAT;
313
/* register got spilled to a scratch register */
315
opc = IA64_INSN_MOVE_SCRATCH_NAT;
317
opc = IA64_INSN_MOVE_SCRATCH;
318
val = UNW_IA64_GR + rval;
326
/* Note: There is no need to handle NaT-bit info here
327
(indepent of is_preserved_gr), because for floating-point
328
NaTs are represented as NaTVal, so the NaT-info never
329
needs to be consulated. */
330
if (rval >= 2 && rval <= 5)
331
val = IA64_REG_F2 + (rval - 2);
332
else if (rval >= 16 && rval <= 31)
333
val = IA64_REG_F16 + (rval - 16);
336
opc = IA64_INSN_MOVE_SCRATCH;
337
val = UNW_IA64_FR + rval;
342
if (rval >= 1 && rval <= 5)
344
val = IA64_REG_B1 + (rval - 1);
346
opc = IA64_INSN_MOVE_NO_NAT;
350
opc = IA64_INSN_MOVE_SCRATCH;
352
opc = IA64_INSN_MOVE_SCRATCH_NO_NAT;
353
val = UNW_IA64_BR + rval;
357
case IA64_WHERE_SPREL:
359
opc = IA64_INSN_ADD_SP_NAT;
362
opc = IA64_INSN_ADD_SP;
363
if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
364
val |= IA64_LOC_TYPE_FP;
368
case IA64_WHERE_PSPREL:
370
opc = IA64_INSN_ADD_PSP_NAT;
373
opc = IA64_INSN_ADD_PSP;
374
if (i >= IA64_REG_F2 && i <= IA64_REG_F31)
375
val |= IA64_LOC_TYPE_FP;
380
dprintf ("%s: register %u has unexpected `where' value of %u\n",
381
__FUNCTION__, i, r->where);
388
script_emit (script, insn);
390
if (i == IA64_REG_PSP)
392
/* c->psp must contain the _value_ of the previous sp, not it's
393
save-location. We get this by dereferencing the value we
394
just stored in loc[IA64_REG_PSP]: */
395
insn.opc = IA64_INSN_LOAD_PSP;
396
script_emit (script, insn);
400
/* Sort the registers which got saved in decreasing order of WHEN
401
value. This is needed to ensure that the save-locations are
402
updated in the proper order. For example, suppose r4 gets spilled
403
to memory and then r5 gets saved in r4. In this case, we need to
404
update the save location of r5 before the one of r4. */
407
/* Collect the indices of registers that were actually saved before the
   target into REGORDER[], then selection-sort them by descending
   `when'.  Returns the count (return statement missing from this
   extract — TODO confirm).  */
sort_regs (struct ia64_state_record *sr, int regorder[])
409
int r, i, j, max, max_reg, max_when, num_regs = 0;
411
/* Regorder is sized for registers starting at IA64_REG_BSP == 3.  */
assert (IA64_REG_BSP == 3);
413
for (r = IA64_REG_BSP; r < IA64_NUM_PREGS; ++r)
415
/* Skip registers that were never saved or were saved too late.  */
if (sr->curr.reg[r].where == IA64_WHERE_NONE
416
|| sr->curr.reg[r].when >= sr->when_target)
419
regorder[num_regs++] = r;
422
/* Simple insertion-sort. Involves about N^2/2 comparisons and N
423
exchanges. N is often small (say, 2-5) so a fancier sorting
424
algorithm may not be worthwhile. */
426
for (i = max = 0; i < num_regs - 1; ++i)
428
max_reg = regorder[max];
429
max_when = sr->curr.reg[max_reg].when;
431
for (j = i + 1; j < num_regs; ++j)
432
if (sr->curr.reg[regorder[j]].when > max_when)
435
max_reg = regorder[j];
436
max_when = sr->curr.reg[max_reg].when;
440
regorder[max] = regorder[i];
441
regorder[i] = max_reg;
447
/* Build an unwind script that unwinds from state OLD_STATE to the
448
entrypoint of the function that called OLD_STATE. */
451
/* Compile SCRIPT for cursor C in three passes: (1) PSP first, since
   later save locations may be PSP-relative — fixed-size frames become a
   direct INC_PSP, variable-size frames go through compile_reg; (2) the
   primary UNaT, picking whichever of the GR/MEM copies is live at the
   target (always stored under IA64_REG_PRI_UNAT_MEM); (3) the remaining
   registers in decreasing `when' order from sort_regs.  Error-return
   paths are missing from this extract.  */
build_script (struct cursor *c, struct ia64_script *script)
453
int num_regs, i, ret, regorder[IA64_NUM_PREGS - 3];
454
struct ia64_reg_info *pri_unat;
455
struct ia64_state_record sr;
456
struct ia64_script_insn insn;
458
ret = ia64_create_state_record (c, &sr);
462
/* First, compile the update for IA64_REG_PSP. This is important
463
because later save-locations may depend on it's correct (updated)
464
value. Fixed-size frames are handled specially and variable-size
465
frames get handled via the normal compile_reg(). */
467
if (sr.when_target > sr.curr.reg[IA64_REG_PSP].when
468
&& (sr.curr.reg[IA64_REG_PSP].where == IA64_WHERE_NONE)
469
&& sr.curr.reg[IA64_REG_PSP].val != 0)
471
/* new psp is psp plus frame size */
472
insn.opc = IA64_INSN_INC_PSP;
473
insn.val = sr.curr.reg[IA64_REG_PSP].val; /* frame size */
474
script_emit (script, insn);
477
compile_reg (&sr, IA64_REG_PSP, sr.curr.reg + IA64_REG_PSP, script);
479
/* Second, compile the update for the primary UNaT, if any: */
481
if (sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_GR].when
482
|| sr.when_target >= sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
484
if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
485
/* (primary) NaT bits were saved to memory only */
486
pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
487
else if (sr.when_target < sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when)
488
/* (primary) NaT bits were saved to a register only */
489
pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;
490
else if (sr.curr.reg[IA64_REG_PRI_UNAT_MEM].when >
491
sr.curr.reg[IA64_REG_PRI_UNAT_GR].when)
492
/* (primary) NaT bits were last saved to memory */
493
pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_MEM;
495
/* (primary) NaT bits were last saved to a register */
496
pri_unat = sr.curr.reg + IA64_REG_PRI_UNAT_GR;
498
/* Note: we always store the final primary-UNaT location in UNAT_MEM. */
499
compile_reg (&sr, IA64_REG_PRI_UNAT_MEM, pri_unat, script);
502
/* Third, compile the other register in decreasing order of WHEN values. */
504
num_regs = sort_regs (&sr, regorder);
505
for (i = 0; i < num_regs; ++i)
506
compile_reg (&sr, regorder[i], sr.curr.reg + regorder[i], script);
508
script->abi_marker = sr.abi_marker;
509
script_finalize (script, c, &sr);
511
ia64_free_state_record (&sr);
516
/* Record NaT tracking info for preserved GR slot DST (r4-r7 only,
   asserted): store the NaT-bit location in the corresponding
   c->loc[IA64_REG_NAT4..] slot and the bit number in c->nat_bitnr[].  */
set_nat_info (struct cursor *c, unsigned long dst,
517
ia64_loc_t nat_loc, uint8_t bitnr)
519
assert (dst >= IA64_REG_R4 && dst <= IA64_REG_R7);
521
c->loc[dst - IA64_REG_R4 + IA64_REG_NAT4] = nat_loc;
522
c->nat_bitnr[dst - IA64_REG_R4] = bitnr;
525
/* Apply the unwinding actions represented by OPS and update SR to
526
reflect the state that existed upon entry to the function that this
527
unwinder represents. */
530
/* Interpret SCRIPT against cursor C: for each instruction, compute the
   save location `loc' (and, for *_NAT opcodes, the matching NaT-bit
   location via set_nat_info) and store it into the cursor.  The
   dispatch loop, fetch of opc/dst/val, and the common store at the end
   of each iteration are missing from this extract; the per-opcode
   statements shown are preserved verbatim.  */
run_script (struct ia64_script *script, struct cursor *c)
532
struct ia64_script_insn *ip, *limit, next_insn;
533
ia64_loc_t loc, nat_loc;
534
unsigned long opc, dst;
541
limit = script->insn + script->count;
543
c->abi_marker = script->abi_marker;
552
/* This is by far the most common operation: */
553
if (likely (opc == IA64_INSN_MOVE_STACKED))
555
if ((ret = ia64_get_stacked (c, val, &loc, NULL)) < 0)
561
case IA64_INSN_INC_PSP:
565
case IA64_INSN_LOAD_PSP:
566
/* Dereference the PSP save-location to get the previous sp value.  */
if ((ret = ia64_get (c, c->loc[IA64_REG_PSP], &c->psp)) < 0)
570
case IA64_INSN_ADD_PSP:
571
loc = IA64_LOC_ADDR (c->psp + val, (val & IA64_LOC_TYPE_FP));
574
case IA64_INSN_ADD_SP:
575
loc = IA64_LOC_ADDR (c->sp + val, (val & IA64_LOC_TYPE_FP));
578
case IA64_INSN_MOVE_NO_NAT:
579
set_nat_info (c, dst, IA64_NULL_LOC, 0);
584
case IA64_INSN_MOVE_SCRATCH_NO_NAT:
585
set_nat_info (c, dst, IA64_NULL_LOC, 0);
586
case IA64_INSN_MOVE_SCRATCH:
587
loc = ia64_scratch_loc (c, val, NULL);
590
case IA64_INSN_ADD_PSP_NAT:
591
loc = IA64_LOC_ADDR (c->psp + val, 0);
592
assert (!IA64_IS_REG_LOC (loc));
593
set_nat_info (c, dst,
594
c->loc[IA64_REG_PRI_UNAT_MEM],
595
ia64_unat_slot_num (IA64_GET_ADDR (loc)));
598
case IA64_INSN_ADD_SP_NAT:
599
loc = IA64_LOC_ADDR (c->sp + val, 0);
600
assert (!IA64_IS_REG_LOC (loc));
601
set_nat_info (c, dst,
602
c->loc[IA64_REG_PRI_UNAT_MEM],
603
ia64_unat_slot_num (IA64_GET_ADDR (loc)));
606
case IA64_INSN_MOVE_NAT:
608
/* Copy NaT info from the source preserved-GR slot `val'.  */
set_nat_info (c, dst,
609
c->loc[val - IA64_REG_R4 + IA64_REG_NAT4],
610
c->nat_bitnr[val - IA64_REG_R4]);
613
case IA64_INSN_MOVE_STACKED_NAT:
614
if ((ret = ia64_get_stacked (c, val, &loc, &nat_loc)) < 0)
616
assert (!IA64_IS_REG_LOC (loc));
617
set_nat_info (c, dst,
618
nat_loc, ia64_rse_slot_num (IA64_GET_ADDR (loc)));
621
case IA64_INSN_MOVE_SCRATCH_NAT:
622
loc = ia64_scratch_loc (c, val, NULL);
623
nat_loc = ia64_scratch_loc (c, val + (UNW_IA64_NAT - UNW_IA64_GR),
625
set_nat_info (c, dst, nat_loc, nat_bitnr);
634
/* Cache-bypassing path: fetch proc-info for C's IP, build a throwaway
   unwind script on the stack, and run it.  Used when caching is
   disabled or the script cache is busy.  Error returns between steps
   are missing from this extract.  */
uncached_find_save_locs (struct cursor *c)
636
struct ia64_script script;
639
if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
642
script_init (&script, c->ip);
643
if ((ret = build_script (c, &script)) < 0)
645
/* ESTOPUNWIND is an expected terminator, not a failure worth logging.  */
if (ret != -UNW_ESTOPUNWIND)
646
dprintf ("%s: failed to build unwind script for ip %lx\n",
647
__FUNCTION__, (long) c->ip);
650
return run_script (&script, c);
654
/* Locate the save locations for the frame described by C.  Falls back
   to uncached_find_save_locs when caching is disabled or the cache is
   busy; otherwise looks up a cached script and, on a miss, fetches
   proc-info, allocates a bucket via script_new, builds the script, and
   updates the hint/prev_script bookkeeping before running it.  The
   cache is released before returning.
   NOTE(review): the control-flow framing (miss branch, error gotos) is
   missing from this extract; statements shown are preserved verbatim.  */
ia64_find_save_locs (struct cursor *c)
656
struct ia64_script_cache *cache = NULL;
657
struct ia64_script *script = NULL;
658
sigset_t saved_sigmask;
661
if (c->as->caching_policy == UNW_CACHE_NONE)
662
return uncached_find_save_locs (c);
664
cache = get_script_cache (c->as, &saved_sigmask);
667
Debug (1, "contention on script-cache; doing uncached lookup\n");
668
return uncached_find_save_locs (c);
671
script = script_lookup (cache, c);
672
Debug (8, "ip %lx %s in script cache\n", (long) c->ip,
673
script ? "hit" : "missed");
676
if ((ret = ia64_fetch_proc_info (c, c->ip, 1)) < 0)
679
script = script_new (cache, c->ip);
682
dprintf ("%s: failed to create unwind script\n", __FUNCTION__);
686
cache->buckets[c->prev_script].hint = script - cache->buckets;
688
ret = build_script (c, script);
690
c->hint = script->hint;
691
c->prev_script = script - cache->buckets;
695
if (ret != -UNW_ESTOPUNWIND)
696
dprintf ("%s: failed to locate/build unwind script for ip %lx\n",
697
__FUNCTION__, (long) c->ip);
701
ret = run_script (script, c);
704
put_script_cache (c->as, cache, &saved_sigmask);
709
ia64_validate_cache (unw_addr_space_t as, void *arg)
711
#ifndef UNW_REMOTE_ONLY
712
if (as == unw_local_addr_space && ia64_local_validate_cache (as, arg) == 1)
716
#ifndef UNW_LOCAL_ONLY
717
/* local info is up-to-date, check dynamic info. */
718
unwi_dyn_validate_cache (as, arg);