2 * Copyright (C) 1999-2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependant on pt_regs structure.
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
29 #include <linux/module.h>
30 #include <linux/bootmem.h>
31 #include <linux/elf.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
36 #include <asm/unwind.h>
38 #include <asm/delay.h>
40 #include <asm/ptrace.h>
41 #include <asm/ptrace_offsets.h>
43 #include <asm/sections.h>
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
/* Generic minimum; both operands are evaluated, so avoid side effects. */
50 #define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Script-cache geometry: 2^7 = 128 cached unwind scripts. */
53 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
54 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
/* IP -> script hash table has twice as many buckets as cache entries. */
56 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
57 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
59 #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */
/* Debug printing: UNW_DPRINT(n, ...) fires only when the runtime debug level
 * is >= n; both macros compile away entirely in the !UNW_DEBUG branch below.
 * NOTE(review): the opening "#ifdef UNW_DEBUG" line is missing from this
 * fragmentary extract. */
62 static unsigned int unw_debug_level = UNW_DEBUG;
63 # define UNW_DEBUG_ON(n) unw_debug_level >= n
64 /* Do not code a printk level, not all debug lines end in newline */
65 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
67 #else /* !UNW_DEBUG */
68 # define UNW_DEBUG_ON(n) 0
69 # define UNW_DPRINT(n, ...)
70 #endif /* UNW_DEBUG */
/* GFP_ATOMIC allocators for the descriptor-parsing state stacks; parsing may
 * run in atomic context, so these must not sleep. */
78 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
79 #define free_reg_state(usr) kfree(usr)
80 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
81 #define free_labeled_state(usr) kfree(usr)
/* Machine word and hash-index types used by the descriptor decoder. */
83 typedef unsigned long unw_word;
84 typedef unsigned char unw_hash_index_t;
/* Global unwinder state ("unw") and its static initializer.
 * NOTE(review): this extract is fragmentary -- the "static struct unw {"
 * opener, the closing braces, and several members/initializers were dropped
 * by the extraction. */
87 spinlock_t lock; /* spinlock for unwind data */
89 /* list of unwind tables (one per load-module) */
90 struct unw_table *tables;
92 unsigned long r0; /* constant 0 for r0 */
94 /* table of registers that prologues can save (and order in which they're saved): */
95 const unsigned char save_order[8];
97 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
98 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
100 unsigned short lru_head; /* index of least-recently used script */
101 unsigned short lru_tail; /* index of most-recently used script */
103 /* index into unw_frame_info for preserved register i */
104 unsigned short preg_index[UNW_NUM_REGS];
/* Scratch-register offsets into struct pt_regs; -1 marks registers (r4-r7)
 * that are preserved rather than scratch -- see pt_regs_off(). */
106 short pt_regs_offsets[32];
108 /* unwind table for the kernel: */
109 struct unw_table kernel_table;
111 /* unwind table describing the gate page (kernel code that is mapped into user space): */
112 size_t gate_table_size;
113 unsigned long *gate_table;
115 /* hash table that maps instruction pointer to script index: */
116 unsigned short hash[UNW_HASH_SIZE];
119 struct unw_script cache[UNW_CACHE_SIZE];
122 const char *preg_name[UNW_NUM_REGS];
/* Statistics counters (only compiled in when UNW_STATS is set; surrounding
 * struct members are missing from this extract). */
130 int collision_chain_traversals;
133 unsigned long build_time;
134 unsigned long run_time;
135 unsigned long parse_time;
142 unsigned long init_time;
143 unsigned long unwind_time;
/* Static initializer for "unw" begins here (opening "= {" not visible). */
150 .tables = &unw.kernel_table,
151 .lock = SPIN_LOCK_UNLOCKED,
/* .save_order: order in which a prologue's implicit GR saves are assigned. */
153 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
154 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
/* .preg_index: offset (in 8-byte words) of each preserved register's save
 * location within struct unw_frame_info. */
157 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
158 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
159 offsetof(struct unw_frame_info, bsp_loc)/8,
160 offsetof(struct unw_frame_info, bspstore_loc)/8,
161 offsetof(struct unw_frame_info, pfs_loc)/8,
162 offsetof(struct unw_frame_info, rnat_loc)/8,
163 offsetof(struct unw_frame_info, psp)/8,
164 offsetof(struct unw_frame_info, rp_loc)/8,
165 offsetof(struct unw_frame_info, r4)/8,
166 offsetof(struct unw_frame_info, r5)/8,
167 offsetof(struct unw_frame_info, r6)/8,
168 offsetof(struct unw_frame_info, r7)/8,
169 offsetof(struct unw_frame_info, unat_loc)/8,
170 offsetof(struct unw_frame_info, pr_loc)/8,
171 offsetof(struct unw_frame_info, lc_loc)/8,
172 offsetof(struct unw_frame_info, fpsr_loc)/8,
173 offsetof(struct unw_frame_info, b1_loc)/8,
174 offsetof(struct unw_frame_info, b2_loc)/8,
175 offsetof(struct unw_frame_info, b3_loc)/8,
176 offsetof(struct unw_frame_info, b4_loc)/8,
177 offsetof(struct unw_frame_info, b5_loc)/8,
178 offsetof(struct unw_frame_info, f2_loc)/8,
179 offsetof(struct unw_frame_info, f3_loc)/8,
180 offsetof(struct unw_frame_info, f4_loc)/8,
181 offsetof(struct unw_frame_info, f5_loc)/8,
/* fr_loc[] covers f16-f31; the "- 16" makes the array index explicit. */
182 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
183 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
184 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
185 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
186 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
187 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
188 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
189 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
190 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
191 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
192 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
193 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
194 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
195 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
196 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
197 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
/* .pt_regs_offsets: byte offset of each scratch GR in struct pt_regs;
 * r4-r7 are preserved registers, hence -1 (not in pt_regs). */
201 offsetof(struct pt_regs, r1),
202 offsetof(struct pt_regs, r2),
203 offsetof(struct pt_regs, r3),
204 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
205 offsetof(struct pt_regs, r8),
206 offsetof(struct pt_regs, r9),
207 offsetof(struct pt_regs, r10),
208 offsetof(struct pt_regs, r11),
209 offsetof(struct pt_regs, r12),
210 offsetof(struct pt_regs, r13),
211 offsetof(struct pt_regs, r14),
212 offsetof(struct pt_regs, r15),
213 offsetof(struct pt_regs, r16),
214 offsetof(struct pt_regs, r17),
215 offsetof(struct pt_regs, r18),
216 offsetof(struct pt_regs, r19),
217 offsetof(struct pt_regs, r20),
218 offsetof(struct pt_regs, r21),
219 offsetof(struct pt_regs, r22),
220 offsetof(struct pt_regs, r23),
221 offsetof(struct pt_regs, r24),
222 offsetof(struct pt_regs, r25),
223 offsetof(struct pt_regs, r26),
224 offsetof(struct pt_regs, r27),
225 offsetof(struct pt_regs, r28),
226 offsetof(struct pt_regs, r29),
227 offsetof(struct pt_regs, r30),
228 offsetof(struct pt_regs, r31),
/* All hash buckets start out empty (-1 == no script). */
230 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
/* .preg_name: human-readable names, parallel to preg_index (debug output). */
233 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
234 "r4", "r5", "r6", "r7",
235 "ar.unat", "pr", "ar.lc", "ar.fpsr",
236 "b1", "b2", "b3", "b4", "b5",
237 "f2", "f3", "f4", "f5",
238 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
239 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
/* read_only(): true iff ADDR points into unw.r0, the dummy word used as the
 * save location for constant registers -- writes to it must be ignored.
 * (Fragmentary extract: return type / opening brace not visible.) */
245 read_only (void *addr)
247 return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
251 * Returns offset of rREG in struct pt_regs.
/* pt_regs_off(): map scratch register number REG to its byte offset inside
 * struct pt_regs via unw.pt_regs_offsets[]; out-of-range or preserved
 * registers hit the debug print (the "off = -1" default and the fallback
 * branch were dropped by the extraction -- presumably r12 is substituted;
 * verify against the full source). */
253 static inline unsigned long
254 pt_regs_off (unsigned long reg)
258 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
259 off = unw.pt_regs_offsets[reg];
262 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
265 return (unsigned long) off;
/* get_scratch_regs(): return the pt_regs for the current frame; when
 * info->pt is unset (guard line missing from extract) reconstruct it --
 * interrupt frames sit just below psp, others 16 bytes below sp. */
268 static inline struct pt_regs *
269 get_scratch_regs (struct unw_frame_info *info)
272 /* This should not happen with valid unwind info. */
273 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
274 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
275 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
277 info->pt = info->sp - 16;
279 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
280 return (struct pt_regs *) info->pt;
283 /* Unwind accessors. */
/* unw_access_gr(): read (write==0) or write (write!=0) general register
 * REGNUM of the frame described by INFO, together with its NaT bit (*nat).
 * Four cases are visible below: r0 (constant, read-only), preserved r4-r7
 * (save location + NaT tracked via info->r4..r7), scratch registers (via
 * pt_regs), and stacked r32+ (via the RSE backing store).
 * NOTE(review): fragmentary extract -- return statements, several switch
 * cases and closing braces were dropped; comments below are hedged
 * accordingly. */
286 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
288 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
289 struct unw_ireg *ireg;
/* Reject anything outside r0..r127; r0 reads as 0, writes are errors. */
292 if ((unsigned) regnum - 1 >= 127) {
293 if (regnum == 0 && !write) {
294 *val = 0; /* read r0 always returns 0 */
298 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
299 __FUNCTION__, regnum);
304 if (regnum >= 4 && regnum <= 7) {
305 /* access a preserved register */
306 ireg = &info->r4 + (regnum - 4);
/* NaT location depends on how the register was saved (ireg->nat.type). */
309 nat_addr = addr + ireg->nat.off;
310 switch (ireg->nat.type) {
312 /* simulate getf.sig/setf.sig */
315 /* write NaTVal and be done with it */
/* 0 / 0x1ffe is the getf.sig image of a NaTVal -- treat as NaT. */
322 if (addr[0] == 0 && addr[1] == 0x1ffe) {
323 /* return NaT and be done with it */
332 nat_addr = &dummy_nat;
/* NaT bit lives in a UNaT-style collection word: bit = (addr>>3) & 63. */
336 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
/* Saved in the register backing store: NaT comes from the RNAT slot. */
340 nat_addr = ia64_rse_rnat_addr(addr);
341 if ((unsigned long) addr < info->regstk.limit
342 || (unsigned long) addr >= info->regstk.top)
344 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
346 __FUNCTION__, (void *) addr,
/* RNAT slot beyond the backing store top is still in ar.rnat. */
351 if ((unsigned long) nat_addr >= info->regstk.top)
352 nat_addr = &info->sw->ar_rnat;
353 nat_mask = (1UL << ia64_rse_slot_num(addr));
/* Fallback: register still lives in the switch_stack. */
357 addr = &info->sw->r4 + (regnum - 4);
358 nat_addr = &info->sw->ar_unat;
359 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
362 /* access a scratch register */
363 pt = get_scratch_regs(info);
364 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
365 if (info->pri_unat_loc)
366 nat_addr = info->pri_unat_loc;
368 nat_addr = &info->sw->ar_unat;
369 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
372 /* access a stacked register */
373 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
374 nat_addr = ia64_rse_rnat_addr(addr);
375 if ((unsigned long) addr < info->regstk.limit
376 || (unsigned long) addr >= info->regstk.top)
378 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
379 "of rbs\n", __FUNCTION__);
382 if ((unsigned long) nat_addr >= info->regstk.top)
383 nat_addr = &info->sw->ar_rnat;
384 nat_mask = (1UL << ia64_rse_slot_num(addr));
/* Common tail: perform the access; writes to read-only slots are ignored. */
388 if (read_only(addr)) {
389 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
394 *nat_addr |= nat_mask;
396 *nat_addr &= ~nat_mask;
399 if ((*nat_addr & nat_mask) == 0) {
403 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
409 EXPORT_SYMBOL(unw_access_gr);
/* unw_access_br(): read or write branch register REGNUM of the frame.
 * b0/b6/b7 are scratch (fetched from pt_regs); b1-b5 are preserved (saved
 * location if recorded, else the switch_stack copy).  (Fragmentary extract:
 * switch opener, return paths and braces not visible.) */
412 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
419 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
420 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
421 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
424 case 1: case 2: case 3: case 4: case 5:
/* Prefer the unwound save location; fall back to switch_stack. */
425 addr = *(&info->b1_loc + (regnum - 1));
427 addr = &info->sw->b1 + (regnum - 1);
431 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
432 __FUNCTION__, regnum);
/* Writes to the read-only dummy location are silently ignored. */
436 if (read_only(addr)) {
437 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
445 EXPORT_SYMBOL(unw_access_br);
/* unw_access_fr(): read or write floating-point register REGNUM (f2-f127).
 * f2-f5 and f16-f31 are preserved (saved location or switch_stack); f6-f11
 * are scratch (pt_regs); f12-f15 come from the switch_stack; f32+ live in
 * the task's high-FP partition (thread.fph).  (Fragmentary extract: several
 * branch bodies and the read/write tail are not visible.) */
448 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
450 struct ia64_fpreg *addr = 0;
/* Only f2..f127 are accessible through the unwinder. */
453 if ((unsigned) (regnum - 2) >= 126) {
454 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
455 __FUNCTION__, regnum);
460 addr = *(&info->f2_loc + (regnum - 2));
462 addr = &info->sw->f2 + (regnum - 2);
463 } else if (regnum <= 15) {
465 pt = get_scratch_regs(info);
466 addr = &pt->f6 + (regnum - 6);
469 addr = &info->sw->f12 + (regnum - 12);
470 } else if (regnum <= 31) {
471 addr = info->fr_loc[regnum - 16];
473 addr = &info->sw->f16 + (regnum - 16);
475 struct task_struct *t = info->task;
/* f32+ -- presumably gated on fph validity; guard lines missing here. */
481 addr = t->thread.fph + (regnum - 32);
485 if (read_only(addr)) {
486 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
494 EXPORT_SYMBOL(unw_access_fr);
/* unw_access_ar(): read or write application register REGNUM.  Each case
 * picks an address: the unwound save location when known, else the
 * switch_stack copy; EC is special-cased as a bit-field (bits 52-57) of the
 * saved CFM; the last four cases are scratch ARs taken from pt_regs.
 * (Fragmentary extract: switch opener, case labels for several ARs and the
 * access tail are not visible.) */
497 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
504 addr = info->bsp_loc;
506 addr = &info->sw->ar_bspstore;
509 case UNW_AR_BSPSTORE:
510 addr = info->bspstore_loc;
512 addr = &info->sw->ar_bspstore;
516 addr = info->pfs_loc;
518 addr = &info->sw->ar_pfs;
522 addr = info->rnat_loc;
524 addr = &info->sw->ar_rnat;
528 addr = info->unat_loc;
530 addr = &info->sw->ar_unat;
536 addr = &info->sw->ar_lc;
/* EC lives in bits 52..57 of the saved CFM; read/modify in place. */
544 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
546 *val = (*info->cfm_loc >> 52) & 0x3f;
550 addr = info->fpsr_loc;
552 addr = &info->sw->ar_fpsr;
/* Scratch ARs (e.g. rsc, ccv, unat, fpsr of the interrupted context) are
 * fetched from pt_regs; the specific member assignments were dropped. */
556 pt = get_scratch_regs(info);
561 pt = get_scratch_regs(info);
566 pt = get_scratch_regs(info);
571 pt = get_scratch_regs(info);
576 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
577 __FUNCTION__, regnum);
582 if (read_only(addr)) {
583 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
591 EXPORT_SYMBOL(unw_access_ar);
/* unw_access_pr(): read or write the predicate registers (as one 64-bit
 * word).  Uses the unwound save location when known (guard line missing
 * from this extract), else the switch_stack copy. */
594 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
600 addr = &info->sw->pr;
603 if (read_only(addr)) {
604 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
612 EXPORT_SYMBOL(unw_access_pr);
615 /* Routines to manipulate the state stack. */
/* push(): save a copy of the current register state on sr's state stack
 * (allocation failure is logged; the early-return line is missing from this
 * extract). */
618 push (struct unw_state_record *sr)
620 struct unw_reg_state *rs;
622 rs = alloc_reg_state();
624 printk(KERN_ERR "unwind: cannot stack reg state!\n");
627 memcpy(rs, &sr->curr, sizeof(*rs));
/* pop(): restore the most recently pushed register state and free it. */
632 pop (struct unw_state_record *sr)
634 struct unw_reg_state *rs = sr->curr.next;
637 printk(KERN_ERR "unwind: stack underflow!\n");
640 memcpy(&sr->curr, rs, sizeof(*rs));
644 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
/* Returns the head of the duplicated list, or NULL on the first allocation
 * failure (cleanup of partial copies is not visible in this extract). */
645 static struct unw_reg_state *
646 dup_state_stack (struct unw_reg_state *rs)
648 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
651 copy = alloc_reg_state();
653 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
656 memcpy(copy, rs, sizeof(*copy));
667 /* Free all stacked register states (but not RS itself). */
669 free_state_stack (struct unw_reg_state *rs)
671 struct unw_reg_state *p, *next;
/* "next" is captured before freeing p (the free_reg_state(p) line was
 * dropped by the extraction). */
673 for (p = rs->next; p != NULL; p = next) {
680 /* Unwind decoder routines */
/* decode_abreg(): translate an "abreg" encoding from an unwind descriptor
 * into the corresponding UNW_REG_* index.  For 0x62, MEMORY selects the
 * memory vs. GR flavor of the primary UNaT.  Unknown encodings hit the
 * debug print below. */
682 static enum unw_register_index __attribute_const__
683 decode_abreg (unsigned char abreg, int memory)
686 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
687 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
688 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
689 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
690 case 0x60: return UNW_REG_PR;
691 case 0x61: return UNW_REG_PSP;
692 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
693 case 0x63: return UNW_REG_RP;
694 case 0x64: return UNW_REG_BSP;
695 case 0x65: return UNW_REG_BSPSTORE;
696 case 0x66: return UNW_REG_RNAT;
697 case 0x67: return UNW_REG_UNAT;
698 case 0x68: return UNW_REG_FPSR;
699 case 0x69: return UNW_REG_PFS;
700 case 0x6a: return UNW_REG_LC;
704 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
/* set_reg(): record where/when/val for REG, but only if it has not already
 * been assigned (the first descriptor wins -- hence the UNW_WHEN_NEVER
 * check; assignment lines are missing from this extract). */
709 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
713 if (reg->when == UNW_WHEN_NEVER)
/* alloc_spill_area(): walk registers HI down to LO and assign a psp-relative
 * slot of REGSIZE bytes to each one spilled to its "home" location,
 * advancing *OFFP as it goes (offset update lines not visible). */
718 alloc_spill_area (unsigned long *offp, unsigned long regsize,
719 struct unw_reg_info *lo, struct unw_reg_info *hi)
721 struct unw_reg_info *reg;
723 for (reg = hi; reg >= lo; --reg) {
724 if (reg->where == UNW_WHERE_SPILL_HOME) {
725 reg->where = UNW_WHERE_PSPREL;
/* spill_next_when(): set the "when" of the next home-spilled register in
 * [*regp, lim] to T and advance *regp past it; complains on excess spills. */
733 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
735 struct unw_reg_info *reg;
737 for (reg = *regp; reg <= lim; ++reg) {
738 if (reg->where == UNW_WHERE_SPILL_HOME) {
744 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
/* finish_prologue(): post-process a prologue region: (1) resolve implicit
 * GR save locations in unw.save_order, (2) walk the imask to timestamp
 * fp/gr/br spills, (3) lay out the psp-relative memory spill area.
 * (Fragmentary extract: loop/brace structure is incomplete.) */
748 finish_prologue (struct unw_state_record *sr)
750 struct unw_reg_info *reg;
755 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
756 * for Using Unwind Descriptors", rule 3):
758 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
759 reg = sr->curr.reg + unw.save_order[i];
760 if (reg->where == UNW_WHERE_GR_SAVE) {
761 reg->where = UNW_WHERE_GR;
762 reg->val = sr->gr_save_loc++;
767 * Next, compute when the fp, general, and branch registers get
768 * saved. This must come before alloc_spill_area() because
769 * we need to know which registers are spilled to their home
773 unsigned char kind, mask = 0, *cp = sr->imask;
775 static const unsigned char limit[3] = {
776 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
778 struct unw_reg_info *(regs[3]);
780 regs[0] = sr->curr.reg + UNW_REG_F2;
781 regs[1] = sr->curr.reg + UNW_REG_R4;
782 regs[2] = sr->curr.reg + UNW_REG_B1;
/* imask packs 2 bits per instruction slot: 0=none, 1=fr, 2=gr, 3=br. */
784 for (t = 0; t < sr->region_len; ++t) {
787 kind = (mask >> 2*(3-(t & 3))) & 3;
/* NOTE(review): "®s" below is a mis-encoding introduced by the
 * extraction -- it should read "&regs"; left byte-identical here. */
789 spill_next_when(®s[kind - 1], sr->curr.reg + limit[kind - 1],
790 sr->region_start + t);
794 * Next, lay out the memory stack spill area:
796 if (sr->any_spills) {
797 off = sr->spill_offset;
798 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
799 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
800 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
805 * Region header descriptors.
/* desc_prologue(): start a new region.  BODY selects body vs. prologue,
 * RLEN is the region length, MASK/GRSAVE describe registers implicitly
 * saved to GRs starting at GRSAVE.  Finishes the previous prologue, checks
 * whether the target IP has already been covered, then resets per-region
 * state.  (Fragmentary extract: early-return and epilogue-reset details
 * incomplete.) */
809 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
810 struct unw_state_record *sr)
/* Only the first region, or a region following a body, is expected here. */
814 if (!(sr->in_body || sr->first_region))
816 sr->first_region = 0;
818 /* check if we're done: */
819 if (sr->when_target < sr->region_start + sr->region_len) {
824 region_start = sr->region_start + sr->region_len;
/* Any pending epilogue state belongs to the previous region -- drop it. */
826 for (i = 0; i < sr->epilogue_count; ++i)
828 sr->epilogue_count = 0;
829 sr->epilogue_start = UNW_WHEN_NEVER;
831 sr->region_start = region_start;
832 sr->region_len = rlen;
/* One bit of MASK per save_order entry: registers saved to consecutive GRs
 * beginning at GRSAVE, timestamped at the last instruction of the region. */
838 for (i = 0; i < 4; ++i) {
840 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
841 sr->region_start + sr->region_len - 1, grsave++);
844 sr->gr_save_loc = grsave;
847 sr->spill_offset = 0x10; /* default to psp+16 */
852 * Prologue descriptors.
/* desc_abi(): only ABI 3 ('i' == interrupt frame) is understood; anything
 * else is logged and ignored. */
856 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
858 if (abi == 3 && context == 'i') {
859 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
860 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
863 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
864 __FUNCTION__, abi, context);
/* desc_br_gr(): branch registers b1-b5 (per BRMASK bit) saved to
 * consecutive GRs starting at GR. */
868 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
872 for (i = 0; i < 5; ++i) {
874 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
875 sr->region_start + sr->region_len - 1, gr++);
/* desc_br_mem(): branch registers b1-b5 (per BRMASK bit) spilled to their
 * home slots in memory; marks sr->any_spills (line not visible). */
881 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
885 for (i = 0; i < 5; ++i) {
887 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
888 sr->region_start + sr->region_len - 1, 0);
/* desc_frgr_mem(): r4-r7 per GRMASK and f2-f5/f16-f31 per FRMASK spilled
 * to memory.  FRMASK bit i < 4 maps to f2+i, bits 4..19 to f16.. (hence
 * the "UNW_REG_F16 - 4" base). */
896 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
900 for (i = 0; i < 4; ++i) {
901 if ((grmask & 1) != 0) {
902 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
903 sr->region_start + sr->region_len - 1, 0);
908 for (i = 0; i < 20; ++i) {
909 if ((frmask & 1) != 0) {
910 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
911 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
912 sr->region_start + sr->region_len - 1, 0);
/* desc_fr_mem(): f2-f5 (per FRMASK bit) spilled to memory. */
920 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
924 for (i = 0; i < 4; ++i) {
925 if ((frmask & 1) != 0) {
926 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
927 sr->region_start + sr->region_len - 1, 0);
/* desc_gr_gr(): r4-r7 (per GRMASK bit) saved to consecutive GRs from GR. */
935 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
939 for (i = 0; i < 4; ++i) {
940 if ((grmask & 1) != 0)
941 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
942 sr->region_start + sr->region_len - 1, gr++);
/* desc_gr_mem(): r4-r7 (per GRMASK bit) spilled to memory. */
948 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
952 for (i = 0; i < 4; ++i) {
953 if ((grmask & 1) != 0) {
954 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
955 sr->region_start + sr->region_len - 1, 0);
/* desc_mem_stack_f(): fixed-size frame -- PSP changes by 16*SIZE bytes at
 * time T (clamped to the region). */
963 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
965 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
966 sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
/* desc_mem_stack_v(): variable-size frame -- only the "when" is recorded;
 * the saved-sp location comes from other descriptors. */
970 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
972 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* desc_reg_gr(): REG saved to general register DST at end of region. */
976 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
978 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
/* desc_reg_psprel(): REG saved at a psp-relative offset (0x10 - 4*pspoff,
 * per the IA-64 encoding; the val argument line is not visible here). */
982 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
984 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
/* desc_reg_sprel(): REG saved at an sp-relative offset (4*spoff). */
989 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
991 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
/* desc_rp_br(): the return pointer lives in branch register DST. */
996 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
998 sr->return_link_reg = dst;
/* desc_reg_when(): REGNUM is saved at time T; if no location was recorded
 * yet, it defaults to an implicit GR save slot. */
1002 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1004 struct unw_reg_info *reg = sr->curr.reg + regnum;
1006 if (reg->where == UNW_WHERE_NONE)
1007 reg->where = UNW_WHERE_GR_SAVE;
1008 reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* desc_spill_base(): set the base of the memory spill area (psp-relative). */
1012 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1014 sr->spill_offset = 0x10 - 4*pspoff;
/* desc_spill_mask(): remember the imask pointer and skip over it --
 * 2 bits per instruction, rounded up to whole bytes. */
1017 static inline unsigned char *
1018 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1021 return imaskp + (2*sr->region_len + 7)/8;
/* desc_epilogue(): epilogue starts T instructions before the region end;
 * ECOUNT is the number of *additional* epilogues, hence the +1. */
1028 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1030 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1031 sr->epilogue_count = ecount + 1;
/* desc_copy_state(): restore the register state saved under LABEL --
 * replaces the current state and deep-copies the saved state stack so the
 * labeled copy stays intact for later reuse. */
1035 desc_copy_state (unw_word label, struct unw_state_record *sr)
1037 struct unw_labeled_state *ls;
1039 for (ls = sr->labeled_states; ls; ls = ls->next) {
1040 if (ls->label == label) {
/* Drop the current stacked states before overwriting sr->curr. */
1041 free_state_stack(&sr->curr);
1042 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1043 sr->curr.next = dup_state_stack(ls->saved_state.next);
1047 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
/* desc_label_state(): snapshot the current state (including a deep copy of
 * its state stack) under LABEL for a later copy_state. */
1051 desc_label_state (unw_word label, struct unw_state_record *sr)
1053 struct unw_labeled_state *ls;
1055 ls = alloc_labeled_state();
1057 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1061 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1062 ls->saved_state.next = dup_state_stack(sr->curr.next);
1064 /* insert into list of labeled states: */
1065 ls->next = sr->labeled_states;
1066 sr->labeled_states = ls;
1070 * General descriptors.
/* desc_is_active(): true iff a descriptor qualified by predicate QP at time
 * T applies to the unwind target: the target must lie past T within the
 * region, and the qualifying predicate (if any) must be set.  Records the
 * predicate in pr_mask so the generated script is cached per-predicate. */
1074 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1076 if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
1079 if ((sr->pr_val & (1UL << qp)) == 0)
1081 sr->pr_mask |= (1UL << qp);
/* desc_restore_p(): register ABREG is restored at time T -- forget any
 * recorded save location. */
1087 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1089 struct unw_reg_info *r;
1091 if (!desc_is_active(qp, t, sr))
1094 r = sr->curr.reg + decode_abreg(abreg, 0);
1095 r->where = UNW_WHERE_NONE;
1096 r->when = UNW_WHEN_NEVER;
/* desc_spill_reg_p(): ABREG spilled to another register at time T; YTREG's
 * top bit selects FR, X selects BR, otherwise GR (target number in the low
 * 7 bits). */
1101 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1102 unsigned char ytreg, struct unw_state_record *sr)
1104 enum unw_where where = UNW_WHERE_GR;
1105 struct unw_reg_info *r;
1107 if (!desc_is_active(qp, t, sr))
1111 where = UNW_WHERE_BR;
1112 else if (ytreg & 0x80)
1113 where = UNW_WHERE_FR;
1115 r = sr->curr.reg + decode_abreg(abreg, 0);
1117 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1118 r->val = (ytreg & 0x7f);
/* desc_spill_psprel_p(): ABREG spilled at psp-relative offset
 * (0x10 - 4*pspoff) at time T. */
1122 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1123 struct unw_state_record *sr)
1125 struct unw_reg_info *r;
1127 if (!desc_is_active(qp, t, sr))
1130 r = sr->curr.reg + decode_abreg(abreg, 1);
1131 r->where = UNW_WHERE_PSPREL;
1132 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1133 r->val = 0x10 - 4*pspoff;
/* desc_spill_sprel_p(): ABREG spilled at an sp-relative offset at time T
 * (the val assignment line is not visible in this extract). */
1137 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1138 struct unw_state_record *sr)
1140 struct unw_reg_info *r;
1142 if (!desc_is_active(qp, t, sr))
1145 r = sr->curr.reg + decode_abreg(abreg, 1);
1146 r->where = UNW_WHERE_SPREL;
1147 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* Glue macros mapping the generic decoder callbacks (see
 * "unwind_decoder.c", included at the bottom) onto the desc_* handlers
 * above.  The FMT argument is ignored everywhere; the _P variants carry a
 * qualifying predicate, the plain variants pass qp=0 (always active). */
1151 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
/* region headers: */
1157 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1158 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1160 * prologue descriptors:
1162 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1163 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1164 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1165 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1166 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1167 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1168 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1169 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1170 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1171 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1172 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1173 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1174 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1175 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1176 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1177 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1178 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1179 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1180 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1181 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
/* SPILL_MASK advances the decoder's descriptor pointer past the imask. */
1182 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
/* body descriptors: */
1186 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1187 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1188 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1190 * general unwind descriptors:
1192 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1193 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1194 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1195 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1196 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1197 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1198 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1199 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
/* Pull in the table-driven descriptor decoder, specialized by the macros
 * above. */
1201 #include "unwind_decoder.c"
1204 /* Unwind scripts. */
/* hash(): multiplicative (Fibonacci) hash of an instruction pointer into a
 * script-hash bucket; IPs are bundle-aligned, hence the >> 4. */
1206 static inline unw_hash_index_t
1207 hash (unsigned long ip)
1209 # define hashmagic 0x9e3779b97f4a7c16 /* based on (sqrt(5)/2-1)*2^64 */
1211 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE)
/* cache_match(): true iff SCRIPT was built for IP with predicate values
 * compatible with PR.  On a match the script's read lock is deliberately
 * retained for the caller; on a miss it is released. */
1216 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1218 read_lock(&script->lock);
1219 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1220 /* keep the read lock... */
1222 read_unlock(&script->lock);
1226 static inline struct unw_script *
1227 script_lookup (struct unw_frame_info *info)
1229 struct unw_script *script = unw.cache + info->hint;
1230 unsigned short index;
1231 unsigned long ip, pr;
1233 if (UNW_DEBUG_ON(0))
1234 return 0; /* Always regenerate scripts in debug mode */
1236 STAT(++unw.stat.cache.lookups);
1241 if (cache_match(script, ip, pr)) {
1242 STAT(++unw.stat.cache.hinted_hits);
1246 index = unw.hash[hash(ip)];
1247 if (index >= UNW_CACHE_SIZE)
1250 script = unw.cache + index;
1252 if (cache_match(script, ip, pr)) {
1253 /* update hint; no locking required as single-word writes are atomic */
1254 STAT(++unw.stat.cache.normal_hits);
1255 unw.cache[info->prev_script].hint = script - unw.cache;
1258 if (script->coll_chain >= UNW_HASH_SIZE)
1260 script = unw.cache + script->coll_chain;
1261 STAT(++unw.stat.cache.collision_chain_traversals);
1266 * On returning, a write lock for the SCRIPT is still being held.
/* script_new(): evict the least-recently-used cache entry and recycle it
 * as a fresh script for IP.  Re-links the LRU chain, removes the evicted
 * script from its old hash bucket, inserts it into the new one, and
 * returns with the script write-locked (or NULL if the trylock fails --
 * see the deadlock note below).  (Fragmentary extract: some unlock/return
 * lines and braces are not visible.) */
1268 static inline struct unw_script *
1269 script_new (unsigned long ip)
1271 struct unw_script *script, *prev, *tmp;
1272 unw_hash_index_t index;
1273 unsigned long flags;
1274 unsigned short head;
1276 STAT(++unw.stat.script.news);
1279 * Can't (easily) use cmpxchg() here because of ABA problem
1280 * that is intrinsic in cmpxchg()...
1282 spin_lock_irqsave(&unw.lock, flags);
/* Pop the LRU head -- this is the victim entry to recycle. */
1284 head = unw.lru_head;
1285 script = unw.cache + head;
1286 unw.lru_head = script->lru_chain;
/* Drop unw.lock (irqs stay off) before taking the script lock, per the
 * documented lock ordering: script rwlock before unw.lock. */
1288 spin_unlock(&unw.lock);
1291 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1292 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1293 * alternative would be to disable interrupts whenever we hold a read-lock, but
1296 if (!write_trylock(&script->lock))
1299 spin_lock(&unw.lock);
1301 /* re-insert script at the tail of the LRU chain: */
1302 unw.cache[unw.lru_tail].lru_chain = head;
1303 unw.lru_tail = head;
1305 /* remove the old script from the hash table (if it's there): */
1307 index = hash(script->ip);
1308 tmp = unw.cache + unw.hash[index];
/* Unlink either from the bucket head or mid-chain (prev tracking lines
 * are not visible in this extract). */
1311 if (tmp == script) {
1313 prev->coll_chain = tmp->coll_chain;
1315 unw.hash[index] = tmp->coll_chain;
1319 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1320 /* old script wasn't in the hash-table */
1322 tmp = unw.cache + tmp->coll_chain;
1326 /* enter new script in the hash table */
1328 script->coll_chain = unw.hash[index];
1329 unw.hash[index] = script - unw.cache;
1331 script->ip = ip; /* set new IP while we're holding the locks */
1333 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1335 spin_unlock_irqrestore(&unw.lock, flags);
/* script_finalize(): stamp the freshly built SCRIPT with the predicate
 * mask/values it depends on; the write lock stays held (see comment). */
1344 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1346 script->pr_mask = sr->pr_mask;
1347 script->pr_val = sr->pr_val;
1349 * We could down-grade our write-lock on script->lock here but
1350 * the rwlock API doesn't offer atomic lock downgrading, so
1351 * we'll just keep the write-lock and release it later when
1352 * we're done using the script.
/* script_emit(): append INSN to SCRIPT, dropping it (with a complaint)
 * once the fixed-size script buffer is full. */
1357 script_emit (struct unw_script *script, struct unw_insn insn)
1359 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1360 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1361 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1364 script->insn[script->count++] = insn;
/* emit_nat_info(): emit the script instruction that records where the NaT
 * bit of preserved register I can be found, based on where the register
 * itself was saved (stacked register, scratch, memory, ...).  (Fragmentary
 * extract: the switch opener, several case labels and val assignments are
 * not visible.) */
1368 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1370 struct unw_reg_info *r = sr->curr.reg + i;
1371 enum unw_insn_opcode opc;
1372 struct unw_insn insn;
1373 unsigned long val = 0;
1378 /* register got spilled to a stacked register */
1379 opc = UNW_INSN_SETNAT_TYPE;
1380 val = UNW_NAT_REGSTK;
1382 /* register got spilled to a scratch register */
1383 opc = UNW_INSN_SETNAT_MEMSTK;
1387 opc = UNW_INSN_SETNAT_TYPE;
1392 opc = UNW_INSN_SETNAT_TYPE;
/* Memory spills keep the NaT in the spilled word's UNaT collection. */
1396 case UNW_WHERE_PSPREL:
1397 case UNW_WHERE_SPREL:
1398 opc = UNW_INSN_SETNAT_MEMSTK;
1402 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1403 __FUNCTION__, r->where);
1407 insn.dst = unw.preg_index[i];
1409 script_emit(script, insn);
1413 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1415 struct unw_reg_info *r = sr->curr.reg + i;
1416 enum unw_insn_opcode opc;
1417 unsigned long val, rval;
1418 struct unw_insn insn;
1421 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1424 opc = UNW_INSN_MOVE;
1425 val = rval = r->val;
1426 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1431 opc = UNW_INSN_MOVE_STACKED;
1433 } else if (rval >= 4 && rval <= 7) {
1434 if (need_nat_info) {
1435 opc = UNW_INSN_MOVE2;
1438 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1439 } else if (rval == 0) {
1440 opc = UNW_INSN_MOVE_CONST;
1443 /* register got spilled to a scratch register */
1444 opc = UNW_INSN_MOVE_SCRATCH;
1445 val = pt_regs_off(rval);
1451 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1452 else if (rval >= 16 && rval <= 31)
1453 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1455 opc = UNW_INSN_MOVE_SCRATCH;
1457 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1459 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1460 __FUNCTION__, rval);
1465 if (rval >= 1 && rval <= 5)
1466 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1468 opc = UNW_INSN_MOVE_SCRATCH;
1470 val = offsetof(struct pt_regs, b0);
1472 val = offsetof(struct pt_regs, b6);
1474 val = offsetof(struct pt_regs, b7);
1478 case UNW_WHERE_SPREL:
1479 opc = UNW_INSN_ADD_SP;
1482 case UNW_WHERE_PSPREL:
1483 opc = UNW_INSN_ADD_PSP;
1487 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1488 __FUNCTION__, i, r->where);
1492 insn.dst = unw.preg_index[i];
1494 script_emit(script, insn);
1496 emit_nat_info(sr, i, script);
1498 if (i == UNW_REG_PSP) {
1500 * info->psp must contain the _value_ of the previous
1501 * sp, not it's save location. We get this by
1502 * dereferencing the value we just stored in
1505 insn.opc = UNW_INSN_LOAD;
1506 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1507 script_emit(script, insn);
1511 static inline const struct unw_table_entry *
1512 lookup (struct unw_table *table, unsigned long rel_ip)
1514 const struct unw_table_entry *e = 0;
1515 unsigned long lo, hi, mid;
1517 /* do a binary search for right entry: */
1518 for (lo = 0, hi = table->length; lo < hi; ) {
1519 mid = (lo + hi) / 2;
1520 e = &table->array[mid];
1521 if (rel_ip < e->start_offset)
1523 else if (rel_ip >= e->end_offset)
1528 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
/*
 * Build an unwind script that unwinds from state OLD_STATE to the
 * entrypoint of the function that called OLD_STATE.
 *
 * NOTE(review): several statements of this function (braces, early
 * returns, local declarations such as hdr/dp/desc_end/i) appear to be
 * missing from this copy — compare against the reference source before
 * building.  The overall flow is: allocate a script for ip, find the
 * unwind-table entry for ip, decode the unwind descriptors into a
 * state record, then compile the state record into script instructions.
 */
static inline struct unw_script *
build_script (struct unw_frame_info *info)
	const struct unw_table_entry *e = 0;
	struct unw_script *script = 0;
	struct unw_labeled_state *ls, *next;
	unsigned long ip = info->ip;
	struct unw_state_record sr;
	struct unw_table *table;
	struct unw_reg_info *r;
	struct unw_insn insn;
	STAT(unsigned long start, parse_start;)

	STAT(++unw.stat.script.builds; start = ia64_get_itc());

	/* build state record: every register starts out as "never saved" */
	memset(&sr, 0, sizeof(sr));
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
		r->when = UNW_WHEN_NEVER;
	sr.pr_val = info->pr;

	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
	script = script_new(ip);
		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
	/* remember this script as the likely successor of the previous one: */
	unw.cache[info->prev_script].hint = script - unw.cache;

	/* search the kernels and the modules' unwind tables for IP: */

	STAT(parse_start = ia64_get_itc());

	for (table = unw.tables; table; table = table->next) {
		if (ip >= table->start && ip < table->end) {
			e = lookup(table, ip - table->segment_base);
		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
			__FUNCTION__, ip, unw.cache[info->prev_script].ip);
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = 0;
		compile_reg(&sr, UNW_REG_RP, script);
		script_finalize(script, &sr);
		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);

	/* when_target: slot number of ip within the function (3 slots per bundle) */
	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
	hdr = *(u64 *) (table->segment_base + e->info_offset);
	dp = (u8 *) (table->segment_base + e->info_offset + 8);
	desc_end = dp + 8*UNW_LENGTH(hdr);

	/* decode all unwind descriptors for this function into sr: */
	while (!sr.done && dp < desc_end)
		dp = unw_decode(dp, sr.in_body, &sr);

	if (sr.when_target > sr.epilogue_start) {
		/*
		 * sp has been restored and all values on the memory stack below
		 * psp also have been restored.
		 */
		sr.curr.reg[UNW_REG_PSP].val = 0;
		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
			    || r->where == UNW_WHERE_SPREL)
				r->where = UNW_WHERE_NONE;
				r->when = UNW_WHEN_NEVER;

	script->flags = sr.flags;

	/*
	 * If RP didn't get saved, generate entry for the return link
	 */
	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
			   __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
			   sr.curr.reg[UNW_REG_RP].val);

	/* debug dump of the final state record: */
	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
		   __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
			UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
			case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
			case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
			case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
			case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
			case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
			case UNW_WHERE_NONE:
				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
			UNW_DPRINT(1, "\t\t%d\n", r->when);

	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	/* translate state record into unwinder instructions: */

	/*
	 * First, set psp if we're dealing with a fixed-size frame;
	 * subsequent instructions may depend on this value.
	 */
	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
		/* new psp is sp plus frame size */
		insn.opc = UNW_INSN_ADD;
		insn.dst = offsetof(struct unw_frame_info, psp)/8;
		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
		script_emit(script, insn);

	/* determine where the primary UNaT is: */
	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
		i = UNW_REG_PRI_UNAT_GR;
	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
		i = UNW_REG_PRI_UNAT_GR;

	compile_reg(&sr, i, script);

	/* compile the save locations of all remaining preserved registers: */
	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
		compile_reg(&sr, i, script);

	/* free labeled register states & stack: */

	STAT(parse_start = ia64_get_itc());
	for (ls = sr.labeled_states; ls; ls = next) {
		free_state_stack(&ls->saved_state);
		free_labeled_state(ls);
	free_state_stack(&sr.curr);
	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	script_finalize(script, &sr);
	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
/*
 * Apply the unwinding actions represented by OPS and update SR to
 * reflect the state that existed upon entry to the function that this
 * unwinder represents.
 *
 * NOTE(review): this copy is missing statements (the switch header,
 * break/goto lines, the instruction-fetch at the loop top, and the
 * lazy_init label near the end) — compare against the reference source.
 * The interpreter treats the frame-info structure as an array of
 * unsigned longs (s[]) indexed by the preg_index values compiled into
 * each instruction.
 */
run_script (struct unw_script *script, struct unw_frame_info *state)
	struct unw_insn *ip, *limit, next_insn;
	unsigned long opc, dst, val, off;
	unsigned long *s = (unsigned long *) state;
	STAT(unsigned long start;)

	STAT(++unw.stat.script.runs; start = ia64_get_itc());
	state->flags = script->flags;
	limit = script->insn + script->count;
	while (ip++ < limit) {
		opc = next_insn.opc;
		dst = next_insn.dst;
		val = next_insn.val;

		case UNW_INSN_MOVE2:
			/* move a register pair (value + NaT info) */
			s[dst+1] = s[val+1];
		case UNW_INSN_MOVE_SCRATCH:
			s[dst] = (unsigned long) get_scratch_regs(state) + val;
			UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
				   __FUNCTION__, dst, val);
		case UNW_INSN_MOVE_CONST:
			/* only constant supported is 0 (the r0 dummy) */
			s[dst] = (unsigned long) &unw.r0;
			UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
		case UNW_INSN_MOVE_STACKED:
			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
		case UNW_INSN_ADD_PSP:
			s[dst] = state->psp + val;
		case UNW_INSN_ADD_SP:
			s[dst] = state->sp + val;
		case UNW_INSN_SETNAT_MEMSTK:
			if (!state->pri_unat_loc)
				state->pri_unat_loc = &state->sw->ar_unat;
			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
		case UNW_INSN_SETNAT_TYPE:
			/* validity check on the psp value before dereferencing it: */
			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
			    || s[val] < TASK_SIZE)
				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
					   __FUNCTION__, s[val]);
			s[dst] = *(unsigned long *) s[val];
	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
	/* lazy initialization of a pointer into the switch_stack: */
	off = unw.sw_off[val];
	s[val] = (unsigned long) state->sw + off;
	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
		/*
		 * We're initializing a general register: init NaT info, too. Note that
		 * the offset is a multiple of 8 which gives us the 3 bits needed for
		 * the type field (presumably; confirm against the full comment).
		 */
		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1830 find_save_locs (struct unw_frame_info *info)
1832 int have_write_lock = 0;
1833 struct unw_script *scr;
1835 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1836 /* don't let obviously bad addresses pollute the cache */
1837 /* FIXME: should really be level 0 but it occurs too often. KAO */
1838 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
1843 scr = script_lookup(info);
1845 scr = build_script(info);
1848 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1849 __FUNCTION__, info->ip);
1852 have_write_lock = 1;
1854 info->hint = scr->hint;
1855 info->prev_script = scr - unw.cache;
1857 run_script(scr, info);
1859 if (have_write_lock)
1860 write_unlock(&scr->lock);
1862 read_unlock(&scr->lock);
/*
 * Unwind INFO by one frame: recover the caller's ip, cfm, bsp, and sp
 * from the save locations computed by the previous find_save_locs(),
 * then recompute the save locations for the new frame.
 *
 * NOTE(review): this copy is missing statements (the return-type line,
 * prev_ip/prev_sp initialization, `pr = info->pr;`, the bsp_loc
 * assignment in the interrupt-frame path, and the `return -1;` lines
 * after each error diagnostic) — compare against the reference source.
 */
unw_unwind (struct unw_frame_info *info)
	unsigned long prev_ip, prev_sp, prev_bsp;
	unsigned long ip, pr, num_regs;
	STAT(unsigned long start, flags;)

	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
	prev_bsp = info->bsp;

	/* restore the ip */
	if (!info->rp_loc) {
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
			   __FUNCTION__, info->ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	ip = info->ip = *info->rp_loc;
	if (ip < GATE_ADDR) {
		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* restore the cfm: */
	if (!info->pfs_loc) {
		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	info->cfm_loc = info->pfs_loc;

	/* restore the bsp: */
	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
		/* pt_regs sits 16 bytes above sp in an interrupt frame */
		info->pt = info->sp + 16;
		if ((pr & (1UL << pNonSys)) != 0)
			num_regs = *info->cfm_loc & 0x7f;	/* size of frame */
			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* restore the sp: */
	info->sp = info->psp;
	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* guard against infinite loops when the frame didn't change at all: */
	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* as we unwind, the saved ar.unat becomes the primary unat: */
	info->pri_unat_loc = info->unat_loc;

	/* finally, restore the predicates: */
	unw_get_pr(info, &info->pr);

	retval = find_save_locs(info);
	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
EXPORT_SYMBOL(unw_unwind);
1952 unw_unwind_to_user (struct unw_frame_info *info)
1956 while (unw_unwind(info) >= 0) {
1957 if (unw_get_rp(info, &ip) < 0) {
1958 unw_get_ip(info, &ip);
1959 UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
1963 if (ip < FIXADDR_USER_END)
1966 unw_get_ip(info, &ip);
1967 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
1970 EXPORT_SYMBOL(unw_unwind_to_user);
1973 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
1974 struct switch_stack *sw, unsigned long stktop)
1976 unsigned long rbslimit, rbstop, stklimit;
1977 STAT(unsigned long start, flags;)
1979 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
1982 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
1983 * don't want to do that because it would be slow as each preserved register would
1984 * have to be processed. Instead, what we do here is zero out the frame info and
1985 * start the unwind process at the function that created the switch_stack frame.
1986 * When a preserved value in switch_stack needs to be accessed, run_script() will
1987 * initialize the appropriate pointer on demand.
1989 memset(info, 0, sizeof(*info));
1991 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
1992 rbstop = sw->ar_bspstore;
1993 if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
1996 stklimit = (unsigned long) t + IA64_STK_OFFSET;
1997 if (stktop <= rbstop)
2000 info->regstk.limit = rbslimit;
2001 info->regstk.top = rbstop;
2002 info->memstk.limit = stklimit;
2003 info->memstk.top = stktop;
2006 info->sp = info->psp = stktop;
2008 UNW_DPRINT(3, "unwind.%s:\n"
2010 " rbs = [0x%lx-0x%lx)\n"
2011 " stk = [0x%lx-0x%lx)\n"
2015 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2016 info->pr, (unsigned long) info->sw, info->sp);
2017 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2021 unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
2022 struct pt_regs *pt, struct switch_stack *sw)
2026 init_frame_info(info, t, sw, pt->r12);
2027 info->cfm_loc = &pt->cr_ifs;
2028 info->unat_loc = &pt->ar_unat;
2029 info->pfs_loc = &pt->ar_pfs;
2030 sof = *info->cfm_loc & 0x7f;
2031 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
2032 info->ip = pt->cr_iip + ia64_psr(pt)->ri;
2033 info->pt = (unsigned long) pt;
2034 UNW_DPRINT(3, "unwind.%s:\n"
2038 __FUNCTION__, info->bsp, sof, info->ip);
2039 find_save_locs(info);
2043 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2047 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2048 info->cfm_loc = &sw->ar_pfs;
2049 sol = (*info->cfm_loc >> 7) & 0x7f;
2050 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2052 UNW_DPRINT(3, "unwind.%s:\n"
2056 __FUNCTION__, info->bsp, sol, info->ip);
2057 find_save_locs(info);
2061 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2063 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2065 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
2066 unw_init_frame_info(info, t, sw);
2068 EXPORT_SYMBOL(unw_init_from_blocked_task);
2071 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2072 unsigned long gp, const void *table_start, const void *table_end)
2074 const struct unw_table_entry *start = table_start, *end = table_end;
2077 table->segment_base = segment_base;
2079 table->start = segment_base + start[0].start_offset;
2080 table->end = segment_base + end[-1].end_offset;
2081 table->array = start;
2082 table->length = end - start;
2086 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2087 const void *table_start, const void *table_end)
2089 const struct unw_table_entry *start = table_start, *end = table_end;
2090 struct unw_table *table;
2091 unsigned long flags;
2093 if (end - start <= 0) {
2094 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2099 table = kmalloc(sizeof(*table), GFP_USER);
2103 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2105 spin_lock_irqsave(&unw.lock, flags);
2107 /* keep kernel unwind table at the front (it's searched most commonly): */
2108 table->next = unw.tables->next;
2109 unw.tables->next = table;
2111 spin_unlock_irqrestore(&unw.lock, flags);
2117 unw_remove_unwind_table (void *handle)
2119 struct unw_table *table, *prev;
2120 struct unw_script *tmp;
2121 unsigned long flags;
2125 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2131 if (table == &unw.kernel_table) {
2132 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2133 "no-can-do!\n", __FUNCTION__);
2137 spin_lock_irqsave(&unw.lock, flags);
2139 /* first, delete the table: */
2141 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2142 if (prev->next == table)
2145 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2146 __FUNCTION__, (void *) table);
2147 spin_unlock_irqrestore(&unw.lock, flags);
2150 prev->next = table->next;
2152 spin_unlock_irqrestore(&unw.lock, flags);
2154 /* next, remove hash table entries for this table */
2156 for (index = 0; index <= UNW_HASH_SIZE; ++index) {
2157 tmp = unw.cache + unw.hash[index];
2158 if (unw.hash[index] >= UNW_CACHE_SIZE
2159 || tmp->ip < table->start || tmp->ip >= table->end)
2162 write_lock(&tmp->lock);
2164 if (tmp->ip >= table->start && tmp->ip < table->end) {
2165 unw.hash[index] = tmp->coll_chain;
2169 write_unlock(&tmp->lock);
2176 create_gate_table (void)
2178 const struct unw_table_entry *entry, *start, *end;
2179 unsigned long *lp, segbase = GATE_ADDR;
2180 size_t info_size, size;
2182 Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2185 for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2186 if (phdr->p_type == PT_IA_64_UNWIND) {
2192 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
2196 start = (const struct unw_table_entry *) punw->p_vaddr;
2197 end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2200 unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2202 for (entry = start; entry < end; ++entry)
2203 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2204 size += 8; /* reserve space for "end of table" marker */
2206 unw.gate_table = kmalloc(size, GFP_KERNEL);
2207 if (!unw.gate_table) {
2208 unw.gate_table_size = 0;
2209 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
2212 unw.gate_table_size = size;
2214 lp = unw.gate_table;
2215 info = (char *) unw.gate_table + size;
2217 for (entry = start; entry < end; ++entry, lp += 3) {
2218 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2220 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2222 lp[0] = segbase + entry->start_offset; /* start */
2223 lp[1] = segbase + entry->end_offset; /* end */
2224 lp[2] = info - (char *) unw.gate_table; /* info */
2226 *lp = 0; /* end-of-table marker */
2230 __initcall(create_gate_table);
2236 extern void unw_hash_index_t_is_too_narrow (void);
2239 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2240 unw_hash_index_t_is_too_narrow();
2242 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
2243 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2244 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_UNAT);
2245 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2246 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
2247 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2248 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2249 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2250 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2251 unw.sw_off[unw.preg_index[i]] = off;
2252 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2253 unw.sw_off[unw.preg_index[i]] = off;
2254 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2255 unw.sw_off[unw.preg_index[i]] = off;
2256 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2257 unw.sw_off[unw.preg_index[i]] = off;
2259 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2261 unw.cache[i].lru_chain = (i - 1);
2262 unw.cache[i].coll_chain = -1;
2263 unw.cache[i].lock = RW_LOCK_UNLOCKED;
2265 unw.lru_head = UNW_CACHE_SIZE - 1;
2268 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2269 __start_unwind, __end_unwind);
2273 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2275 * This system call has been deprecated. The new and improved way to get
2276 * at the kernel's unwind info is via the gate DSO. The address of the
2277 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2279 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2281 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2282 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2283 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2286 * The first portion of the unwind data contains an unwind table and rest contains the
2287 * associated unwind info (in no particular order). The unwind table consists of a table
2288 * of entries of the form:
2290 * u64 start; (64-bit address of start of function)
 *	u64 end;	(64-bit address of end of function)
2292 * u64 info; (BUF-relative offset to unwind info)
2294 * The end of the unwind table is indicated by an entry with a START address of zero.
2296 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2297 * on the format of the unwind info.
2300 * EFAULT BUF points outside your accessible address space.
2303 sys_getunwind (void *buf, size_t buf_size)
2305 if (buf && buf_size >= unw.gate_table_size)
2306 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2308 return unw.gate_table_size;