2 * Copyright (C) 1999-2003 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependent on pt_regs structure.
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
29 #include <linux/module.h>
30 #include <linux/bootmem.h>
31 #include <linux/elf.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
36 #include <asm/unwind.h>
38 #include <asm/delay.h>
40 #include <asm/ptrace.h>
41 #include <asm/ptrace_offsets.h>
43 #include <asm/sections.h>
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
50 #define MIN(a,b) ((a) < (b) ? (a) : (b))
53 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
54 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
56 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
57 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
59 #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */
62 static unsigned int unw_debug_level = UNW_DEBUG;
63 # define UNW_DEBUG_ON(n) unw_debug_level >= n
64 /* Do not code a printk level, not all debug lines end in newline */
65 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
67 #else /* !UNW_DEBUG */
68 # define UNW_DEBUG_ON(n) 0
69 # define UNW_DPRINT(n, ...)
70 #endif /* UNW_DEBUG */
78 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
79 #define free_reg_state(usr) kfree(usr)
80 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
81 #define free_labeled_state(usr) kfree(usr)
83 typedef unsigned long unw_word;
84 typedef unsigned char unw_hash_index_t;
87 spinlock_t lock; /* spinlock for unwind data */
89 /* list of unwind tables (one per load-module) */
90 struct unw_table *tables;
92 /* table of registers that prologues can save (and order in which they're saved): */
93 const unsigned char save_order[8];
95 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
96 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
98 unsigned short lru_head; /* index of lead-recently used script */
99 unsigned short lru_tail; /* index of most-recently used script */
101 /* index into unw_frame_info for preserved register i */
102 unsigned short preg_index[UNW_NUM_REGS];
104 short pt_regs_offsets[32];
106 /* unwind table for the kernel: */
107 struct unw_table kernel_table;
109 /* unwind table describing the gate page (kernel code that is mapped into user space): */
110 size_t gate_table_size;
111 unsigned long *gate_table;
113 /* hash table that maps instruction pointer to script index: */
114 unsigned short hash[UNW_HASH_SIZE];
117 struct unw_script cache[UNW_CACHE_SIZE];
120 const char *preg_name[UNW_NUM_REGS];
128 int collision_chain_traversals;
131 unsigned long build_time;
132 unsigned long run_time;
133 unsigned long parse_time;
140 unsigned long init_time;
141 unsigned long unwind_time;
148 .tables = &unw.kernel_table,
149 .lock = SPIN_LOCK_UNLOCKED,
151 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
152 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
155 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
156 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
157 offsetof(struct unw_frame_info, bsp_loc)/8,
158 offsetof(struct unw_frame_info, bspstore_loc)/8,
159 offsetof(struct unw_frame_info, pfs_loc)/8,
160 offsetof(struct unw_frame_info, rnat_loc)/8,
161 offsetof(struct unw_frame_info, psp)/8,
162 offsetof(struct unw_frame_info, rp_loc)/8,
163 offsetof(struct unw_frame_info, r4)/8,
164 offsetof(struct unw_frame_info, r5)/8,
165 offsetof(struct unw_frame_info, r6)/8,
166 offsetof(struct unw_frame_info, r7)/8,
167 offsetof(struct unw_frame_info, unat_loc)/8,
168 offsetof(struct unw_frame_info, pr_loc)/8,
169 offsetof(struct unw_frame_info, lc_loc)/8,
170 offsetof(struct unw_frame_info, fpsr_loc)/8,
171 offsetof(struct unw_frame_info, b1_loc)/8,
172 offsetof(struct unw_frame_info, b2_loc)/8,
173 offsetof(struct unw_frame_info, b3_loc)/8,
174 offsetof(struct unw_frame_info, b4_loc)/8,
175 offsetof(struct unw_frame_info, b5_loc)/8,
176 offsetof(struct unw_frame_info, f2_loc)/8,
177 offsetof(struct unw_frame_info, f3_loc)/8,
178 offsetof(struct unw_frame_info, f4_loc)/8,
179 offsetof(struct unw_frame_info, f5_loc)/8,
180 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
181 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
182 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
183 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
184 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
185 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
186 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
187 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
188 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
189 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
190 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
191 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
192 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
193 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
194 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
195 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
199 offsetof(struct pt_regs, r1),
200 offsetof(struct pt_regs, r2),
201 offsetof(struct pt_regs, r3),
202 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
203 offsetof(struct pt_regs, r8),
204 offsetof(struct pt_regs, r9),
205 offsetof(struct pt_regs, r10),
206 offsetof(struct pt_regs, r11),
207 offsetof(struct pt_regs, r12),
208 offsetof(struct pt_regs, r13),
209 offsetof(struct pt_regs, r14),
210 offsetof(struct pt_regs, r15),
211 offsetof(struct pt_regs, r16),
212 offsetof(struct pt_regs, r17),
213 offsetof(struct pt_regs, r18),
214 offsetof(struct pt_regs, r19),
215 offsetof(struct pt_regs, r20),
216 offsetof(struct pt_regs, r21),
217 offsetof(struct pt_regs, r22),
218 offsetof(struct pt_regs, r23),
219 offsetof(struct pt_regs, r24),
220 offsetof(struct pt_regs, r25),
221 offsetof(struct pt_regs, r26),
222 offsetof(struct pt_regs, r27),
223 offsetof(struct pt_regs, r28),
224 offsetof(struct pt_regs, r29),
225 offsetof(struct pt_regs, r30),
226 offsetof(struct pt_regs, r31),
228 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
231 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
232 "r4", "r5", "r6", "r7",
233 "ar.unat", "pr", "ar.lc", "ar.fpsr",
234 "b1", "b2", "b3", "b4", "b5",
235 "f2", "f3", "f4", "f5",
236 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
237 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
242 /* Unwind accessors. */
245 * Returns offset of rREG in struct pt_regs.
247 static inline unsigned long
248 pt_regs_off (unsigned long reg)
252 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
253 off = unw.pt_regs_offsets[reg];
256 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
259 return (unsigned long) off;
262 static inline struct pt_regs *
263 get_scratch_regs (struct unw_frame_info *info)
266 /* This should not happen with valid unwind info. */
267 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
268 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
269 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
271 info->pt = info->sp - 16;
273 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
274 return (struct pt_regs *) info->pt;
278 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
280 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
281 struct unw_ireg *ireg;
284 if ((unsigned) regnum - 1 >= 127) {
285 if (regnum == 0 && !write) {
286 *val = 0; /* read r0 always returns 0 */
290 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
291 __FUNCTION__, regnum);
296 if (regnum >= 4 && regnum <= 7) {
297 /* access a preserved register */
298 ireg = &info->r4 + (regnum - 4);
301 nat_addr = addr + ireg->nat.off;
302 switch (ireg->nat.type) {
304 /* simulate getf.sig/setf.sig */
307 /* write NaTVal and be done with it */
314 if (addr[0] == 0 && addr[1] == 0x1ffe) {
315 /* return NaT and be done with it */
324 nat_addr = &dummy_nat;
328 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
332 nat_addr = ia64_rse_rnat_addr(addr);
333 if ((unsigned long) addr < info->regstk.limit
334 || (unsigned long) addr >= info->regstk.top)
336 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
338 __FUNCTION__, (void *) addr,
343 if ((unsigned long) nat_addr >= info->regstk.top)
344 nat_addr = &info->sw->ar_rnat;
345 nat_mask = (1UL << ia64_rse_slot_num(addr));
349 addr = &info->sw->r4 + (regnum - 4);
350 nat_addr = &info->sw->ar_unat;
351 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
354 /* access a scratch register */
355 pt = get_scratch_regs(info);
356 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
357 if (info->pri_unat_loc)
358 nat_addr = info->pri_unat_loc;
360 nat_addr = &info->sw->ar_unat;
361 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
364 /* access a stacked register */
365 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
366 nat_addr = ia64_rse_rnat_addr(addr);
367 if ((unsigned long) addr < info->regstk.limit
368 || (unsigned long) addr >= info->regstk.top)
370 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
371 "of rbs\n", __FUNCTION__);
374 if ((unsigned long) nat_addr >= info->regstk.top)
375 nat_addr = &info->sw->ar_rnat;
376 nat_mask = (1UL << ia64_rse_slot_num(addr));
382 *nat_addr |= nat_mask;
384 *nat_addr &= ~nat_mask;
386 if ((*nat_addr & nat_mask) == 0) {
390 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
396 EXPORT_SYMBOL(unw_access_gr);
399 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
406 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
407 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
408 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
411 case 1: case 2: case 3: case 4: case 5:
412 addr = *(&info->b1_loc + (regnum - 1));
414 addr = &info->sw->b1 + (regnum - 1);
418 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
419 __FUNCTION__, regnum);
428 EXPORT_SYMBOL(unw_access_br);
431 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
433 struct ia64_fpreg *addr = 0;
436 if ((unsigned) (regnum - 2) >= 126) {
437 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
438 __FUNCTION__, regnum);
443 addr = *(&info->f2_loc + (regnum - 2));
445 addr = &info->sw->f2 + (regnum - 2);
446 } else if (regnum <= 15) {
448 pt = get_scratch_regs(info);
449 addr = &pt->f6 + (regnum - 6);
452 addr = &info->sw->f12 + (regnum - 12);
453 } else if (regnum <= 31) {
454 addr = info->fr_loc[regnum - 16];
456 addr = &info->sw->f16 + (regnum - 16);
458 struct task_struct *t = info->task;
464 addr = t->thread.fph + (regnum - 32);
473 EXPORT_SYMBOL(unw_access_fr);
476 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
483 addr = info->bsp_loc;
485 addr = &info->sw->ar_bspstore;
488 case UNW_AR_BSPSTORE:
489 addr = info->bspstore_loc;
491 addr = &info->sw->ar_bspstore;
495 addr = info->pfs_loc;
497 addr = &info->sw->ar_pfs;
501 addr = info->rnat_loc;
503 addr = &info->sw->ar_rnat;
507 addr = info->unat_loc;
509 addr = &info->sw->ar_unat;
515 addr = &info->sw->ar_lc;
523 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
525 *val = (*info->cfm_loc >> 52) & 0x3f;
529 addr = info->fpsr_loc;
531 addr = &info->sw->ar_fpsr;
535 pt = get_scratch_regs(info);
540 pt = get_scratch_regs(info);
545 pt = get_scratch_regs(info);
550 pt = get_scratch_regs(info);
555 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
556 __FUNCTION__, regnum);
566 EXPORT_SYMBOL(unw_access_ar);
569 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
575 addr = &info->sw->pr;
583 EXPORT_SYMBOL(unw_access_pr);
586 /* Routines to manipulate the state stack. */
589 push (struct unw_state_record *sr)
591 struct unw_reg_state *rs;
593 rs = alloc_reg_state();
595 printk(KERN_ERR "unwind: cannot stack reg state!\n");
598 memcpy(rs, &sr->curr, sizeof(*rs));
603 pop (struct unw_state_record *sr)
605 struct unw_reg_state *rs = sr->curr.next;
608 printk(KERN_ERR "unwind: stack underflow!\n");
611 memcpy(&sr->curr, rs, sizeof(*rs));
615 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
616 static struct unw_reg_state *
617 dup_state_stack (struct unw_reg_state *rs)
619 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
622 copy = alloc_reg_state();
624 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
627 memcpy(copy, rs, sizeof(*copy));
638 /* Free all stacked register states (but not RS itself). */
640 free_state_stack (struct unw_reg_state *rs)
642 struct unw_reg_state *p, *next;
644 for (p = rs->next; p != NULL; p = next) {
651 /* Unwind decoder routines */
653 static enum unw_register_index __attribute_const__
654 decode_abreg (unsigned char abreg, int memory)
657 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
658 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
659 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
660 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
661 case 0x60: return UNW_REG_PR;
662 case 0x61: return UNW_REG_PSP;
663 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
664 case 0x63: return UNW_REG_RP;
665 case 0x64: return UNW_REG_BSP;
666 case 0x65: return UNW_REG_BSPSTORE;
667 case 0x66: return UNW_REG_RNAT;
668 case 0x67: return UNW_REG_UNAT;
669 case 0x68: return UNW_REG_FPSR;
670 case 0x69: return UNW_REG_PFS;
671 case 0x6a: return UNW_REG_LC;
675 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
680 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
684 if (reg->when == UNW_WHEN_NEVER)
689 alloc_spill_area (unsigned long *offp, unsigned long regsize,
690 struct unw_reg_info *lo, struct unw_reg_info *hi)
692 struct unw_reg_info *reg;
694 for (reg = hi; reg >= lo; --reg) {
695 if (reg->where == UNW_WHERE_SPILL_HOME) {
696 reg->where = UNW_WHERE_PSPREL;
704 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
706 struct unw_reg_info *reg;
708 for (reg = *regp; reg <= lim; ++reg) {
709 if (reg->where == UNW_WHERE_SPILL_HOME) {
715 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
719 finish_prologue (struct unw_state_record *sr)
721 struct unw_reg_info *reg;
726 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
727 * for Using Unwind Descriptors", rule 3):
729 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
730 reg = sr->curr.reg + unw.save_order[i];
731 if (reg->where == UNW_WHERE_GR_SAVE) {
732 reg->where = UNW_WHERE_GR;
733 reg->val = sr->gr_save_loc++;
738 * Next, compute when the fp, general, and branch registers get
739 * saved. This must come before alloc_spill_area() because
740 * we need to know which registers are spilled to their home
744 unsigned char kind, mask = 0, *cp = sr->imask;
746 static const unsigned char limit[3] = {
747 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
749 struct unw_reg_info *(regs[3]);
751 regs[0] = sr->curr.reg + UNW_REG_F2;
752 regs[1] = sr->curr.reg + UNW_REG_R4;
753 regs[2] = sr->curr.reg + UNW_REG_B1;
755 for (t = 0; t < sr->region_len; ++t) {
758 kind = (mask >> 2*(3-(t & 3))) & 3;
760 spill_next_when(®s[kind - 1], sr->curr.reg + limit[kind - 1],
761 sr->region_start + t);
765 * Next, lay out the memory stack spill area:
767 if (sr->any_spills) {
768 off = sr->spill_offset;
769 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
770 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
771 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
776 * Region header descriptors.
780 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
781 struct unw_state_record *sr)
785 if (!(sr->in_body || sr->first_region))
787 sr->first_region = 0;
789 /* check if we're done: */
790 if (sr->when_target < sr->region_start + sr->region_len) {
795 region_start = sr->region_start + sr->region_len;
797 for (i = 0; i < sr->epilogue_count; ++i)
799 sr->epilogue_count = 0;
800 sr->epilogue_start = UNW_WHEN_NEVER;
802 sr->region_start = region_start;
803 sr->region_len = rlen;
809 for (i = 0; i < 4; ++i) {
811 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
812 sr->region_start + sr->region_len - 1, grsave++);
815 sr->gr_save_loc = grsave;
818 sr->spill_offset = 0x10; /* default to psp+16 */
823 * Prologue descriptors.
827 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
829 if (abi == 3 && context == 'i') {
830 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
831 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
834 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
835 __FUNCTION__, abi, context);
839 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
843 for (i = 0; i < 5; ++i) {
845 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
846 sr->region_start + sr->region_len - 1, gr++);
852 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
856 for (i = 0; i < 5; ++i) {
858 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
859 sr->region_start + sr->region_len - 1, 0);
867 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
871 for (i = 0; i < 4; ++i) {
872 if ((grmask & 1) != 0) {
873 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
874 sr->region_start + sr->region_len - 1, 0);
879 for (i = 0; i < 20; ++i) {
880 if ((frmask & 1) != 0) {
881 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
882 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
883 sr->region_start + sr->region_len - 1, 0);
891 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
895 for (i = 0; i < 4; ++i) {
896 if ((frmask & 1) != 0) {
897 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
898 sr->region_start + sr->region_len - 1, 0);
906 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
910 for (i = 0; i < 4; ++i) {
911 if ((grmask & 1) != 0)
912 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
913 sr->region_start + sr->region_len - 1, gr++);
919 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
923 for (i = 0; i < 4; ++i) {
924 if ((grmask & 1) != 0) {
925 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
926 sr->region_start + sr->region_len - 1, 0);
934 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
936 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
937 sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
941 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
943 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
947 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
949 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
953 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
955 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
960 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
962 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
967 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
969 sr->return_link_reg = dst;
973 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
975 struct unw_reg_info *reg = sr->curr.reg + regnum;
977 if (reg->where == UNW_WHERE_NONE)
978 reg->where = UNW_WHERE_GR_SAVE;
979 reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
983 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
985 sr->spill_offset = 0x10 - 4*pspoff;
988 static inline unsigned char *
989 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
992 return imaskp + (2*sr->region_len + 7)/8;
999 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1001 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1002 sr->epilogue_count = ecount + 1;
1006 desc_copy_state (unw_word label, struct unw_state_record *sr)
1008 struct unw_labeled_state *ls;
1010 for (ls = sr->labeled_states; ls; ls = ls->next) {
1011 if (ls->label == label) {
1012 free_state_stack(&sr->curr);
1013 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1014 sr->curr.next = dup_state_stack(ls->saved_state.next);
1018 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1022 desc_label_state (unw_word label, struct unw_state_record *sr)
1024 struct unw_labeled_state *ls;
1026 ls = alloc_labeled_state();
1028 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1032 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1033 ls->saved_state.next = dup_state_stack(sr->curr.next);
1035 /* insert into list of labeled states: */
1036 ls->next = sr->labeled_states;
1037 sr->labeled_states = ls;
1041 * General descriptors.
1045 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1047 if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
1050 if ((sr->pr_val & (1UL << qp)) == 0)
1052 sr->pr_mask |= (1UL << qp);
1058 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1060 struct unw_reg_info *r;
1062 if (!desc_is_active(qp, t, sr))
1065 r = sr->curr.reg + decode_abreg(abreg, 0);
1066 r->where = UNW_WHERE_NONE;
1067 r->when = UNW_WHEN_NEVER;
1072 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1073 unsigned char ytreg, struct unw_state_record *sr)
1075 enum unw_where where = UNW_WHERE_GR;
1076 struct unw_reg_info *r;
1078 if (!desc_is_active(qp, t, sr))
1082 where = UNW_WHERE_BR;
1083 else if (ytreg & 0x80)
1084 where = UNW_WHERE_FR;
1086 r = sr->curr.reg + decode_abreg(abreg, 0);
1088 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1089 r->val = (ytreg & 0x7f);
1093 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1094 struct unw_state_record *sr)
1096 struct unw_reg_info *r;
1098 if (!desc_is_active(qp, t, sr))
1101 r = sr->curr.reg + decode_abreg(abreg, 1);
1102 r->where = UNW_WHERE_PSPREL;
1103 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1104 r->val = 0x10 - 4*pspoff;
1108 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1109 struct unw_state_record *sr)
1111 struct unw_reg_info *r;
1113 if (!desc_is_active(qp, t, sr))
1116 r = sr->curr.reg + decode_abreg(abreg, 1);
1117 r->where = UNW_WHERE_SPREL;
1118 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1122 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1128 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1129 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1131 * prologue descriptors:
1133 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1134 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1135 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1136 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1137 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1138 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1139 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1140 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1141 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1142 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1143 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1144 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1145 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1146 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1147 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1148 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1149 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1150 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1151 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1152 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1153 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1157 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1158 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1159 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1161 * general unwind descriptors:
1163 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1164 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1165 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1166 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1167 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1168 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1169 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1170 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1172 #include "unwind_decoder.c"
1175 /* Unwind scripts. */
1177 static inline unw_hash_index_t
1178 hash (unsigned long ip)
1180 # define hashmagic 0x9e3779b97f4a7c16 /* based on (sqrt(5)/2-1)*2^64 */
1182 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1187 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1189 read_lock(&script->lock);
1190 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1191 /* keep the read lock... */
1193 read_unlock(&script->lock);
1197 static inline struct unw_script *
1198 script_lookup (struct unw_frame_info *info)
1200 struct unw_script *script = unw.cache + info->hint;
1201 unsigned short index;
1202 unsigned long ip, pr;
1204 if (UNW_DEBUG_ON(0))
1205 return 0; /* Always regenerate scripts in debug mode */
1207 STAT(++unw.stat.cache.lookups);
1212 if (cache_match(script, ip, pr)) {
1213 STAT(++unw.stat.cache.hinted_hits);
1217 index = unw.hash[hash(ip)];
1218 if (index >= UNW_CACHE_SIZE)
1221 script = unw.cache + index;
1223 if (cache_match(script, ip, pr)) {
1224 /* update hint; no locking required as single-word writes are atomic */
1225 STAT(++unw.stat.cache.normal_hits);
1226 unw.cache[info->prev_script].hint = script - unw.cache;
1229 if (script->coll_chain >= UNW_HASH_SIZE)
1231 script = unw.cache + script->coll_chain;
1232 STAT(++unw.stat.cache.collision_chain_traversals);
1237 * On returning, a write lock for the SCRIPT is still being held.
1239 static inline struct unw_script *
1240 script_new (unsigned long ip)
1242 struct unw_script *script, *prev, *tmp;
1243 unw_hash_index_t index;
1244 unsigned long flags;
1245 unsigned short head;
1247 STAT(++unw.stat.script.news);
1250 * Can't (easily) use cmpxchg() here because of ABA problem
1251 * that is intrinsic in cmpxchg()...
1253 spin_lock_irqsave(&unw.lock, flags);
1255 head = unw.lru_head;
1256 script = unw.cache + head;
1257 unw.lru_head = script->lru_chain;
1259 spin_unlock(&unw.lock);
1262 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1263 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1264 * alternative would be to disable interrupts whenever we hold a read-lock, but
1267 if (!write_trylock(&script->lock))
1270 spin_lock(&unw.lock);
1272 /* re-insert script at the tail of the LRU chain: */
1273 unw.cache[unw.lru_tail].lru_chain = head;
1274 unw.lru_tail = head;
1276 /* remove the old script from the hash table (if it's there): */
1278 index = hash(script->ip);
1279 tmp = unw.cache + unw.hash[index];
1282 if (tmp == script) {
1284 prev->coll_chain = tmp->coll_chain;
1286 unw.hash[index] = tmp->coll_chain;
1290 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1291 /* old script wasn't in the hash-table */
1293 tmp = unw.cache + tmp->coll_chain;
1297 /* enter new script in the hash table */
1299 script->coll_chain = unw.hash[index];
1300 unw.hash[index] = script - unw.cache;
1302 script->ip = ip; /* set new IP while we're holding the locks */
1304 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1306 spin_unlock_irqrestore(&unw.lock, flags);
1315 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1317 script->pr_mask = sr->pr_mask;
1318 script->pr_val = sr->pr_val;
1320 * We could down-grade our write-lock on script->lock here but
1321 * the rwlock API doesn't offer atomic lock downgrading, so
1322 * we'll just keep the write-lock and release it later when
1323 * we're done using the script.
1328 script_emit (struct unw_script *script, struct unw_insn insn)
1330 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1331 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1332 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1335 script->insn[script->count++] = insn;
1339 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1341 struct unw_reg_info *r = sr->curr.reg + i;
1342 enum unw_insn_opcode opc;
1343 struct unw_insn insn;
1344 unsigned long val = 0;
1349 /* register got spilled to a stacked register */
1350 opc = UNW_INSN_SETNAT_TYPE;
1351 val = UNW_NAT_REGSTK;
1353 /* register got spilled to a scratch register */
1354 opc = UNW_INSN_SETNAT_MEMSTK;
1358 opc = UNW_INSN_SETNAT_TYPE;
1363 opc = UNW_INSN_SETNAT_TYPE;
1367 case UNW_WHERE_PSPREL:
1368 case UNW_WHERE_SPREL:
1369 opc = UNW_INSN_SETNAT_MEMSTK;
1373 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1374 __FUNCTION__, r->where);
1378 insn.dst = unw.preg_index[i];
1380 script_emit(script, insn);
/*
 * compile_reg - translate the save location of register I (recorded in
 * SR->curr.reg[i]) into unwind-script instructions appended to SCRIPT.
 * Registers that were never saved before when_target are skipped.  For
 * r4-r7 a companion NaT-tracking insn is emitted via emit_nat_info();
 * for UNW_REG_PSP an extra LOAD is emitted so info->psp ends up holding
 * the previous sp's value rather than its save location.
 * NOTE(review): the listing elides several case labels/breaks of the
 * original switch on r->where.
 */
1384 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1386 struct unw_reg_info *r = sr->curr.reg + i;
1387 enum unw_insn_opcode opc;
1388 unsigned long val, rval;
1389 struct unw_insn insn;
/* nothing to do if the register was never saved (or saved too late) */
1392 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1395 opc = UNW_INSN_MOVE;
1396 val = rval = r->val;
/* only r4-r7 are preserved GRs that carry NaT information */
1397 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
/* saved in a stacked (RSE) register */
1402 opc = UNW_INSN_MOVE_STACKED;
/* saved in another preserved register r4-r7 */
1404 } else if (rval >= 4 && rval <= 7) {
1405 if (need_nat_info) {
1406 opc = UNW_INSN_MOVE2;
1409 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1411 /* register got spilled to a scratch register */
1412 opc = UNW_INSN_MOVE_SCRATCH;
1413 val = pt_regs_off(rval);
/* floating-point save locations: preserved f2-f5 / f16-f31, else scratch */
1419 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1420 else if (rval >= 16 && rval <= 31)
1421 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1423 opc = UNW_INSN_MOVE_SCRATCH;
/* scratch FRs live in pt_regs starting at f6, 16 bytes apart */
1425 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1427 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1428 __FUNCTION__, rval);
/* branch-register save locations: preserved b1-b5, else pt_regs slots */
1433 if (rval >= 1 && rval <= 5)
1434 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1436 opc = UNW_INSN_MOVE_SCRATCH;
1438 val = offsetof(struct pt_regs, b0);
1440 val = offsetof(struct pt_regs, b6);
1442 val = offsetof(struct pt_regs, b7);
/* memory-stack save locations become address computations */
1446 case UNW_WHERE_SPREL:
1447 opc = UNW_INSN_ADD_SP;
1450 case UNW_WHERE_PSPREL:
1451 opc = UNW_INSN_ADD_PSP;
1455 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1456 __FUNCTION__, i, r->where);
/* emit the main insn, then NaT info if this is a preserved GR */
1460 insn.dst = unw.preg_index[i];
1462 script_emit(script, insn);
1464 emit_nat_info(sr, i, script);
1466 if (i == UNW_REG_PSP) {
1468 * info->psp must contain the _value_ of the previous
1469 * sp, not its save location. We get this by
1470 * dereferencing the value we just stored in
1473 insn.opc = UNW_INSN_LOAD;
1474 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1475 script_emit(script, insn);
/*
 * lookup - binary-search TABLE's sorted entry array for the entry whose
 * [start_offset, end_offset) range covers REL_IP (ip relative to the
 * table's segment base).  NOTE(review): the listing elides the lo/hi
 * adjustments inside the loop and the final return statements; the
 * trailing range check presumably returns NULL when no entry matches.
 */
1479 static inline const struct unw_table_entry *
1480 lookup (struct unw_table *table, unsigned long rel_ip)
1482 const struct unw_table_entry *e = 0;
1483 unsigned long lo, hi, mid;
1485 /* do a binary search for right entry: */
1486 for (lo = 0, hi = table->length; lo < hi; ) {
1487 mid = (lo + hi) / 2;
1488 e = &table->array[mid];
1489 if (rel_ip < e->start_offset)
1491 else if (rel_ip >= e->end_offset)
/* reject if the loop terminated without finding a covering entry */
1496 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1502 * Build an unwind script that unwinds from state OLD_STATE to the
1503 * entrypoint of the function that called OLD_STATE.
/*
 * build_script - create, compile, and cache an unwind script for
 * info->ip.  Parses the unwind descriptors covering ip into a state
 * record (sr), handles epilogue/default-RP special cases, and then
 * translates the state record into script instructions.  Returns the
 * new script or NULL on allocation failure.
 * NOTE(review): per the file-header locking rules this presumably runs
 * under unw.lock (it touches unw.tables and unw.cache) — confirm at
 * the callers.
 */
1505 static inline struct unw_script *
1506 build_script (struct unw_frame_info *info)
1508 const struct unw_table_entry *e = 0;
1509 struct unw_script *script = 0;
1510 struct unw_labeled_state *ls, *next;
1511 unsigned long ip = info->ip;
1512 struct unw_state_record sr;
1513 struct unw_table *table;
1514 struct unw_reg_info *r;
1515 struct unw_insn insn;
1519 STAT(unsigned long start, parse_start;)
1521 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1523 /* build state record */
1524 memset(&sr, 0, sizeof(sr));
1525 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1526 r->when = UNW_WHEN_NEVER;
1527 sr.pr_val = info->pr;
1529 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1530 script = script_new(ip);
1532 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1533 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
/* remember the new script as the hint of the previous one */
1536 unw.cache[info->prev_script].hint = script - unw.cache;
1538 /* search the kernels and the modules' unwind tables for IP: */
1540 STAT(parse_start = ia64_get_itc());
1542 for (table = unw.tables; table; table = table->next) {
1543 if (ip >= table->start && ip < table->end) {
1544 e = lookup(table, ip - table->segment_base);
1549 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1550 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1551 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1552 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1553 sr.curr.reg[UNW_REG_RP].when = -1;
1554 sr.curr.reg[UNW_REG_RP].val = 0;
1555 compile_reg(&sr, UNW_REG_RP, script);
1556 script_finalize(script, &sr);
1557 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1558 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
/* when_target = instruction slot of ip within the function (3 slots/bundle) */
1562 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
/* unwind info: 8-byte header followed by UNW_LENGTH(hdr) descriptor words */
1564 hdr = *(u64 *) (table->segment_base + e->info_offset);
1565 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1566 desc_end = dp + 8*UNW_LENGTH(hdr);
1568 while (!sr.done && dp < desc_end)
1569 dp = unw_decode(dp, sr.in_body, &sr);
/* past the start of the epilogue: sp/low memory-stack values are back */
1571 if (sr.when_target > sr.epilogue_start) {
1573 * sp has been restored and all values on the memory stack below
1574 * psp also have been restored.
1576 sr.curr.reg[UNW_REG_PSP].val = 0;
1577 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1578 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1579 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1580 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1581 || r->where == UNW_WHERE_SPREL)
1584 r->where = UNW_WHERE_NONE;
1585 r->when = UNW_WHEN_NEVER;
1589 script->flags = sr.flags;
1592 * If RP didn't get saved, generate entry for the return link
1595 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1596 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1597 sr.curr.reg[UNW_REG_RP].when = -1;
1598 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1599 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1600 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1601 sr.curr.reg[UNW_REG_RP].val);
/* debug dump of the fully-parsed state record */
1605 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1606 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1607 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1608 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1609 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1611 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1612 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1613 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1614 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1615 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1616 case UNW_WHERE_NONE:
1617 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1621 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1624 UNW_DPRINT(1, "\t\t%d\n", r->when);
1629 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1631 /* translate state record into unwinder instructions: */
1634 * First, set psp if we're dealing with a fixed-size frame;
1635 * subsequent instructions may depend on this value.
1637 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1638 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1639 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1640 /* new psp is sp plus frame size */
1641 insn.opc = UNW_INSN_ADD;
1642 insn.dst = offsetof(struct unw_frame_info, psp)/8;
1643 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1644 script_emit(script, insn);
1647 /* determine where the primary UNaT is: */
1648 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1649 i = UNW_REG_PRI_UNAT_MEM;
1650 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1651 i = UNW_REG_PRI_UNAT_GR;
1652 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1653 i = UNW_REG_PRI_UNAT_MEM;
1655 i = UNW_REG_PRI_UNAT_GR;
1657 compile_reg(&sr, i, script);
/* compile the remaining registers after the primary unat */
1659 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1660 compile_reg(&sr, i, script);
1662 /* free labeled register states & stack: */
1664 STAT(parse_start = ia64_get_itc());
1665 for (ls = sr.labeled_states; ls; ls = next) {
1667 free_state_stack(&ls->saved_state);
1668 free_labeled_state(ls);
1670 free_state_stack(&sr.curr);
1671 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1673 script_finalize(script, &sr);
1674 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1679 * Apply the unwinding actions encoded in SCRIPT and update STATE to
1680 * reflect the machine state that existed upon entry to the function
1681 * that this script unwinds out of.
/*
 * run_script - interpret SCRIPT's instructions to fill in the save
 * locations of STATE.  STATE is treated as an array of unsigned longs
 * (s[]); each insn's dst/val index that array.  The tail fragment
 * (original label `lazy_init', elided here) initializes a
 * switch_stack-relative save location on demand, including the NaT
 * companion word for r4-r7.
 * NOTE(review): several case labels and the main switch header are
 * elided from this listing.
 */
1684 run_script (struct unw_script *script, struct unw_frame_info *state)
1686 struct unw_insn *ip, *limit, next_insn;
1687 unsigned long opc, dst, val, off;
1688 unsigned long *s = (unsigned long *) state;
1689 STAT(unsigned long start;)
1691 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1692 state->flags = script->flags;
1694 limit = script->insn + script->count;
/* fetch-ahead loop: next_insn was loaded before the iteration */
1697 while (ip++ < limit) {
1698 opc = next_insn.opc;
1699 dst = next_insn.dst;
1700 val = next_insn.val;
/* MOVE2: copy a value together with its NaT companion word */
1709 case UNW_INSN_MOVE2:
1712 s[dst+1] = s[val+1];
/* scratch registers live in pt_regs; val is a pt_regs offset */
1722 case UNW_INSN_MOVE_SCRATCH:
1724 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1727 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1728 __FUNCTION__, dst, val);
/* stacked register: compute its backing-store address via RSE */
1732 case UNW_INSN_MOVE_STACKED:
1733 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1737 case UNW_INSN_ADD_PSP:
1738 s[dst] = state->psp + val;
1741 case UNW_INSN_ADD_SP:
1742 s[dst] = state->sp + val;
1745 case UNW_INSN_SETNAT_MEMSTK:
1746 if (!state->pri_unat_loc)
1747 state->pri_unat_loc = &state->sw->ar_unat;
1748 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1749 s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1752 case UNW_INSN_SETNAT_TYPE:
/* LOAD: sanity-check the address before dereferencing (debug build) */
1758 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1759 || s[val] < TASK_SIZE)
1761 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1762 __FUNCTION__, s[val]);
1766 s[dst] = *(unsigned long *) s[val];
1770 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
/* lazy_init tail: point s[val] at the switch_stack slot for this reg */
1774 off = unw.sw_off[val];
1775 s[val] = (unsigned long) state->sw + off;
1776 if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1778 * We're initializing a general register: init NaT info, too. Note that
1779 * the offset is a multiple of 8 which gives us the 3 bits needed for
1782 s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
/*
 * find_save_locs - locate (or build) the unwind script for info->ip
 * and run it to fill in INFO's save locations.  Obviously-bad ip
 * values are rejected up front so they never pollute the script cache.
 * The script's read (or, for a freshly built script, write) lock is
 * held across run_script() and released before returning.
 */
1787 find_save_locs (struct unw_frame_info *info)
1789 int have_write_lock = 0;
1790 struct unw_script *scr;
1792 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1793 /* don't let obviously bad addresses pollute the cache */
1794 /* FIXME: should really be level 0 but it occurs too often. KAO */
1795 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
/* fast path: cached script; slow path: compile a new one */
1800 scr = script_lookup(info);
1802 scr = build_script(info);
1805 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1806 __FUNCTION__, info->ip);
/* build_script() returns with the script write-locked */
1809 have_write_lock = 1;
1811 info->hint = scr->hint;
1812 info->prev_script = scr - unw.cache;
1814 run_script(scr, info);
1816 if (have_write_lock)
1817 write_unlock(&scr->lock);
1819 read_unlock(&scr->lock);
/*
 * unw_unwind - step INFO up one frame: restore ip from the saved return
 * link, cfm from ar.pfs, bsp via the RSE arithmetic, and sp from psp,
 * then recompute the save locations for the new frame.  Fails (the
 * elided returns presumably yield a negative value) when the return
 * link or ar.pfs cannot be located, when ip drops below GATE_ADDR
 * (user space reached), when bsp/sp leave their stack bounds, or when
 * no progress is made (ip/sp/bsp all unchanged).
 */
1824 unw_unwind (struct unw_frame_info *info)
1826 unsigned long prev_ip, prev_sp, prev_bsp;
1827 unsigned long ip, pr, num_regs;
1828 STAT(unsigned long start, flags;)
1831 STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1835 prev_bsp = info->bsp;
1837 /* restore the ip */
1838 if (!info->rp_loc) {
1839 /* FIXME: should really be level 0 but it occurs too often. KAO */
1840 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1841 __FUNCTION__, info->ip);
1842 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1845 ip = info->ip = *info->rp_loc;
1846 if (ip < GATE_ADDR) {
1847 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
1848 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1852 /* restore the cfm: */
1853 if (!info->pfs_loc) {
1854 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
1855 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1858 info->cfm_loc = info->pfs_loc;
1860 /* restore the bsp: */
1863 if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1864 info->pt = info->sp + 16;
1865 if ((pr & (1UL << pNonSys)) != 0)
1866 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
/* syscall path: cfm is found in the pt_regs ar.pfs slot instead */
1868 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1869 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
1871 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1872 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1873 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1874 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1875 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
1876 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1880 /* restore the sp: */
1881 info->sp = info->psp;
1882 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1883 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1884 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
1885 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
/* no progress => loop would never terminate; stop here */
1889 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1890 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1892 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1896 /* as we unwind, the saved ar.unat becomes the primary unat: */
1897 info->pri_unat_loc = info->unat_loc;
1899 /* finally, restore the predicates: */
1900 unw_get_pr(info, &info->pr);
1902 retval = find_save_locs(info);
1903 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1906 EXPORT_SYMBOL(unw_unwind);
/*
 * unw_unwind_to_user - repeatedly unwind INFO until the next return
 * pointer lies in user space (below FIXADDR_USER_END) or unwinding
 * fails; logs a diagnostic when user level could not be reached.
 */
1909 unw_unwind_to_user (struct unw_frame_info *info)
1913 while (unw_unwind(info) >= 0) {
1914 if (unw_get_rp(info, &ip) < 0) {
1915 unw_get_ip(info, &ip);
1916 UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
/* return link points below the user fixmap end => user frame reached */
1920 if (ip < FIXADDR_USER_END)
1923 unw_get_ip(info, &ip);
1924 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
1927 EXPORT_SYMBOL(unw_unwind_to_user);
/*
 * init_frame_info - zero INFO and set up its register-backing-store and
 * memory-stack bounds for task T, starting the unwind at the function
 * that created SW's switch_stack frame.  STKTOP is the initial sp.
 * Out-of-range rbstop/stktop values are clamped (clamping lines elided
 * from this listing).
 */
1930 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
1931 struct switch_stack *sw, unsigned long stktop)
1933 unsigned long rbslimit, rbstop, stklimit;
1934 STAT(unsigned long start, flags;)
1936 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
1939 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
1940 * don't want to do that because it would be slow as each preserved register would
1941 * have to be processed. Instead, what we do here is zero out the frame info and
1942 * start the unwind process at the function that created the switch_stack frame.
1943 * When a preserved value in switch_stack needs to be accessed, run_script() will
1944 * initialize the appropriate pointer on demand.
1946 memset(info, 0, sizeof(*info));
/* rbs grows up from the task base; cap rbstop at the task stack area */
1948 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
1949 rbstop = sw->ar_bspstore;
1950 if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
1953 stklimit = (unsigned long) t + IA64_STK_OFFSET;
1954 if (stktop <= rbstop)
1957 info->regstk.limit = rbslimit;
1958 info->regstk.top = rbstop;
1959 info->memstk.limit = stklimit;
1960 info->memstk.top = stktop;
1963 info->sp = info->psp = stktop;
1965 UNW_DPRINT(3, "unwind.%s:\n"
1967 " rbs = [0x%lx-0x%lx)\n"
1968 " stk = [0x%lx-0x%lx)\n"
1972 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
1973 info->pr, (unsigned long) info->sw, info->sp);
1974 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
/*
 * unw_init_from_interruption - start an unwind from an interruption:
 * seed INFO from PT (cr.ifs/ar.unat/ar.pfs, iip+ri as ip) and SW, back
 * bsp off by the full frame size (sof), then compute save locations.
 */
1978 unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
1979 struct pt_regs *pt, struct switch_stack *sw)
1983 init_frame_info(info, t, sw, pt->r12);
1984 info->cfm_loc = &pt->cr_ifs;
1985 info->unat_loc = &pt->ar_unat;
1986 info->pfs_loc = &pt->ar_pfs;
/* sof = size of frame, low 7 bits of the cfm */
1987 sof = *info->cfm_loc & 0x7f;
1988 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
/* ip = interrupted bundle address plus the slot number (psr.ri) */
1989 info->ip = pt->cr_iip + ia64_psr(pt)->ri;
1990 info->pt = (unsigned long) pt;
1991 UNW_DPRINT(3, "unwind.%s:\n"
1995 __FUNCTION__, info->bsp, sof, info->ip);
1996 find_save_locs(info);
/*
 * unw_init_frame_info - start an unwind from a switch_stack frame:
 * seed cfm from sw->ar_pfs and back bsp off by the number of locals
 * (sol), then compute save locations.  The initial sp points just
 * past the switch_stack minus the 16-byte scratch area.
 */
2000 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2004 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2005 info->cfm_loc = &sw->ar_pfs;
/* sol = size of locals, bits 7..13 of the cfm */
2006 sol = (*info->cfm_loc >> 7) & 0x7f;
2007 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2009 UNW_DPRINT(3, "unwind.%s:\n"
2013 __FUNCTION__, info->bsp, sol, info->ip);
2014 find_save_locs(info);
/*
 * unw_init_from_blocked_task - start an unwind of blocked task T from
 * the switch_stack that the context switch saved just above t's kernel
 * stack pointer (ksp + 16 bytes of scratch).
 */
2018 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2020 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2022 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
2023 unw_init_frame_info(info, t, sw);
2025 EXPORT_SYMBOL(unw_init_from_blocked_task);
/*
 * init_unwind_table - fill in TABLE's fields from a raw unwind-table
 * array [table_start, table_end): the covered ip range is derived from
 * the first entry's start and the last entry's end offsets, both
 * relative to SEGMENT_BASE.
 */
2028 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2029 unsigned long gp, const void *table_start, const void *table_end)
2031 const struct unw_table_entry *start = table_start, *end = table_end;
2034 table->segment_base = segment_base;
2036 table->start = segment_base + start[0].start_offset;
2037 table->end = segment_base + end[-1].end_offset;
2038 table->array = start;
2039 table->length = end - start;
/*
 * unw_add_unwind_table - register a new unwind table (e.g. for a
 * module) covering [table_start, table_end).  Empty tables are
 * rejected.  The new table is linked in right after the kernel table,
 * which stays at the head of the list because it is searched most
 * often.  Returns an opaque handle (presumably the table pointer;
 * return lines elided from this listing).
 */
2043 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2044 const void *table_start, const void *table_end)
2046 const struct unw_table_entry *start = table_start, *end = table_end;
2047 struct unw_table *table;
2048 unsigned long flags;
2050 if (end - start <= 0) {
2051 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2056 table = kmalloc(sizeof(*table), GFP_USER);
2060 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
/* list updates are serialized by unw.lock (see file-header rules) */
2062 spin_lock_irqsave(&unw.lock, flags);
2064 /* keep kernel unwind table at the front (it's searched most commonly): */
2065 table->next = unw.tables->next;
2066 unw.tables->next = table;
2068 spin_unlock_irqrestore(&unw.lock, flags);
/*
 * unw_remove_unwind_table - unregister the unwind table identified by
 * HANDLE: unlink it from unw.tables (under unw.lock) and then evict any
 * cached scripts whose ip falls inside the table's range from the hash
 * chains.  Removing the kernel's own table is refused.
 * Fix: the hash-eviction loop iterated with `index <= UNW_HASH_SIZE',
 * reading one element past the end of the UNW_HASH_SIZE-sized hash
 * array (out-of-bounds access); the bound is now strict.
 */
2074 unw_remove_unwind_table (void *handle)
2076 struct unw_table *table, *prev;
2077 struct unw_script *tmp;
2078 unsigned long flags;
2082 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
/* the kernel's own table must never be freed */
2088 if (table == &unw.kernel_table) {
2089 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2090 "no-can-do!\n", __FUNCTION__);
2094 spin_lock_irqsave(&unw.lock, flags);
2096 /* first, delete the table: */
/* walk via a fake head node so prev->next works for the first element */
2098 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2099 if (prev->next == table)
2102 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2103 __FUNCTION__, (void *) table);
2104 spin_unlock_irqrestore(&unw.lock, flags);
2107 prev->next = table->next;
2109 spin_unlock_irqrestore(&unw.lock, flags);
2111 /* next, remove hash table entries for this table */
/* strict bound: unw.hash[] has exactly UNW_HASH_SIZE entries */
2113 for (index = 0; index < UNW_HASH_SIZE; ++index) {
2114 tmp = unw.cache + unw.hash[index];
2115 if (unw.hash[index] >= UNW_CACHE_SIZE
2116 || tmp->ip < table->start || tmp->ip >= table->end)
2119 write_lock(&tmp->lock);
/* re-check under the lock: the script may have been recycled meanwhile */
2121 if (tmp->ip >= table->start && tmp->ip < table->end) {
2122 unw.hash[index] = tmp->coll_chain;
2126 write_unlock(&tmp->lock);
/*
 * create_gate_table - at init time, locate the gate DSO's unwind
 * segment (PT_IA_64_UNWIND program header), register it as the
 * "linux-gate.so" unwind table, and build a self-contained, relocated
 * copy (unw.gate_table) that sys_getunwind() can hand to user level:
 * a table of {start, end, info-offset} triples followed by the unwind
 * info blobs, terminated by a zero start address.
 */
2133 create_gate_table (void)
2135 const struct unw_table_entry *entry, *start, *end;
2136 unsigned long *lp, segbase = GATE_ADDR;
2137 size_t info_size, size;
2139 Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2142 for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2143 if (phdr->p_type == PT_IA_64_UNWIND) {
2149 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
2153 start = (const struct unw_table_entry *) punw->p_vaddr;
2154 end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2157 unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
/* first pass: compute total size (3 table words + hdr + descriptors each) */
2159 for (entry = start; entry < end; ++entry)
2160 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2161 size += 8; /* reserve space for "end of table" marker */
2163 unw.gate_table = kmalloc(size, GFP_KERNEL);
2164 if (!unw.gate_table) {
2165 unw.gate_table_size = 0;
2166 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
2169 unw.gate_table_size = size;
/* second pass: info blobs are packed downward from the buffer's end */
2171 lp = unw.gate_table;
2172 info = (char *) unw.gate_table + size;
2174 for (entry = start; entry < end; ++entry, lp += 3) {
2175 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2177 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2179 lp[0] = segbase + entry->start_offset; /* start */
2180 lp[1] = segbase + entry->end_offset; /* end */
2181 lp[2] = info - (char *) unw.gate_table; /* info */
2183 *lp = 0; /* end-of-table marker */
2187 __initcall(create_gate_table);
/*
 * unw_init body (function header elided from this listing): one-time
 * setup of the unwinder — verify unw_hash_index_t is wide enough via a
 * link-time trick, build the frame-info-slot -> switch_stack-offset
 * map (unw.sw_off) used by run_script()'s lazy initialization, link
 * the script cache into an LRU chain, and register the kernel's own
 * unwind table.
 * Fix: UNW_REG_PFS was mapped to SW(AR_UNAT) (duplicating the UNAT
 * mapping three lines below), so lazy initialization would read ar.pfs
 * from the wrong switch_stack slot; it now maps to SW(AR_PFS).
 */
2193 extern void unw_hash_index_t_is_too_narrow (void);
/* link fails on this call if the hash index type cannot hold a hash value */
2196 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2197 unw_hash_index_t_is_too_narrow();
2199 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
2200 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2201 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2202 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2203 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
2204 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2205 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2206 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
/* preserved GRs/BRs are 8 bytes apart, FRs 16 bytes apart in switch_stack */
2207 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2208 unw.sw_off[unw.preg_index[i]] = off;
2209 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2210 unw.sw_off[unw.preg_index[i]] = off;
2211 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2212 unw.sw_off[unw.preg_index[i]] = off;
2213 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2214 unw.sw_off[unw.preg_index[i]] = off;
/* chain the script cache entries together for LRU replacement */
2216 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2218 unw.cache[i].lru_chain = (i - 1);
2219 unw.cache[i].coll_chain = -1;
2220 unw.cache[i].lock = RW_LOCK_UNLOCKED;
2222 unw.lru_head = UNW_CACHE_SIZE - 1;
2225 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2226 __start_unwind, __end_unwind);
2230 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2232 * This system call has been deprecated. The new and improved way to get
2233 * at the kernel's unwind info is via the gate DSO. The address of the
2234 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2236 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2238 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2239 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2240 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2243 * The first portion of the unwind data contains an unwind table and rest contains the
2244 * associated unwind info (in no particular order). The unwind table consists of a table
2245 * of entries of the form:
2247 * u64 start; (64-bit address of start of function)
2248 * u64 end; (64-bit address of end of function)
2249 * u64 info; (BUF-relative offset to unwind info)
2251 * The end of the unwind table is indicated by an entry with a START address of zero.
2253 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2254 * on the format of the unwind info.
2257 * EFAULT BUF points outside your accessible address space.
2260 sys_getunwind (void *buf, size_t buf_size)
2262 if (buf && buf_size >= unw.gate_table_size)
2263 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2265 return unw.gate_table_size;