#include "entry.h"
#include "unwind_i.h"
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-#define p5 5
-
#define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
#define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
if (info->pri_unat_loc)
nat_addr = info->pri_unat_loc;
else
- nat_addr = &info->sw->ar_unat;
+ nat_addr = &info->sw->caller_unat;
nat_mask = (1UL << ((long) addr & 0x1f8)/8);
}
} else {
int
unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
{
- struct ia64_fpreg *addr = 0;
+ struct ia64_fpreg *addr = NULL;
struct pt_regs *pt;
if ((unsigned) (regnum - 2) >= 126) {
case UNW_AR_UNAT:
addr = info->unat_loc;
if (!addr)
- addr = &info->sw->ar_unat;
+ addr = &info->sw->caller_unat;
break;
case UNW_AR_LC:
}
sr->gr_save_loc = grsave;
sr->any_spills = 0;
- sr->imask = 0;
+ sr->imask = NULL;
sr->spill_offset = 0x10; /* default to psp+16 */
}
}
desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
{
set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
- sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
+ sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
}
/*
 * Process a "mem-stack-v" unwind descriptor: record the slot number ("when")
 * at which the memory stack frame becomes valid, clamped so it stays within
 * the current prologue region.  NOTE(review): the -/+ markers below are patch
 * residue — the change replaces the local MIN() macro with the kernel's
 * type-safe min_t() helper; the computed value is unchanged.
 */
static inline void
desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
{
-	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
+	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
}
static inline void
if (reg->where == UNW_WHERE_NONE)
reg->where = UNW_WHERE_GR_SAVE;
- reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
}
static inline void
static inline int
desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
{
- if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
+ if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
return 0;
if (qp > 0) {
if ((sr->pr_val & (1UL << qp)) == 0)
r = sr->curr.reg + decode_abreg(abreg, 0);
r->where = where;
- r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
r->val = (ytreg & 0x7f);
}
r = sr->curr.reg + decode_abreg(abreg, 1);
r->where = UNW_WHERE_PSPREL;
- r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
r->val = 0x10 - 4*pspoff;
}
r = sr->curr.reg + decode_abreg(abreg, 1);
r->where = UNW_WHERE_SPREL;
- r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
r->val = 4*spoff;
}
static inline unw_hash_index_t
hash (unsigned long ip)
{
-# define hashmagic 0x9e3779b97f4a7c16 /* based on (sqrt(5)/2-1)*2^64 */
+# define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */
return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
#undef hashmagic
unsigned long ip, pr;
if (UNW_DEBUG_ON(0))
- return 0; /* Always regenerate scripts in debug mode */
+ return NULL; /* Always regenerate scripts in debug mode */
STAT(++unw.stat.cache.lookups);
index = unw.hash[hash(ip)];
if (index >= UNW_CACHE_SIZE)
- return 0;
+ return NULL;
script = unw.cache + index;
while (1) {
return script;
}
if (script->coll_chain >= UNW_HASH_SIZE)
- return 0;
+ return NULL;
script = unw.cache + script->coll_chain;
STAT(++unw.stat.cache.collision_chain_traversals);
}
{
struct unw_script *script, *prev, *tmp;
unw_hash_index_t index;
- unsigned long flags;
unsigned short head;
STAT(++unw.stat.script.news);
* Can't (easily) use cmpxchg() here because of ABA problem
* that is intrinsic in cmpxchg()...
*/
- spin_lock_irqsave(&unw.lock, flags);
- {
- head = unw.lru_head;
- script = unw.cache + head;
- unw.lru_head = script->lru_chain;
- }
- spin_unlock(&unw.lock);
+ head = unw.lru_head;
+ script = unw.cache + head;
+ unw.lru_head = script->lru_chain;
/*
* We'd deadlock here if we interrupted a thread that is holding a read lock on
if (!write_trylock(&script->lock))
return NULL;
- spin_lock(&unw.lock);
- {
- /* re-insert script at the tail of the LRU chain: */
- unw.cache[unw.lru_tail].lru_chain = head;
- unw.lru_tail = head;
-
- /* remove the old script from the hash table (if it's there): */
- if (script->ip) {
- index = hash(script->ip);
- tmp = unw.cache + unw.hash[index];
- prev = 0;
- while (1) {
- if (tmp == script) {
- if (prev)
- prev->coll_chain = tmp->coll_chain;
- else
- unw.hash[index] = tmp->coll_chain;
- break;
- } else
- prev = tmp;
- if (tmp->coll_chain >= UNW_CACHE_SIZE)
- /* old script wasn't in the hash-table */
- break;
- tmp = unw.cache + tmp->coll_chain;
- }
+ /* re-insert script at the tail of the LRU chain: */
+ unw.cache[unw.lru_tail].lru_chain = head;
+ unw.lru_tail = head;
+
+ /* remove the old script from the hash table (if it's there): */
+ if (script->ip) {
+ index = hash(script->ip);
+ tmp = unw.cache + unw.hash[index];
+ prev = NULL;
+ while (1) {
+ if (tmp == script) {
+ if (prev)
+ prev->coll_chain = tmp->coll_chain;
+ else
+ unw.hash[index] = tmp->coll_chain;
+ break;
+ } else
+ prev = tmp;
+ if (tmp->coll_chain >= UNW_CACHE_SIZE)
+ /* old script wasn't in the hash-table */
+ break;
+ tmp = unw.cache + tmp->coll_chain;
}
+ }
- /* enter new script in the hash table */
- index = hash(ip);
- script->coll_chain = unw.hash[index];
- unw.hash[index] = script - unw.cache;
+ /* enter new script in the hash table */
+ index = hash(ip);
+ script->coll_chain = unw.hash[index];
+ unw.hash[index] = script - unw.cache;
- script->ip = ip; /* set new IP while we're holding the locks */
+ script->ip = ip; /* set new IP while we're holding the locks */
- STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
- }
- spin_unlock_irqrestore(&unw.lock, flags);
+ STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
script->flags = 0;
script->hint = 0;
static inline const struct unw_table_entry *
lookup (struct unw_table *table, unsigned long rel_ip)
{
- const struct unw_table_entry *e = 0;
+ const struct unw_table_entry *e = NULL;
unsigned long lo, hi, mid;
/* do a binary search for right entry: */
static inline struct unw_script *
build_script (struct unw_frame_info *info)
{
- const struct unw_table_entry *e = 0;
- struct unw_script *script = 0;
+ const struct unw_table_entry *e = NULL;
+ struct unw_script *script = NULL;
struct unw_labeled_state *ls, *next;
unsigned long ip = info->ip;
struct unw_state_record sr;
if (!script) {
UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
STAT(unw.stat.script.build_time += ia64_get_itc() - start);
- return 0;
+ return NULL;
}
unw.cache[info->prev_script].hint = script - unw.cache;
case UNW_INSN_SETNAT_MEMSTK:
if (!state->pri_unat_loc)
- state->pri_unat_loc = &state->sw->ar_unat;
+ state->pri_unat_loc = &state->sw->caller_unat;
/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
break;
{
int have_write_lock = 0;
struct unw_script *scr;
+ unsigned long flags = 0;
if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
/* don't let obviously bad addresses pollute the cache */
/* FIXME: should really be level 0 but it occurs too often. KAO */
UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
- info->rp_loc = 0;
+ info->rp_loc = NULL;
return -1;
}
scr = script_lookup(info);
if (!scr) {
+ spin_lock_irqsave(&unw.lock, flags);
scr = build_script(info);
if (!scr) {
+ spin_unlock_irqrestore(&unw.lock, flags);
UNW_DPRINT(0,
"unwind.%s: failed to locate/build unwind script for ip %lx\n",
__FUNCTION__, info->ip);
run_script(scr, info);
- if (have_write_lock)
+ if (have_write_lock) {
write_unlock(&scr->lock);
- else
+ spin_unlock_irqrestore(&unw.lock, flags);
+ } else
read_unlock(&scr->lock);
return 0;
}
num_regs = 0;
if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
info->pt = info->sp + 16;
- if ((pr & (1UL << pNonSys)) != 0)
+ if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
num_regs = *info->cfm_loc & 0x7f; /* size of frame */
info->pfs_loc =
(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
/*
 * Unwind frame-by-frame until the frame that entered the kernel from
 * user-level is reached.  Returns 0 on success, -1 if unwinding fails or
 * runs off the top of the kernel stack.
 *
 * NOTE(review): -/+ markers are patch residue.  The new code replaces the
 * old "return-pointer below FIXADDR_USER_END" heuristic with two checks:
 * (1) a stack-bound check against IA64_STK_OFFSET/IA64_PT_REGS_SIZE, and
 * (2) an interrupt-frame + PRED_USER_STACK predicate test.  Observe that
 * `pr` starts at 0 and is only refreshed at the *end* of each iteration,
 * so the PRED_USER_STACK test deliberately uses the predicates read in the
 * previous iteration (i.e. of the frame just unwound past).
 */
int
unw_unwind_to_user (struct unw_frame_info *info)
{
-	unsigned long ip;
+	unsigned long ip, sp, pr = 0;
	while (unw_unwind(info) >= 0) {
-		if (unw_get_rp(info, &ip) < 0) {
-			unw_get_ip(info, &ip);
-			UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
-				   __FUNCTION__, ip);
-			return -1;
+		unw_get_sp(info, &sp);
+		/* stop before walking past the pt_regs area at the top of the stack */
+		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
+		    < IA64_PT_REGS_SIZE) {
+			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
+				   __FUNCTION__);
+			break;
		}
-		if (ip < FIXADDR_USER_END)
+		if (unw_is_intr_frame(info) &&
+		    (pr & (1UL << PRED_USER_STACK)))
			return 0;
+		/* read this frame's predicates for the next iteration's test */
+		if (unw_get_pr (info, &pr) < 0) {
+			unw_get_rp(info, &ip);
+			UNW_DPRINT(0, "unwind.%s: failed to read "
+				   "predicate register (ip=0x%lx)\n",
+				   __FUNCTION__, ip);
+			return -1;
+		}
	}
	unw_get_ip(info, &ip);
-	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
+	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
+		   __FUNCTION__, ip);
	return -1;
}
EXPORT_SYMBOL(unw_unwind_to_user);
STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
}
/*
 * NOTE(review): the entire unw_init_from_interruption() function is deleted
 * by this change (every line below is '-' patch residue).  TODO confirm that
 * no remaining caller (in this file or elsewhere) still references it before
 * the patch is applied.
 */
-void
-unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
-			    struct pt_regs *pt, struct switch_stack *sw)
-{
-	unsigned long sof;
-
-	init_frame_info(info, t, sw, pt->r12);
-	info->cfm_loc = &pt->cr_ifs;
-	info->unat_loc = &pt->ar_unat;
-	info->pfs_loc = &pt->ar_pfs;
-	sof = *info->cfm_loc & 0x7f;
-	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
-	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
-	info->pt = (unsigned long) pt;
-	UNW_DPRINT(3, "unwind.%s:\n"
-		   "  bsp    0x%lx\n"
-		   "  sof    0x%lx\n"
-		   "  ip     0x%lx\n",
-		   __FUNCTION__, info->bsp, sof, info->ip);
-	find_save_locs(info);
-}
-
void
unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
{
find_save_locs(info);
}
+EXPORT_SYMBOL(unw_init_frame_info);
+
void
unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
{
if (end - start <= 0) {
UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
__FUNCTION__);
- return 0;
+ return NULL;
}
table = kmalloc(sizeof(*table), GFP_USER);
if (!table)
- return 0;
+ return NULL;
init_unwind_table(table, name, segment_base, gp, table_start, table_end);
if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
unw_hash_index_t_is_too_narrow();
- unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
- unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
- unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
if (i > 0)
unw.cache[i].lru_chain = (i - 1);
unw.cache[i].coll_chain = -1;
- unw.cache[i].lock = RW_LOCK_UNLOCKED;
+ rwlock_init(&unw.cache[i].lock);
}
unw.lru_head = UNW_CACHE_SIZE - 1;
unw.lru_tail = 0;
* EFAULT BUF points outside your accessible address space.
*/
asmlinkage long
-sys_getunwind (void *buf, size_t buf_size)
+sys_getunwind (void __user *buf, size_t buf_size)
{
if (buf && buf_size >= unw.gate_table_size)
if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)