X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fia64%2Fkernel%2Fperfmon.c;h=249b590c73ccfe30e687504cc2b02fc632de2279;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=9f72dc58d3c23c0e961f731917b12fa9e1c6cc81;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9f72dc58d..249b590c7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -37,8 +37,10 @@
 #include 
 #include 
 #include 
-#include 
 #include 
+#include 
+#include 
+#include 
 #include 
 #include 
@@ -287,7 +289,7 @@ typedef struct pfm_context {
 
 	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */
 
-	struct semaphore	ctx_restart_sem;	/* use for blocking notification mode */
+	struct completion	ctx_restart_done;	/* use for blocking notification mode */
 
 	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
 	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
@@ -499,7 +501,7 @@ typedef struct {
 static pfm_stats_t		pfm_stats[NR_CPUS];
 static pfm_session_t		pfm_sessions;	/* global sessions information */
 
-static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pfm_alt_install_check);
 static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;
 
 static struct proc_dir_entry 	*perfmon_dir;
@@ -576,7 +578,7 @@ pfm_protect_ctx_ctxsw(pfm_context_t *x)
 	return 0UL;
 }
 
-static inline unsigned long
+static inline void
 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
 {
 	spin_unlock(&(x)->ctx_lock);
@@ -629,9 +631,11 @@ static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count,
 
 #include "perfmon_itanium.h"
 #include "perfmon_mckinley.h"
+#include "perfmon_montecito.h"
 #include "perfmon_generic.h"
 
 static pmu_config_t *pmu_confs[]={
+	&pmu_conf_mont,
 	&pmu_conf_mck,
 	&pmu_conf_ita,
 	&pmu_conf_gen, /* must be last */
@@ -1711,7 +1715,7 @@ static void
 pfm_syswide_force_stop(void *info)
 {
 	pfm_context_t   *ctx = (pfm_context_t *)info;
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	struct task_struct *owner;
 	unsigned long flags;
 	int ret;
@@ -1816,7 +1820,7 @@ pfm_flush(struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1946,7 +1950,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1990,7 +1994,7 @@ pfm_close(struct inode *inode, struct file *filp)
 
 		/*
 		 * force task to wake up from MASKED state
 		 */
-		up(&ctx->ctx_restart_sem);
+		complete(&ctx->ctx_restart_done);
 
 		DPRINT(("waking up ctx_state=%d\n", state));
@@ -2220,15 +2224,18 @@ static void
 pfm_free_fd(int fd, struct file *file)
 {
 	struct files_struct *files = current->files;
+	struct fdtable *fdt;
 
 	/*
 	 * there ie no fd_uninstall(), so we do it here
 	 */
 	spin_lock(&files->file_lock);
-	files->fd[fd] = NULL;
+	fdt = files_fdtable(files);
+	rcu_assign_pointer(fdt->fd[fd], NULL);
 	spin_unlock(&files->file_lock);
 
-	if (file) put_filp(file);
+	if (file)
+		put_filp(file);
 	put_unused_fd(fd);
 }
 
@@ -2351,7 +2358,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	insert_vm_struct(mm, vma);
 
 	vx_vmpages_add(mm, size >> PAGE_SHIFT);
-	vm_stat_account(vma);
+	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
+							vma_pages(vma));
 	up_write(&task->mm->mmap_sem);
 
 	/*
@@ -2701,7 +2709,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * init restart semaphore to locked
 	 */
-	sema_init(&ctx->ctx_restart_sem, 0);
+	init_completion(&ctx->ctx_restart_done);
 
 	/*
 	 * activation is used in SMP only
@@ -3682,7 +3690,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
 		DPRINT(("unblocking [%d] \n", task->pid));
-		up(&ctx->ctx_restart_sem);
+		complete(&ctx->ctx_restart_done);
 	} else {
 		DPRINT(("[%d] armed exit trap\n", task->pid));
 
@@ -4049,7 +4057,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		ia64_psr(regs)->up = 0;
 	} else {
-		tregs = ia64_task_regs(task);
+		tregs = task_pt_regs(task);
 
 		/*
 		 * stop monitoring at the user level
@@ -4131,7 +4139,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		ia64_psr(regs)->up = 1;
 
 	} else {
-		tregs = ia64_task_regs(ctx->ctx_task);
+		tregs = task_pt_regs(ctx->ctx_task);
 
 		/*
 		 * start monitoring at the kernel level the next
@@ -4315,6 +4323,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
 		thread->pfm_context, ctx));
 
+	ret = -EBUSY;
 	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
 	if (old != NULL) {
 		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
@@ -4400,7 +4409,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * when not current, task MUST be stopped, so this is safe
 		 */
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 
 		/* force a full reload */
 		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4526,7 +4535,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * per-task mode
 	 */
-	tregs = task == current ? regs : ia64_task_regs(task);
+	tregs = task == current ? regs : task_pt_regs(task);
 
 	if (task == current) {
 		/*
@@ -4589,7 +4598,7 @@ pfm_exit_thread(struct task_struct *task)
 {
 	pfm_context_t *ctx;
 	unsigned long flags;
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 	int ret, state;
 	int free_ok = 0;
 
@@ -4922,7 +4931,7 @@ restart_args:
 	if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+	ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
 	call_made = 1;
 
@@ -4937,7 +4946,7 @@ abort_locked:
 	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
 
 error_args:
-	if (args_k) kfree(args_k);
+	kfree(args_k);
 
 	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
 
@@ -5046,7 +5055,7 @@ pfm_handle_work(void)
 
 	pfm_clear_task_notify();
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	/*
 	 * extract reason for being here and clear
@@ -5083,7 +5092,7 @@ pfm_handle_work(void)
 	 * may go through without blocking on SMP systems
 	 * if restart has been received already by the time we call down()
 	 */
-	ret = down_interruptible(&ctx->ctx_restart_sem);
+	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
 
 	DPRINT(("after block sleeping ret=%d\n", ret));
 
@@ -5790,7 +5799,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
 	 * on every CPU, so we can rely on the pid to identify the idle task.
 	 */
 	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
 		return;
 	}
@@ -5873,7 +5882,7 @@ pfm_save_regs(struct task_struct *task)
 	flags = pfm_protect_ctx_ctxsw(ctx);
 
 	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		pfm_clear_psr_up();
 
@@ -6073,7 +6082,7 @@ pfm_load_regs (struct task_struct *task)
 	BUG_ON(psr & IA64_PSR_I);
 
 	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6442,7 +6451,7 @@ pfm_alt_save_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6468,7 +6477,7 @@ pfm_alt_restore_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6715,6 +6724,7 @@ __initcall(pfm_init);
 void
 pfm_init_percpu (void)
 {
+	static int first_time=1;
 	/*
 	 * make sure no measurement is active
 	 * (may inherit programmed PMCs from EFI).
@@ -6727,8 +6737,10 @@ pfm_init_percpu (void)
 	 */
 	pfm_unfreeze_pmu();
 
-	if (smp_processor_id() == 0)
+	if (first_time) {
 		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+		first_time=0;
+	}
 
 	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
 	ia64_srlz_d();
@@ -6750,7 +6762,7 @@ dump_pmu_state(const char *from)
 	local_irq_save(flags);
 
 	this_cpu = smp_processor_id();
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 	info = PFM_CPUINFO_GET();
 	dcr = ia64_getreg(_IA64_REG_CR_DCR);
 
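
Note on the recurring change in this diff: the context's blocking-notification primitive moves from a semaphore initialized to zero (sema_init/down_interruptible/up on ctx_restart_sem) to a completion (init_completion/wait_for_completion_interruptible/complete on ctx_restart_done). Below is a minimal sketch of that conversion pattern, not taken from the patch; the names my_ctx, my_ctx_init, my_wait_for_restart and my_post_restart are hypothetical, and only the kernel completion API itself is assumed.

#include <linux/completion.h>

struct my_ctx {
	struct completion restart_done;	/* was: struct semaphore restart_sem */
};

static void my_ctx_init(struct my_ctx *ctx)
{
	/* was: sema_init(&ctx->restart_sem, 0);  i.e. initially "locked" */
	init_completion(&ctx->restart_done);
}

/* monitored task side: sleep until a restart is posted or a signal arrives */
static int my_wait_for_restart(struct my_ctx *ctx)
{
	/* was: down_interruptible(&ctx->restart_sem); */
	return wait_for_completion_interruptible(&ctx->restart_done);
}

/* controlling task side: wake the blocked monitored task */
static void my_post_restart(struct my_ctx *ctx)
{
	/* was: up(&ctx->restart_sem); */
	complete(&ctx->restart_done);
}

A completion states the intent (wait for an event posted by another task) more directly than a zero-initialized semaphore, which is why such users were converted across the 2.6 kernel series. The other mechanical changes in the diff follow the same kind of API update: ia64_task_regs() becomes task_pt_regs(), SPIN_LOCK_UNLOCKED becomes DEFINE_SPINLOCK(), and the direct files->fd[] store becomes files_fdtable()/rcu_assign_pointer() for the RCU-managed fd table.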