* The initial version of perfmon.c was written by
* Ganesh Venkitachalam, IBM Corp.
*
- * Then it was modified for perfmon-1.x by Stephane Eranian and
+ * Then it was modified for perfmon-1.x by Stephane Eranian and
* David Mosberger, Hewlett Packard Co.
- *
+ *
* Version Perfmon-2.x is a rewrite of perfmon-1.x
- * by Stephane Eranian, Hewlett Packard Co.
+ * by Stephane Eranian, Hewlett Packard Co.
*
- * Copyright (C) 1999-2003 Hewlett Packard Co
+ * Copyright (C) 1999-2005 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
-#include <linux/version.h>
+#include <linux/bitops.h>
+#include <linux/capability.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
#include <linux/vs_memory.h>
#include <linux/vs_cvirt.h>
-#include <asm/bitops.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
- struct semaphore ctx_restart_sem; /* use for blocking notification mode */
+ struct completion ctx_restart_done; /* use for blocking notification mode */
unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
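The blocking-notification handshake is converted from a semaphore initialized to locked to a completion. A minimal sketch of the pattern as the later hunks use it (context abbreviated, error handling elided):

    /* creation time: starts out "not done", like sema_init(&sem, 0) did */
    init_completion(&ctx->ctx_restart_done);

    /* monitored task, in pfm_handle_work(): sleep until a restart arrives;
     * returns -ERESTARTSYS when interrupted by a signal */
    ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

    /* controlling side, in pfm_restart()/pfm_close(): wake the waiter */
    complete(&ctx->ctx_restart_done);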
#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
-typedef struct {
- int debug; /* turn on/off debugging via syslog */
- int debug_ovfl; /* turn on/off debug printk in overflow handler */
- int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
- int expert_mode; /* turn on/off value checking */
- int debug_pfm_read;
-} pfm_sysctl_t;
-
typedef struct {
unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
static pfm_stats_t pfm_stats[NR_CPUS];
static pfm_session_t pfm_sessions; /* global sessions information */
+static DEFINE_SPINLOCK(pfm_alt_install_check);
+static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
+
static struct proc_dir_entry *perfmon_dir;
static pfm_uuid_t pfm_null_uuid = {0,};
static pmu_config_t *pmu_conf;
/* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
ClearPageReserved(vmalloc_to_page((void*)a));
}
-static inline int
-pfm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
-{
- return remap_page_range(vma, from, phys_addr, size, prot);
-}
-
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
return 0UL;
}
-static inline unsigned long
+static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
spin_unlock(&(x)->ctx_lock);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
/* forward declaration */
#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
+#include "perfmon_montecito.h"
#include "perfmon_generic.h"
static pmu_config_t *pmu_confs[]={
+ &pmu_conf_mont,
&pmu_conf_mck,
&pmu_conf_ita,
&pmu_conf_gen, /* must be last */
DPRINT(("ctx=%p msgq reset\n", ctx));
}
-
-/* Here we want the physical address of the memory.
- * This is used when initializing the contents of the
- * area and marking the pages as reserved.
- */
-static inline unsigned long
-pfm_kvirt_to_pa(unsigned long adr)
-{
- __u64 pa = ia64_tpa(adr);
- return pa;
-}
-
static void *
pfm_rvmalloc(unsigned long size)
{
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
+extern void update_pal_halt_status(int);
+
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
is_syswide,
cpu));
+ /*
+	 * prevent default_idle() from going into PAL_HALT
+ */
+ update_pal_halt_status(0);
+
UNLOCK_PFS(flags);
return 0;
error_conflict:
DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
pfm_sessions.pfs_sys_session[cpu]->pid,
- smp_processor_id()));
+ cpu));
abort:
UNLOCK_PFS(flags);
is_syswide,
cpu));
+ /*
+ * if possible, enable default_idle() to go into PAL_HALT
+ */
+ if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
+ update_pal_halt_status(1);
+
UNLOCK_PFS(flags);
return 0;
goto abort_locked;
}
- DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+ DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
ret = -EFAULT;
if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
}
/*
- * context is locked when coming here and interrupts are disabled
+ * interrupts cannot be masked when coming here
*/
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
pfm_syswide_force_stop(void *info)
{
pfm_context_t *ctx = (pfm_context_t *)info;
- struct pt_regs *regs = ia64_task_regs(current);
+ struct pt_regs *regs = task_pt_regs(current);
struct task_struct *owner;
unsigned long flags;
int ret;
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
/*
* force task to wake up from MASKED state
*/
- up(&ctx->ctx_restart_sem);
+ complete(&ctx->ctx_restart_done);
DPRINT(("waking up ctx_state=%d\n", state));
/*
* XXX: check for signals :
- * - ok of explicit close
+ * - ok for explicit close
* - not ok when coming from exit_files()
*/
schedule();
DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
- inode->i_sb = pfmfs_mnt->mnt_sb;
inode->i_mode = S_IFCHR|S_IRUGO;
- inode->i_sock = 0;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
pfm_free_fd(int fd, struct file *file)
{
struct files_struct *files = current->files;
+ struct fdtable *fdt;
/*
 * there is no fd_uninstall(), so we do it here
*/
spin_lock(&files->file_lock);
- files->fd[fd] = NULL;
+ fdt = files_fdtable(files);
+ rcu_assign_pointer(fdt->fd[fd], NULL);
spin_unlock(&files->file_lock);
- if (file) put_filp(file);
+ if (file)
+ put_filp(file);
put_unused_fd(fd);
}
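The fd table is now published through RCU, so the slot is cleared via files_fdtable() and rcu_assign_pointer() rather than a plain store. Writers still serialize on file_lock; the reader side below is an assumption about how fget()-style lock-free lookups pair with it, not code from this file:

    /* writer, as in pfm_free_fd() above */
    spin_lock(&files->file_lock);
    fdt = files_fdtable(files);              /* RCU-published table snapshot */
    rcu_assign_pointer(fdt->fd[fd], NULL);   /* store with publish barrier   */
    spin_unlock(&files->file_lock);

    /* reader (assumed counterpart) */
    rcu_read_lock();
    file = rcu_dereference(files_fdtable(files)->fd[fd]);
    /* ... file must be pinned before rcu_read_unlock() ... */
    rcu_read_unlock();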
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
- unsigned long page;
-
DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
while (size > 0) {
- page = pfm_kvirt_to_pa(buf);
+ unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
- if (pfm_remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY)) return -ENOMEM;
+
+ if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
+ return -ENOMEM;
addr += PAGE_SIZE;
buf += PAGE_SIZE;
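remap_page_range(), which took a physical address, is gone from the kernel; remap_pfn_range() takes a page frame number instead, which is why the pfm_kvirt_to_pa() helper and the pfm_remap_page_range() wrapper disappear and the conversion is inlined:

    /* kernel virtual -> physical (ia64_tpa) -> frame number */
    unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

    /* map one read-only page of the sampling buffer at addr */
    if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
            return -ENOMEM;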
 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
* return -ENOMEM;
*/
- if (size > task->rlim[RLIMIT_MEMLOCK].rlim_cur) return -ENOMEM;
+ if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -ENOMEM;
/*
* We do the easy to undo allocations first.
/*
* partially initialize the vma for the sampling buffer
- *
- * The VM_DONTCOPY flag is very important as it ensures that the mapping
- * will never be inherited for any child process (via fork()) which is always
- * what we want.
*/
vma->vm_mm = mm;
vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
goto error;
}
vma->vm_end = vma->vm_start + size;
+ vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
*/
insert_vm_struct(mm, vma);
- // mm->total_vm += size >> PAGE_SHIFT;
vx_vmpages_add(mm, size >> PAGE_SHIFT);
- vm_stat_account(vma);
+ vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
+ vma_pages(vma));
up_write(&task->mm->mmap_sem);
/*
/*
 * init restart completion (starts out not done)
*/
- sema_init(&ctx->ctx_restart_sem, 0);
+ init_completion(&ctx->ctx_restart_done);
/*
* activation is used in SMP only
#endif
}
- DPRINT(("pmc[%u]=0x%lx loaded=%d access_pmu=%d all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
+ DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
cnum,
value,
is_loaded,
can_access_pmu,
+ flags,
ctx->ctx_all_pmcs[0],
ctx->ctx_used_pmds[0],
ctx->ctx_pmds[cnum].eventid,
}
}
- DPRINT(("pmd[%u]=0x%lx loaded=%d access_pmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
- "long_reset=0x%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
+ DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
+ "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
cnum,
value,
is_loaded,
ctx->ctx_pmds[cnum].short_reset,
ctx->ctx_pmds[cnum].long_reset,
PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
+ ctx->ctx_pmds[cnum].seed,
+ ctx->ctx_pmds[cnum].mask,
ctx->ctx_used_pmds[0],
ctx->ctx_pmds[cnum].reset_pmds[0],
ctx->ctx_reload_pmds[0],
}
expert_mode = pfm_sysctl.expert_mode;
- DPRINT(("loaded=%d access_pmu=%d ctx_state=%d\n",
+ DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
is_loaded,
can_access_pmu,
state));
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
DPRINT(("unblocking [%d] \n", task->pid));
- up(&ctx->ctx_restart_sem);
+ complete(&ctx->ctx_restart_done);
} else {
DPRINT(("[%d] armed exit trap\n", task->pid));
pfm_sysctl.debug = m == 0 ? 0 : 1;
- pfm_debug_var = pfm_sysctl.debug;
-
printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
if (m == 0) {
ctx->ctx_ibrs[rnum] = dbreg.val;
- DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x is_loaded=%d access_pmu=%d\n",
+ DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
} else {
CTX_USED_DBR(ctx, rnum);
}
ctx->ctx_dbrs[rnum] = dbreg.val;
- DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x is_loaded=%d access_pmu=%d\n",
+ DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
}
}
*/
ia64_psr(regs)->up = 0;
} else {
- tregs = ia64_task_regs(task);
+ tregs = task_pt_regs(task);
/*
* stop monitoring at the user level
ia64_psr(regs)->up = 1;
} else {
- tregs = ia64_task_regs(ctx->ctx_task);
+ tregs = task_pt_regs(ctx->ctx_task);
/*
* start monitoring at the kernel level the next
DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
req->load_pid,
ctx->ctx_state));
- return -EINVAL;
+ return -EBUSY;
}
DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
thread->pfm_context, ctx));
+ ret = -EBUSY;
old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
if (old != NULL) {
DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
/*
* when not current, task MUST be stopped, so this is safe
*/
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
/* force a full reload */
ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
/*
* per-task mode
*/
- tregs = task == current ? regs : ia64_task_regs(task);
+ tregs = task == current ? regs : task_pt_regs(task);
if (task == current) {
/*
{
pfm_context_t *ctx;
unsigned long flags;
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
int ret, state;
int free_ok = 0;
if (task == current || ctx->ctx_fl_system) return 0;
/*
- * if context is UNLOADED we are safe to go
- */
- if (state == PFM_CTX_UNLOADED) return 0;
-
- /*
- * no command can operate on a zombie context
+ * we are monitoring another thread
*/
- if (state == PFM_CTX_ZOMBIE) {
- DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
- return -EINVAL;
+ switch(state) {
+ case PFM_CTX_UNLOADED:
+ /*
+ * if context is UNLOADED we are safe to go
+ */
+ return 0;
+ case PFM_CTX_ZOMBIE:
+ /*
+ * no command can operate on a zombie context
+ */
+ DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
+ return -EINVAL;
+ case PFM_CTX_MASKED:
+ /*
+ * PMU state has been saved to software even though
+ * the thread may still be running.
+ */
+ if (cmd != PFM_UNLOAD_CONTEXT) return 0;
}
/*
* system-call entry point (must return long)
*/
asmlinkage long
-sys_perfmonctl (int fd, int cmd, void __user *arg, int count, long arg5, long arg6, long arg7,
- long arg8, long stack)
+sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
- struct pt_regs *regs = (struct pt_regs *)&stack;
struct file *file = NULL;
pfm_context_t *ctx = NULL;
unsigned long flags = 0UL;
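sys_perfmonctl() no longer fishes pt_regs out of its own argument area; dropping the dummy arguments makes the prototype match what user space actually passes, and the register frame is obtained explicitly:

    /* old: relied on the syscall stub pushing pt_regs after the args */
    struct pt_regs *regs = (struct pt_regs *)&stack;    /* fragile */

    /* new: fetch from the task's kernel stack, layout-independent */
    struct pt_regs *regs = task_pt_regs(current);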
if (unlikely(ret)) goto abort_locked;
skip_fd:
- ret = (*func)(ctx, args_k, count, regs);
+ ret = (*func)(ctx, args_k, count, task_pt_regs(current));
call_made = 1;
if (likely(ctx)) {
DPRINT(("context unlocked\n"));
UNPROTECT_CTX(ctx, flags);
- fput(file);
}
/* copy argument back to user, if needed */
if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
error_args:
- if (args_k) kfree(args_k);
+ if (file)
+ fput(file);
+
+ kfree(args_k);
DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
- if (ctx->ctx_fl_system) {
- printk(KERN_ERR "perfmon: pfm_context_force_terminate [%d] is system-wide\n", current->pid);
- return;
- }
- /*
- * we stop the whole thing, we do no need to flush
- * we know we WERE masked
- */
- pfm_clear_psr_up();
- ia64_psr(regs)->up = 0;
- ia64_psr(regs)->sp = 1;
+ int ret;
- /*
- * disconnect the task from the context and vice-versa
- */
- current->thread.pfm_context = NULL;
- current->thread.flags &= ~IA64_THREAD_PM_VALID;
- ctx->ctx_task = NULL;
+ DPRINT(("entering for [%d]\n", current->pid));
- DPRINT(("context terminated\n"));
+ ret = pfm_context_unload(ctx, NULL, 0, regs);
+ if (ret) {
+ printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret);
+ }
/*
* and wakeup controlling task, indicating we are now disconnected
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
-
+ /*
+ * pfm_handle_work() can be called with interrupts enabled
+ * (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible()
+ * call may sleep, therefore we must re-enable interrupts
+ * to avoid deadlocks. It is safe to do so because this function
+ * is called ONLY when returning to user level (PUStk=1), in which case
+ * there is no risk of kernel stack overflow due to deep
+ * interrupt nesting.
+ */
void
pfm_handle_work(void)
{
pfm_context_t *ctx;
struct pt_regs *regs;
- unsigned long flags;
+ unsigned long flags, dummy_flags;
unsigned long ovfl_regs;
unsigned int reason;
int ret;
pfm_clear_task_notify();
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
/*
* extract reason for being here and clear
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
+ /*
+ * restore interrupt mask to what it was on entry.
+ * Could be enabled/disabled.
+ */
UNPROTECT_CTX(ctx, flags);
+ /*
+ * force interrupts on because wait_for_completion_interruptible() may sleep
+ */
+ local_irq_enable();
+
DPRINT(("before block sleeping\n"));
/*
* may go through without blocking on SMP systems
 * if the restart has already been received by the time we block
*/
- ret = down_interruptible(&ctx->ctx_restart_sem);
+ ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
DPRINT(("after block sleeping ret=%d\n", ret));
- PROTECT_CTX(ctx, flags);
+ /*
+ * lock context and mask interrupts again
+ * We save the flags into a dummy because the interrupt
+ * mask may have changed since we entered this function.
+ */
+ PROTECT_CTX(ctx, dummy_flags);
/*
* we need to read the ovfl_regs only after wake-up
ctx->ctx_ovfl_regs[0] = 0UL;
nothing_to_do:
-
+ /*
+ * restore flags as they were upon entry
+ */
UNPROTECT_CTX(ctx, flags);
}
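The resulting interrupt-mask discipline in pfm_handle_work(), condensed in one place (a sketch of the code above, not new logic):

    PROTECT_CTX(ctx, flags);        /* spin_lock_irqsave: flags = mask on entry */
    UNPROTECT_CTX(ctx, flags);      /* unlock, restore the entry mask           */
    local_irq_enable();             /* about to sleep: interrupts must be on    */
    wait_for_completion_interruptible(&ctx->ctx_restart_done);
    PROTECT_CTX(ctx, dummy_flags);  /* re-lock; mask now differs from entry     */
    UNPROTECT_CTX(ctx, flags);      /* on return: restore mask saved on entry   */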
if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
}
- DPRINT(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n",
- ovfl_pmds,
- reset_pmds));
+ DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
+
/*
* reset the requested PMD registers using the short reset values
*/
int ret;
this_cpu = get_cpu();
- min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
- max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
+ if (likely(!pfm_alt_intr_handler)) {
+ min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
+ max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
- start_cycles = ia64_get_itc();
+ start_cycles = ia64_get_itc();
- ret = pfm_do_interrupt_handler(irq, arg, regs);
+ ret = pfm_do_interrupt_handler(irq, arg, regs);
- total_cycles = ia64_get_itc();
+ total_cycles = ia64_get_itc();
- /*
- * don't measure spurious interrupts
- */
- if (likely(ret == 0)) {
- total_cycles -= start_cycles;
+ /*
+ * don't measure spurious interrupts
+ */
+ if (likely(ret == 0)) {
+ total_cycles -= start_cycles;
- if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
- if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
+ if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
+ if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
- pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+ pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+ }
+ }
+ else {
+ (*pfm_alt_intr_handler->handler)(irq, arg, regs);
}
+
put_cpu_no_resched();
return IRQ_HANDLED;
}
* on every CPU, so we can rely on the pid to identify the idle task.
*/
if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
return;
}
flags = pfm_protect_ctx_ctxsw(ctx);
if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
pfm_clear_psr_up();
return;
}
- /*
- * sanity check
- */
- if (ctx->ctx_last_activation != GET_ACTIVATION()) {
- pfm_unprotect_ctx_ctxsw(ctx, flags);
- return;
- }
-
/*
* save current PSR: needed because we modify it
*/
BUG_ON(psr & IA64_PSR_I);
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
BUG_ON(ctx->ctx_smpl_hdr);
* XXX: sampling situation is not taken into account here
*/
mask2 = ctx->ctx_used_pmds[0];
+
+ DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
+
for (i = 0; mask2; i++, mask2>>=1) {
/* skip non used pmds */
}
}
- DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val));
+ DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
if (is_self) task->thread.pmds[i] = pmd_val;
.name = "perfmon"
};
+static void
+pfm_alt_save_pmu_state(void *data)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(current);
+
+ DPRINT(("called\n"));
+
+ /*
+ * should not be necessary but
+	 * let's take no risk
+ */
+ pfm_clear_psr_up();
+ pfm_clear_psr_pp();
+ ia64_psr(regs)->pp = 0;
+
+ /*
+	 * This call is required; it may cause a spurious
+	 * interrupt on some processors
+ */
+ pfm_freeze_pmu();
+
+ ia64_srlz_d();
+}
+
+void
+pfm_alt_restore_pmu_state(void *data)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(current);
+
+ DPRINT(("called\n"));
+
+ /*
+ * put PMU back in state expected
+ * by perfmon
+ */
+ pfm_clear_psr_up();
+ pfm_clear_psr_pp();
+ ia64_psr(regs)->pp = 0;
+
+ /*
+ * perfmon runs with PMU unfrozen at all times
+ */
+ pfm_unfreeze_pmu();
+
+ ia64_srlz_d();
+}
+
+int
+pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+ int ret, i;
+ int reserve_cpu;
+
+ /* some sanity checks */
+ if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
+
+ /* do the easy test first */
+ if (pfm_alt_intr_handler) return -EBUSY;
+
+ /* one at a time in the install or remove, just fail the others */
+ if (!spin_trylock(&pfm_alt_install_check)) {
+ return -EBUSY;
+ }
+
+ /* reserve our session */
+ for_each_online_cpu(reserve_cpu) {
+ ret = pfm_reserve_session(NULL, 1, reserve_cpu);
+ if (ret) goto cleanup_reserve;
+ }
+
+ /* save the current system wide pmu states */
+ ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+ if (ret) {
+ DPRINT(("on_each_cpu() failed: %d\n", ret));
+ goto cleanup_reserve;
+ }
+
+ /* officially change to the alternate interrupt handler */
+ pfm_alt_intr_handler = hdl;
+
+ spin_unlock(&pfm_alt_install_check);
+
+ return 0;
+
+cleanup_reserve:
+ for_each_online_cpu(i) {
+ /* don't unreserve more than we reserved */
+ if (i >= reserve_cpu) break;
+
+ pfm_unreserve_session(NULL, 1, i);
+ }
+
+ spin_unlock(&pfm_alt_install_check);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
+
+int
+pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+ int i;
+ int ret;
+
+ if (hdl == NULL) return -EINVAL;
+
+ /* cannot remove someone else's handler! */
+ if (pfm_alt_intr_handler != hdl) return -EINVAL;
+
+ /* one at a time in the install or remove, just fail the others */
+ if (!spin_trylock(&pfm_alt_install_check)) {
+ return -EBUSY;
+ }
+
+ pfm_alt_intr_handler = NULL;
+
+ ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+ if (ret) {
+ DPRINT(("on_each_cpu() failed: %d\n", ret));
+ }
+
+ for_each_online_cpu(i) {
+ pfm_unreserve_session(NULL, 1, i);
+ }
+
+ spin_unlock(&pfm_alt_install_check);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
+
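A hypothetical client of the new alternate-handler API (e.g. a dump or debugger module that needs the PMU interrupt for itself). The handler prototype is inferred from the call site (*pfm_alt_intr_handler->handler)(irq, arg, regs) and the hdl->handler checks above, so treat it as an assumption:

    static int my_pmu_intr(int irq, void *arg, struct pt_regs *regs)
    {
            /* the PMU interrupt is now ours; perfmon bookkeeping is bypassed */
            return 0;
    }

    static pfm_intr_handler_desc_t my_desc = {
            .handler = my_pmu_intr,
    };

    /* reserves a system-wide session on every online CPU, freezes the PMU,
     * then points pfm_alt_intr_handler at our descriptor */
    if (pfm_install_alt_pmu_interrupt(&my_desc) == 0) {
            /* ... drive the PMU directly ... */
            pfm_remove_alt_pmu_interrupt(&my_desc);
    }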
/*
* perfmon initialization routine, called from the initcall() table
*/
local_irq_save(flags);
this_cpu = smp_processor_id();
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
info = PFM_CPUINFO_GET();
dcr = ia64_getreg(_IA64_REG_CR_DCR);
}
#else /* !CONFIG_PERFMON */
asmlinkage long
-sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
- long arg8, long stack)
+sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
return -ENOSYS;
}