#include <linux/version.h>
#include <linux/vs_memory.h>
#include <linux/vs_cvirt.h>
-#include <linux/bitops.h>
-#include <linux/vs_memory.h>
-#include <linux/vs_cvirt.h>
+#include <asm/bitops.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
ClearPageReserved(vmalloc_to_page((void*)a));
}
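+/*
+ * Compatibility wrapper: remap_page_range() takes a physical address,
+ * whereas the newer remap_pfn_range() takes a page frame number.
+ */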
+static inline int
+pfm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+{
+ return remap_page_range(vma, from, phys_addr, size, prot);
+}
+
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
DPRINT(("ctx=%p msgq reset\n", ctx));
}
+
+/* Here we want the physical address of the memory.
+ * This is used when initializing the contents of the
+ * area and marking the pages as reserved.
+ */
+static inline unsigned long
+pfm_kvirt_to_pa(unsigned long adr)
+{
+ __u64 pa = ia64_tpa(adr);
+ return pa;
+}
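+/*
+ * Taken together, the two helpers above are used as in the (slightly
+ * simplified) sketch below, mirroring pfm_remap_buffer() further down:
+ *
+ *	page = pfm_kvirt_to_pa(buf);
+ *	if (pfm_remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY))
+ *		return -ENOMEM;
+ */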
+
static void *
pfm_rvmalloc(unsigned long size)
{
}
/*
- * interrupt cannot be masked when coming here
+ * context is locked when coming here and interrupts are disabled
*/
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
/*
* XXX: check for signals :
- * - ok for explicit close
+ * - ok of explicit close
* - not ok when coming from exit_files()
*/
schedule();
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
+ unsigned long page;
+
DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
while (size > 0) {
- unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
-
+ page = pfm_kvirt_to_pa(buf);
- if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
- return -ENOMEM;
+ if (pfm_remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY)) return -ENOMEM;
addr += PAGE_SIZE;
buf += PAGE_SIZE;
* if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
* return -ENOMEM;
*/
- if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
- return -ENOMEM;
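+ /* older kernels keep the resource limit array directly on the task_struct */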
+ if (size > task->rlim[RLIMIT_MEMLOCK].rlim_cur) return -ENOMEM;
/*
* We do the easy to undo allocations first.
/*
* partially initialize the vma for the sampling buffer
+ *
+ * The VM_DONTCOPY flag is very important as it ensures that the mapping
+ * will never be inherited by any child process (via fork()), which is
+ * always what we want.
*/
vma->vm_mm = mm;
vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
goto error;
}
vma->vm_end = vma->vm_start + size;
- vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
#endif
}
- DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
+ DPRINT(("pmc[%u]=0x%lx loaded=%d access_pmu=%d all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
cnum,
value,
is_loaded,
can_access_pmu,
- flags,
ctx->ctx_all_pmcs[0],
ctx->ctx_used_pmds[0],
ctx->ctx_pmds[cnum].eventid,
}
}
- DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
- "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
+ DPRINT(("pmd[%u]=0x%lx loaded=%d access_pmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
+ "long_reset=0x%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
cnum,
value,
is_loaded,
ctx->ctx_pmds[cnum].short_reset,
ctx->ctx_pmds[cnum].long_reset,
PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
- ctx->ctx_pmds[cnum].seed,
- ctx->ctx_pmds[cnum].mask,
ctx->ctx_used_pmds[0],
ctx->ctx_pmds[cnum].reset_pmds[0],
ctx->ctx_reload_pmds[0],
}
expert_mode = pfm_sysctl.expert_mode;
- DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
+ DPRINT(("loaded=%d access_pmu=%d ctx_state=%d\n",
is_loaded,
can_access_pmu,
state));
ctx->ctx_ibrs[rnum] = dbreg.val;
- DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
+ DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x is_loaded=%d access_pmu=%d\n",
rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
} else {
CTX_USED_DBR(ctx, rnum);
}
ctx->ctx_dbrs[rnum] = dbreg.val;
- DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
+ DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x is_loaded=%d access_pmu=%d\n",
rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
}
}
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
- int ret;
+ if (ctx->ctx_fl_system) {
+ printk(KERN_ERR "perfmon: pfm_context_force_terminate [%d] is system-wide\n", current->pid);
+ return;
+ }
+ /*
+ * we stop the whole thing, we do not need to flush
+ * we know we WERE masked
+ */
+ pfm_clear_psr_up();
+ ia64_psr(regs)->up = 0;
+ ia64_psr(regs)->sp = 1;
- DPRINT(("entering for [%d]\n", current->pid));
+ /*
+ * disconnect the task from the context and vice-versa
+ */
+ current->thread.pfm_context = NULL;
+ current->thread.flags &= ~IA64_THREAD_PM_VALID;
+ ctx->ctx_task = NULL;
- ret = pfm_context_unload(ctx, NULL, 0, regs);
- if (ret) {
- printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret);
- }
+ DPRINT(("context terminated\n"));
/*
* and wakeup controlling task, indicating we are now disconnected
UNPROTECT_CTX(ctx, flags);
- /*
- * pfm_handle_work() is currently called with interrupts disabled.
- * The down_interruptible call may sleep, therefore we
- * must re-enable interrupts to avoid deadlocks. It is
- * safe to do so because this function is called ONLY
- * when returning to user level (PUStk=1), in which case
- * there is no risk of kernel stack overflow due to deep
- * interrupt nesting.
- */
- BUG_ON(flags & IA64_PSR_I);
- local_irq_enable();
-
DPRINT(("before block sleeping\n"));
/*
DPRINT(("after block sleeping ret=%d\n", ret));
- /*
- * disable interrupts to restore state we had upon entering
- * this function
- */
- local_irq_disable();
-
PROTECT_CTX(ctx, flags);
/*
if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
}
- DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
-
+ DPRINT(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n",
+ ovfl_pmds,
+ reset_pmds));
/*
* reset the requested PMD registers using the short reset values
*/
return;
}
+ /*
+ * sanity check
+ */
+ if (ctx->ctx_last_activation != GET_ACTIVATION()) {
+ pfm_unprotect_ctx_ctxsw(ctx, flags);
+ return;
+ }
+
/*
* save current PSR: needed because we modify it
*/
* XXX: sampling situation is not taken into account here
*/
mask2 = ctx->ctx_used_pmds[0];
-
- DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
-
for (i = 0; mask2; i++, mask2>>=1) {
/* skip non used pmds */
}
}
- DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
+ DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val));
if (is_self) task->thread.pmds[i] = pmd_val;