vserver 2.0 rc7
[linux-2.6.git] arch/ia64/kernel/perfmon.c
index 4f1543c..9f72dc5 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -11,7 +11,7 @@
  * Version Perfmon-2.x is a rewrite of perfmon-1.x
  * by Stephane Eranian, Hewlett Packard Co.
  *
- * Copyright (C) 1999-2003, 2005  Hewlett Packard Co
+ * Copyright (C) 1999-2005  Hewlett Packard Co
  *               Stephane Eranian <eranian@hpl.hp.com>
  *               David Mosberger-Tang <davidm@hpl.hp.com>
  *
@@ -481,14 +481,6 @@ typedef struct {
 
 #define PFM_CMD_ARG_MANY       -1 /* cannot be zero */
 
-typedef struct {
-       int     debug;          /* turn on/off debugging via syslog */
-       int     debug_ovfl;     /* turn on/off debug printk in overflow handler */
-       int     fastctxsw;      /* turn on/off fast (unsecure) ctxsw */
-       int     expert_mode;    /* turn on/off value checking */
-       int     debug_pfm_read;
-} pfm_sysctl_t;
-
 typedef struct {
        unsigned long pfm_spurious_ovfl_intr_count;     /* keep track of spurious ovfl interrupts */
        unsigned long pfm_replay_ovfl_intr_count;       /* keep track of replayed ovfl interrupts */
@@ -507,6 +499,9 @@ typedef struct {
 static pfm_stats_t             pfm_stats[NR_CPUS];
 static pfm_session_t           pfm_sessions;   /* global sessions information */
 
+static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
+static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;
+
 static struct proc_dir_entry   *perfmon_dir;
 static pfm_uuid_t              pfm_null_uuid = {0,};
 
@@ -516,8 +511,8 @@ static LIST_HEAD(pfm_buffer_fmt_list);
 static pmu_config_t            *pmu_conf;
 
 /* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);
 
 static ctl_table pfm_ctl_table[]={
        {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
@@ -616,6 +611,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t  *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
 
 
 /* forward declaration */
@@ -1275,6 +1271,8 @@ out:
 }
 EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
 
+extern void update_pal_halt_status(int);
+
 static int
 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
@@ -1321,6 +1319,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
                is_syswide,
                cpu));
 
+       /*
+        * prevent default_idle() from entering PAL_HALT
+        */
+       update_pal_halt_status(0);
+
        UNLOCK_PFS(flags);
 
        return 0;
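
update_pal_halt_status() itself lives in arch/ia64/kernel/process.c. A sketch
of the idle-loop counterpart this call toggles, assuming the companion
process.c change (can_do_pal_halt, pal_halt and the default_idle() body are
reconstructions, not part of this file):

	static int pal_halt        = 1;	/* cleared by the "nohalt" boot option */
	static int can_do_pal_halt = 1;	/* gate consulted by default_idle()    */

	void
	update_pal_halt_status(int status)
	{
		can_do_pal_halt = pal_halt && status;
	}

	void
	default_idle(void)
	{
		while (!need_resched())
			if (can_do_pal_halt)
				safe_halt();	/* PAL_HALT_LIGHT */
			else
				cpu_relax();
	}
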
@@ -1328,7 +1331,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 error_conflict:
        DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
                pfm_sessions.pfs_sys_session[cpu]->pid,
-               smp_processor_id()));
+               cpu));
 abort:
        UNLOCK_PFS(flags);
 
@@ -1376,6 +1379,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
                is_syswide,
                cpu));
 
+       /*
+        * if possible, allow default_idle() to enter PAL_HALT again
+        */
+       if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
+               update_pal_halt_status(1);
+
        UNLOCK_PFS(flags);
 
        return 0;
@@ -1578,7 +1587,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
                goto abort_locked;
        }
 
-       DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+       DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
 
        ret = -EFAULT;
        if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
@@ -3697,8 +3706,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
        pfm_sysctl.debug = m == 0 ? 0 : 1;
 
-       pfm_debug_var = pfm_sysctl.debug;
-
        printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
 
        if (m == 0) {
@@ -4214,7 +4221,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
                        req->load_pid,
                        ctx->ctx_state));
-               return -EINVAL;
+               return -EBUSY;
        }
 
        DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
@@ -4716,16 +4723,26 @@ recheck:
        if (task == current || ctx->ctx_fl_system) return 0;
 
        /*
-        * if context is UNLOADED we are safe to go
-        */
-       if (state == PFM_CTX_UNLOADED) return 0;
-
-       /*
-        * no command can operate on a zombie context
+        * we are monitoring another thread
         */
-       if (state == PFM_CTX_ZOMBIE) {
-               DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
-               return -EINVAL;
+       switch(state) {
+               case PFM_CTX_UNLOADED:
+                       /*
+                        * if context is UNLOADED we are safe to go
+                        */
+                       return 0;
+               case PFM_CTX_ZOMBIE:
+                       /*
+                        * no command can operate on a zombie context
+                        */
+                       DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
+                       return -EINVAL;
+               case PFM_CTX_MASKED:
+                       /*
+                        * PMU state has been saved to software even though
+                        * the thread may still be running.
+                        */
+                       if (cmd != PFM_UNLOAD_CONTEXT) return 0;
        }
 
        /*
@@ -4998,13 +5015,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 }
 
 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
-
+/*
+ * pfm_handle_work() can be called with interrupts enabled
+ * (TIF_NEED_RESCHED) or disabled. The down_interruptible
+ * call may sleep, therefore we must re-enable interrupts
+ * to avoid deadlocks. It is safe to do so because this function
+ * is called ONLY when returning to user level (PUStk=1), in which case
+ * there is no risk of kernel stack overflow due to deep
+ * interrupt nesting.
+ */
 void
 pfm_handle_work(void)
 {
        pfm_context_t *ctx;
        struct pt_regs *regs;
-       unsigned long flags;
+       unsigned long flags, dummy_flags;
        unsigned long ovfl_regs;
        unsigned int reason;
        int ret;
@@ -5041,18 +5066,15 @@ pfm_handle_work(void)
        //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
        if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
 
+       /*
+        * restore interrupt mask to what it was on entry.
+        * Could be enabled/disabled.
+        */
        UNPROTECT_CTX(ctx, flags);
 
-        /*
-         * pfm_handle_work() is currently called with interrupts disabled.
-         * The down_interruptible call may sleep, therefore we
-         * must re-enable interrupts to avoid deadlocks. It is
-         * safe to do so because this function is called ONLY
-         * when returning to user level (PUStk=1), in which case
-         * there is no risk of kernel stack overflow due to deep
-         * interrupt nesting.
-         */
-       BUG_ON(flags & IA64_PSR_I);
+       /*
+        * force interrupt enable because of down_interruptible()
+        */
        local_irq_enable();
 
        DPRINT(("before block sleeping\n"));
@@ -5066,12 +5088,12 @@ pfm_handle_work(void)
        DPRINT(("after block sleeping ret=%d\n", ret));
 
        /*
-        * disable interrupts to restore state we had upon entering
-        * this function
+        * lock context and mask interrupts again.
+        * We save flags into a dummy because we may have
+        * altered the interrupt mask compared to entry into this
+        * function.
         */
-       local_irq_disable();
-
-       PROTECT_CTX(ctx, flags);
+       PROTECT_CTX(ctx, dummy_flags);
 
        /*
         * we need to read the ovfl_regs only after wake-up
@@ -5097,7 +5119,9 @@ skip_blocking:
        ctx->ctx_ovfl_regs[0] = 0UL;
 
 nothing_to_do:
-
+       /*
+        * restore flags as they were upon entry
+        */
        UNPROTECT_CTX(ctx, flags);
 }
 
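
Distilled, the flags/dummy_flags split above works as follows: the interrupt
mask is captured once on entry and restored once on exit, while the snapshot
taken when re-acquiring the context lock after sleeping is deliberately
discarded (illustrative outline, not a literal excerpt):

	PROTECT_CTX(ctx, flags);	/* capture irq mask as on entry      */
	/* ... */
	UNPROTECT_CTX(ctx, flags);	/* unlock, irq mask back to entry    */
	local_irq_enable();		/* down_interruptible() may sleep    */
	down_interruptible(&ctx->ctx_restart_sem);
	PROTECT_CTX(ctx, dummy_flags);	/* mask captured here is discarded   */
	/* ... */
	UNPROTECT_CTX(ctx, flags);	/* final restore uses the entry mask */
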
@@ -5537,26 +5561,32 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
        int ret;
 
        this_cpu = get_cpu();
-       min      = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
-       max      = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
+       if (likely(!pfm_alt_intr_handler)) {
+               min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
+               max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
 
-       start_cycles = ia64_get_itc();
+               start_cycles = ia64_get_itc();
 
-       ret = pfm_do_interrupt_handler(irq, arg, regs);
+               ret = pfm_do_interrupt_handler(irq, arg, regs);
 
-       total_cycles = ia64_get_itc();
+               total_cycles = ia64_get_itc();
 
-       /*
-        * don't measure spurious interrupts
-        */
-       if (likely(ret == 0)) {
-               total_cycles -= start_cycles;
+               /*
+                * don't measure spurious interrupts
+                */
+               if (likely(ret == 0)) {
+                       total_cycles -= start_cycles;
 
-               if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
-               if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
+                       if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
+                       if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
 
-               pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+                       pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+               }
+       }
+       else {
+               (*pfm_alt_intr_handler->handler)(irq, arg, regs);
        }
+
        put_cpu_no_resched();
        return IRQ_HANDLED;
 }
@@ -6407,6 +6437,141 @@ static struct irqaction perfmon_irqaction = {
        .name    = "perfmon"
 };
 
+static void
+pfm_alt_save_pmu_state(void *data)
+{
+       struct pt_regs *regs;
+
+       regs = ia64_task_regs(current);
+
+       DPRINT(("called\n"));
+
+       /*
+        * should not be necessary, but
+        * let's take no risk
+        */
+       pfm_clear_psr_up();
+       pfm_clear_psr_pp();
+       ia64_psr(regs)->pp = 0;
+
+       /*
+        * This call is required;
+        * it may cause a spurious interrupt on some processors
+        */
+       pfm_freeze_pmu();
+
+       ia64_srlz_d();
+}
+
+static void
+pfm_alt_restore_pmu_state(void *data)
+{
+       struct pt_regs *regs;
+
+       regs = ia64_task_regs(current);
+
+       DPRINT(("called\n"));
+
+       /*
+        * put PMU back in state expected
+        * by perfmon
+        */
+       pfm_clear_psr_up();
+       pfm_clear_psr_pp();
+       ia64_psr(regs)->pp = 0;
+
+       /*
+        * perfmon runs with PMU unfrozen at all times
+        */
+       pfm_unfreeze_pmu();
+
+       ia64_srlz_d();
+}
+
+int
+pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+       int ret, i;
+       int reserve_cpu;
+
+       /* some sanity checks */
+       if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
+
+       /* do the easy test first */
+       if (pfm_alt_intr_handler) return -EBUSY;
+
+       /* only one install or remove at a time; just fail the others */
+       if (!spin_trylock(&pfm_alt_install_check)) {
+               return -EBUSY;
+       }
+
+       /* reserve our session */
+       for_each_online_cpu(reserve_cpu) {
+               ret = pfm_reserve_session(NULL, 1, reserve_cpu);
+               if (ret) goto cleanup_reserve;
+       }
+
+       /* save the current system wide pmu states */
+       ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+       if (ret) {
+               DPRINT(("on_each_cpu() failed: %d\n", ret));
+               goto cleanup_reserve;
+       }
+
+       /* officially change to the alternate interrupt handler */
+       pfm_alt_intr_handler = hdl;
+
+       spin_unlock(&pfm_alt_install_check);
+
+       return 0;
+
+cleanup_reserve:
+       for_each_online_cpu(i) {
+               /* don't unreserve more than we reserved */
+               if (i >= reserve_cpu) break;
+
+               pfm_unreserve_session(NULL, 1, i);
+       }
+
+       spin_unlock(&pfm_alt_install_check);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
+
+int
+pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+       int i;
+       int ret;
+
+       if (hdl == NULL) return -EINVAL;
+
+       /* cannot remove someone else's handler! */
+       if (pfm_alt_intr_handler != hdl) return -EINVAL;
+
+       /* only one install or remove at a time; just fail the others */
+       if (!spin_trylock(&pfm_alt_install_check)) {
+               return -EBUSY;
+       }
+
+       pfm_alt_intr_handler = NULL;
+
+       ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+       if (ret) {
+               DPRINT(("on_each_cpu() failed: %d\n", ret));
+       }
+
+       for_each_online_cpu(i) {
+               pfm_unreserve_session(NULL, 1, i);
+       }
+
+       spin_unlock(&pfm_alt_install_check);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
+
 /*
  * perfmon initialization routine, called from the initcall() table
  */
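
A hypothetical client of the two new entry points would look roughly like the
following; only pfm_install_alt_pmu_interrupt(), pfm_remove_alt_pmu_interrupt()
and the .handler member come from this patch, while the my_* names and the
handler's return convention are assumptions:

	/* hypothetical module using the alternate-handler hooks */
	static int
	my_pmu_handler(int irq, void *arg, struct pt_regs *regs)
	{
		/* drain the PMU / record a sample here */
		return 0;
	}

	static pfm_intr_handler_desc_t my_desc = {
		.handler = my_pmu_handler,
	};

	static int __init my_init(void)
	{
		/* -EBUSY if a handler is installed or sessions exist */
		return pfm_install_alt_pmu_interrupt(&my_desc);
	}

	static void __exit my_exit(void)
	{
		pfm_remove_alt_pmu_interrupt(&my_desc);
	}
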