linux 2.6.16.38 w/ vs2.0.3-rc1
[linux-2.6.git] / drivers/acpi/processor_idle.c
index 7106606..eb730a8 100644
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
+ *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *                     - Added processor hotplug support
  *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
@@ -54,10 +54,10 @@ ACPI_MODULE_NAME("acpi_processor")
 #define US_TO_PM_TIMER_TICKS(t)                ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define C2_OVERHEAD                    4       /* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD                    4       /* 1us (3.579 ticks per us) */
-static void (*pm_idle_save) (void) __read_mostly;
+static void (*pm_idle_save) (void);
 module_param(max_cstate, uint, 0644);
 
-static unsigned int nocst __read_mostly;
+static unsigned int nocst = 0;
 module_param(nocst, uint, 0000);
 
 /*
@@ -67,7 +67,7 @@ module_param(nocst, uint, 0000);
  * 100 HZ: 0x0000000F: 4 jiffies = 40ms
  * reduce history for more aggressive entry into C3
  */
-static unsigned int bm_history __read_mostly =
+static unsigned int bm_history =
     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
 /* --------------------------------------------------------------------------
@@ -97,9 +97,6 @@ static int set_max_cstate(struct dmi_system_id *id)
 /* Actually this shouldn't be __cpuinitdata, would be better to fix the
    callers to only run once -AK */
 static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
-       { set_max_cstate, "IBM ThinkPad R40e", {
-         DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
-         DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
@@ -209,11 +206,11 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 
 static void acpi_safe_halt(void)
 {
-       current_thread_info()->status &= ~TS_POLLING;
+       clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb__after_clear_bit();
        if (!need_resched())
                safe_halt();
-       current_thread_info()->status |= TS_POLLING;
+       set_thread_flag(TIF_POLLING_NRFLAG);
 }
 
 static atomic_t c3_cpu_count;
@@ -264,15 +261,21 @@ static void acpi_processor_idle(void)
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;
 
-               if (diff > 31)
-                       diff = 31;
+               if (diff > 32)
+                       diff = 32;
 
-               pr->power.bm_activity <<= diff;
+               while (diff) {
+                       /* if we didn't get called, assume there was busmaster activity */
+                       diff--;
+                       if (diff)
+                               pr->power.bm_activity |= 0x1;
+                       pr->power.bm_activity <<= 1;
+               }
 
                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                  &bm_status, ACPI_MTX_DO_NOT_LOCK);
                if (bm_status) {
-                       pr->power.bm_activity |= 0x1;
+                       pr->power.bm_activity++;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                          1, ACPI_MTX_DO_NOT_LOCK);
                }
@@ -284,16 +287,16 @@ static void acpi_processor_idle(void)
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
-                               pr->power.bm_activity |= 0x1;
+                               pr->power.bm_activity++;
                }
 
                pr->power.bm_check_timestamp = jiffies;
 
                /*
-                * If bus mastering is or was active this jiffy, demote
+                * Apply bus mastering demotion policy.  Automatically demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
-                * function) but should upon the next.
+                * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fallback to the demotion
                 *      state (use it for this quantum only) instead of
@@ -301,8 +304,7 @@ static void acpi_processor_idle(void)
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
-               if ((pr->power.bm_activity & 0x1) &&
-                   cx->demotion.threshold.bm) {
+               if (pr->power.bm_activity & cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
@@ -320,16 +322,18 @@ static void acpi_processor_idle(void)
                cx = &pr->power.states[ACPI_STATE_C1];
 #endif
 
+       cx->usage++;
+
        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
-               current_thread_info()->status &= ~TS_POLLING;
+               clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
                if (need_resched()) {
-                       current_thread_info()->status |= TS_POLLING;
+                       set_thread_flag(TIF_POLLING_NRFLAG);
                        local_irq_enable();
                        return;
                }
@@ -361,20 +365,13 @@ static void acpi_processor_idle(void)
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C2 */
                inb(cx->address);
-               /* Dummy wait op - must do something useless after P_LVL2 read
-                  because chipsets cannot guarantee that STPCLK# signal
-                  gets asserted in time to freeze execution properly. */
+               /* Dummy op - must do something useless after P_LVL2 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
-
-#ifdef CONFIG_GENERIC_TIME
-               /* TSC halts in C2, so notify users */
-               mark_tsc_unstable();
-#endif
                /* Re-enable interrupts */
                local_irq_enable();
-               current_thread_info()->status |= TS_POLLING;
+               set_thread_flag(TIF_POLLING_NRFLAG);
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
@@ -401,7 +398,7 @@ static void acpi_processor_idle(void)
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C3 */
                inb(cx->address);
-               /* Dummy wait op (see above) */
+               /* Dummy op - must do something useless after P_LVL3 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
@@ -412,13 +409,9 @@ static void acpi_processor_idle(void)
                                          ACPI_MTX_DO_NOT_LOCK);
                }
 
-#ifdef CONFIG_GENERIC_TIME
-               /* TSC halts in C3, so notify users */
-               mark_tsc_unstable();
-#endif
                /* Re-enable interrupts */
                local_irq_enable();
-               current_thread_info()->status |= TS_POLLING;
+               set_thread_flag(TIF_POLLING_NRFLAG);
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
@@ -428,9 +421,6 @@ static void acpi_processor_idle(void)
                local_irq_enable();
                return;
        }
-       cx->usage++;
-       if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
-               cx->time += sleep_ticks;
 
        next_state = pr->power.state;
 
@@ -518,9 +508,10 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;
 
+       ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");
 
        if (!pr)
-               return -EINVAL;
+               return_VALUE(-EINVAL);
 
        /*
         * This function sets the default Cx state policy (OS idle handler).
@@ -544,7 +535,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
        }
 
        if (!state_is_set)
-               return -ENODEV;
+               return_VALUE(-ENODEV);
 
        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
@@ -583,17 +574,18 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
                higher = cx;
        }
 
-       return 0;
+       return_VALUE(0);
 }
 
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
+       ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");
 
        if (!pr)
-               return -EINVAL;
+               return_VALUE(-EINVAL);
 
        if (!pr->pblk)
-               return -ENODEV;
+               return_VALUE(-ENODEV);
 
        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
@@ -605,7 +597,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
         * an SMP system. 
         */
        if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up)
-               return -ENODEV;
+               return_VALUE(-ENODEV);
 #endif
 
        /* determine C2 and C3 address from pblk */
@@ -621,11 +613,12 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));
 
-       return 0;
+       return_VALUE(0);
 }
 
 static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
 {
+       ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");
 
        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));
@@ -638,7 +631,7 @@ static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;
 
-       return 0;
+       return_VALUE(0);
 }
 
 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
@@ -650,9 +643,10 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;
 
+       ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");
 
        if (nocst)
-               return -ENODEV;
+               return_VALUE(-ENODEV);
 
        current_count = 1;
 
@@ -664,14 +658,15 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
-               return -ENODEV;
+               return_VALUE(-ENODEV);
        }
 
        cst = (union acpi_object *)buffer.pointer;
 
        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-               printk(KERN_ERR PREFIX "not enough elements in _CST\n");
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+                                 "not enough elements in _CST\n"));
                status = -EFAULT;
                goto end;
        }
@@ -680,7 +675,8 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
-               printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+                                 "count given by _CST is not valid\n"));
                status = -EFAULT;
                goto end;
        }
@@ -768,16 +764,17 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
                status = -EFAULT;
 
       end:
-       kfree(buffer.pointer);
+       acpi_os_free(buffer.pointer);
 
-       return status;
+       return_VALUE(status);
 }
 
 static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 {
+       ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c2");
 
        if (!cx->address)
-               return;
+               return_VOID;
 
        /*
         * C2 latency must be less than or equal to 100
@@ -786,7 +783,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
-               return;
+               return_VOID;
        }
 
        /*
@@ -796,7 +793,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
 
-       return;
+       return_VOID;
 }
 
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -804,9 +801,10 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 {
        static int bm_check_flag;
 
+       ACPI_FUNCTION_TRACE("acpi_processor_power_verify_c3");
 
        if (!cx->address)
-               return;
+               return_VOID;
 
        /*
         * C3 latency must be less than or equal to 1000
@@ -815,7 +813,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
-               return;
+               return_VOID;
        }
 
        /*
@@ -828,7 +826,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
-               return;
+               return_VOID;
        }
 
        /* All the logic here assumes flags.bm_check is same across all CPUs */
@@ -845,7 +843,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                if (!pr->flags.bm_control) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "C3 support requires bus mastering control\n"));
-                       return;
+                       return_VOID;
                }
        } else {
                /*
@@ -856,7 +854,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
-                       return;
+                       return_VOID;
                }
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
                                  0, ACPI_MTX_DO_NOT_LOCK);
@@ -871,7 +869,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
 
-       return;
+       return_VOID;
 }
 
 static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -880,9 +878,12 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
        unsigned int working = 0;
 
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
-       int timer_broadcast = 0;
+       struct cpuinfo_x86 *c = cpu_data + pr->id;
        cpumask_t mask = cpumask_of_cpu(pr->id);
-       on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+
+       if (c->x86_vendor == X86_VENDOR_INTEL) {
+               on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+       }
 #endif
 
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
@@ -895,20 +896,15 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-                       /* Some AMD systems fake C3 as C2, but still
-                          have timer troubles */
-                       if (cx->valid && 
-                               boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-                               timer_broadcast++;
-#endif
                        break;
 
                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
-                       if (cx->valid)
-                               timer_broadcast++;
+                       if (cx->valid && c->x86_vendor == X86_VENDOR_INTEL) {
+                               on_each_cpu(switch_APIC_timer_to_ipi,
+                                               &mask, 1, 1);
+                       }
 #endif
                        break;
                }
@@ -917,11 +913,6 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
                        working++;
        }
 
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-       if (timer_broadcast)
-               on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-#endif
-
        return (working);
 }
 
@@ -930,6 +921,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
        unsigned int i;
        int result;
 
+       ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");
 
        /* NOTE: the idle thread may not be running while calling
         * this function */
@@ -952,7 +944,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
-               return result;
+               return_VALUE(result);
 
        /*
         * if one state of type C2 or C3 is available, mark this
@@ -966,23 +958,24 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
                }
        }
 
-       return 0;
+       return_VALUE(0);
 }
 
 int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
        int result = 0;
 
+       ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");
 
        if (!pr)
-               return -EINVAL;
+               return_VALUE(-EINVAL);
 
        if (nocst) {
-               return -ENODEV;
+               return_VALUE(-ENODEV);
        }
 
        if (!pr->flags.power_setup_done)
-               return -ENODEV;
+               return_VALUE(-ENODEV);
 
        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
@@ -993,7 +986,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;
 
-       return result;
+       return_VALUE(result);
 }
 
 /* proc interface */
@@ -1003,6 +996,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        unsigned int i;
 
+       ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");
 
        if (!pr)
                goto end;
@@ -1054,14 +1048,13 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
                else
                        seq_puts(seq, "demotion[--] ");
 
-               seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
+               seq_printf(seq, "latency[%03d] usage[%08d]\n",
                           pr->power.states[i].latency,
-                          pr->power.states[i].usage,
-                          pr->power.states[i].time);
+                          pr->power.states[i].usage);
        }
 
       end:
-       return 0;
+       return_VALUE(0);
 }
 
 static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
@@ -1070,7 +1063,7 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
                           PDE(inode)->data);
 }
 
-static const struct file_operations acpi_processor_power_fops = {
+static struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -1081,10 +1074,11 @@ int acpi_processor_power_init(struct acpi_processor *pr,
                              struct acpi_device *device)
 {
        acpi_status status = 0;
-       static int first_run;
+       static int first_run = 0;
        struct proc_dir_entry *entry = NULL;
        unsigned int i;
 
+       ACPI_FUNCTION_TRACE("acpi_processor_power_init");
 
        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
@@ -1096,14 +1090,14 @@ int acpi_processor_power_init(struct acpi_processor *pr,
        }
 
        if (!pr)
-               return -EINVAL;
+               return_VALUE(-EINVAL);
 
        if (acpi_fadt.cst_cnt && !nocst) {
                status =
                    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
                if (ACPI_FAILURE(status)) {
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Notifying BIOS of _CST ability failed"));
+                       ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+                                         "Notifying BIOS of _CST ability failed\n"));
                }
        }
 
@@ -1132,7 +1126,9 @@ int acpi_processor_power_init(struct acpi_processor *pr,
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
-               return -EIO;
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+                                 "Unable to create '%s' fs entry\n",
+                                 ACPI_PROCESSOR_FILE_POWER));
        else {
                entry->proc_fops = &acpi_processor_power_fops;
                entry->data = acpi_driver_data(device);
@@ -1141,12 +1137,13 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 
        pr->flags.power_setup_done = 1;
 
-       return 0;
+       return_VALUE(0);
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
 {
+       ACPI_FUNCTION_TRACE("acpi_processor_power_exit");
 
        pr->flags.power_setup_done = 0;
 
@@ -1166,5 +1163,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
                cpu_idle_wait();
        }
 
-       return 0;
+       return_VALUE(0);
 }