arch/i386/kernel/cpu/cpufreq/longrun.c (linux-2.6.6)
Source: ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
/*
 * (C) 2002 - 2003  Dominik Brodowski <linux@brodo.de>
 *
 *  Licensed under the terms of the GNU GPL License version 2.
 *
 *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/timex.h>

static struct cpufreq_driver    longrun_driver;

/**
 * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
 * values into per cent values. In TMTA microcode, the following is valid:
 * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
 */
static unsigned int longrun_low_freq, longrun_high_freq;
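/*
 * Worked example (illustrative numbers only, not taken from any datasheet):
 * with longrun_low_freq = 300000 kHz and longrun_high_freq = 600000 kHz,
 * a current frequency of 450000 kHz corresponds to
 * (450000 - 300000) / (600000 - 300000) = 50% performance.
 */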


/**
 * longrun_get_policy - get the current LongRun policy
 * @policy: struct cpufreq_policy where current policy is written into
 *
 * Reads the current LongRun policy from MSR_TMTA_LONGRUN_FLAGS
 * and MSR_TMTA_LONGRUN_CTRL.
 */
static void __init longrun_get_policy(struct cpufreq_policy *policy)
{
        u32 msr_lo, msr_hi;

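        /* bit 0 of MSR_TMTA_LONGRUN_FLAGS selects the mode:
         * 1 = performance, 0 = economy (powersave); longrun_set_policy()
         * below writes the same bit. */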
        rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
        if (msr_lo & 0x01)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

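        /* the low seven bits of the CTRL MSR hold the LongRun window as
         * used here: eax (msr_lo) = minimum performance percentage,
         * edx (msr_hi) = maximum performance percentage. */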
        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        msr_lo &= 0x0000007F;
        msr_hi &= 0x0000007F;

        policy->min = longrun_low_freq + msr_lo *
                ((longrun_high_freq - longrun_low_freq) / 100);
        policy->max = longrun_low_freq + msr_hi *
                ((longrun_high_freq - longrun_low_freq) / 100);
        policy->cpu = 0;
}


/**
 * longrun_set_policy - sets a new CPUFreq policy
 * @policy: new policy
 *
 * Sets a new CPUFreq policy on LongRun-capable processors. This function
 * has to be called with cpufreq_driver locked.
 */
static int longrun_set_policy(struct cpufreq_policy *policy)
{
        u32 msr_lo, msr_hi;
        u32 pctg_lo, pctg_hi;

        if (!policy)
                return -EINVAL;

        pctg_lo = (policy->min - longrun_low_freq) /
                ((longrun_high_freq - longrun_low_freq) / 100);
        pctg_hi = (policy->max - longrun_low_freq) /
                ((longrun_high_freq - longrun_low_freq) / 100);

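        /* cap the upper bound at 100% and keep the lower bound at or
         * below the upper bound */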
        if (pctg_hi > 100)
                pctg_hi = 100;
        if (pctg_lo > pctg_hi)
                pctg_lo = pctg_hi;

        /* performance or economy mode */
        rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
        msr_lo &= 0xFFFFFFFE;
        switch (policy->policy) {
        case CPUFREQ_POLICY_PERFORMANCE:
                msr_lo |= 0x00000001;
                break;
        case CPUFREQ_POLICY_POWERSAVE:
                break;
        }
        wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);

        /* lower and upper boundary */
        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        msr_lo &= 0xFFFFFF80;
        msr_hi &= 0xFFFFFF80;
        msr_lo |= pctg_lo;
        msr_hi |= pctg_hi;
        wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

        return 0;
}


/**
 * longrun_verify_policy - verifies a new CPUFreq policy
 * @policy: the policy to verify
 *
 * Validates a new CPUFreq policy. This function has to be called with
 * cpufreq_driver locked.
 */
static int longrun_verify_policy(struct cpufreq_policy *policy)
{
        if (!policy)
                return -EINVAL;

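        /* this driver only handles CPU 0 (see the check in
         * longrun_cpu_init()), so pin the policy to that CPU */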
        policy->cpu = 0;
        cpufreq_verify_within_limits(policy,
                policy->cpuinfo.min_freq,
                policy->cpuinfo.max_freq);

        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
            (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
                return -EINVAL;

        return 0;
}


/**
 * longrun_determine_freqs - determines the lowest and highest possible core frequency
 * @low_freq: an int to put the lowest frequency into
 * @high_freq: an int to put the highest frequency into
 *
 * Determines the lowest and highest possible core frequencies on this CPU.
 * This is necessary to calculate the performance percentage according to
 * TMTA rules:
 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
 */
static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
                                                   unsigned int *high_freq)
{
        u32 msr_lo, msr_hi;
        u32 save_lo, save_hi;
        u32 eax, ebx, ecx, edx;
        u32 try_hi;
        struct cpuinfo_x86 *c = cpu_data;

        if (!low_freq || !high_freq)
                return -EINVAL;

        if (cpu_has(c, X86_FEATURE_LRTI)) {
                /* if the LongRun Table Interface is present, the
                 * detection is a bit easier:
                 * For minimum frequency, read out the maximum
                 * level (msr_hi), write that into "currently
                 * selected level", and read out the frequency.
                 * For maximum frequency, read out level zero.
                 */
                /* minimum */
                rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
                wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
                rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
                *low_freq = msr_lo * 1000; /* to kHz */

                /* maximum */
                wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
                rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
                *high_freq = msr_lo * 1000; /* to kHz */

                if (*low_freq > *high_freq)
                        *low_freq = *high_freq;
                return 0;
        }

        /* set the upper border to the value determined during TSC init */
        *high_freq = (cpu_khz / 1000);
        *high_freq = *high_freq * 1000;

        /* get current borders */
        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        save_lo = msr_lo & 0x0000007F;
        save_hi = msr_hi & 0x0000007F;

        /* if current perf_pctg is larger than 90%, we need to decrease the
         * upper limit to make the calculation more accurate.
         */
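        /* CPUID leaf 0x80860007 is Transmeta-specific; as used below,
         * eax returns the current core frequency in MHz and ecx the
         * current LongRun performance percentage (ebx and edx are read
         * but not needed here). */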
        cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
        /* try decreasing in 10% steps; some processors react only
         * at certain threshold values */
        for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
                /* set the window to 0..try_hi perf_pctg */
                msr_lo &= 0xFFFFFF80;
                msr_hi &= 0xFFFFFF80;
                msr_lo |= 0;
                msr_hi |= try_hi;
                wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

                /* read out current core MHz and current perf_pctg */
                cpuid(0x80860007, &eax, &ebx, &ecx, &edx);

                /* restore values */
                wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
        }
        /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
         * Rearranged: cur_freq - low_freq = perf_pctg * (high_freq - low_freq),
         * which gives
         * low_freq * (1 - perf_pctg) = cur_freq - high_freq * perf_pctg
         *
         * high_freq * perf_pctg is stored temporarily in "ebx" (in MHz).
         */
        ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */

        if ((ecx > 95) || (ecx == 0) || (eax < ebx))
                return -EIO;

        edx = (eax - ebx) / (100 - ecx);
        *low_freq = edx * 1000; /* back to kHz */

        if (*low_freq > *high_freq)
                *low_freq = *high_freq;

        return 0;
}


static int __init longrun_cpu_init(struct cpufreq_policy *policy)
{
        int                     result = 0;

        /* capability check */
        if (policy->cpu != 0)
                return -ENODEV;

        /* detect low and high frequency */
        result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
        if (result)
                return result;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = longrun_low_freq;
        policy->cpuinfo.max_freq = longrun_high_freq;
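        /* LongRun switches frequency autonomously in firmware, so no
         * meaningful transition latency can be reported; CPUFREQ_ETERNAL
         * marks it as unspecified. */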
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        longrun_get_policy(policy);

        return 0;
}


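/* This is a "setpolicy" cpufreq driver: the hardware picks the actual
 * operating frequency within the policy bounds, so no ->target()
 * callback is provided. */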
static struct cpufreq_driver longrun_driver = {
        .verify         = longrun_verify_policy,
        .setpolicy      = longrun_set_policy,
        .init           = longrun_cpu_init,
        .name           = "longrun",
        .owner          = THIS_MODULE,
};


/**
 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
 *
 * Initializes the LongRun support.
 */
static int __init longrun_init(void)
{
        struct cpuinfo_x86 *c = cpu_data;

        if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
            !cpu_has(c, X86_FEATURE_LONGRUN))
                return -ENODEV;

        return cpufreq_register_driver(&longrun_driver);
}


/**
 * longrun_exit - unregisters LongRun support
 */
static void __exit longrun_exit(void)
{
        cpufreq_unregister_driver(&longrun_driver);
}


MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe processors.");
MODULE_LICENSE ("GPL");

module_init(longrun_init);
module_exit(longrun_exit);