/**
 * P4 model-specific MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Graydon Hoare
 */
#include <linux/oprofile.h>
#include <linux/smp.h>

#include <asm/ptrace.h>
#include <asm/fixmap.h>
#include <asm/apic.h>

#include "op_x86_model.h"
#include "op_counter.h"
#define NUM_COUNTERS_NON_HT 8
#define NUM_ESCRS_NON_HT 45
#define NUM_CCCRS_NON_HT 18
#define NUM_CONTROLS_NON_HT (NUM_ESCRS_NON_HT + NUM_CCCRS_NON_HT)

#define NUM_COUNTERS_HT2 4
#define NUM_ESCRS_HT2 23
#define NUM_CCCRS_HT2 9
#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
static unsigned int num_counters = NUM_COUNTERS_NON_HT;

/* this has to be checked dynamically since the
   hyper-threadedness of a chip is discovered at
   run-time */
static inline void setup_num_counters(void)
{
	if (smp_num_siblings == 2)
		num_counters = NUM_COUNTERS_HT2;
}
static inline int addr_increment(void)
{
	return smp_num_siblings == 2 ? 2 : 1;
}
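
/*
 * Note: when smp_num_siblings == 2, the stride of 2 returned here makes
 * the MSR enumeration loops in p4_fill_in_addresses() and
 * p4_setup_ctrs() skip the sibling thread's copy of each staggered
 * register, so each logical CPU touches only its own half.
 */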
/* tables to simulate simplified hardware view of p4 registers */
struct p4_counter_binding {
	int virt_counter;
	int counter_address;
	int cccr_address;
};

struct p4_event_binding {
	int escr_select;  /* value to put in CCCR */
	int event_select; /* value to put in ESCR */
	struct {
		int virt_counter; /* for this counter... */
		int escr_address; /* use this ESCR */
	} bindings[2];
};
/* nb: these CTR_* defines are a duplicate of defines in
   event/i386.p4*events. */

#define CTR_BPU_0      (1 << 0)
#define CTR_MS_0       (1 << 1)
#define CTR_FLAME_0    (1 << 2)
#define CTR_IQ_4       (1 << 3)
#define CTR_BPU_2      (1 << 4)
#define CTR_MS_2       (1 << 5)
#define CTR_FLAME_2    (1 << 6)
#define CTR_IQ_5       (1 << 7)
static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
	{ CTR_BPU_0,   MSR_P4_BPU_PERFCTR0,   MSR_P4_BPU_CCCR0 },
	{ CTR_MS_0,    MSR_P4_MS_PERFCTR0,    MSR_P4_MS_CCCR0 },
	{ CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
	{ CTR_IQ_4,    MSR_P4_IQ_PERFCTR4,    MSR_P4_IQ_CCCR4 },
	{ CTR_BPU_2,   MSR_P4_BPU_PERFCTR2,   MSR_P4_BPU_CCCR2 },
	{ CTR_MS_2,    MSR_P4_MS_PERFCTR2,    MSR_P4_MS_CCCR2 },
	{ CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2 },
	{ CTR_IQ_5,    MSR_P4_IQ_PERFCTR5,    MSR_P4_IQ_CCCR5 }
};
#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
/* all the CCCRs we don't use */
static int p4_unused_cccr[NUM_UNUSED_CCCRS] = {
	MSR_P4_BPU_CCCR1,   MSR_P4_BPU_CCCR3,
	MSR_P4_MS_CCCR1,    MSR_P4_MS_CCCR3,
	MSR_P4_FLAME_CCCR1, MSR_P4_FLAME_CCCR3,
	MSR_P4_IQ_CCCR0,    MSR_P4_IQ_CCCR1,
	MSR_P4_IQ_CCCR2,    MSR_P4_IQ_CCCR3
};
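
/*
 * Sanity check on the sizes: NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT is
 * 18 - 8 = 10, matching the ten CCCR MSRs listed above.
 */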
/* p4 event codes in libop/op_event.h are indices into this table. */

static struct p4_event_binding p4_events[NUM_EVENTS] = {

	{ /* BRANCH_RETIRED */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }

	{ /* MISPRED_BRANCH_RETIRED */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }

	{ /* TC_DELIVER_MODE */
		{ { CTR_MS_0, MSR_P4_TC_ESCR0},
		  { CTR_MS_2, MSR_P4_TC_ESCR1} }

	{ /* BPU_FETCH_REQUEST */
		{ { CTR_BPU_0, MSR_P4_BPU_ESCR0},
		  { CTR_BPU_2, MSR_P4_BPU_ESCR1} }

	{ /* ITLB_REFERENCE */
		{ { CTR_BPU_0, MSR_P4_ITLB_ESCR0},
		  { CTR_BPU_2, MSR_P4_ITLB_ESCR1} }

	{ /* MEMORY_CANCEL */
		{ { CTR_FLAME_0, MSR_P4_DAC_ESCR0},
		  { CTR_FLAME_2, MSR_P4_DAC_ESCR1} }

	{ /* MEMORY_COMPLETE */
		{ { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
		  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }

	{ /* LOAD_PORT_REPLAY */
		{ { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
		  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }

	{ /* STORE_PORT_REPLAY */
		{ { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
		  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }

	{ /* MOB_LOAD_REPLAY */
		{ { CTR_BPU_0, MSR_P4_MOB_ESCR0},
		  { CTR_BPU_2, MSR_P4_MOB_ESCR1} }

	{ /* PAGE_WALK_TYPE */
		{ { CTR_BPU_0, MSR_P4_PMH_ESCR0},
		  { CTR_BPU_2, MSR_P4_PMH_ESCR1} }

	{ /* BSQ_CACHE_REFERENCE */
		{ { CTR_BPU_0, MSR_P4_BSU_ESCR0},
		  { CTR_BPU_2, MSR_P4_BSU_ESCR1} }

	{ /* IOQ_ALLOCATION */
		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},

	{ /* IOQ_ACTIVE_ENTRIES */
		{ { CTR_BPU_2, MSR_P4_FSB_ESCR1},

	{ /* FSB_DATA_ACTIVITY */
		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
		  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }

	{ /* BSQ_ALLOCATION */
		{ { CTR_BPU_0, MSR_P4_BSU_ESCR0},

	{ /* BSQ_ACTIVE_ENTRIES */
		{ { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},

		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }

	{ /* SSE_INPUT_ASSIST */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* PACKED_SP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* PACKED_DP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* SCALAR_SP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* SCALAR_DP_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* 64BIT_MMX_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* 128BIT_MMX_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* X87_SIMD_MOVES_UOP */
		{ { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
		  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }

	{ /* MACHINE_CLEAR */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }

	{ /* GLOBAL_POWER_EVENTS */
		0x06, 0x13 /* older manual says 0x05, newer 0x13 */,
		{ { CTR_BPU_0, MSR_P4_FSB_ESCR0},
		  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }

		{ { CTR_MS_0, MSR_P4_MS_ESCR0},
		  { CTR_MS_2, MSR_P4_MS_ESCR1} }

	{ /* UOP_QUEUE_WRITES */
		{ { CTR_MS_0, MSR_P4_MS_ESCR0},
		  { CTR_MS_2, MSR_P4_MS_ESCR1} }

	{ /* FRONT_END_EVENT */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }

	{ /* EXECUTION_EVENT */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }

		{ { CTR_IQ_4, MSR_P4_CRU_ESCR2},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }

	{ /* INSTR_RETIRED */
		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }

		{ { CTR_IQ_4, MSR_P4_CRU_ESCR0},
		  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }

		{ { CTR_IQ_4, MSR_P4_RAT_ESCR0},
		  { CTR_IQ_5, MSR_P4_RAT_ESCR1} }

	{ /* RETIRED_MISPRED_BRANCH_TYPE */
		{ { CTR_MS_0, MSR_P4_TBPU_ESCR0},
		  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }

	{ /* RETIRED_BRANCH_TYPE */
		{ { CTR_MS_0, MSR_P4_TBPU_ESCR0},
		  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
};
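
/*
 * Note: event codes stored in counter_config[].event are 1-based;
 * pmc_setup_one_p4_counter() indexes this table with
 * p4_events[counter_config[ctr].event - 1], and an event may only be
 * counted on one of the two virtual counters named in its bindings.
 */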
#define MISC_PMC_ENABLED_P(x) ((x) & (1 << 7))
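
/*
 * Bit 7 of MSR_IA32_MISC_ENABLE is the read-only "performance
 * monitoring available" flag; p4_setup_ctrs() checks it before
 * touching any counter MSRs.
 */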
#define ESCR_RESERVED_BITS 0x80000003
#define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS)
#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1) << 2))
#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1) << 3))
#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1)))
#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0)
#define CCCR_RESERVED_BITS 0x38030FFF
#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000)
#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07) << 13))
#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1 << 26))
#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1 << 27))
#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1 << 12))
#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1 << 12))
#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0)
#define CCCR_OVF_P(cccr) ((cccr) & (1U << 31))
#define CCCR_CLEAR_OVF(cccr) ((cccr) &= ~(1U << 31))

#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0)
#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0)
#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
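
/*
 * Note: CTR_WRITE loads the two's complement of the reset count
 * (-(u32)(l), with the high half written as -1), so the 40-bit counter
 * counts upward and overflows, raising a PMI, after "l" events.
 * CTR_OVERFLOW_P treats a clear bit 31 as "overflowed", since a
 * freshly reloaded counter always has that bit set.
 */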
/* this assigns a "stagger" to the current CPU, which is used throughout
   the code in this module as an extra array offset, to select the "even"
   or "odd" part of all the divided resources. */
static unsigned int get_stagger(void)
{
	int cpu = smp_processor_id();
	return (cpu != first_cpu(cpu_sibling_map[cpu]));
}

/* finally, mediate access to a real hardware counter
   by passing a "virtual" counter number to this macro,
   along with your stagger setting. */
#define VIRT_CTR(stagger, i) ((i) + ((num_counters) * (stagger)))
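
/*
 * Worked example: on an HT chip num_counters is 4, so VIRT_CTR(0, 2)
 * == 2 (CTR_FLAME_0 / MSR_P4_FLAME_PERFCTR0 in p4_counters[]) while
 * VIRT_CTR(1, 2) == 6 (CTR_FLAME_2 / MSR_P4_FLAME_PERFCTR2): the same
 * virtual counter number selects the even or odd half of the table
 * depending on the calling thread's stagger.
 */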
static unsigned long reset_value[NUM_COUNTERS_NON_HT];

static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, stag;

	setup_num_counters();
	stag = get_stagger();

	/* the counter registers we pay attention to */
	for (i = 0; i < num_counters; ++i) {
		msrs->counters[i].addr =
			p4_counters[VIRT_CTR(stag, i)].counter_address;
	}

	/* FIXME: we don't save the 10 counters we don't use. */

	/* 18 CCCR registers */
	for (i = 0, addr = MSR_P4_BPU_CCCR0 + stag;
	     addr <= MSR_P4_IQ_CCCR5; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* 43 ESCR registers in three discontiguous groups */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		msrs->controls[i].addr = addr;
	}

	/* there are 2 remaining non-contiguously located ESCRs */

	if (num_counters == NUM_COUNTERS_NON_HT) {
		/* standard non-HT CPUs handle both remaining ESCRs */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;

	} else if (stag == 0) {
		/* HT CPUs give the first remainder to the even thread, as
		   the 32nd control register */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;

	} else {
		/* and two copies of the second to the odd thread,
		   for the 22nd and 23rd control registers */
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
	}
}
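
/*
 * The control-register arithmetic adds up in the non-HT case: 18 CCCRs
 * plus 43 contiguously-enumerated ESCRs plus the 2 stray CRU ESCRs
 * equals NUM_CONTROLS_NON_HT (45 + 18 = 63).
 */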
static void pmc_setup_one_p4_counter(unsigned int ctr)
{
	int i;
	int const maxbind = 2;
	unsigned int cccr = 0;
	unsigned int escr = 0;
	unsigned int high = 0;
	unsigned int counter_bit;
	struct p4_event_binding *ev = NULL;
	unsigned int stag;

	stag = get_stagger();

	/* convert from counter *number* to counter *bit* */
	counter_bit = 1 << VIRT_CTR(stag, ctr);

	/* find our event binding structure. */
	if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
		printk(KERN_ERR
		       "oprofile: P4 event code 0x%lx out of range\n",
		       counter_config[ctr].event);
		return;
	}

	ev = &(p4_events[counter_config[ctr].event - 1]);

	for (i = 0; i < maxbind; i++) {
		if (ev->bindings[i].virt_counter & counter_bit) {

			/* modify ESCR */
			ESCR_READ(escr, high, ev, i);
			ESCR_CLEAR(escr);
			if (stag == 0) {
				ESCR_SET_USR_0(escr, counter_config[ctr].user);
				ESCR_SET_OS_0(escr, counter_config[ctr].kernel);
			} else {
				ESCR_SET_USR_1(escr, counter_config[ctr].user);
				ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
			}
			ESCR_SET_EVENT_SELECT(escr, ev->event_select);
			ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
			ESCR_WRITE(escr, high, ev, i);

			/* modify CCCR */
			CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
			CCCR_CLEAR(cccr);
			CCCR_SET_REQUIRED_BITS(cccr);
			CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
			if (stag == 0)
				CCCR_SET_PMI_OVF_0(cccr);
			else
				CCCR_SET_PMI_OVF_1(cccr);
			CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
			return;
		}
	}

	printk(KERN_ERR
	       "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
	       counter_config[ctr].event, stag, ctr);
}
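
/*
 * Worked example: event code 1 is BRANCH_RETIRED, whose bindings name
 * CTR_IQ_4 and CTR_IQ_5. On the even thread a counter with
 * VIRT_CTR(0, ctr) == 3 yields counter_bit 1 << 3 == CTR_IQ_4, so
 * binding 0 matches and MSR_P4_CRU_ESCR2 is programmed; on the odd
 * thread VIRT_CTR(1, ctr) == 7 matches CTR_IQ_5 and MSR_P4_CRU_ESCR3
 * is used instead.
 */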
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int addr;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear cccrs outside our concern */
	for (i = stag; i < NUM_UNUSED_CCCRS; i += addr_increment()) {
		rdmsr(p4_unused_cccr[i], low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_unused_cccr[i], low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()) {
		wrmsr(addr, 0, 0);
	}

	if (num_counters == NUM_COUNTERS_NON_HT) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	} else if (stag == 0) {
		wrmsr(MSR_P4_CRU_ESCR4, 0, 0);
	} else {
		wrmsr(MSR_P4_CRU_ESCR5, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].event) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		}
	}
}
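
/*
 * Note: CCCR_SET_REQUIRED_BITS sets bits 16-17 (0x00030000), which the
 * P4 documentation marks as reserved bits that must be written as 1;
 * that is why even the unused CCCRs above are rewritten with these
 * bits set rather than simply cleared to zero.
 */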
static int p4_check_ctrs(unsigned int const cpu,
			 struct op_msrs const * const msrs,
			 struct pt_regs * const regs)
{
	unsigned long ctr, low, high, stag, real;
	int i;

	unsigned long eip = instruction_pointer(regs);
	int is_kernel = !user_mode(regs);

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {

		/*
		 * there is some eccentricity in the hardware which
		 * requires that we perform 2 extra corrections:
		 *
		 * - check both the CCCR:OVF flag for overflow and the
		 *   counter high bit for un-flagged overflows.
		 *
		 * - write the counter back twice to ensure it gets
		 *   updated properly.
		 *
		 * the former seems to be related to extra NMIs happening
		 * during the current NMI; the latter is reported as errata
		 * N15 in Intel doc 249199-029, Pentium 4 specification
		 * update, though their suggested work-around does not
		 * appear to solve the problem.
		 */

		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			CTR_WRITE(reset_value[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
			CTR_WRITE(reset_value[i], real);
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* See op_model_ppro.c */
	return 1;
}
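
/*
 * The re-unmask above is needed because delivery of the P4 counter
 * overflow interrupt sets the mask bit in the local APIC LVTPC entry;
 * without clearing it, no further PMIs would be delivered.
 */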
static void p4_start(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_ENABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}
static void p4_stop(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_DISABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}
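
/*
 * Note: p4_start() and p4_stop() only toggle the CCCR enable bit
 * (bit 12) on the staggered counters; the ESCR event selection
 * programmed by p4_setup_ctrs() stays in place across a stop/start.
 */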
struct op_x86_model_spec const op_p4_ht2_spec = {
	.num_counters = NUM_COUNTERS_HT2,
	.num_controls = NUM_CONTROLS_HT2,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop
};

struct op_x86_model_spec const op_p4_spec = {
	.num_counters = NUM_COUNTERS_NON_HT,
	.num_controls = NUM_CONTROLS_NON_HT,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop
};
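
/*
 * Presumably the model-detection code in the driver registers
 * op_p4_ht2_spec when smp_num_siblings == 2 and op_p4_spec otherwise,
 * mirroring the run-time check in setup_num_counters().
 */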