/**
 * @file op_model_xscale.c
 * XScale Performance Monitor Driver
 *
 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
 * @remark Copyright 2000-2004 MontaVista Software Inc
 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
 * @remark Copyright 2004 Intel Corporation
 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
 * @remark Copyright 2004 Oprofile Authors
 *
 * @remark Read the file COPYING
 *
 * @author Zwane Mwaikambo
 */
18 #include <linux/types.h>
19 #include <linux/errno.h>
20 #include <linux/sched.h>
21 #include <linux/oprofile.h>
22 #include <linux/interrupt.h>
24 #include <asm/system.h>
26 #include "op_counter.h"
27 #include "op_arm_model.h"
/* Bits in the Performance Monitor Control Register (PMNC, CP14) */
#define PMU_ENABLE 0x001 /* Enable counters */
#define PMN_RESET 0x002 /* Reset event counters */
#define CCNT_RESET 0x004 /* Reset clock counter */
/* Reset event counters and the clock counter in one write */
#define PMU_RESET (CCNT_RESET | PMN_RESET)
/* TODO do runtime detection */
/*
 * Select the PMU interrupt line at build time from the platform config.
 * Each conditional is closed with its own #endif so at most one
 * definition of XSCALE_PMU_IRQ survives preprocessing.
 */
#ifdef CONFIG_ARCH_IOP310
#define XSCALE_PMU_IRQ	IRQ_XS80200_PMU
#endif
#ifdef CONFIG_ARCH_IOP321
#define XSCALE_PMU_IRQ	IRQ_IOP321_CORE_PMU
#endif
#ifdef CONFIG_ARCH_IOP331
#define XSCALE_PMU_IRQ	IRQ_IOP331_CORE_PMU
#endif
/*
 * Different types of events that can be counted by the XScale PMU
 * as used by Oprofile userspace. Here primarily for documentation
 * purposes.
 */

#define EVT_ICACHE_MISS			0x00
#define EVT_ICACHE_NO_DELIVER		0x01
#define EVT_DATA_STALL			0x02
#define EVT_ITLB_MISS			0x03
#define EVT_DTLB_MISS			0x04
#define EVT_BRANCH			0x05
#define EVT_BRANCH_MISS			0x06
#define EVT_INSTRUCTION			0x07
#define EVT_DCACHE_FULL_STALL		0x08
#define EVT_DCACHE_FULL_STALL_CONTIG	0x09
#define EVT_DCACHE_ACCESS		0x0A
#define EVT_DCACHE_MISS			0x0B
#define EVT_DCACE_WRITE_BACK		0x0C	/* NOTE(review): historical typo ("DCACE"), kept for compatibility */
#define EVT_PC_CHANGED			0x0D
#define EVT_BCU_REQUEST			0x10
#define EVT_BCU_FULL			0x11
#define EVT_BCU_DRAIN			0x12
#define EVT_BCU_ECC_NO_ELOG		0x14
#define EVT_BCU_1_BIT_ERR		0x15
#define EVT_RMW				0x16
/* EVT_CCNT is not hardware defined */
#define EVT_CCNT			0xFE
#define EVT_UNUSED			0xFF
/*
 * Per-counter bookkeeping.
 * @ovf: number of overflow interrupts seen for this counter that have
 *       not yet been converted into oprofile samples (bumped in the
 *       check_ctrs paths, consumed in the interrupt handler).
 * @reset_counter: sample period; the counter is reloaded with the
 *       negated value so it overflows after this many events.
 */
struct pmu_counter {
	volatile unsigned long ovf;
	unsigned long reset_counter;
};
/* Counter indices: CCNT is the clock counter, PMN0..PMN3 the event counters */
enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };
/* Per-counter overflow/period state, indexed by the enum above */
static struct pmu_counter results[MAX_COUNTERS];
/*
 * There are two versions of the PMU in current XScale processors
 * with differing register layouts and number of performance counters.
 * e.g. IOP321 is xsc1 whilst IOP331 is xsc2.
 * We detect which register layout to use in xscale_detect_pmu()
 */
enum { PMU_XSC1, PMU_XSC2 };
96 unsigned int int_enable;
97 unsigned int cnt_ovf[MAX_COUNTERS];
98 unsigned int int_mask[MAX_COUNTERS];
101 static struct pmu_type pmu_parms[] = {
104 .name = "arm/xscale1",
106 .int_mask = { [PMN0] = 0x10, [PMN1] = 0x20,
108 .cnt_ovf = { [CCNT] = 0x400, [PMN0] = 0x100,
113 .name = "arm/xscale2",
115 .int_mask = { [CCNT] = 0x01, [PMN0] = 0x02,
116 [PMN1] = 0x04, [PMN2] = 0x08,
118 .cnt_ovf = { [CCNT] = 0x01, [PMN0] = 0x02,
119 [PMN1] = 0x04, [PMN2] = 0x08,
124 static struct pmu_type *pmu;
126 static void write_pmnc(u32 val)
128 /* upper 4bits and 7, 11 are write-as-0 */
130 if (pmu->id == PMU_XSC1)
131 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
133 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
136 static u32 read_pmnc(void)
140 if (pmu->id == PMU_XSC1)
141 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
143 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
148 static u32 __xsc1_read_counter(int counter)
154 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
157 __asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
160 __asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
166 static u32 __xsc2_read_counter(int counter)
172 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
175 __asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
178 __asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
181 __asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
184 __asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
190 static u32 read_counter(int counter)
194 if (pmu->id == PMU_XSC1)
195 val = __xsc1_read_counter(counter);
197 val = __xsc2_read_counter(counter);
202 static void __xsc1_write_counter(int counter, u32 val)
206 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
209 __asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
212 __asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
217 static void __xsc2_write_counter(int counter, u32 val)
221 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
224 __asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
227 __asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
230 __asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
233 __asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
238 static void write_counter(int counter, u32 val)
240 if (pmu->id == PMU_XSC1)
241 __xsc1_write_counter(counter, val);
243 __xsc2_write_counter(counter, val);
246 static int xscale_setup_ctrs(void)
251 for (i = CCNT; i < MAX_COUNTERS; i++) {
252 if (counter_config[i].event)
255 counter_config[i].event = EVT_UNUSED;
260 pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
261 pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
266 evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
267 (counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);
269 pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
270 __asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
274 for (i = CCNT; i < MAX_COUNTERS; i++) {
275 if (counter_config[i].event == EVT_UNUSED) {
276 counter_config[i].event = 0;
277 pmu->int_enable &= ~pmu->int_mask[i];
281 results[i].reset_counter = counter_config[i].count;
282 write_counter(i, -(u32)counter_config[i].count);
283 pmu->int_enable |= pmu->int_mask[i];
284 pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
285 read_counter(i), counter_config[i].count);
291 static void inline __xsc1_check_ctrs(void)
294 u32 pmnc = read_pmnc();
296 /* NOTE: there's an A stepping errata that states if an overflow */
297 /* bit already exists and another occurs, the previous */
298 /* Overflow bit gets cleared. There's no workaround. */
299 /* Fixed in B stepping or later */
301 pmnc &= ~(PMU_ENABLE | pmu->cnt_ovf[PMN0] | pmu->cnt_ovf[PMN1] |
305 for (i = CCNT; i <= PMN1; i++) {
306 if (!(pmu->int_mask[i] & pmu->int_enable))
309 if (pmnc & pmu->cnt_ovf[i])
314 static void inline __xsc2_check_ctrs(void)
317 u32 flag = 0, pmnc = read_pmnc();
322 /* read overflow flag register */
323 __asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag));
325 for (i = CCNT; i <= PMN3; i++) {
326 if (!(pmu->int_mask[i] & pmu->int_enable))
329 if (flag & pmu->cnt_ovf[i])
333 /* writeback clears overflow bits */
334 __asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag));
337 static irqreturn_t xscale_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
339 unsigned long eip = instruction_pointer(regs);
340 int i, is_kernel = !user_mode(regs);
343 if (pmu->id == PMU_XSC1)
348 for (i = CCNT; i < MAX_COUNTERS; i++) {
352 write_counter(i, -(u32)results[i].reset_counter);
353 oprofile_add_sample(eip, is_kernel, i, smp_processor_id());
357 pmnc = read_pmnc() | PMU_ENABLE;
363 static void xscale_pmu_stop(void)
365 u32 pmnc = read_pmnc();
370 free_irq(XSCALE_PMU_IRQ, results);
373 static int xscale_pmu_start(void)
376 u32 pmnc = read_pmnc();
378 ret = request_irq(XSCALE_PMU_IRQ, xscale_pmu_interrupt, SA_INTERRUPT,
379 "XScale PMU", (void *)results);
382 printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n",
387 if (pmu->id == PMU_XSC1)
388 pmnc |= pmu->int_enable;
390 __asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable));
394 pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable);
398 static int xscale_detect_pmu(void)
403 id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
407 pmu = &pmu_parms[PMU_XSC1];
410 pmu = &pmu_parms[PMU_XSC2];
418 op_xscale_spec.name = pmu->name;
419 op_xscale_spec.num_counters = pmu->num_counters;
420 pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name);
/* Model hooks handed to the OProfile ARM common layer (see op_arm_model.h) */
struct op_arm_model_spec op_xscale_spec = {
	.init = xscale_detect_pmu,
	.setup_ctrs = xscale_setup_ctrs,
	.start = xscale_pmu_start,
	.stop = xscale_pmu_stop,