/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2003 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * 	http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/version.h>

#include <asm/bitops.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 * 	bit0   : register implemented
 * 	bit1   : end marker
 * 	bit2-3 : reserved
 * 	bit4   : pmc has pmc.pm
 * 	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 * 	bit6-7 : register type
 * 	bit8-31: reserved
 */
#define PFM_REG_NOTIMPL		0x0 /* not implemented at all */
#define PFM_REG_IMPL		0x1 /* register implemented */
#define PFM_REG_END		0x2 /* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL) /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
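
/*
 * Illustrative note (not in the original source): the type values above
 * compose, so a test of the form (type & PFM_REG_COUNTING) == PFM_REG_COUNTING
 * matches counting registers only. For example:
 *
 *	PFM_REG_MONITOR  = (0x1<<4)|0x1  = 0x11  (implemented + pmc.pm)
 *	PFM_REG_COUNTING = (0x2<<4)|0x11 = 0x31  (monitor + pmc.oi/counter)
 *
 * hence every PFM_REG_COUNTING register also qualifies as PFM_REG_MONITOR
 * and PFM_REG_IMPL, which is what the PMC_IS_* macros below rely on.
 */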
#define PMC_IS_LAST(i)	(pmu_conf.pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf.pmd_desc[i].type & PFM_REG_END)

#define PFM_IS_DISABLED() (pmu_conf.enabled == 0)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	  (i< PMU_MAX_PMCS && (pmu_conf.pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i< PMU_MAX_PMDS && (pmu_conf.pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf.pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf.pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf.pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf.pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf.pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf.pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf.pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf.pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */
#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)
/*
 * context protection macros
 * in SMP:
 * 	- we need to protect against CPU concurrency (spin_lock)
 * 	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 * 	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 * 	in SMP: local_irq_disable + spin_lock
 * 	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 * 	in UP : removed automatically
 * 	in SMP: protect against context accesses from other CPUs. Interrupts
 * 	        are not masked. This is useful for the PMU interrupt handler
 * 	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do {  \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
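
/*
 * Typical usage of the protection macros (illustrative sketch, not part
 * of the original source):
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);
 *	... update ctx state: safe from other CPUs and from the PMU
 *	    overflow interrupt on this CPU ...
 *	UNPROTECT_CTX(ctx, flags);
 *
 * PROTECT_CTX_NOIRQ() is reserved for paths, such as the overflow handler
 * itself, where interrupts are already disabled or PMU concurrency cannot
 * happen, per the comment block above.
 */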
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */
#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)
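
/*
 * Illustrative example (based on the IA-64 architected PMC0 layout, where
 * bit0 is the freeze bit and the higher bits are per-counter overflow
 * bits; this note is not in the original source): a pmc0 value of 0x21
 * means freeze=1 with overflow bit 5 set, so PMC0_HAS_OVFL(0x21) is true,
 * while 0x1 (freeze only, no overflow) yields false.
 */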
#define PFMFS_MAGIC 0xa0b4d889
/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)
#endif
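
/*
 * The debug flags tested above are runtime-settable through the sysctl
 * entries registered further down in pfm_ctl_table. For example
 * (illustrative usage, not part of the original source):
 *
 *	echo 1 > /proc/sys/kernel/perfmon/debug
 *	echo 1 > /proc/sys/kernel/perfmon/debug_ovfl
 *
 * turns on DPRINT() and DPRINT_ovfl() output via printk.
 */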
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when the counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;
#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */
/*
 * perfmon context: encapsulates all the state of a monitoring session
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct semaphore	ctx_restart_sem;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[IA64_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];		/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];		/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[IA64_NUM_PMD_REGS];	/* software state for PMDS */

	u64			ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;
/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)
#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif
#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart
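
/*
 * These shortcuts rely on plain token substitution (illustrative note,
 * not in the original source): with a context pointer ctx, an access
 * such as
 *
 *	ctx->ctx_fl_block
 *
 * expands to ctx->ctx_flags.block, keeping call sites terse while the
 * flags stay packed in the pfm_context_flags_t bitfield.
 */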
#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;
/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef struct {
	unsigned int	type;
	int		pm_pos;
	unsigned long	default_value;	/* power-on default value */
	unsigned long	reserved_mask;	/* bitmask of reserved bits */
	int		(*read_check)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
	int		(*write_check)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
	unsigned long	dep_pmd[4];
	unsigned long	dep_pmc[4];
} pfm_reg_desc_t;
/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf.pmc_desc[cnum].pm_pos)) & 0x1)
#define PMC_WR_FUNC(cnum)	(pmu_conf.pmc_desc[cnum].write_check)
#define PMD_WR_FUNC(cnum)	(pmu_conf.pmd_desc[cnum].write_check)
#define PMD_RD_FUNC(cnum)	(pmu_conf.pmd_desc[cnum].read_check)
/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 */
typedef struct {
	unsigned long  ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t *pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t *pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int   num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int   num_pmds;	/* number of PMDS: computed at init time */
	unsigned long  impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long  impl_pmds[4];	/* bitmask of implemented PMDS */

	char	      *pmu_name;	/* PMU family name */
	unsigned int  enabled;		/* indicates if perfmon initialized properly */
	unsigned int  pmu_family;	/* cpuid family pattern used to identify pmu */

	unsigned int  num_ibrs;		/* number of IBRS: computed at init time */
	unsigned int  num_dbrs;		/* number of DBRS: computed at init time */
	unsigned int  num_counters;	/* PMC/PMD counting pairs: computed at init time */

	unsigned int  use_rr_dbregs:1;	/* set if debug registers used for range restriction */
} pmu_config_t;
/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;
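
/*
 * Layout implied by the bitfields above, low to high bits of the 64-bit
 * register (illustrative note, not in the original source): a 56-bit
 * address mask, a 4-bit privilege-level mask (plm), ignored bits, and
 * the enable bits (x for instruction breakpoints, r/w for data
 * breakpoints). Illustrative decomposition of a data breakpoint value:
 *
 *	dbr value = mask | (plm << 56) | (w << 62) | (r << 63)
 */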
/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;
#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
typedef struct {
	int	debug;		/* turn on/off debugging via syslog */
	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
	int	fastctxsw;	/* turn on/off fast (insecure) ctxsw */
	int	expert_mode;	/* turn on/off value checking */
} pfm_sysctl_t;
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

/* sysctl() controls */
static pfm_sysctl_t pfm_sysctl;
static ctl_table pfm_ctl_table[] = {
	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{0,},
};
static ctl_table pfm_sysctl_dir[] = {
	{1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
	{0,},
};
static ctl_table pfm_sysctl_root[] = {
	{1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
	{0,},
};
static struct ctl_table_header *pfm_sysctl_header;
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static int pfm_flush(struct file *filp);

#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)	per_cpu(a, b)
static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_NOTIFY_RESUME, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void*)a));
}

static inline int
pfm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	return remap_page_range(vma, from, phys_addr, size, prot);
}
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline unsigned long
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}
static struct super_block *
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.get_sb   = pfmfs_get_sb,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);

/* forward declaration */
static struct file_operations pfm_file_ops;
/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);

/*
 * the HP simulator must be first because
 * CONFIG_IA64_HP_SIM is independent of CONFIG_MCKINLEY or CONFIG_ITANIUM
 */
#if defined(CONFIG_IA64_HP_SIM)
#include "perfmon_hpsim.h"
#elif defined(CONFIG_ITANIUM)
#include "perfmon_itanium.h"
#elif defined(CONFIG_MCKINLEY)
#include "perfmon_mckinley.h"
#else
#include "perfmon_generic.h"
#endif
static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}
static inline void
pfm_unfreeze_pmu(void)
{
	/* the freeze bit lives in pmc0; clearing it restarts the PMU */
	ia64_set_pmc(0,0);
	ia64_srlz_d();
}
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
	}
	ia64_srlz_d();
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf.ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf.ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to the unimplemented part is ignored, so we do not need to
	 * mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
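
/*
 * Illustrative sketch of the 64-bit emulation performed by the two
 * helpers above (not in the original source), assuming ovfl_val has the
 * low W bits set, where W is the width of the hardware counter:
 *
 *	pfm_write_soft_counter(ctx, i, v):
 *		ctx->ctx_pmds[i].val = v & ~ovfl_val;	upper bits, software
 *		PMD[i]               = v &  ovfl_val;	lower bits, hardware
 *
 *	pfm_read_soft_counter(ctx, i):
 *		returns ctx->ctx_pmds[i].val + (PMD[i] & ovfl_val);
 *
 * so the full 64-bit count is always the software upper part plus the
 * hardware lower part.
 */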
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
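
/*
 * Note (illustrative, not in the original source): the message queue is
 * a classic circular buffer over ctx_msgq[]. With PFM_MAX_MSGS = 32 it
 * holds at most 31 pending messages, because the full condition tested
 * in pfm_get_new_msg(),
 *
 *	(tail+1) % PFM_MAX_MSGS == head
 *
 * deliberately sacrifices one slot to distinguish full from empty
 * (PFM_CTXQ_EMPTY: head == tail).
 */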
/* Here we want the physical address of the memory.
 * This is used when initializing the contents of the
 * area and marking the pages as reserved.
 */
static inline unsigned long
pfm_kvirt_to_pa(unsigned long adr)
{
	__u64 pa = ia64_tpa(adr);
	return pa;
}

static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}
static pfm_context_t *
pfm_context_alloc(void)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		memset(ctx, 0, sizeof(pfm_context_t));
		DPRINT(("alloc ctx @%p\n", ctx));
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));

	ovfl_mask = pmu_conf.ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, therefore
	 * we can access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence to this call, the thread->pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
		th->pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}
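
/*
 * Illustrative example of the masking trick above (not in the original
 * source): the low 4 bits of a monitor PMC hold pmc.plm, the
 * privilege-level mask at which the counter is allowed to count.
 * Clearing them with &~0xfUL, e.g.
 *
 *	pmc[4]: 0x...23 (plm=3: user+kernel)  ->  0x...20 (plm=0)
 *
 * stops the counter at every privilege level without touching
 * psr.pp/psr.up, which remain under user control.
 */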
936 * make all of this visible
942 * must always be done with task == current
944 * context must be in MASKED state when calling
947 pfm_restore_monitoring(struct task_struct *task)
949 pfm_context_t *ctx = PFM_GET_CTX(task);
950 struct thread_struct *th = &task->thread;
951 unsigned long mask, ovfl_mask;
952 unsigned long psr, val;
955 is_system = ctx->ctx_fl_system;
956 ovfl_mask = pmu_conf.ovfl_val;
958 if (task != current) {
959 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
962 if (ctx->ctx_state != PFM_CTX_MASKED) {
963 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
964 task->pid, current->pid, ctx->ctx_state);
969 * monitoring is masked via the PMC.
970 * As we restore their value, we do not want each counter to
971 * restart right away. We stop monitoring using the PSR,
972 * restore the PMC (and PMD) and then re-establish the psr
973 * as it was. Note that there can be no pending overflow at
974 * this point, because monitoring was MASKED.
976 * system-wide session are pinned and self-monitoring
978 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
980 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
986 * first, we restore the PMD
988 mask = ctx->ctx_used_pmds[0];
989 for (i = 0; mask; i++, mask>>=1) {
990 /* skip non used pmds */
991 if ((mask & 0x1) == 0) continue;
993 if (PMD_IS_COUNTING(i)) {
995 * we split the 64bit value according to
998 val = ctx->ctx_pmds[i].val & ovfl_mask;
999 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1001 val = ctx->ctx_pmds[i].val;
1003 ia64_set_pmd(i, val);
1005 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1007 ctx->ctx_pmds[i].val,
1013 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1014 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1015 if ((mask & 0x1) == 0UL) continue;
1016 th->pmcs[i] = ctx->ctx_pmcs[i];
1017 ia64_set_pmc(i, th->pmcs[i]);
1018 DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
1023 * must restore DBR/IBR because could be modified while masked
1024 * XXX: need to optimize
1026 if (ctx->ctx_fl_using_dbreg) {
1027 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
1028 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
1034 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1036 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf.ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long ovfl_val = pmu_conf.ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		thread->pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			thread->pmds[i],
			ctx->ctx_pmds[i].val));
	}
}
/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		thread->pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}
static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}
static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}
int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (fmt == NULL) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
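
/*
 * Illustrative sketch of how a sampling-format module plugs in through
 * the two exported entry points above. All names below are made up for
 * the example; only the pfm_buffer_fmt_t fields actually referenced in
 * this file (fmt_name, fmt_uuid, fmt_handler) are assumed:
 *
 *	static pfm_buffer_fmt_t my_fmt = {
 *		.fmt_name    = "example-format",
 *		.fmt_uuid    = MY_FMT_UUID,
 *		.fmt_handler = my_overflow_handler,	// mandatory
 *	};
 *
 *	pfm_register_buffer_fmt(&my_fmt);		// module init
 *	...
 *	pfm_unregister_buffer_fmt(my_fmt.fmt_uuid);	// module exit
 */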
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++ ;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		pfm_sessions.pfs_sys_session[cpu]->pid,
		smp_processor_id()));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	UNLOCK_PFS(flags);

	return 0;
}
/*
 * removes the virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static void
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
		return;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
}
/*
 * free actual physical storage used by sampling buffer
 */
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
	return -EINVAL;
}

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}
/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}
static loff_t
pfm_lseek(struct file *file, loff_t offset, int whence)
{
	DPRINT(("pfm_lseek called\n"));
	return -ESPIPE;
}

static ssize_t
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}
	/*
	 * seeks are not allowed on message queues
	 */
	if (ppos != &filp->f_pos) return -ESPIPE;

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for(;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if(PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if(filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if(signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
		goto abort_locked;
	}

	DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}
static ssize_t
pfm_write(struct file *file, const char *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}
static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

/*
 * context is locked when coming here and interrupts are disabled
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		current->pid,
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	unsigned long flags;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	UNPROTECT_CTX(ctx, flags);

	return ret;
}
#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = ia64_task_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			owner->pid, ctx->ctx_task->pid);
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both cases, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync (-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * remove the mapping.
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM function reenables interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		up(&ctx->ctx_restart_sem);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 *	- ok for explicit close
		 *	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task->pid));
		/*
		 * cannot free the context on the spot. Deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. Should there be a subsequent PMU overflow originally
	 * meant for sampling, it will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * when state is UNLOADED, the session has already been unreserved;
	 * for a ZOMBIE context we must do it here.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
	}

	/*
	 * disconnect file descriptor from context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the caller's side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}

static struct file_operations pfm_file_ops = {
	.llseek   = pfm_lseek,
	.read     = pfm_read,
	.write    = pfm_write,
	.poll     = pfm_poll,
	.ioctl    = pfm_ioctl,
	.open     = pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync   = pfm_fasync,
	.release  = pfm_close,
	.flush    = pfm_flush
};

static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};
static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode * inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_sb   = pfmfs_mnt->mnt_sb;
	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	/*
	 * allocate a new dcache entry
	 */
	file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_dentry) goto out;

	file->f_dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_dentry, inode);
	file->f_vfsmnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;

	/*
	 * may have to delay until context is attached?
	 */
	fd_install(fd, file);

	/*
	 * the file structure we will use
	 */
	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}

static void
pfm_free_fd(int fd, struct file *file)
{
	if (file) put_filp(file);
	put_unused_fd(fd);
}
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	unsigned long page;

	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		page = pfm_kvirt_to_pa(buf);

		if (pfm_remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY)) return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
/*
 * allocates a sampling buffer and remaps it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size and align to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
	 * 	return -ENOMEM;
	 */
	if (size > task->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc(), clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	/*
	 * partially initialize the vma for the sampling buffer
	 *
	 * The VM_DONTCOPY flag is very important as it ensures that the mapping
	 * will never be inherited for any child process (via fork()) which is always
	 * what we want.
	 */
	vma->vm_mm	     = mm;
	vma->vm_flags	     = VM_READ| VM_MAYREAD |VM_RESERVED;
	vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
	vma->vm_ops	     = NULL;
	vma->vm_pgoff	     = 0;
	vma->vm_file	     = NULL;
	vma->vm_private_data = NULL;

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */

	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer in it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	// mm->total_vm  += size >> PAGE_SHIFT;
	vx_vmpages_add(mm, size >> PAGE_SHIFT);

	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}
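
/*
 * Summary of the scheme above (illustrative note, not in the original
 * source): the sampling buffer exists under two views of the same
 * physical pages. The kernel writes samples through the vmalloc'ed
 * address kept in ctx_smpl_hdr, while the monitoring tool reads them
 * read-only through the user mapping kept in ctx_smpl_vaddr;
 * pfm_remap_buffer() wires the user VMA to the same pages one
 * PAGE_SIZE at a time.
 */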
2375 * XXX: do something better here
2378 pfm_bad_permissions(struct task_struct *task)
2380 /* inspired by ptrace_attach() */
2381 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2390 return ((current->uid != task->euid)
2391 || (current->uid != task->suid)
2392 || (current->uid != task->uid)
2393 || (current->gid != task->egid)
2394 || (current->gid != task->sgid)
2395 || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
2399 pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2405 ctx_flags = pfx->ctx_flags;
2407 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2410 * cannot block in this mode
2412 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2413 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2418 /* probably more to add here */
2424 pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
2425 unsigned int cpu, pfarg_context_t *arg)
2427 pfm_buffer_fmt_t *fmt = NULL;
2428 unsigned long size = 0UL;
2430 void *fmt_arg = NULL;
2432 #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2434 /* invoke and lock buffer format, if found */
2435 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2437 DPRINT(("[%d] cannot find buffer format\n", task->pid));
2442 * buffer argument MUST be contiguous to pfarg_context_t
2444 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2446 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2448 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
2450 if (ret) goto error;
2452 /* link buffer format and context */
2453 ctx->ctx_buf_fmt = fmt;
2456 * check if buffer format wants to use perfmon buffer allocation/mapping service
2458 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2459 if (ret) goto error;
2463 * buffer is always remapped into the caller's address space
2465 ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
2466 if (ret) goto error;
2468 /* keep track of user address of buffer */
2469 arg->ctx_smpl_vaddr = uaddr;
2471 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2478 pfm_reset_pmu_state(pfm_context_t *ctx)
2483 * install reset values for PMCs.
2485 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2486 if (PMC_IS_IMPL(i) == 0) continue;
2487 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2488 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2491 * PMD registers are set to 0UL when the context is memset()
2495 * On context switch restore, we must restore ALL pmcs and ALL pmds even
2496 * when they are not actively used by the task. In UP, the incoming process
2497 * may otherwise pick up leftover PMC, PMD state from the previous process.
2498 * Unlike stale PMDs, stale PMCs can cause harm to the incoming
2499 * process because they may change what is being measured.
2500 * Therefore, we must systematically reinstall the entire
2501 * PMC state. In SMP, the same thing is possible on the
2502 * same CPU but also between 2 CPUs.
2504 * The problem with PMDs is information leaking, especially
2505 * to user level when psr.sp=0
2507 * There is unfortunately no easy way to avoid this problem
2508 * on either UP or SMP. This definitely slows down the
2509 * pfm_load_regs() function.
2513 * bitmask of all PMCs accessible to this context
2515 * PMC0 is treated differently.
2517 ctx->ctx_all_pmcs[0] = pmu_conf.impl_pmcs[0] & ~0x1;
2520 * bitmask of all PMDs that are accessible to this context
2522 ctx->ctx_all_pmds[0] = pmu_conf.impl_pmds[0];
2524 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2527 * useful in case of re-enable after disable
2529 ctx->ctx_used_ibrs[0] = 0UL;
2530 ctx->ctx_used_dbrs[0] = 0UL;
2534 pfm_ctx_getsize(void *arg, size_t *sz)
2536 pfarg_context_t *req = (pfarg_context_t *)arg;
2537 pfm_buffer_fmt_t *fmt;
2541 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2543 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2545 DPRINT(("cannot find buffer format\n"));
2548 /* get just enough to copy in user parameters */
2549 *sz = fmt->fmt_arg_size;
2550 DPRINT(("arg_size=%lu\n", *sz));
2558 * cannot attach if:
2560 * - task not owned by caller
2561 * - task incompatible with context mode
2564 pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2567 * no kernel task, or task not owned by caller
2569 if (task->mm == NULL) {
2570 DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
2573 if (pfm_bad_permissions(task)) {
2574 DPRINT(("no permission to attach to [%d]\n", task->pid));
2578 * cannot block in self-monitoring mode
2580 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2581 DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
2585 if (task->state == TASK_ZOMBIE) {
2586 DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
2591 * always ok for self
2593 if (task == current) return 0;
2595 if (task->state != TASK_STOPPED) {
2596 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
2600 * make sure the task is off any CPU
2602 wait_task_inactive(task);
2604 /* more to come... */
2610 pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2612 struct task_struct *p = current;
2615 /* XXX: need to add more checks here */
2616 if (pid < 2) return -EPERM;
2618 if (pid != current->pid) {
2620 read_lock(&tasklist_lock);
2622 p = find_task_by_pid(pid);
2624 /* make sure task cannot go away while we operate on it */
2625 if (p) get_task_struct(p);
2627 read_unlock(&tasklist_lock);
2629 if (p == NULL) return -ESRCH;
2632 ret = pfm_task_incompatible(ctx, p);
2635 } else if (p != current) {
2644 pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2646 pfarg_context_t *req = (pfarg_context_t *)arg;
2651 /* let's check the arguments first */
2652 ret = pfarg_is_sane(current, req);
2653 if (ret < 0) return ret;
2655 ctx_flags = req->ctx_flags;
2659 ctx = pfm_context_alloc();
2660 if (!ctx) goto error;
2662 req->ctx_fd = ctx->ctx_fd = pfm_alloc_fd(&filp);
2663 if (req->ctx_fd < 0) goto error_file;
2666 * attach context to file
2668 filp->private_data = ctx;
2671 * does the user want to sample?
2673 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2674 ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
2675 if (ret) goto buffer_error;
2679 * init context protection lock
2681 spin_lock_init(&ctx->ctx_lock);
2684 * context is unloaded
2686 ctx->ctx_state = PFM_CTX_UNLOADED;
2689 * initialization of context's flags
2691 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
2692 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
2693 ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
2694 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
2696 * will move to set properties
2697 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
2701 * init restart semaphore to locked
2703 sema_init(&ctx->ctx_restart_sem, 0);
2706 * activation is used in SMP only
2708 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
2709 SET_LAST_CPU(ctx, -1);
2712 * initialize notification message queue
2714 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
2715 init_waitqueue_head(&ctx->ctx_msgq_wait);
2716 init_waitqueue_head(&ctx->ctx_zombieq);
2718 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
2723 ctx->ctx_fl_excl_idle,
2728 * initialize soft PMU state
2730 pfm_reset_pmu_state(ctx);
2735 pfm_free_fd(ctx->ctx_fd, filp);
2737 if (ctx->ctx_buf_fmt) {
2738 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2741 pfm_context_free(ctx);
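/*
 * Hedged user-space sketch (kept under #if 0, not built with the kernel):
 * a minimal PFM_CREATE_CONTEXT call. Assumes the perfmonctl() wrapper and
 * the pfarg_context_t layout exported to user level via <perfmon/perfmon.h>;
 * error handling is elided.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>

int example_create_context(void)
{
	pfarg_context_t ctx;

	memset(&ctx, 0, sizeof(ctx));
	/* null ctx_smpl_buf_id: no sampling buffer format requested */
	/* ctx.ctx_flags |= PFM_FL_SYSTEM_WIDE for a system-wide session */

	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1) == -1) return -1;

	return ctx.ctx_fd;	/* the context is identified by this fd from now on */
}
#endif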
2747 static inline unsigned long
2748 pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2750 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2751 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2752 extern unsigned long carta_random32 (unsigned long seed);
2754 if (reg->flags & PFM_REGFL_RANDOM) {
2755 new_seed = carta_random32(old_seed);
2756 val -= (old_seed & mask); /* counter values are negative numbers! */
2757 if ((mask >> 32) != 0)
2758 /* construct a full 64-bit random value: */
2759 new_seed |= carta_random32(old_seed >> 32) << 32;
2760 reg->seed = new_seed;
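/*
 * Illustrative sketch (under #if 0, not built): the effect of
 * PFM_REGFL_RANDOM on the reset value computed above, reusing the
 * carta_random32() PRNG declared in pfm_new_counter_value(). With a
 * short reset of -2000 (overflow after 2000 events) and mask 0xff, the
 * period is lengthened by up to 255 extra events each time.
 * example_randomized_period() is a hypothetical name.
 */
#if 0
static unsigned long
example_randomized_period(unsigned long short_reset, unsigned long *seed,
			  unsigned long mask)
{
	/* counters count upward, so reset values are negative numbers */
	unsigned long val = short_reset;

	val  -= *seed & mask;		/* lengthen the period by up to 'mask' */
	*seed = carta_random32(*seed);	/* advance the PRNG for next time */

	return val;
}
#endif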
2767 pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2769 unsigned long mask = ovfl_regs[0];
2770 unsigned long reset_others = 0UL;
2775 * now restore reset value on sampling overflowed counters
2777 mask >>= PMU_FIRST_COUNTER;
2778 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2780 if ((mask & 0x1UL) == 0UL) continue;
2782 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2783 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2785 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2789 * Now take care of resetting the other registers
2791 for(i = 0; reset_others; i++, reset_others >>= 1) {
2793 if ((reset_others & 0x1) == 0) continue;
2795 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2797 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2798 is_long_reset ? "long" : "short", i, val));
2803 pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2805 unsigned long mask = ovfl_regs[0];
2806 unsigned long reset_others = 0UL;
2810 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2812 if (ctx->ctx_state == PFM_CTX_MASKED) {
2813 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2818 * now restore reset value on sampling overflowed counters
2820 mask >>= PMU_FIRST_COUNTER;
2821 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2823 if ((mask & 0x1UL) == 0UL) continue;
2825 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2826 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2828 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2830 pfm_write_soft_counter(ctx, i, val);
2834 * Now take care of resetting the other registers
2836 for(i = 0; reset_others; i++, reset_others >>= 1) {
2838 if ((reset_others & 0x1) == 0) continue;
2840 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2842 if (PMD_IS_COUNTING(i)) {
2843 pfm_write_soft_counter(ctx, i, val);
2845 ia64_set_pmd(i, val);
2847 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2848 is_long_reset ? "long" : "short", i, val));
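/*
 * Worked example (under #if 0, not built): how the overflow bitmask is
 * walked above. With PMU_FIRST_COUNTER == 4 (the Itanium convention) and
 * a hypothetical ovfl_regs[0] == 0x90 (bits 4 and 7 set), the loop visits
 * i == 4 and i == 7 only, i.e. exactly PMD4 and PMD7 are reset.
 */
#if 0
static void
example_walk_ovfl_mask(unsigned long ovfl)
{
	unsigned long mask = ovfl >> PMU_FIRST_COUNTER;
	int i;

	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if ((mask & 0x1UL) == 0UL) continue;
		/* PMD i overflowed: reset it here */
	}
}
#endif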
2854 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2856 struct thread_struct *thread = NULL;
2857 struct task_struct *task;
2858 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2859 unsigned long value, pmc_pm;
2860 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2861 unsigned int cnum, reg_flags, flags, pmc_type;
2862 int i, can_access_pmu = 0, is_loaded, is_system;
2863 int is_monitor, is_counting, state;
2865 #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2867 state = ctx->ctx_state;
2868 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2869 is_system = ctx->ctx_fl_system;
2870 task = ctx->ctx_task;
2871 impl_pmds = pmu_conf.impl_pmds[0];
2873 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2876 thread = &task->thread;
2878 * In system wide and when the context is loaded, access can only happen
2879 * when the caller is running on the CPU being monitored by the session.
2880 * It does not have to be the owner (ctx_task) of the context per se.
2882 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2883 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2886 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2889 for (i = 0; i < count; i++, req++) {
2891 cnum = req->reg_num;
2892 reg_flags = req->reg_flags;
2893 value = req->reg_value;
2894 smpl_pmds = req->reg_smpl_pmds[0];
2895 reset_pmds = req->reg_reset_pmds[0];
2899 if (cnum >= PMU_MAX_PMCS) {
2900 DPRINT(("pmc%u is invalid\n", cnum));
2904 pmc_type = pmu_conf.pmc_desc[cnum].type;
2905 pmc_pm = (value >> pmu_conf.pmc_desc[cnum].pm_pos) & 0x1;
2906 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2907 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2910 * we reject all non-implemented PMCs as well
2911 * as attempts to modify PMC[0-3] which are used
2912 * as status registers by the PMU
2914 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2915 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2919 * If the PMC is a monitor, then if the value is not the default:
2920 * - system-wide session: PMCx.pm=1 (privileged monitor)
2921 * - per-task : PMCx.pm=0 (user monitor)
2923 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2924 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2933 * enforce generation of overflow interrupt. Necessary on all
2934 * CPUs.
2936 value |= 1 << PMU_PMC_OI;
2938 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2939 flags |= PFM_REGFL_OVFL_NOTIFY;
2942 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2944 /* verify validity of smpl_pmds */
2945 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2946 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2950 /* verify validity of reset_pmds */
2951 if ((reset_pmds & impl_pmds) != reset_pmds) {
2952 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2956 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2957 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2960 /* eventid on non-counting monitors is ignored */
2964 * execute write checker, if any
2966 if (pfm_sysctl.expert_mode == 0 && PMC_WR_FUNC(cnum)) {
2967 ret = PMC_WR_FUNC(cnum)(task, ctx, cnum, &value, regs);
2968 if (ret) goto error;
2973 * no error on this register
2975 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2978 * Now we commit the changes to the software state
2982 * update overflow information
2986 * full flag update each time a register is programmed
2988 ctx->ctx_pmds[cnum].flags = flags;
2990 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2991 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2992 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2995 * Mark all PMDs to be accessed as used.
2997 * We do not keep track of PMCs because we have to
2998 * systematically restore ALL of them.
3000 * We do not update the used_monitors mask, because
3001 * if we have not programmed them, then they will be in
3002 * a quiescent state, therefore we will not need to
3003 * mask/restore them when the context is MASKED.
3005 CTX_USED_PMD(ctx, reset_pmds);
3006 CTX_USED_PMD(ctx, smpl_pmds);
3008 * make sure we do not try to reset on
3009 * restart because we have established new values
3011 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3014 * Needed in case the user does not initialize the equivalent
3015 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
3016 * possible leak here.
3018 CTX_USED_PMD(ctx, pmu_conf.pmc_desc[cnum].dep_pmd[0]);
3021 * keep track of the monitor PMC that we are using.
3022 * we save the value of the pmc in ctx_pmcs[] and if
3023 * the monitoring is not stopped for the context we also
3024 * place it in the saved state area so that it will be
3025 * picked up later by the context switch code.
3027 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3029 * The value in thread->pmcs[] may be modified on overflow, i.e., when
3030 * monitoring needs to be stopped.
3032 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3035 * update context state
3037 ctx->ctx_pmcs[cnum] = value;
3041 * write thread state
3043 if (is_system == 0) thread->pmcs[cnum] = value;
3046 * write hardware register if we can
3048 if (can_access_pmu) {
3049 ia64_set_pmc(cnum, value);
3054 * per-task SMP only here
3056 * we are guaranteed that the task is not running on the other CPU,
3057 * we indicate that this PMD will need to be reloaded if the task
3058 * is rescheduled on the CPU it ran last on.
3060 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3065 DPRINT(("pmc[%u]=0x%lx loaded=%d access_pmu=%d all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3070 ctx->ctx_all_pmcs[0],
3071 ctx->ctx_used_pmds[0],
3072 ctx->ctx_pmds[cnum].eventid,
3075 ctx->ctx_reload_pmcs[0],
3076 ctx->ctx_used_monitors[0],
3077 ctx->ctx_ovfl_regs[0]));
3081 * make sure the changes are visible
3083 if (can_access_pmu) ia64_srlz_d();
3087 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
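/*
 * Hedged user-space sketch (under #if 0, not built): programming one
 * counting PMC through PFM_WRITE_PMCS. Assumes the perfmonctl() wrapper
 * and pfarg_reg_t layout from <perfmon/perfmon.h>; the register number
 * and event encoding are placeholders, not real Itanium values.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>

int example_write_pmc(int ctx_fd)
{
	pfarg_reg_t pc;

	memset(&pc, 0, sizeof(pc));
	pc.reg_num   = 4;			/* hypothetical counting PMC */
	pc.reg_value = 0x123;			/* placeholder event encoding */
	pc.reg_flags = PFM_REGFL_OVFL_NOTIFY;	/* queue a message when PMD4 overflows */

	/* one pfarg_reg_t per register; count == 1 here */
	return perfmonctl(ctx_fd, PFM_WRITE_PMCS, &pc, 1);
}
#endif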
3092 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3094 struct thread_struct *thread = NULL;
3095 struct task_struct *task;
3096 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3097 unsigned long value, hw_value, ovfl_mask;
3099 int i, can_access_pmu = 0, state;
3100 int is_counting, is_loaded, is_system;
3104 state = ctx->ctx_state;
3105 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3106 is_system = ctx->ctx_fl_system;
3107 ovfl_mask = pmu_conf.ovfl_val;
3108 task = ctx->ctx_task;
3110 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3113 * on both UP and SMP, we can only write to a PMD when the task is
3114 * the owner of the local PMU.
3116 if (likely(is_loaded)) {
3117 thread = &task->thread;
3119 * In system wide and when the context is loaded, access can only happen
3120 * when the caller is running on the CPU being monitored by the session.
3121 * It does not have to be the owner (ctx_task) of the context per se.
3123 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3124 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3127 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3130 for (i = 0; i < count; i++, req++) {
3132 cnum = req->reg_num;
3133 value = req->reg_value;
3135 if (!PMD_IS_IMPL(cnum)) {
3136 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3139 is_counting = PMD_IS_COUNTING(cnum);
3142 * execute write checker, if any
3144 if (pfm_sysctl.expert_mode == 0 && PMD_WR_FUNC(cnum)) {
3145 unsigned long v = value;
3147 ret = PMD_WR_FUNC(cnum)(task, ctx, cnum, &v, regs);
3148 if (ret) goto abort_mission;
3155 * no error on this register
3157 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3160 * now commit changes to software state
3165 * update virtualized (64-bit) counter
3169 * write context state
3171 ctx->ctx_pmds[cnum].lval = value;
3174 * when the context is loaded, we use the split value
3177 hw_value = value & ovfl_mask;
3178 value = value & ~ovfl_mask;
3182 * update reset values (not just for counters)
3184 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3185 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3188 * update randomization parameters (not just for counters)
3190 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3191 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3194 * update context value
3196 ctx->ctx_pmds[cnum].val = value;
3199 * Keep track of what we use
3201 * We do not keep track of PMC because we have to
3202 * systematically restore ALL of them.
3204 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3207 * mark this PMD register used as well
3209 CTX_USED_PMD(ctx, RDEP(cnum));
3212 * make sure we do not try to reset on
3213 * restart because we have established new values
3215 if (is_counting && state == PFM_CTX_MASKED) {
3216 ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3221 * write thread state
3223 if (is_system == 0) thread->pmds[cnum] = hw_value;
3226 * write hardware register if we can
3228 if (can_access_pmu) {
3229 ia64_set_pmd(cnum, hw_value);
3233 * we are guaranteed that the task is not running on the other CPU,
3234 * we indicate that this PMD will need to be reloaded if the task
3235 * is rescheduled on the CPU it ran last on.
3237 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3242 DPRINT(("pmd[%u]=0x%lx loaded=%d access_pmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3243 "long_reset=0x%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3249 ctx->ctx_pmds[cnum].val,
3250 ctx->ctx_pmds[cnum].short_reset,
3251 ctx->ctx_pmds[cnum].long_reset,
3252 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3253 ctx->ctx_used_pmds[0],
3254 ctx->ctx_pmds[cnum].reset_pmds[0],
3255 ctx->ctx_reload_pmds[0],
3256 ctx->ctx_all_pmds[0],
3257 ctx->ctx_ovfl_regs[0]));
3261 * make changes visible
3263 if (can_access_pmu) ia64_srlz_d();
3269 * for now, we have only one possibility for error
3271 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
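/*
 * Hedged user-space sketch (under #if 0, not built): arming the paired
 * PMD so it overflows after nevents increments. Counters count upward
 * and wrap, hence the two's complement -nevents; the split into hardware
 * and virtualized 64-bit values shown above is transparent to the caller.
 * Assumes pfarg_reg_t from <perfmon/perfmon.h>; the register number is a
 * placeholder.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>

int example_write_pmd(int ctx_fd, unsigned long nevents)
{
	pfarg_reg_t pd;

	memset(&pd, 0, sizeof(pd));
	pd.reg_num	   = 4;		/* hypothetical PMD paired with PMC4 */
	pd.reg_value	   = -nevents;	/* first overflow after nevents events */
	pd.reg_long_reset  = -nevents;	/* reload after an overflow notification */
	pd.reg_short_reset = -nevents;	/* reload after a plain overflow */

	return perfmonctl(ctx_fd, PFM_WRITE_PMDS, &pd, 1);
}
#endif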
3276 * By way of PROTECT_CTX(), interrupts are masked while we are in this function.
3277 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
3278 * interrupt is delivered during the call, it will be kept pending until we leave, making
3279 * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are
3280 * guaranteed to return consistent data to the user; it may simply be old. It is not
3281 * trivial to handle the overflow while inside the call because you may end up in
3282 * some module sampling buffer code, causing deadlocks.
3285 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3287 struct thread_struct *thread = NULL;
3288 struct task_struct *task;
3289 unsigned long val = 0UL, lval, ovfl_mask, sval;
3290 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3291 unsigned int cnum, reg_flags = 0;
3292 int i, can_access_pmu = 0, state;
3293 int is_loaded, is_system, is_counting;
3297 * access is possible when loaded only for
3298 * self-monitoring tasks or in UP mode
3301 state = ctx->ctx_state;
3302 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3303 is_system = ctx->ctx_fl_system;
3304 ovfl_mask = pmu_conf.ovfl_val;
3305 task = ctx->ctx_task;
3307 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3309 if (likely(is_loaded)) {
3310 thread = &task->thread;
3312 * In system wide and when the context is loaded, access can only happen
3313 * when the caller is running on the CPU being monitored by the session.
3314 * It does not have to be the owner (ctx_task) of the context per se.
3316 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3317 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3321 * this can be true when not self-monitoring only in UP
3323 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3325 if (can_access_pmu) ia64_srlz_d();
3328 DPRINT(("loaded=%d access_pmu=%d ctx_state=%d\n",
3334 * on both UP and SMP, we can only read the PMD from the hardware register when
3335 * the task is the owner of the local PMU.
3338 for (i = 0; i < count; i++, req++) {
3340 cnum = req->reg_num;
3341 reg_flags = req->reg_flags;
3343 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3345 * we can only read the registers that we use. That includes
3346 * the ones we explicitly initialize AND the ones we want included
3347 * in the sampling buffer (smpl_regs).
3349 * Having this restriction allows optimization in the ctxsw routine
3350 * without compromising security (leaks)
3352 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3354 sval = ctx->ctx_pmds[cnum].val;
3355 lval = ctx->ctx_pmds[cnum].lval;
3356 is_counting = PMD_IS_COUNTING(cnum);
3359 * If the task is not the current one, then we check if the
3360 * PMU state is still in the local live register due to lazy ctxsw.
3361 * If true, then we read directly from the registers.
3363 if (can_access_pmu){
3364 val = ia64_get_pmd(cnum);
3367 * context has been saved
3368 * if context is zombie, then task does not exist anymore.
3369 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3371 val = is_loaded ? thread->pmds[cnum] : 0UL;
3376 * XXX: need to check for overflow when loaded
3383 * execute read checker, if any
3385 if (unlikely(pfm_sysctl.expert_mode == 0 && PMD_RD_FUNC(cnum))) {
3386 unsigned long v = val;
3387 ret = PMD_RD_FUNC(cnum)(ctx->ctx_task, ctx, cnum, &v, regs);
3388 if (ret) goto error;
3393 PFM_REG_RETFLAG_SET(reg_flags, 0);
3395 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3398 * update register return value, abort all if problem during copy.
3399 * we only modify the reg_flags field. no check mode is fine because
3400 * access has been verified upfront in sys_perfmonctl().
3402 req->reg_value = val;
3403 req->reg_flags = reg_flags;
3404 req->reg_last_reset_val = lval;
3410 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
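/*
 * Hedged user-space sketch (under #if 0, not built): reading back the
 * virtualized 64-bit counter through PFM_READ_PMDS. Assumes pfarg_reg_t
 * from <perfmon/perfmon.h>; the register number is a placeholder.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <perfmon/perfmon.h>

int example_read_pmd(int ctx_fd)
{
	pfarg_reg_t pd;

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = 4;		/* hypothetical counter PMD */

	if (perfmonctl(ctx_fd, PFM_READ_PMDS, &pd, 1) == -1) return -1;

	/* reg_value carries the full software-extended 64-bit count */
	printf("pmd4=%lu last_reset=%lu\n", pd.reg_value, pd.reg_last_reset_val);
	return 0;
}
#endif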
3415 pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3419 if (req == NULL) return -EINVAL;
3421 ctx = GET_PMU_CTX();
3423 if (ctx == NULL) return -EINVAL;
3426 * for now limit to current task, which is enough when calling
3427 * from overflow handler
3429 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3431 return pfm_write_pmcs(ctx, req, nreq, regs);
3433 EXPORT_SYMBOL(pfm_mod_write_pmcs);
3436 pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3440 if (req == NULL) return -EINVAL;
3442 ctx = GET_PMU_CTX();
3444 if (ctx == NULL) return -EINVAL;
3447 * for now limit to current task, which is enough when calling
3448 * from overflow handler
3450 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3452 return pfm_read_pmds(ctx, req, nreq, regs);
3454 EXPORT_SYMBOL(pfm_mod_read_pmds);
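/*
 * Hedged in-kernel sketch (under #if 0, not built): how a sampling-format
 * module might use the pfm_mod_* helpers exported above from its overflow
 * handler. The task must be current unless the session is system-wide, as
 * enforced by the checks; example_mod_read() is a hypothetical name.
 */
#if 0
static int
example_mod_read(struct pt_regs *regs)
{
	pfarg_reg_t pd;

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = 4;		/* hypothetical PMD of interest */

	return pfm_mod_read_pmds(current, &pd, 1, regs);
}
#endif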
3457 * Only call this function when a process is trying to
3458 * write the debug registers (reading is always allowed)
3461 pfm_use_debug_registers(struct task_struct *task)
3463 pfm_context_t *ctx = task->thread.pfm_context;
3464 unsigned long flags;
3467 if (pmu_conf.use_rr_dbregs == 0) return 0;
3469 DPRINT(("called for [%d]\n", task->pid));
3474 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3477 * Even on SMP, we do not need to use an atomic here because
3478 * the only way in is via ptrace() and this is possible only when the
3479 * process is stopped. Even in the case where the ctxsw out is not totally
3480 * completed by the time we come here, there is no way the 'stopped' process
3481 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3482 * So this is always safe.
3484 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3489 * We cannot allow setting breakpoints when system wide monitoring
3490 * sessions are using the debug registers.
3492 if (pfm_sessions.pfs_sys_use_dbregs > 0)
3495 pfm_sessions.pfs_ptrace_use_dbregs++;
3497 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3498 pfm_sessions.pfs_ptrace_use_dbregs,
3499 pfm_sessions.pfs_sys_use_dbregs,
3508 * This function is called for every task that exits with the
3509 * IA64_THREAD_DBG_VALID flag set. This indicates a task which was
3510 * able to use the debug registers for debugging purposes via
3511 * ptrace(). Therefore we know it was not using them for
3512 * performance monitoring, so we only decrement the number
3513 * of "ptraced" debug register users to keep the count up to date.
3516 pfm_release_debug_registers(struct task_struct *task)
3518 unsigned long flags;
3521 if (pmu_conf.use_rr_dbregs == 0) return 0;
3524 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3525 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
3528 pfm_sessions.pfs_ptrace_use_dbregs--;
3537 pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3539 struct task_struct *task;
3540 pfm_buffer_fmt_t *fmt;
3541 pfm_ovfl_ctrl_t rst_ctrl;
3542 int state, is_system;
3545 state = ctx->ctx_state;
3546 fmt = ctx->ctx_buf_fmt;
3547 is_system = ctx->ctx_fl_system;
3548 task = PFM_CTX_TASK(ctx);
3551 case PFM_CTX_MASKED:
3553 case PFM_CTX_LOADED:
3554 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3556 case PFM_CTX_UNLOADED:
3557 case PFM_CTX_ZOMBIE:
3558 DPRINT(("invalid state=%d\n", state));
3561 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3566 * In system wide and when the context is loaded, access can only happen
3567 * when the caller is running on the CPU being monitored by the session.
3568 * It does not have to be the owner (ctx_task) of the context per se.
3570 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3571 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3576 if (unlikely(task == NULL)) {
3577 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
3581 if (task == current || is_system) {
3583 fmt = ctx->ctx_buf_fmt;
3585 DPRINT(("restarting self %d ovfl=0x%lx\n",
3587 ctx->ctx_ovfl_regs[0]));
3589 if (CTX_HAS_SMPL(ctx)) {
3591 prefetch(ctx->ctx_smpl_hdr);
3593 rst_ctrl.bits.mask_monitoring = 0;
3594 rst_ctrl.bits.reset_ovfl_pmds = 0;
3596 if (state == PFM_CTX_LOADED)
3597 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3599 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3601 rst_ctrl.bits.mask_monitoring = 0;
3602 rst_ctrl.bits.reset_ovfl_pmds = 1;
3606 if (rst_ctrl.bits.reset_ovfl_pmds)
3607 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3609 if (rst_ctrl.bits.mask_monitoring == 0) {
3610 DPRINT(("resuming monitoring for [%d]\n", task->pid));
3612 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3614 DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
3616 // cannot use pfm_stop_monitoring(task, regs);
3620 * clear overflowed PMD mask to remove any stale information
3622 ctx->ctx_ovfl_regs[0] = 0UL;
3625 * back to LOADED state
3627 ctx->ctx_state = PFM_CTX_LOADED;
3630 * XXX: not really useful for self monitoring
3632 ctx->ctx_fl_can_restart = 0;
3638 * restart another task
3642 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3643 * one is seen by the task.
3645 if (state == PFM_CTX_MASKED) {
3646 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3648 * will prevent subsequent restarts before this one is
3649 * seen by the other task
3651 ctx->ctx_fl_can_restart = 0;
3655 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
3656 * the task is blocked or on its way to block. That's the normal
3657 * restart path. If the monitoring is not masked, then the task
3658 * can be actively monitoring and we cannot directly intervene.
3659 * Therefore we use the trap mechanism to catch the task and
3660 * force it to reset the buffer/reset PMDs.
3662 * if non-blocking, then we ensure that the task will go into
3663 * pfm_handle_work() before returning to user mode.
3665 * We cannot explicitly reset another task, it MUST always
3666 * be done by the task itself. This works for system wide because
3667 * the tool that is controlling the session is logically doing
3668 * "self-monitoring".
3670 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3671 DPRINT(("unblocking [%d] \n", task->pid));
3672 up(&ctx->ctx_restart_sem);
3674 DPRINT(("[%d] armed exit trap\n", task->pid));
3676 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3678 PFM_SET_WORK_PENDING(task, 1);
3680 pfm_set_task_notify(task);
3683 * XXX: send reschedule if task runs on another CPU
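/*
 * Hedged user-space sketch (under #if 0, not built): the typical
 * notification loop on the controlling side. The tool blocks in read(2)
 * on the context fd, consumes the overflow message, processes the
 * sampling buffer, then issues PFM_RESTART so the monitored task can
 * resume. Assumes the pfm_msg_t type from <perfmon/perfmon.h>.
 */
#if 0
#include <unistd.h>
#include <perfmon/perfmon.h>

void example_notify_loop(int ctx_fd)
{
	pfm_msg_t msg;

	for (;;) {
		if (read(ctx_fd, &msg, sizeof(msg)) != sizeof(msg)) break;

		/* ... drain/process the sampling buffer here ... */

		if (perfmonctl(ctx_fd, PFM_RESTART, NULL, 0) == -1) break;
	}
}
#endif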
3690 pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3692 unsigned int m = *(unsigned int *)arg;
3694 pfm_sysctl.debug = m == 0 ? 0 : 1;
3696 pfm_debug_var = pfm_sysctl.debug;
3698 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3701 memset(pfm_stats, 0, sizeof(pfm_stats));
3702 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3708 * arg can be NULL and count can be zero for this function
3711 pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3713 struct thread_struct *thread = NULL;
3714 struct task_struct *task;
3715 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3716 unsigned long flags;
3721 int i, can_access_pmu = 0;
3722 int is_system, is_loaded;
3724 if (pmu_conf.use_rr_dbregs == 0) return -EINVAL;
3726 state = ctx->ctx_state;
3727 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3728 is_system = ctx->ctx_fl_system;
3729 task = ctx->ctx_task;
3731 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3734 * on both UP and SMP, we can only write to the debug registers when
3735 * the task is the owner of the local PMU.
3738 thread = &task->thread;
3740 * In system wide and when the context is loaded, access can only happen
3741 * when the caller is running on the CPU being monitored by the session.
3742 * It does not have to be the owner (ctx_task) of the context per se.
3744 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3745 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3748 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3752 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3753 * ensuring that no real breakpoint can be installed via this call.
3755 * IMPORTANT: regs can be NULL in this function
3758 first_time = ctx->ctx_fl_using_dbreg == 0;
3761 * don't bother if we are loaded and task is being debugged
3763 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3764 DPRINT(("debug registers already in use for [%d]\n", task->pid));
3769 * check for debug registers in system wide mode
3771 * Even though a check is done in pfm_context_load(),
3772 * we must repeat it here, in case the registers are
3773 * written after the context is loaded
3778 if (first_time && is_system) {
3779 if (pfm_sessions.pfs_ptrace_use_dbregs)
3782 pfm_sessions.pfs_sys_use_dbregs++;
3787 if (ret != 0) return ret;
3790 * mark ourselves as a user of the debug registers for
3791 * perfmon purposes.
3793 ctx->ctx_fl_using_dbreg = 1;
3796 * clear hardware registers to make sure we don't
3797 * pick up stale state.
3799 * for a system wide session, we do not use
3800 * thread.dbr, thread.ibr because this process
3801 * never leaves the current CPU and the state
3802 * is shared by all processes running on it
3804 if (first_time && can_access_pmu) {
3805 DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
3806 for (i=0; i < pmu_conf.num_ibrs; i++) {
3807 ia64_set_ibr(i, 0UL);
3811 for (i=0; i < pmu_conf.num_dbrs; i++) {
3812 ia64_set_dbr(i, 0UL);
3819 * Now install the values into the registers
3821 for (i = 0; i < count; i++, req++) {
3823 rnum = req->dbreg_num;
3824 dbreg.val = req->dbreg_value;
3828 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3829 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3830 rnum, dbreg.val, mode, i, count));
3836 * make sure we do not install an enabled breakpoint
3839 if (mode == PFM_CODE_RR)
3840 dbreg.ibr.ibr_x = 0;
3842 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3845 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3848 * Debug registers, just like PMCs, can only be modified
3849 * by a kernel call. Moreover, perfmon() accesses to those
3850 * registers are centralized in this routine. The hardware
3851 * does not modify the value of these registers, therefore,
3852 * if we save them as they are written, we can avoid having
3853 * to save them on context switch out. This is made possible
3854 * by the fact that when perfmon uses debug registers, ptrace()
3855 * won't be able to modify them concurrently.
3857 if (mode == PFM_CODE_RR) {
3858 CTX_USED_IBR(ctx, rnum);
3860 if (can_access_pmu) ia64_set_ibr(rnum, dbreg.val);
3862 ctx->ctx_ibrs[rnum] = dbreg.val;
3864 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x is_loaded=%d access_pmu=%d\n",
3865 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3867 CTX_USED_DBR(ctx, rnum);
3869 if (can_access_pmu) ia64_set_dbr(rnum, dbreg.val);
3871 ctx->ctx_dbrs[rnum] = dbreg.val;
3873 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x is_loaded=%d access_pmu=%d\n",
3874 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3882 * in case it was our first attempt, we undo the global modifications
3886 if (ctx->ctx_fl_system) {
3887 pfm_sessions.pfs_sys_use_dbregs--;
3890 ctx->ctx_fl_using_dbreg = 0;
3893 * install error return flag
3895 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3901 pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3903 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3907 pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3909 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
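/*
 * Hedged user-space sketch (under #if 0, not built): installing a code
 * range restriction via PFM_WRITE_IBRS. IBRs work in pairs (address in
 * the even register, mask/privilege bits in the odd one); the values
 * below are placeholders and would normally come from a range-to-IBR
 * conversion helper. The x bit is cleared by the kernel path above, so
 * no real breakpoint can result.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>

int example_write_ibrs(int ctx_fd, unsigned long addr, unsigned long maskcfg)
{
	pfarg_dbreg_t dbr[2];

	memset(dbr, 0, sizeof(dbr));
	dbr[0].dbreg_num   = 0;		/* ibr0: start address (placeholder) */
	dbr[0].dbreg_value = addr;
	dbr[1].dbreg_num   = 1;		/* ibr1: mask + plm bits (placeholder) */
	dbr[1].dbreg_value = maskcfg;

	return perfmonctl(ctx_fd, PFM_WRITE_IBRS, dbr, 2);
}
#endif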
3913 pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3917 if (req == NULL) return -EINVAL;
3919 ctx = GET_PMU_CTX();
3921 if (ctx == NULL) return -EINVAL;
3924 * for now limit to current task, which is enough when calling
3925 * from overflow handler
3927 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3929 return pfm_write_ibrs(ctx, req, nreq, regs);
3931 EXPORT_SYMBOL(pfm_mod_write_ibrs);
3934 pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3938 if (req == NULL) return -EINVAL;
3940 ctx = GET_PMU_CTX();
3942 if (ctx == NULL) return -EINVAL;
3945 * for now limit to current task, which is enough when calling
3946 * from overflow handler
3948 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3950 return pfm_write_dbrs(ctx, req, nreq, regs);
3952 EXPORT_SYMBOL(pfm_mod_write_dbrs);
3956 pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3958 pfarg_features_t *req = (pfarg_features_t *)arg;
3960 req->ft_version = PFM_VERSION;
3965 pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3967 struct pt_regs *tregs;
3968 struct task_struct *task = PFM_CTX_TASK(ctx);
3969 int state, is_system;
3971 state = ctx->ctx_state;
3972 is_system = ctx->ctx_fl_system;
3974 if (state != PFM_CTX_LOADED && state != PFM_CTX_MASKED) return -EINVAL;
3977 * In system wide and when the context is loaded, access can only happen
3978 * when the caller is running on the CPU being monitored by the session.
3979 * It does not have to be the owner (ctx_task) of the context per se.
3981 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3982 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3985 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3986 PFM_CTX_TASK(ctx)->pid,
3990 * in system mode, we need to update the PMU directly
3991 * and the user level state of the caller, which may not
3992 * necessarily be the creator of the context.
3996 * Update local PMU first
4000 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4004 * update local cpuinfo
4006 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4009 * stop monitoring, does srlz.i
4014 * stop monitoring in the caller
4016 ia64_psr(regs)->pp = 0;
4024 if (task == current) {
4025 /* stop monitoring at kernel level */
4029 * stop monitoring at the user level
4031 ia64_psr(regs)->up = 0;
4033 tregs = ia64_task_regs(task);
4036 * stop monitoring at the user level
4038 ia64_psr(tregs)->up = 0;
4041 * monitoring disabled in kernel at next reschedule
4043 ctx->ctx_saved_psr_up = 0;
4044 DPRINT(("task=[%d]\n", task->pid));
4051 pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4053 struct pt_regs *tregs;
4054 int state, is_system;
4056 state = ctx->ctx_state;
4057 is_system = ctx->ctx_fl_system;
4059 if (state != PFM_CTX_LOADED) return -EINVAL;
4062 * In system wide and when the context is loaded, access can only happen
4063 * when the caller is running on the CPU being monitored by the session.
4064 * It does not have to be the owner (ctx_task) of the context per se.
4066 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4067 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4072 * in system mode, we need to update the PMU directly
4073 * and the user level state of the caller, which may not
4074 * necessarily be the creator of the context.
4079 * set user level psr.pp for the caller
4081 ia64_psr(regs)->pp = 1;
4084 * now update the local PMU and cpuinfo
4086 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4089 * start monitoring at kernel level
4094 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4104 if (ctx->ctx_task == current) {
4106 /* start monitoring at kernel level */
4110 * activate monitoring at user level
4112 ia64_psr(regs)->up = 1;
4115 tregs = ia64_task_regs(ctx->ctx_task);
4118 * start monitoring at the kernel level the next
4119 * time the task is scheduled
4121 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4124 * activate monitoring at user level
4126 ia64_psr(tregs)->up = 1;
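/*
 * Hedged user-space sketch (under #if 0, not built): bracketing a
 * measurement with PFM_START/PFM_STOP once the context is loaded; both
 * commands take no argument, as reflected in pfm_cmd_tab below.
 */
#if 0
#include <perfmon/perfmon.h>

int example_measure(int ctx_fd)
{
	if (perfmonctl(ctx_fd, PFM_START, NULL, 0) == -1) return -1;

	/* ... workload runs here with monitoring active ... */

	return perfmonctl(ctx_fd, PFM_STOP, NULL, 0);
}
#endif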
4132 pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4134 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4139 for (i = 0; i < count; i++, req++) {
4141 cnum = req->reg_num;
4143 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4145 req->reg_value = PMC_DFL_VAL(cnum);
4147 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4149 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4154 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4159 pfm_check_task_exist(pfm_context_t *ctx)
4161 struct task_struct *g, *t;
4164 read_lock(&tasklist_lock);
4166 do_each_thread (g, t) {
4167 if (t->thread.pfm_context == ctx) {
4171 } while_each_thread (g, t);
4173 read_unlock(&tasklist_lock);
4175 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4181 pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4183 struct task_struct *task;
4184 struct thread_struct *thread;
4185 pfm_context_t *old;
4186 unsigned long flags;
4188 struct task_struct *owner_task = NULL;
4190 pfarg_load_t *req = (pfarg_load_t *)arg;
4191 unsigned long *pmcs_source, *pmds_source;
4194 int state, is_system, set_dbregs = 0;
4196 state = ctx->ctx_state;
4197 is_system = ctx->ctx_fl_system;
4199 * can only load from unloaded or terminated state
4201 if (state != PFM_CTX_UNLOADED) {
4202 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4208 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4210 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4211 DPRINT(("cannot use blocking mode on self\n"));
4215 ret = pfm_get_task(ctx, req->load_pid, &task);
4217 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4224 * system wide is self monitoring only
4226 if (is_system && task != current) {
4227 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4232 thread = &task->thread;
4236 * cannot load a context which is using range restrictions,
4237 * into a task that is being debugged.
4239 if (ctx->ctx_fl_using_dbreg) {
4240 if (thread->flags & IA64_THREAD_DBG_VALID) {
4242 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4248 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4249 DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
4252 pfm_sessions.pfs_sys_use_dbregs++;
4253 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
4260 if (ret) goto error;
4264 * SMP system-wide monitoring implies self-monitoring.
4266 * The programming model expects the task to
4267 * be pinned on a CPU throughout the session.
4268 * Here we take note of the current CPU at the
4269 * time the context is loaded. No call from
4270 * another CPU will be allowed.
4272 * The pinning via sched_setaffinity()
4273 * must be done by the calling task prior
4274 * to this call.
4276 * systemwide: keep track of CPU this session is supposed to run on
4278 the_cpu = ctx->ctx_cpu = smp_processor_id();
4282 * now reserve the session
4284 ret = pfm_reserve_session(current, is_system, the_cpu);
4285 if (ret) goto error;
4288 * task is necessarily stopped at this point.
4290 * If the previous context was zombie, then it got removed in
4291 * pfm_save_regs(). Therefore we should not see it here.
4292 * If we see a context, then this is an active context
4294 * XXX: needs to be atomic
4296 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4297 thread->pfm_context, ctx));
4299 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4301 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4305 pfm_reset_msgq(ctx);
4307 ctx->ctx_state = PFM_CTX_LOADED;
4310 * link context to task
4312 ctx->ctx_task = task;
4316 * we load as stopped
4318 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4319 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4321 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4323 thread->flags |= IA64_THREAD_PM_VALID;
4327 * propagate into thread-state
4329 pfm_copy_pmds(task, ctx);
4330 pfm_copy_pmcs(task, ctx);
4332 pmcs_source = thread->pmcs;
4333 pmds_source = thread->pmds;
4336 * always the case for system-wide
4338 if (task == current) {
4340 if (is_system == 0) {
4342 /* allow user level control */
4343 ia64_psr(regs)->sp = 0;
4344 DPRINT(("clearing psr.sp for [%d]\n", task->pid));
4346 SET_LAST_CPU(ctx, smp_processor_id());
4348 SET_ACTIVATION(ctx);
4351 * push the other task out, if any
4353 owner_task = GET_PMU_OWNER();
4354 if (owner_task) pfm_lazy_save_regs(owner_task);
4358 * load all PMD from ctx to PMU (as opposed to thread state)
4359 * restore all PMC from ctx to PMU
4361 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4362 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4364 ctx->ctx_reload_pmcs[0] = 0UL;
4365 ctx->ctx_reload_pmds[0] = 0UL;
4368 * guaranteed safe by earlier check against DBG_VALID
4370 if (ctx->ctx_fl_using_dbreg) {
4371 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
4372 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
4377 SET_PMU_OWNER(task, ctx);
4379 DPRINT(("context loaded on PMU for [%d]\n", task->pid));
4382 * when not current, task MUST be stopped, so this is safe
4384 regs = ia64_task_regs(task);
4386 /* force a full reload */
4387 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4388 SET_LAST_CPU(ctx, -1);
4390 /* initial saved psr (stopped) */
4391 ctx->ctx_saved_psr_up = 0UL;
4392 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4398 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4401 * we must undo the dbregs setting (for system-wide)
4403 if (ret && set_dbregs) {
4405 pfm_sessions.pfs_sys_use_dbregs--;
4409 * release task, there is now a link with the context
4411 if (is_system == 0 && task != current) {
4415 ret = pfm_check_task_exist(ctx);
4417 ctx->ctx_state = PFM_CTX_UNLOADED;
4418 ctx->ctx_task = NULL;
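/*
 * Hedged user-space sketch (under #if 0, not built): attaching a context
 * to a task with PFM_LOAD_CONTEXT. A per-task target is typically stopped
 * via ptrace() first; self-monitoring simply passes getpid(). Assumes the
 * pfarg_load_t layout from <perfmon/perfmon.h>.
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <perfmon/perfmon.h>

int example_load(int ctx_fd, pid_t pid)
{
	pfarg_load_t load;

	memset(&load, 0, sizeof(load));
	load.load_pid = pid;	/* task the context gets attached to */

	return perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load, 1);
}
#endif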
4426 * in this function, we do not need to increase the use count
4427 * for the task via get_task_struct(), because we hold the
4428 * context lock. If the task were to disappear while having
4429 * a context attached, it would go through pfm_exit_thread()
4430 * which also grabs the context lock and would therefore be blocked
4431 * until we are done here.
4433 static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4436 pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4438 struct task_struct *task = PFM_CTX_TASK(ctx);
4439 struct pt_regs *tregs;
4440 int prev_state, is_system;
4443 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
4445 prev_state = ctx->ctx_state;
4446 is_system = ctx->ctx_fl_system;
4449 * unload only when necessary
4451 if (prev_state == PFM_CTX_UNLOADED) {
4452 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4457 * clear psr and dcr bits
4459 ret = pfm_stop(ctx, NULL, 0, regs);
4460 if (ret) return ret;
4462 ctx->ctx_state = PFM_CTX_UNLOADED;
4465 * in system mode, we need to update the PMU directly
4466 * and the user level state of the caller, which may not
4467 * necessarily be the creator of the context.
4474 * local PMU is taken care of in pfm_stop()
4476 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4477 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4480 * save PMDs in context
4483 pfm_flush_pmds(current, ctx);
4486 * at this point we are done with the PMU
4487 * so we can unreserve the resource.
4489 if (prev_state != PFM_CTX_ZOMBIE)
4490 pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);
4493 * disconnect context from task
4495 task->thread.pfm_context = NULL;
4497 * disconnect task from context
4499 ctx->ctx_task = NULL;
4502 * There is nothing more to clean up here.
4510 tregs = task == current ? regs : ia64_task_regs(task);
4512 if (task == current) {
4514 * cancel user level control
4516 ia64_psr(regs)->sp = 1;
4518 DPRINT(("setting psr.sp for [%d]\n", task->pid));
4521 * save PMDs to context
4524 pfm_flush_pmds(task, ctx);
4527 * at this point we are done with the PMU
4528 * so we can unreserve the resource.
4530 * when state was ZOMBIE, we have already unreserved.
4532 if (prev_state != PFM_CTX_ZOMBIE)
4533 pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);
4536 * reset activation counter and psr
4538 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4539 SET_LAST_CPU(ctx, -1);
4542 * PMU state will not be restored
4544 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4547 * break links between context and task
4549 task->thread.pfm_context = NULL;
4550 ctx->ctx_task = NULL;
4552 PFM_SET_WORK_PENDING(task, 0);
4554 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4555 ctx->ctx_fl_can_restart = 0;
4556 ctx->ctx_fl_going_zombie = 0;
4558 DPRINT(("disconnected [%d] from context\n", task->pid));
4564 pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
4566 struct task_struct *task = ctx->ctx_task;
4568 ia64_psr(regs)->up = 0;
4569 ia64_psr(regs)->sp = 1;
4571 if (GET_PMU_OWNER() == task) {
4572 DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
4573 SET_PMU_OWNER(NULL, NULL);
4577 * disconnect the task from the context and vice-versa
4579 PFM_SET_WORK_PENDING(task, 0);
4581 task->thread.pfm_context = NULL;
4582 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4584 DPRINT(("force cleanupf for [%d]\n", task->pid));
4590 * called only from exit_thread(): task == current
4591 * we come here only if current has a context attached (loaded or masked)
4594 pfm_exit_thread(struct task_struct *task)
4597 unsigned long flags;
4598 struct pt_regs *regs = ia64_task_regs(task);
4602 ctx = PFM_GET_CTX(task);
4604 PROTECT_CTX(ctx, flags);
4606 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
4608 state = ctx->ctx_state;
4610 case PFM_CTX_UNLOADED:
4612 * only comes to this function if pfm_context is not NULL, i.e., cannot
4613 * be in unloaded state
4615 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
4617 case PFM_CTX_LOADED:
4618 case PFM_CTX_MASKED:
4619 ret = pfm_context_unload(ctx, NULL, 0, regs);
4621 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
4623 DPRINT(("ctx unloaded for current state was %d\n", state));
4625 pfm_end_notify_user(ctx);
4627 case PFM_CTX_ZOMBIE:
4628 ret = pfm_context_unload(ctx, NULL, 0, regs);
4630 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
4635 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
4638 UNPROTECT_CTX(ctx, flags);
4640 { u64 psr = pfm_get_psr();
4641 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4642 BUG_ON(GET_PMU_OWNER());
4643 BUG_ON(ia64_psr(regs)->up);
4644 BUG_ON(ia64_psr(regs)->pp);
4648 * All memory free operations (especially for vmalloc'ed memory)
4649 * MUST be done with interrupts ENABLED.
4651 if (free_ok) pfm_context_free(ctx);
4655 * functions MUST be listed in the increasing order of their index (see perfmon.h)
4657 #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4658 #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4659 #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4660 #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4661 #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4663 static pfm_cmd_desc_t pfm_cmd_tab[]={
4664 /* 0 */PFM_CMD_NONE,
4665 /* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4666 /* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4667 /* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4668 /* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4669 /* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4670 /* 6 */PFM_CMD_NONE,
4671 /* 7 */PFM_CMD_NONE,
4672 /* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4673 /* 9 */PFM_CMD_NONE,
4674 /* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4675 /* 11 */PFM_CMD_NONE,
4676 /* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4677 /* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4678 /* 14 */PFM_CMD_NONE,
4679 /* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4680 /* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4681 /* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4682 /* 18 */PFM_CMD_NONE,
4683 /* 19 */PFM_CMD_NONE,
4684 /* 20 */PFM_CMD_NONE,
4685 /* 21 */PFM_CMD_NONE,
4686 /* 22 */PFM_CMD_NONE,
4687 /* 23 */PFM_CMD_NONE,
4688 /* 24 */PFM_CMD_NONE,
4689 /* 25 */PFM_CMD_NONE,
4690 /* 26 */PFM_CMD_NONE,
4691 /* 27 */PFM_CMD_NONE,
4692 /* 28 */PFM_CMD_NONE,
4693 /* 29 */PFM_CMD_NONE,
4694 /* 30 */PFM_CMD_NONE,
4695 /* 31 */PFM_CMD_NONE,
4696 /* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4697 /* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4699 #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4702 pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4704 struct task_struct *task;
4707 state = ctx->ctx_state;
4709 task = PFM_CTX_TASK(ctx);
4711 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4715 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4719 task->state, PFM_CMD_STOPPED(cmd)));
4722 * self-monitoring always ok.
4724 * for system-wide, the caller can either be the creator of the
4725 * context (the one to which the context is attached) OR
4726 * a task running on the same CPU as the session.
4728 if (task == current || ctx->ctx_fl_system) return 0;
4731 * context is UNLOADED or MASKED, we are safe to go
4733 if (state == PFM_CTX_UNLOADED || state == PFM_CTX_MASKED) return 0;
4735 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
4738 * context is loaded, we must make sure the task is stopped
4739 * We could lift this restriction for UP but it would mean that
4740 * the user has no guarantee the task would not run between
4741 * two successive calls to perfmonctl(). That's probably OK.
4742 * If this user wants to ensure the task does not run, then
4743 * the task must be stopped.
4745 if (PFM_CMD_STOPPED(cmd) && task->state != TASK_STOPPED) {
4746 DPRINT(("[%d] task not in stopped state\n", task->pid));
4750 UNPROTECT_CTX(ctx, flags);
4752 wait_task_inactive(task);
4754 PROTECT_CTX(ctx, flags);
4760 * system-call entry point (must return long)
4763 sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
4764 long arg8, long stack)
4766 struct pt_regs *regs = (struct pt_regs *)&stack;
4767 struct file *file = NULL;
4768 pfm_context_t *ctx = NULL;
4769 unsigned long flags = 0UL;
4770 void *args_k = NULL;
4771 long ret; /* will expand int return types */
4772 size_t base_sz, sz, xtra_sz = 0;
4773 int narg, completed_args = 0, call_made = 0, cmd_flags;
4774 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4775 int (*getsize)(void *arg, size_t *sz);
4776 #define PFM_MAX_ARGSIZE 4096
4779 * reject any call if perfmon was disabled at initialization
4781 if (unlikely(PFM_IS_DISABLED())) return -ENOSYS;
4783 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4784 DPRINT(("invalid cmd=%d\n", cmd));
4788 func = pfm_cmd_tab[cmd].cmd_func;
4789 narg = pfm_cmd_tab[cmd].cmd_narg;
4790 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4791 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4792 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4794 if (unlikely(func == NULL)) {
4795 DPRINT(("invalid cmd=%d\n", cmd));
4799 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4807 * check if number of arguments matches what the command expects
4809 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4813 sz = xtra_sz + base_sz*count;
4815 * limit abuse to min page size
4817 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4818 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
4823 * allocate default-sized argument buffer
4825 if (likely(count && args_k == NULL)) {
4826 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4827 if (args_k == NULL) return -ENOMEM;
4835 * assume sz = 0 for commands without parameters
4837 if (sz && copy_from_user(args_k, arg, sz)) {
4838 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4843 * check if command supports extra parameters
4845 if (completed_args == 0 && getsize) {
4847 * get extra parameters size (based on main argument)
4849 ret = (*getsize)(args_k, &xtra_sz);
4850 if (ret) goto error_args;
4854 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4856 /* retry if necessary */
4857 if (likely(xtra_sz)) goto restart_args;
4860 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4865 if (unlikely(file == NULL)) {
4866 DPRINT(("invalid fd %d\n", fd));
4869 if (unlikely(PFM_IS_FILE(file) == 0)) {
4870 DPRINT(("fd %d not related to perfmon\n", fd));
4874 ctx = (pfm_context_t *)file->private_data;
4875 if (unlikely(ctx == NULL)) {
4876 DPRINT(("no context for fd %d\n", fd));
4879 prefetch(&ctx->ctx_state);
4881 PROTECT_CTX(ctx, flags);
4884 * check task is stopped
4886 ret = pfm_check_task_state(ctx, cmd, flags);
4887 if (unlikely(ret)) goto abort_locked;
4890 ret = (*func)(ctx, args_k, count, regs);
4896 DPRINT(("context unlocked\n"));
4897 UNPROTECT_CTX(ctx, flags);
4901 /* copy argument back to user, if needed */
4902 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4905 if (args_k) kfree(args_k);
4907 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
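/*
 * Illustrative userspace sketch (not compiled into the kernel) of the calling
 * convention enforced above: every command goes through
 * perfmonctl(fd, cmd, arg, count), where fd designates the context file for
 * all commands carrying the PFM_CMD_FD flag. PFM_CREATE_CONTEXT ignores fd
 * and returns the new descriptor in its argument. Structure and field names
 * are the perfmon-2 ones from <perfmon.h>; the register pair, event encoding
 * and error handling are assumptions for the example.
 */
#if 0
	pfarg_context_t ctx;
	pfarg_reg_t pc, pd;
	pfarg_load_t load;

	memset(&ctx, 0, sizeof(ctx));
	perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1);	/* fd ignored for this cmd */

	memset(&pc, 0, sizeof(pc));
	pc.reg_num   = 4;		/* PMC4/PMD4 pair (example)            */
	pc.reg_value = 0UL;		/* PMU-specific event encoding goes here */
	perfmonctl(ctx.ctx_fd, PFM_WRITE_PMCS, &pc, 1);

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = 4;
	perfmonctl(ctx.ctx_fd, PFM_WRITE_PMDS, &pd, 1);

	memset(&load, 0, sizeof(load));
	load.load_pid = getpid();	/* self-monitoring                     */
	perfmonctl(ctx.ctx_fd, PFM_LOAD_CONTEXT, &load, 1);

	perfmonctl(ctx.ctx_fd, PFM_START, NULL, 0);
	/* ... workload ... */
	perfmonctl(ctx.ctx_fd, PFM_STOP, NULL, 0);

	perfmonctl(ctx.ctx_fd, PFM_READ_PMDS, &pd, 1);

	close(ctx.ctx_fd);
#endif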
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;

	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			//pfm_stop_monitoring(current, regs);
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
	if (ctx->ctx_fl_system) {
		printk(KERN_ERR "perfmon: pfm_context_force_terminate [%d] is system-wide\n", current->pid);
		return;
	}
	/*
	 * we stop the whole thing, we do not need to flush:
	 * we know we WERE masked
	 */
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	/*
	 * disconnect the task from the context and vice-versa
	 */
	current->thread.pfm_context  = NULL;
	current->thread.flags       &= ~IA64_THREAD_PM_VALID;
	ctx->ctx_task = NULL;

	DPRINT(("context terminated\n"));

	/*
	 * and wake up the controlling task, indicating we are now disconnected
	 */
	wake_up_interruptible(&ctx->ctx_zombieq);

	/*
	 * given that the context is still locked, the controlling
	 * task will only get access when we return from
	 * pfm_handle_work().
	 */
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	pfm_clear_task_notify();

	regs = ia64_task_regs(current);

	/*
	 * extract reason for being here and clear it
	 */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("before block sleeping\n"));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we call down()
	 */
	ret = down_interruptible(&ctx->ctx_restart_sem);

	DPRINT(("after block sleeping ret=%d\n", ret));

	PROTECT_CTX(ctx, flags);

	/*
	 * we need to re-read ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between,
	 * which can change PMD values and therefore cause
	 * ovfl_regs to be reset for these new PMD values.
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}
	/*
	 * in case of interruption of down() we don't restart anything
	 */
	if (ret < 0) goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	UNPROTECT_CTX(ctx, flags);
}
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		DPRINT(("ignoring overflow notification, owner is zombie\n"));
		return 0;
	}

	DPRINT(("waking up somebody\n"));

	if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
	kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

	return 0;
}
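/*
 * Since the wakeup path above also drives kill_fasync(), a userspace tool can
 * opt for SIGIO-based delivery instead of blocking in read()/poll() on the
 * context fd. A minimal sketch (not compiled into the kernel), using only the
 * standard fcntl(2) async-notification interface:
 */
#if 0
	fcntl(ctx_fd, F_SETOWN, getpid());	/* route SIGIO to this process */
	fcntl(ctx_fd, F_SETFL, fcntl(ctx_fd, F_GETFL) | O_ASYNC);
	/* a SIGIO handler can then read the pending pfm_msg_t from ctx_fd */
#endif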
static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
	pfm_msg_t *msg = NULL;

	if (ctx->ctx_fl_no_msg == 0) {
		msg = pfm_get_new_msg(ctx);
		if (msg == NULL) {
			printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
			return -1;
		}
		msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
		msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
		msg->pfm_ovfl_msg.msg_active_set   = 0;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
		msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
	}

	DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
		msg, ctx->ctx_fl_no_msg, ctx->ctx_fd, ovfl_pmds));

	return pfm_notify_user(ctx, msg);
}
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* no leak */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type    = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg, ctx->ctx_fl_no_msg, ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
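/*
 * Userspace view of the notification queue (illustrative sketch, not compiled
 * into the kernel): the messages queued by the two functions above are
 * drained with read(2) on the context fd, one pfm_msg_t at a time, with the
 * field names as declared in <perfmon.h>. For a blocking context, the
 * monitored task stays blocked until the tool issues PFM_RESTART. Error
 * handling omitted.
 */
#if 0
	pfm_msg_t msg;

	for (;;) {
		if (read(ctx_fd, &msg, sizeof(msg)) != sizeof(msg)) break;

		if (msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL) {
			/* inspect msg.pfm_ovfl_msg.msg_ovfl_pmds[0], process
			 * the sampling buffer if one is attached, then: */
			perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
		} else if (msg.pfm_gen_msg.msg_type == PFM_MSG_END) {
			break;	/* monitored task exited */
		}
	}
#endif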
/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void
pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/*
	 * sanity test. Should never happen
	 */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp   = ia64_get_itc();
	mask     = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf.ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		     "used_pmds=0x%lx\n",
		     pmc0,
		     task ? task->pid: -1,
		     (regs ? regs->cr_iip : 0),
		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
		     ctx->ctx_used_pmds[0]));

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
	for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip pmds which did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd via
		 * pfm_read_pmds().
		 */
		old_val              = new_val = ctx->ctx_pmds[i].val;
		new_val             += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/*
		 * check for overflow condition
		 */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			     i,
			     new_val,
			     old_val,
			     ia64_get_pmd(i) & ovfl_val,
			     ovfl_pmds,
			     ovfl_notify));
	}

	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
	if (ovfl_pmds == 0UL) return;
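	/*
	 * Worked example of the virtual counter arithmetic above (illustrative
	 * sketch, not compiled; the 47-bit width is an assumption for the
	 * example, the real width comes from pmu_conf.ovfl_val). Each hardware
	 * overflow accounts for 1 + ovfl_val events, since ovfl_val is the
	 * mask of implemented counter bits (2^width - 1). The 64-bit software
	 * counter itself has overflowed exactly when the addition wraps, which
	 * is what the old_val > new_val test detects.
	 */
#if 0
	{
		unsigned long ovfl_val = (1UL << 47) - 1;	/* 47-bit PMD assumed     */
		unsigned long old_val  = ~0UL - ovfl_val;	/* soft value near wrap   */
		unsigned long new_val  = old_val + 1 + ovfl_val; /* one hardware overflow */

		/* new_val is now 0: the 64-bit software counter wrapped */
		if (old_val > new_val) {
			/* 64-bit overflow: record the pmd in ovfl_pmds,
			 * and in ovfl_notify if the user asked for it */
		}
	}
#endif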
	/*
	 * reset all control bits
	 */
	ovfl_ctrl.val = 0;
	reset_pmds    = 0UL;

	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			ovfl_arg.ovfl_pmd      = (unsigned char )i;
			ovfl_arg.ovfl_notify   = ovfl_notify & mask ? 1 : 0;
			ovfl_arg.active_set    = 0;
			ovfl_arg.ovfl_ctrl.val = 0; /* module must fill in all fields */
			ovfl_arg.smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg.pmd_value      = ctx->ctx_pmds[i].val;
			ovfl_arg.pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg.pmd_eventid    = ctx->ctx_pmds[i].eventid;

			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into the sampling buffer.
			 */
			for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
				if ((smpl_pmds & 0x1) == 0) continue;
				ovfl_arg.smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
				DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg.smpl_pmds_values[k-1]));
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/*
			 * call custom buffer format record (handler) routine
			 */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
			ovfl_ctrl.bits.notify_user     |= ovfl_arg.ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task      |= ovfl_arg.ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg.ovfl_ctrl.bits.mask_monitoring;
			/*
			 * build the bitmask of pmds to reset now
			 */
			if (ovfl_arg.ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}
		/*
		 * when the module cannot handle the rest of the overflows, we abort right here
		 */
		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}
		/*
		 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
		 */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * when no sampling module is used, the default
		 * is to notify on overflow if requested by the user
		 */
		ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
		ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
		/*
		 * if needed, we reset all overflowed pmds
		 */
		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}
	DPRINT(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

	/*
	 * reset the requested PMD registers using the short reset values
	 */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/*
		 * keep track of what to reset when unblocking
		 */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/*
		 * check for blocking context
		 */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			/*
			 * set the perfmon specific checking pending work for the task
			 */
			PFM_SET_WORK_PENDING(task, 1);

			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
			pfm_set_task_notify(task);
		}
		/*
		 * defer until state is changed (shortens the spin window). the context is locked
		 * anyway, so the signal receiver would come spin for nothing.
		 */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
		     GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
		     PFM_GET_WORK_PENDING(task),
		     ctx->ctx_fl_trap_reason,
		     ovfl_pmds,
		     ovfl_notify,
		     ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
	/*
	 * in case monitoring must be stopped, we toggle the psr bits
	 */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state          = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	/*
	 * send notification now
	 */
	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;

sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
	       smp_processor_id(),
	       task ? task->pid : -1,
	       pmc0);
	return;

stop_monitoring:
	/*
	 * in SMP, a zombie context is never restored but reclaimed in pfm_load_regs().
	 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
	 * come here as zombie only if the task is the current task. In which case, we
	 * can access the PMU hardware directly.
	 *
	 * Note that zombies do have PM_VALID set. So here we do the minimal.
	 *
	 * In case the context was zombified it could not be reclaimed at the time
	 * the monitoring program exited. At this point, the PMU reservation has been
	 * returned, the sampling buffer has been freed. We must convert this call
	 * into a spurious interrupt. However, we must also avoid infinite overflows
	 * by stopping monitoring for this task. We can only come here for a per-task
	 * context. All we need to do is to stop monitoring using the psr bits which
	 * are always task private. By re-enabling secure monitoring, we ensure that
	 * the monitored task will not be able to re-activate monitoring.
	 * The task will eventually be context switched out, at which point the context
	 * will be reclaimed (that includes releasing ownership of the PMU).
	 *
	 * So there might be a window of time where the number of per-task sessions is zero
	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
	 * context. This is safe because if a per-task session comes in, it will push this one
	 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system-wide
	 * session is forced onto that CPU, given that we use task pinning, pfm_save_regs() will
	 * also push our zombie context out.
	 *
	 * Overall pretty hairy stuff....
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	return;
}
static int
pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep it unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
	       this_cpu, task->pid);
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
	       this_cpu, task->pid);
	pfm_unfreeze_pmu();
	return -1;
}
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;

	this_cpu = get_cpu();
	min      = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
	max      = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

	start_cycles = ia64_get_itc();

	ret = pfm_do_interrupt_handler(irq, arg, regs);

	total_cycles = ia64_get_itc();

	/*
	 * don't measure spurious interrupts
	 */
	if (likely(ret == 0)) {
		total_cycles -= start_cycles;

		if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
		if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

		pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
	}
	put_cpu_no_resched();

	return IRQ_HANDLED;
}
/* for debug only */
static int
pfm_proc_info(char *page)
{
	char *p = page;
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long psr, flags;
	int online_cpus = 0;
	int i;

	p += sprintf(p, "perfmon version           : %u.%u\n", PFM_VERSION_MAJ, PFM_VERSION_MIN);
	p += sprintf(p, "model                     : %s\n", pmu_conf.pmu_name);
	p += sprintf(p, "fastctxsw                 : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No");
	p += sprintf(p, "expert mode               : %s\n", pfm_sysctl.expert_mode > 0 ? "Yes": "No");
	p += sprintf(p, "ovfl_mask                 : 0x%lx\n", pmu_conf.ovfl_val);

	for(i=0; i < NR_CPUS; i++) {
		if (cpu_online(i) == 0) continue;
		p += sprintf(p, "CPU%-2d overflow intrs      : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
		p += sprintf(p, "CPU%-2d overflow cycles     : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles);
		p += sprintf(p, "CPU%-2d overflow min        : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles_min);
		p += sprintf(p, "CPU%-2d overflow max        : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles_max);
		p += sprintf(p, "CPU%-2d smpl handler calls  : %lu\n", i, pfm_stats[i].pfm_smpl_handler_calls);
		p += sprintf(p, "CPU%-2d smpl handler cycles : %lu\n", i, pfm_stats[i].pfm_smpl_handler_cycles);
		p += sprintf(p, "CPU%-2d spurious intrs      : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count);
		p += sprintf(p, "CPU%-2d replay intrs        : %lu\n", i, pfm_stats[i].pfm_replay_ovfl_intr_count);
		p += sprintf(p, "CPU%-2d syst_wide           : %d\n" , i, pfm_get_cpu_data(pfm_syst_info, i) & PFM_CPUINFO_SYST_WIDE ? 1 : 0);
		p += sprintf(p, "CPU%-2d dcr_pp              : %d\n" , i, pfm_get_cpu_data(pfm_syst_info, i) & PFM_CPUINFO_DCR_PP ? 1 : 0);
		p += sprintf(p, "CPU%-2d exclude idle        : %d\n" , i, pfm_get_cpu_data(pfm_syst_info, i) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0);
		p += sprintf(p, "CPU%-2d owner               : %d\n" , i, pfm_get_cpu_data(pmu_owner, i) ? pfm_get_cpu_data(pmu_owner, i)->pid: -1);
		p += sprintf(p, "CPU%-2d context             : %p\n" , i, pfm_get_cpu_data(pmu_ctx, i));
		p += sprintf(p, "CPU%-2d activations         : %lu\n", i, pfm_get_cpu_data(pmu_activation_number,i));
		online_cpus++;
	}

	if (online_cpus == 1) {
		/*
		 * dump the live registers of this (the only) CPU; keep
		 * interrupts off so the dump is consistent
		 */
		local_irq_save(flags);
		psr = pfm_get_psr();
		p += sprintf(p, "CPU%-2d psr                 : 0x%lx\n", smp_processor_id(), psr);
		p += sprintf(p, "CPU%-2d pmc0                : 0x%lx\n", smp_processor_id(), ia64_get_pmc(0));
		for(i=4; i < 8; i++) {
			p += sprintf(p, "CPU%-2d pmc%u                : 0x%lx\n", smp_processor_id(), i, ia64_get_pmc(i));
			p += sprintf(p, "CPU%-2d pmd%u                : 0x%lx\n", smp_processor_id(), i, ia64_get_pmd(i));
		}
		local_irq_restore(flags);
	}

	p += sprintf(p, "proc_sessions             : %u\n"
		     "sys_sessions              : %u\n"
		     "sys_use_dbregs            : %u\n"
		     "ptrace_use_dbregs         : %u\n",
		     pfm_sessions.pfs_task_sessions,
		     pfm_sessions.pfs_sys_sessions,
		     pfm_sessions.pfs_sys_use_dbregs,
		     pfm_sessions.pfs_ptrace_use_dbregs);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		p += sprintf(p, "format                    : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			     entry->fmt_uuid[0],
			     entry->fmt_uuid[1],
			     entry->fmt_uuid[2],
			     entry->fmt_uuid[3],
			     entry->fmt_uuid[4],
			     entry->fmt_uuid[5],
			     entry->fmt_uuid[6],
			     entry->fmt_uuid[7],
			     entry->fmt_uuid[8],
			     entry->fmt_uuid[9],
			     entry->fmt_uuid[10],
			     entry->fmt_uuid[11],
			     entry->fmt_uuid[12],
			     entry->fmt_uuid[13],
			     entry->fmt_uuid[14],
			     entry->fmt_uuid[15],
			     entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);

	return p - page;
}

/* /proc interface, for debug only */
static int
perfmon_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	int len = pfm_proc_info(page);

	if (len <= off+count) *eof = 1;

	*start = page + off;
	len   -= off;

	if (len > count) len = count;
	if (len < 0) len = 0;

	return len;
}
/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable(), hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on every CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = ia64_task_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out:
		 * restore monitoring for the next task
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;
	t = &task->thread;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = ia64_task_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);

		return;
	}

	/*
	 * sanity check
	 */
	if (ctx->ctx_last_activation != GET_ACTIVATION()) {
		pfm_unprotect_ctx_ctxsw(ctx, flags);
		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMDs as we have no
	 * guarantee we will be scheduled on that same
	 * CPU again.
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0; ia64_srlz_d() done in pfm_save_pmds().
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}
static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);
	t   = &task->thread;

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 *
	 * XXX: I don't think this is necessary
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0; ia64_srlz_d() done in pfm_save_pmds().
	 * it is needed to check for pending overflow
	 * on the restore path
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * now we can unmask PMU interrupts, they will
	 * be treated as purely spurious and we will not
	 * lose any information
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	t = &task->thread;
	/*
	 * possible on unload
	 */
	if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = ia64_task_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling) so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid picking
		 * up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
	/*
	 * when the context is MASKED, we will restore PMCs with plm=0
	 * and PMDs with stale information, but that's ok, nothing
	 * will be captured.
	 *
	 * XXX: optimize here
	 */
	if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();
		t->pmcs[0] = 0UL;
#ifndef CONFIG_MCKINLEY
		/*
		 * will replay the PMU interrupt
		 */
		hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
#endif
		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * dump activation value for this PMU
	 */
	INC_ACTIVATION();
	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to the context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /*  !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	struct thread_struct *t;
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	t     = &task->thread;
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * short path: our state is still there, we just
	 * need to restore psr and we go.
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow_handler, so we are safe w.r.t. interrupt
	 * concurrency even without interrupt masking.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff !
	 *
	 * Upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling) so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid picking
	 * up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(t->pmds, pmd_mask);
	pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();

		t->pmcs[0] = 0UL;

#ifndef CONFIG_MCKINLEY
		/*
		 * will replay the PMU interrupt
		 */
		hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
#endif
		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system wide measurements)
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

#ifdef CONFIG_SMP
	if (task == current) {
#else
	/*
	 * in UP, the state can still be in the registers
	 */
	if (task == current || GET_PMU_OWNER() == task) {
#endif
		can_access_pmu = 1;
		/*
		 * Mark the PMU as not owned.
		 * This will cause the interrupt handler to do nothing in case an overflow
		 * interrupt was in-flight.
		 * This also guarantees that pmc0 will contain the final state.
		 * It virtually gives us full control over overflow processing from that point
		 * on.
		 */
		SET_PMU_OWNER(NULL, NULL);

		/*
		 * read current overflow status:
		 *
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0); /* slow */

		/*
		 * reset freeze bit, overflow status information destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = task->thread.pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		task->thread.pmcs[0] = 0;
	}
	ovfl_val = pmu_conf.ovfl_val;
	/*
	 * we save all the used pmds
	 * we take care of overflows for counting PMDs
	 *
	 * XXX: sampling situation is not taken into account here
	 */
	mask2 = ctx->ctx_used_pmds[0];
	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip non used pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can access PMU is always true in system wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task->pid,
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the saved context from save_regs() such that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
			}
		}

		DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val));

		if (is_self) task->thread.pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
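/*
 * Worked example of the reconstruction done in pfm_flush_pmds() (illustrative
 * sketch, not compiled; the 47-bit counter width and the register number are
 * assumptions for the example). The hardware PMD only holds the low bits, so
 * the full 64-bit count is the software-accumulated value plus the residual
 * hardware bits, plus one extra hardware period (1 + ovfl_val) if pmc0 says
 * the counter overflowed before the PMU was frozen.
 */
#if 0
	{
		unsigned long ovfl_val = (1UL << 47) - 1;	/* 47-bit PMD assumed        */
		unsigned long pmc0     = 1UL << 4;		/* pending overflow on pmd4  */
		unsigned long soft     = 0x1800000000000UL;	/* from the overflow handler */
		unsigned long hw       = 0x123UL;		/* residual count in the PMD */
		unsigned long full     = soft + (hw & ovfl_val);

		if (pmc0 & (1UL << 4))
			full += 1 + ovfl_val;			/* account the frozen overflow */
	}
#endif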
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = SA_INTERRUPT,
	.name    = "perfmon"
};

/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
	       PFM_VERSION_MAJ,
	       PFM_VERSION_MIN,
	       IA64_PERFMON_VECTOR);

	/*
	 * PMU type sanity check
	 * XXX: maybe better to implement autodetection (but then we have a larger kernel)
	 */
	if (local_cpu_data->family != pmu_conf.pmu_family) {
		printk(KERN_INFO "perfmon: disabled, kernel only supports %s PMU family\n", pmu_conf.pmu_name);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMDs/PMCs from the
	 * description tables
	 */
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf.impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf.num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf.impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf.num_pmds     = n;
	pmu_conf.num_counters = n_counters;

	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf.use_rr_dbregs) {
		if (pmu_conf.num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf.num_ibrs);
			return -1;
		}
		if (pmu_conf.num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf.num_dbrs);
			return -1;
		}
	}

	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf.pmu_name,
	       pmu_conf.num_pmcs,
	       pmu_conf.num_pmds,
	       pmu_conf.num_counters,
	       ffz(pmu_conf.ovfl_val));

	/* sanity check */
	if (pmu_conf.num_pmds >= IA64_NUM_PMD_REGS || pmu_conf.num_pmcs >= IA64_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = create_proc_read_entry ("perfmon", 0, 0, perfmon_read_entry, NULL);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		return -1;
	}

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	/* we are all set */
	pmu_conf.enabled = 1;

	return 0;
}

__initcall(pfm_init);
/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	int i;

	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (smp_processor_id() == 0)
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();

	/*
	 * we first initialize the PMU to a stable state.
	 * the values may have been changed from their power-up
	 * values by software executed before the kernel took over.
	 *
	 * At this point, pmu_conf has not yet been initialized
	 *
	 * On McKinley, this code is ineffective until PMC4 is initialized
	 * but that's all right because we take care of pmc0 later.
	 *
	 * XXX: potential problems with pmc1.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ia64_set_pmc(i, PMC_DFL_VAL(i));
	}

	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		ia64_set_pmd(i, 0UL);
	}
}
/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct thread_struct *t;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = ia64_task_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
	       this_cpu,
	       from,
	       current->pid,
	       regs->cr_iip,
	       current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
	       this_cpu,
	       ia64_get_pmc(0),
	       psr & IA64_PSR_PP ? 1 : 0,
	       psr & IA64_PSR_UP ? 1 : 0,
	       dcr & IA64_DCR_PP ? 1 : 0,
	       info,
	       ia64_psr(regs)->up,
	       ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	t = &current->thread;

	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
		       this_cpu,
		       ctx->ctx_state,
		       ctx->ctx_smpl_vaddr,
		       ctx->ctx_smpl_hdr,
		       ctx->ctx_fd,
		       ctx->ctx_task ? ctx->ctx_task->pid : -1,
		       ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}
/*
 * called from process.c:copy_thread(). task is the new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_threads()
	 */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
		long arg8, long stack)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */