 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
33 #include <linux/types.h>
34 #include <linux/slab.h>
36 #include <xfs_types.h>
42 static kmem_zone_t *ktrace_hdr_zone;
43 static kmem_zone_t *ktrace_ent_zone;
44 static int ktrace_zentries;
47 ktrace_init(int zentries)
49 ktrace_zentries = zentries;
51 ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
53 ASSERT(ktrace_hdr_zone);
55 ktrace_ent_zone = kmem_zone_init(ktrace_zentries
56 * sizeof(ktrace_entry_t),
58 ASSERT(ktrace_ent_zone);
64 kmem_cache_destroy(ktrace_hdr_zone);
65 kmem_cache_destroy(ktrace_ent_zone);
71 * Allocate a ktrace header and enough buffering for the given
75 ktrace_alloc(int nentries, int sleep)
80 ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
82 if (ktp == (ktrace_t*)NULL) {
84 * KM_SLEEP callers don't expect failure.
87 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
93 * Special treatment for buffers with the ktrace_zentries entries
95 if (nentries == ktrace_zentries) {
96 ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
99 ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
105 * KM_SLEEP callers don't expect failure.
107 if (sleep & KM_SLEEP)
108 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
110 kmem_free(ktp, sizeof(*ktp));
115 spinlock_init(&(ktp->kt_lock), "kt_lock");
117 ktp->kt_entries = ktep;
118 ktp->kt_nentries = nentries;
120 ktp->kt_rollover = 0;
128 * Free up the ktrace header and buffer. It is up to the caller
129 * to ensure that no-one is referencing it.
132 ktrace_free(ktrace_t *ktp)
136 if (ktp == (ktrace_t *)NULL)
139 spinlock_destroy(&ktp->kt_lock);
142 * Special treatment for the Vnode trace buffer.
144 if (ktp->kt_nentries == ktrace_zentries) {
145 kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
147 entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));
149 kmem_free(ktp->kt_entries, entries_size);
152 kmem_zone_free(ktrace_hdr_zone, ktp);
157 * Enter the given values into the "next" entry in the trace buffer.
158 * kt_index is always the index of the next entry to be filled.
180 static lock_t wrap_lock = SPIN_LOCK_UNLOCKED;
183 ktrace_entry_t *ktep;
188 * Grab an entry by pushing the index up to the next one.
190 spin_lock_irqsave(&wrap_lock, flags);
191 index = ktp->kt_index;
192 if (++ktp->kt_index == ktp->kt_nentries)
194 spin_unlock_irqrestore(&wrap_lock, flags);
196 if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
197 ktp->kt_rollover = 1;
199 ASSERT((index >= 0) && (index < ktp->kt_nentries));
201 ktep = &(ktp->kt_entries[index]);
213 ktep->val[10] = val10;
214 ktep->val[11] = val11;
215 ktep->val[12] = val12;
216 ktep->val[13] = val13;
217 ktep->val[14] = val14;
218 ktep->val[15] = val15;
222 * Return the number of entries in the trace buffer.
232 return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
238 * This is used to find the start of the trace buffer.
239 * In conjunction with ktrace_next() it can be used to
240 * iterate through the entire trace buffer. This code does
241 * not do any locking because it is assumed that it is called
244 * The caller must pass in a pointer to a ktrace_snap
245 * structure in which we will keep some state used to
246 * iterate through the buffer. This state must not touched
247 * by any code outside of this module.
250 ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
252 ktrace_entry_t *ktep;
256 if (ktp->kt_rollover)
257 index = ktp->kt_index;
261 ktsp->ks_start = index;
262 ktep = &(ktp->kt_entries[index]);
264 nentries = ktrace_nentries(ktp);
266 if (index < nentries) {
267 ktsp->ks_index = index;
270 if (index > nentries)
279 * This is used to iterate through the entries of the given
280 * trace buffer. The caller must pass in the ktrace_snap_t
281 * structure initialized by ktrace_first(). The return value
282 * will be either a pointer to the next ktrace_entry or NULL
283 * if all of the entries have been traversed.
291 ktrace_entry_t *ktep;
293 index = ktsp->ks_index;
294 if (index == ktsp->ks_start) {
297 ktep = &ktp->kt_entries[index];
301 if (index == ktrace_nentries(ktp)) {
304 ktsp->ks_index = index;
313 * Skip the next "count" entries and return the entry after that.
314 * Return NULL if this causes us to iterate past the beginning again.
324 ktrace_entry_t *ktep;
325 int nentries = ktrace_nentries(ktp);
327 index = ktsp->ks_index;
328 new_index = index + count;
329 while (new_index >= nentries) {
330 new_index -= nentries;
332 if (index == ktsp->ks_start) {
334 * We've iterated around to the start, so we're done.
337 } else if ((new_index < index) && (index < ktsp->ks_index)) {
339 * We've skipped past the start again, so we're done.
342 ktsp->ks_index = ktsp->ks_start;
344 ktep = &(ktp->kt_entries[new_index]);
346 if (new_index == nentries) {
349 ktsp->ks_index = new_index;