ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / drivers / s390 / cio / cmf.c
1 /*
2  * linux/drivers/s390/cio/cmf.c ($Revision: 1.13 $)
3  *
4  * Linux on zSeries Channel Measurement Facility support
5  *
6  * Copyright 2000,2003 IBM Corporation
7  *
8  * Author: Arnd Bergmann <arndb@de.ibm.com>
9  *
10  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25  */
26
27 #include <linux/bootmem.h>
28 #include <linux/device.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
31 #include <linux/module.h>
32 #include <linux/moduleparam.h>
33
34 #include <asm/ccwdev.h>
35 #include <asm/cio.h>
36 #include <asm/cmb.h>
37
38 #include "cio.h"
39 #include "css.h"
40 #include "device.h"
41 #include "ioasm.h"
42
/* parameter to enable cmf during boot, possible uses are:
 *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
 *               used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannels, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
50 #define ARGSTRING "s390cmf"
51
/* indices for READCMB; used as the 'index' argument of the
 * cmb_operations read callback and of cmf_read() */
enum cmb_index {
 /* basic and extended format: */
	cmb_ssch_rsch_count,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
 /* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
66
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *                  by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *                  machine
 * @CMF_AUTODETECT: default: use extended format when running on a z990
 *                  or later machine, otherwise fall back to basic format
 **/
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,	/* sentinel, resolved during initialization */
};
82 /**
83  * format - actual format for all measurement blocks
84  *
85  * The format module parameter can be set to a value of 0 (zero)
86  * or 1, indicating basic or extended format as described for
87  * enum cmb_format.
88  */
89 static int format = CMF_AUTODETECT;
90 module_param(format, bool, 0444);
91
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * all these functions operate on a struct cmf_device. There is only
 * one instance of struct cmb_operations because all cmf_device
 * objects are guaranteed to be of the same type.
 *
 * @alloc:      allocate memory for a channel measurement block,
 *              either with the help of a special pool or with kmalloc
 * @free:       free memory allocated with @alloc
 * @set:        enable or disable measurement
 * @read:       read a single value, identified by an enum cmb_index;
 *              time values are returned averaged over the sample count
 * @readall:    read a measurement block in a common format
 * @reset:      clear the data in the associated measurement block and
 *              reset its time stamp
 * @attr_group: sysfs attributes exposed for this format
 */
struct cmb_operations {
	int (*alloc)  (struct ccw_device*);
	void(*free)   (struct ccw_device*);
	int (*set)    (struct ccw_device*, u32);
	u64 (*read)   (struct ccw_device*, int);
	int (*readall)(struct ccw_device*, struct cmbdata *);
	void (*reset) (struct ccw_device*);

	struct attribute_group *attr_group;
};
/* selected once at initialization, depending on the detected format */
static struct cmb_operations *cmbops;
118
119 /* our user interface is designed in terms of nanoseconds,
120  * while the hardware measures total times in its own
121  * unit.*/
122 static inline u64 time_to_nsec(u32 value)
123 {
124         return ((u64)value) * 128000ull;
125 }
126
127 /*
128  * Users are usually interested in average times,
129  * not accumulated time.
130  * This also helps us with atomicity problems
131  * when reading sinlge values.
132  */
133 static inline u64 time_to_avg_nsec(u32 value, u32 count)
134 {
135         u64 ret;
136
137         /* no samples yet, avoid division by 0 */
138         if (count == 0)
139                 return 0;
140
141         /* value comes in units of 128 µsec */
142         ret = time_to_nsec(value);
143         do_div(ret, count);
144
145         return ret;
146 }
147
/* activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled. */
static inline void
cmf_activate(void *area, unsigned int onoff)
{
	/* SCHM takes its operands in general registers 1 and 2,
	 * so bind the locals to those registers explicitly */
	register void * __gpr2 asm("2");	/* measurement block area */
	register long __gpr1 asm("1");		/* 2 = activate, 0 = deactivate */

	__gpr2 = area;
	__gpr1 = onoff ? 2 : 0;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
163
/*
 * Update the measurement-related fields of a subchannel's schib:
 * @mme enables/disables measurement, @mbfc selects the block format
 * control, and @address is either an absolute block address (extended
 * format) or a block index into the global area (basic format).
 *
 * Returns 0 on success, -EBUSY while I/O or status is pending,
 * -ENODEV if the subchannel vanished, -EINVAL on any other failure.
 * Must be called with the device's ccwlock held.
 */
static int
set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
{
	int ret;
	int retry;
	struct subchannel *sch;
	struct schib *schib;

	sch = to_subchannel(cdev->dev.parent);
	schib = &sch->schib;
	/* msch can silently fail, so do it again if necessary */
	for (retry = 0; retry < 3; retry++) {
		/* prepare schib */
		stsch(sch->irq, schib);
		schib->pmcw.mme  = mme;
		schib->pmcw.mbfc = mbfc;
		/* address can be either a block address or a block index */
		if (mbfc)
			schib->mba = address;
		else
			schib->pmcw.mbi = address;

		/* try to submit it */
		switch(ret = msch_err(sch->irq, schib)) {
			case 0:
				break;
			case 1:
			case 2: /* in I/O or status pending */
				ret = -EBUSY;
				break;
			case 3: /* subchannel is no longer valid */
				ret = -ENODEV;
				break;
			default: /* msch caught an exception */
				ret = -EINVAL;
				break;
		}
		stsch(sch->irq, schib); /* restore the schib */

		if (ret)
			break;

		/* check if it worked: msch may have succeeded without the
		 * hardware actually taking the new values */
		if (schib->pmcw.mme  == mme &&
		    schib->pmcw.mbfc == mbfc &&
		    (mbfc ? (schib->mba == address)
			  : (schib->pmcw.mbi == address)))
			return 0;

		ret = -EINVAL;	/* silently ignored; retry */
	}

	return ret;
}
218
/* request parked by set_schib_wait() for retry_set_schib(); lives on
 * the waiter's stack, ownership tracked via cdev->private->cmb_wait */
struct set_schib_struct {
	u32 mme;		/* measurement mode enable */
	int mbfc;		/* measurement block format control */
	unsigned long address;	/* block address or index */
	wait_queue_head_t wait;	/* waiter sleeps here until retried */
	int ret;		/* 1 while pending, then set_schib() result */
};
226
227 static int set_schib_wait(struct ccw_device *cdev, u32 mme,
228                                 int mbfc, unsigned long address)
229 {
230         struct set_schib_struct s = {
231                 .mme = mme,
232                 .mbfc = mbfc,
233                 .address = address,
234                 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
235         };
236
237         spin_lock_irq(cdev->ccwlock);
238         s.ret = set_schib(cdev, mme, mbfc, address);
239         if (s.ret != -EBUSY) {
240                 goto out_nowait;
241         }
242
243         if (cdev->private->state != DEV_STATE_ONLINE) {
244                 s.ret = -EBUSY;
245                 /* if the device is not online, don't even try again */
246                 goto out_nowait;
247         }
248         cdev->private->state = DEV_STATE_CMFCHANGE;
249         cdev->private->cmb_wait = &s;
250         s.ret = 1;
251
252         spin_unlock_irq(cdev->ccwlock);
253         if (wait_event_interruptible(s.wait, s.ret != 1)) {
254                 spin_lock_irq(cdev->ccwlock);
255                 if (s.ret == 1) {
256                         s.ret = -ERESTARTSYS;
257                         cdev->private->cmb_wait = 0;
258                         if (cdev->private->state == DEV_STATE_CMFCHANGE)
259                                 cdev->private->state = DEV_STATE_ONLINE;
260                 }
261                 spin_unlock_irq(cdev->ccwlock);
262         }
263         return s.ret;
264
265 out_nowait:
266         spin_unlock_irq(cdev->ccwlock);
267         return s.ret;
268 }
269
270 void retry_set_schib(struct ccw_device *cdev)
271 {
272         struct set_schib_struct *s;
273
274         s = cdev->private->cmb_wait;
275         cdev->private->cmb_wait = 0;
276         if (!s) {
277                 WARN_ON(1);
278                 return;
279         }
280         s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
281         wake_up(&s->wait);
282 }
283
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:          pointer to CMBs (only in basic measurement mode)
 * @list:         contains a linked list of all subchannels
 * @num_channels: number of CMB slots in @mem (basic mode only);
 *                settable via the 'maxchannels' module parameter
 * @lock:         protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

/* the single global instance; @mem stays NULL until the first user */
static struct cmb_area cmb_area = {
	.lock = SPIN_LOCK_UNLOCKED,
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels  = 1024,
};
303
304 \f
/* ****** old style CMB handling ********/

/** int maxchannels
 *
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096
 *
 * NOTE(review): the parameter type is 'uint' while
 * cmb_area.num_channels is a plain int (same storage size); neither
 * negative nor over-4096 values are rejected here -- confirm range
 * checking happens where the value is consumed.
 */

module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
319
/**
 * struct cmb - basic channel measurement block
 *
 * cmb as used by the hardware; the fields are described in
 * z/Architecture Principles of Operation, chapter 17.
 * The area has to be a contiguous array and may not be reallocated or
 * freed while any channel is active.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];	/* pads the block to 32 bytes */
};
338
/* insert a single device into the cmb_area list
 * called with cmb_area.lock held from alloc_cmb
 *
 * Returns 0 on success, -EBUSY if the device already owns a cmb,
 * -ENOMEM when all cmb_area.num_channels slots are taken.
 */
static inline int
alloc_cmb_single (struct ccw_device *cdev)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		/* already linked in, hence already has a cmb */
		ret = -EBUSY;
		goto out;
	}

	/* find first unused cmb in cmb_area.mem.
	 * this is a little tricky: cmb_area.list
	 * remains sorted by ->cmb pointers */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		if ((struct cmb*)node->cmb > cmb)
			break;	/* found a gap just before 'node' */
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb; adding before 'node' keeps the list sorted */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cdev->private->cmb = cmb;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
377
/*
 * Allocate a basic-format cmb for @cdev, creating the global cmb area
 * on first use and activating the channel monitor on it.
 * Returns 0 on success or a negative errno.
 */
static int
alloc_cmb (struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;

	spin_lock(&cmb_area.lock);

	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/* drop the lock across the (possibly sleeping) page
		 * allocation, then recheck for a racing allocator */
		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
				 get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, 1);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev);
out:
	spin_unlock(&cmb_area.lock);

	return ret;
}
419
/*
 * Release @cdev's basic-format cmb slot; when the last user goes
 * away, deactivate the channel monitor and free the global area.
 * Safe to call when no cmb was allocated.
 */
static void
free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;

	priv = cdev->private;

	/* lock order: area lock first, then the device lock, matching
	 * alloc_cmb()/alloc_cmb_single() */
	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	if (list_empty(&priv->cmb_list)) {
		/* already freed */
		goto out;
	}

	priv->cmb = NULL;
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		/* last user gone: stop measuring and release the area */
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, 0);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
out:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
449
450 static int
451 set_cmb(struct ccw_device *cdev, u32 mme)
452 {
453         u16 offset;
454
455         if (!cdev->private->cmb)
456                 return -EINVAL;
457
458         offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0;
459
460         return set_schib_wait(cdev, mme, 0, offset);
461 }
462
463 static u64
464 read_cmb (struct ccw_device *cdev, int index)
465 {
466         /* yes, we have to put it on the stack
467          * because the cmb must only be accessed
468          * atomically, e.g. with mvc */
469         struct cmb cmb;
470         unsigned long flags;
471         u32 val;
472
473         spin_lock_irqsave(cdev->ccwlock, flags);
474         if (!cdev->private->cmb) {
475                 spin_unlock_irqrestore(cdev->ccwlock, flags);
476                 return 0;
477         }
478
479         cmb = *(struct cmb*)cdev->private->cmb;
480         spin_unlock_irqrestore(cdev->ccwlock, flags);
481
482         switch (index) {
483         case cmb_ssch_rsch_count:
484                 return cmb.ssch_rsch_count;
485         case cmb_sample_count:
486                 return cmb.sample_count;
487         case cmb_device_connect_time:
488                 val = cmb.device_connect_time;
489                 break;
490         case cmb_function_pending_time:
491                 val = cmb.function_pending_time;
492                 break;
493         case cmb_device_disconnect_time:
494                 val = cmb.device_disconnect_time;
495                 break;
496         case cmb_control_unit_queuing_time:
497                 val = cmb.control_unit_queuing_time;
498                 break;
499         case cmb_device_active_only_time:
500                 val = cmb.device_active_only_time;
501                 break;
502         default:
503                 return 0;
504         }
505         return time_to_avg_nsec(val, cmb.sample_count);
506 }
507
508 static int
509 readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
510 {
511         /* yes, we have to put it on the stack
512          * because the cmb must only be accessed
513          * atomically, e.g. with mvc */
514         struct cmb cmb;
515         unsigned long flags;
516         u64 time;
517
518         spin_lock_irqsave(cdev->ccwlock, flags);
519         if (!cdev->private->cmb) {
520                 spin_unlock_irqrestore(cdev->ccwlock, flags);
521                 return -ENODEV;
522         }
523
524         cmb = *(struct cmb*)cdev->private->cmb;
525         time = get_clock() - cdev->private->cmb_start_time;
526         spin_unlock_irqrestore(cdev->ccwlock, flags);
527
528         *data = (struct cmbdata) {
529                 /* we only know values before device_busy_time */
530                 .size = offsetof(struct cmbdata, device_busy_time),
531
532                 /* conver to nanoseconds */
533                 .elapsed_time = (time * 1000) >> 12,
534
535                 /* copy data to new structure */
536                 .ssch_rsch_count                = cmb.ssch_rsch_count,
537                 .sample_count                   = cmb.sample_count,
538
539                 /* time fields are converted to nanoseconds while copying */
540                 .device_connect_time
541                         = time_to_nsec(cmb.device_connect_time),
542                 .function_pending_time
543                         = time_to_nsec(cmb.function_pending_time),
544                 .device_disconnect_time
545                         = time_to_nsec(cmb.device_disconnect_time),
546                 .control_unit_queuing_time
547                         = time_to_nsec(cmb.control_unit_queuing_time),
548                 .device_active_only_time
549                         = time_to_nsec(cmb.device_active_only_time),
550         };
551
552         return 0;
553 }
554
555 static void
556 reset_cmb(struct ccw_device *cdev)
557 {
558         struct cmb *cmb;
559         spin_lock_irq(cdev->ccwlock);
560         cmb = cdev->private->cmb;
561         if (cmb)
562                 memset (cmb, 0, sizeof (*cmb));
563         cdev->private->cmb_start_time = get_clock();
564         spin_unlock_irq(cdev->ccwlock);
565 }
566
/* defined below, together with the attribute declarations */
static struct attribute_group cmf_attr_group;

/* operations for the basic (pre-z990) measurement block format */
static struct cmb_operations cmbops_basic = {
	.alloc  = alloc_cmb,
	.free   = free_cmb,
	.set    = set_cmb,
	.read   = read_cmb,
	.readall    = readall_cmb,
	.reset      = reset_cmb,
	.attr_group = &cmf_attr_group,
};
578 \f
579 /* ******** extended cmb handling ********/
580
/**
 * struct cmbe - extended channel measurement block
 *
 * cmb as used by the hardware, may be in any 64 bit physical location;
 * the fields are described in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];	/* pads the block to 64 bytes */
};
600
/* kmalloc only guarantees 8 byte alignment, but we need cmbe
 * pointers to be naturally aligned. Make sure to allocate
 * enough space for two cmbes */
static inline struct cmbe* cmbe_align(struct cmbe *c)
{
	unsigned long addr;
	/* round c up to the next multiple of sizeof(struct cmbe).
	 * NOTE(review): the mask (sizeof(struct cmbe) - sizeof(long))
	 * is not a power of two minus one, so this only yields a
	 * naturally aligned result because kmalloc returns pointers
	 * aligned to at least sizeof(long)*2 -- confirm if the
	 * allocation in alloc_cmbe() ever changes. */
	addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
				 ~(sizeof (struct cmbe) - sizeof(long));
	return (struct cmbe*)addr;
}
611
612 static int
613 alloc_cmbe (struct ccw_device *cdev)
614 {
615         struct cmbe *cmbe;
616         cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
617         if (!cmbe)
618                 return -ENOMEM;
619
620         spin_lock_irq(cdev->ccwlock);
621         if (cdev->private->cmb) {
622                 kfree(cmbe);
623                 spin_unlock_irq(cdev->ccwlock);
624                 return -EBUSY;
625         }
626
627         cdev->private->cmb = cmbe;
628         spin_unlock_irq(cdev->ccwlock);
629
630         /* activate global measurement if this is the first channel */
631         spin_lock(&cmb_area.lock);
632         if (list_empty(&cmb_area.list))
633                 cmf_activate(NULL, 1);
634         list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
635         spin_unlock(&cmb_area.lock);
636
637         return 0;
638 }
639
640 static void
641 free_cmbe (struct ccw_device *cdev)
642 {
643         spin_lock_irq(cdev->ccwlock);
644         if (cdev->private->cmb)
645                 kfree(cdev->private->cmb);
646         cdev->private->cmb = NULL;
647         spin_unlock_irq(cdev->ccwlock);
648
649         /* deactivate global measurement if this is the last channel */
650         spin_lock(&cmb_area.lock);
651         list_del_init(&cdev->private->cmb_list);
652         if (list_empty(&cmb_area.list))
653                 cmf_activate(NULL, 0);
654         spin_unlock(&cmb_area.lock);
655 }
656
657 static int
658 set_cmbe(struct ccw_device *cdev, u32 mme)
659 {
660         unsigned long mba;
661
662         if (!cdev->private->cmb)
663                 return -EINVAL;
664         mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;
665
666         return set_schib_wait(cdev, mme, 1, mba);
667 }
668
669
670 u64
671 read_cmbe (struct ccw_device *cdev, int index)
672 {
673         /* yes, we have to put it on the stack
674          * because the cmb must only be accessed
675          * atomically, e.g. with mvc */
676         struct cmbe cmb;
677         unsigned long flags;
678         u32 val;
679
680         spin_lock_irqsave(cdev->ccwlock, flags);
681         if (!cdev->private->cmb) {
682                 spin_unlock_irqrestore(cdev->ccwlock, flags);
683                 return 0;
684         }
685
686         cmb = *cmbe_align(cdev->private->cmb);
687         spin_unlock_irqrestore(cdev->ccwlock, flags);
688
689         switch (index) {
690         case cmb_ssch_rsch_count:
691                 return cmb.ssch_rsch_count;
692         case cmb_sample_count:
693                 return cmb.sample_count;
694         case cmb_device_connect_time:
695                 val = cmb.device_connect_time;
696                 break;
697         case cmb_function_pending_time:
698                 val = cmb.function_pending_time;
699                 break;
700         case cmb_device_disconnect_time:
701                 val = cmb.device_disconnect_time;
702                 break;
703         case cmb_control_unit_queuing_time:
704                 val = cmb.control_unit_queuing_time;
705                 break;
706         case cmb_device_active_only_time:
707                 val = cmb.device_active_only_time;
708                 break;
709         case cmb_device_busy_time:
710                 val = cmb.device_busy_time;
711                 break;
712         case cmb_initial_command_response_time:
713                 val = cmb.initial_command_response_time;
714                 break;
715         default:
716                 return 0;
717         }
718         return time_to_avg_nsec(val, cmb.sample_count);
719 }
720
721 static int
722 readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
723 {
724         /* yes, we have to put it on the stack
725          * because the cmb must only be accessed
726          * atomically, e.g. with mvc */
727         struct cmbe cmb;
728         unsigned long flags;
729         u64 time;
730
731         spin_lock_irqsave(cdev->ccwlock, flags);
732         if (!cdev->private->cmb) {
733                 spin_unlock_irqrestore(cdev->ccwlock, flags);
734                 return -ENODEV;
735         }
736
737         cmb = *cmbe_align(cdev->private->cmb);
738         time = get_clock() - cdev->private->cmb_start_time;
739         spin_unlock_irqrestore(cdev->ccwlock, flags);
740
741         *data = (struct cmbdata) {
742                 /* we only know values before device_busy_time */
743                 .size = offsetof(struct cmbdata, device_busy_time),
744
745                 /* conver to nanoseconds */
746                 .elapsed_time = (time * 1000) >> 12,
747
748                 /* copy data to new structure */
749                 .ssch_rsch_count                = cmb.ssch_rsch_count,
750                 .sample_count                   = cmb.sample_count,
751
752                 /* time fields are converted to nanoseconds while copying */
753                 .device_connect_time
754                         = time_to_nsec(cmb.device_connect_time),
755                 .function_pending_time
756                         = time_to_nsec(cmb.function_pending_time),
757                 .device_disconnect_time
758                         = time_to_nsec(cmb.device_disconnect_time),
759                 .control_unit_queuing_time
760                         = time_to_nsec(cmb.control_unit_queuing_time),
761                 .device_active_only_time
762                         = time_to_nsec(cmb.device_active_only_time),
763                 .device_busy_time
764                         = time_to_nsec(cmb.device_busy_time),
765                 .initial_command_response_time
766                         = time_to_nsec(cmb.initial_command_response_time),
767         };
768
769         return 0;
770 }
771
772 static void
773 reset_cmbe(struct ccw_device *cdev)
774 {
775         struct cmbe *cmb;
776         spin_lock_irq(cdev->ccwlock);
777         cmb = cmbe_align(cdev->private->cmb);
778         if (cmb)
779                 memset (cmb, 0, sizeof (*cmb));
780         cdev->private->cmb_start_time = get_clock();
781         spin_unlock_irq(cdev->ccwlock);
782 }
783
/* defined below, together with the attribute declarations */
static struct attribute_group cmf_attr_group_ext;

/* operations for the extended (z990 and later) block format */
static struct cmb_operations cmbops_extended = {
	.alloc      = alloc_cmbe,
	.free       = free_cmbe,
	.set        = set_cmbe,
	.read       = read_cmbe,
	.readall    = readall_cmbe,
	.reset      = reset_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
795 \f
796
797 static ssize_t
798 cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
799 {
800         return sprintf(buf, "%lld\n",
801                 (unsigned long long) cmf_read(to_ccwdev(dev), idx));
802 }
803
804 static ssize_t
805 cmb_show_avg_sample_interval(struct device *dev, char *buf)
806 {
807         struct ccw_device *cdev;
808         long interval;
809         unsigned long count;
810
811         cdev = to_ccwdev(dev);
812         interval  = get_clock() - cdev->private->cmb_start_time;
813         count = cmf_read(cdev, cmb_sample_count);
814         if (count)
815                 interval /= count;
816         else
817                 interval = -1;
818         return sprintf(buf, "%ld\n", interval);
819 }
820
821 static ssize_t
822 cmb_show_avg_utilization(struct device *dev, char *buf)
823 {
824         struct cmbdata data;
825         u64 utilization;
826         unsigned long t, u;
827         int ret;
828
829         ret = cmf_readall(to_ccwdev(dev), &data);
830         if (ret)
831                 return ret;
832
833         utilization = data.device_connect_time +
834                       data.function_pending_time +
835                       data.device_disconnect_time;
836
837         /* shift to avoid long long division */
838         while (-1ul < (data.elapsed_time | utilization)) {
839                 utilization >>= 8;
840                 data.elapsed_time >>= 8;
841         }
842
843         /* calculate value in 0.1 percent units */
844         t = (unsigned long) data.elapsed_time / 1000;
845         u = (unsigned long) utilization / t;
846
847         return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
848 }
849
/* generate a show_<name>() routine plus a read-only device attribute
 * for a raw counter value */
#define cmf_attr(name) \
static ssize_t show_ ## name (struct device * dev, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(name, 0444, show_ ## name, NULL);

/* same, but the attribute is named avg_<name> for values that
 * cmb_show_attr() exports as per-sample averages */
#define cmf_attr_avg(name) \
static ssize_t show_avg_ ## name (struct device * dev, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);
859
/* instantiate show functions and read-only attributes for every
 * exported counter and averaged time value */
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
/* the following two exist in the extended format only */
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
872
873 static struct attribute *cmf_attributes[] = {
874         &dev_attr_avg_sample_interval.attr,
875         &dev_attr_avg_utilization.attr,
876         &dev_attr_ssch_rsch_count.attr,
877         &dev_attr_sample_count.attr,
878         &dev_attr_avg_device_connect_time.attr,
879         &dev_attr_avg_function_pending_time.attr,
880         &dev_attr_avg_device_disconnect_time.attr,
881         &dev_attr_avg_control_unit_queuing_time.attr,
882         &dev_attr_avg_device_active_only_time.attr,
883         0,
884 };
885
886 static struct attribute_group cmf_attr_group = {
887         .name  = "cmf",
888         .attrs = cmf_attributes,
889 };
890
891 static struct attribute *cmf_attributes_ext[] = {
892         &dev_attr_avg_sample_interval.attr,
893         &dev_attr_avg_utilization.attr,
894         &dev_attr_ssch_rsch_count.attr,
895         &dev_attr_sample_count.attr,
896         &dev_attr_avg_device_connect_time.attr,
897         &dev_attr_avg_function_pending_time.attr,
898         &dev_attr_avg_device_disconnect_time.attr,
899         &dev_attr_avg_control_unit_queuing_time.attr,
900         &dev_attr_avg_device_active_only_time.attr,
901         &dev_attr_avg_device_busy_time.attr,
902         &dev_attr_avg_initial_command_response_time.attr,
903         0,
904 };
905
906 static struct attribute_group cmf_attr_group_ext = {
907         .name  = "cmf",
908         .attrs = cmf_attributes_ext,
909 };
910
911 static ssize_t cmb_enable_show(struct device *dev, char *buf)
912 {
913         return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
914 }
915
916 static ssize_t cmb_enable_store(struct device *dev, const char *buf, size_t c)
917 {
918         struct ccw_device *cdev;
919         int ret;
920
921         cdev = to_ccwdev(dev);
922
923         switch (buf[0]) {
924         case '0':
925                 ret = disable_cmf(cdev);
926                 if (ret)
927                         printk(KERN_INFO "disable_cmf failed (%d)\n", ret);
928                 break;
929         case '1':
930                 ret = enable_cmf(cdev);
931                 if (ret && ret != -EBUSY)
932                         printk(KERN_INFO "enable_cmf failed (%d)\n", ret);
933                 break;
934         }
935
936         return c;
937 }
938
939 DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
940
941 /* enable_cmf/disable_cmf: module interface for cmf (de)activation */
942 int
943 enable_cmf(struct ccw_device *cdev)
944 {
945         int ret;
946
947         ret = cmbops->alloc(cdev);
948         cmbops->reset(cdev);
949         if (ret)
950                 return ret;
951         ret = cmbops->set(cdev, 2);
952         if (ret) {
953                 cmbops->free(cdev);
954                 return ret;
955         }
956         ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
957         if (!ret)
958                 return 0;
959         cmbops->set(cdev, 0);  //FIXME: this can fail
960         cmbops->free(cdev);
961         return ret;
962 }
963
964 int
965 disable_cmf(struct ccw_device *cdev)
966 {
967         int ret;
968
969         ret = cmbops->set(cdev, 0);
970         if (ret)
971                 return ret;
972         cmbops->free(cdev);
973         sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
974         return ret;
975 }
976
977 u64
978 cmf_read(struct ccw_device *cdev, int index)
979 {
980         return cmbops->read(cdev, index);
981 }
982
983 int
984 cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
985 {
986         return cmbops->readall(cdev, data);
987 }
988
989 static int __init
990 init_cmf(void)
991 {
992         char *format_string;
993         char *detect_string = "parameter";
994
995         /* We cannot really autoprobe this. If the user did not give a parameter,
996            see if we are running on z990 or up, otherwise fall back to basic mode. */
997
998         if (format == CMF_AUTODETECT) {
999                 if (!MACHINE_NEW_STIDP) {
1000                         format = CMF_BASIC;
1001                 } else {
1002                         format = CMF_EXTENDED;
1003                 }
1004                 detect_string = "autodetected";
1005         } else {
1006                 detect_string = "parameter";
1007         }
1008
1009         switch (format) {
1010         case CMF_BASIC:
1011                 format_string = "basic";
1012                 cmbops = &cmbops_basic;
1013                 if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
1014                         printk(KERN_ERR "Basic channel measurement facility"
1015                                         " can only use 1 to 4096 devices\n"
1016                                KERN_ERR "when the cmf driver is built"
1017                                         " as a loadable module\n");
1018                         return 1;
1019                 }
1020                 break;
1021         case CMF_EXTENDED:
1022                 format_string = "extended";
1023                 cmbops = &cmbops_extended;
1024                 break;
1025         default:
1026                 printk(KERN_ERR "Invalid format %d for channel "
1027                         "measurement facility\n", format);
1028                 return 1;
1029         }
1030
1031         printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
1032                 format_string, detect_string);
1033         return 0;
1034 }
1035
1036 module_init(init_cmf);
1037
1038
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
                   "Copyright 2003 IBM Corporation\n");

/* interface for other kernel code; exports restricted to GPL modules */
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);