/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Cornelia Huck (cohuck@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/qdio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "qdio.h"
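
/*
 * The device_* helpers below are entered with a struct subchannel: the
 * subchannel's sch->dev.driver_data points to the ccw_device bound to it
 * (if any), and each helper bails out early when no device is attached.
 */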
int
device_is_disconnected(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return 0;
        cdev = sch->dev.driver_data;
        return (cdev->private->state == DEV_STATE_DISCONNECTED ||
                cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

void
device_set_disconnected(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        ccw_device_set_timeout(cdev, 0);
        cdev->private->state = DEV_STATE_DISCONNECTED;
}

void
device_set_waiting(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        ccw_device_set_timeout(cdev, 10*HZ);
        cdev->private->state = DEV_STATE_WAIT4IO;
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
        struct ccw_device *cdev;

        cdev = (struct ccw_device *) data;
        spin_lock_irq(cdev->ccwlock);
        dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
        spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
        if (expires == 0) {
                del_timer(&cdev->private->timer);
                return;
        }
        if (timer_pending(&cdev->private->timer)) {
                if (mod_timer(&cdev->private->timer, jiffies + expires))
                        return;
        }
        cdev->private->timer.function = ccw_device_timeout;
        cdev->private->timer.data = (unsigned long) cdev;
        cdev->private->timer.expires = jiffies + expires;
        add_timer(&cdev->private->timer);
}
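
/*
 * Note on ccw_device_set_timeout(): expires is given in jiffies relative
 * to "now", and expires == 0 cancels a pending timer. The handlers below
 * re-arm it (e.g. with 3*HZ) while waiting for asynchronous halt/clear
 * processing to finish.
 */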

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(cdev->dev.parent);
        ret = stsch(sch->irq, &sch->schib);
        if (ret || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
                /* Not operational or no activity -> done. */
                return 0;
        /* Stage 1: cancel io. */
        if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
            !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                ret = cio_cancel(sch);
                if (ret != -EINVAL)
                        return ret;
                /* cancel io unsuccessful. From now on it is asynchronous. */
                cdev->private->iretry = 3;      /* 3 halt retries. */
        }
        if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                /* Stage 2: halt io. */
                if (cdev->private->iretry) {
                        cdev->private->iretry--;
                        ret = cio_halt(sch);
                        return (ret == 0) ? -EBUSY : ret;
                }
                /* halt io unsuccessful. */
                cdev->private->iretry = 255;    /* 255 clear retries. */
        }
        /* Stage 3: clear io. */
        if (cdev->private->iretry) {
                cdev->private->iretry--;
                ret = cio_clear(sch);
                return (ret == 0) ? -EBUSY : ret;
        }
        panic("Can't stop i/o on subchannel.\n");
}
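
/*
 * The function above is driven from the timeout handlers further down:
 * 0 means the i/o is stopped, -ENODEV means the device is gone, and
 * -EBUSY means an interrupt is still outstanding, in which case the
 * handlers re-arm the timer (typically 3*HZ) and retry on the next
 * timeout event.
 */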

static void
ccw_device_handle_oper(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        cdev->private->flags.recog_done = 1;
        /*
         * Check if cu type and device type still match. If
         * not, it is certainly another device and we have to
         * de- and re-register.
         */
        if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
            cdev->id.cu_model != cdev->private->senseid.cu_model ||
            cdev->id.dev_type != cdev->private->senseid.dev_type ||
            cdev->id.dev_model != cdev->private->senseid.dev_model) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_do_unreg_rereg, (void *)&cdev->dev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
                return;
        }
        cdev->private->flags.donotify = 1;
        /* Get device online again. */
        ccw_device_online(cdev);
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i. e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static inline void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
        int mask, i;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (!(sch->lpm & mask))
                        continue;
                if (old_lpm & mask)
                        continue;
                chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
        }
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;
        int notify, old_lpm;

        sch = to_subchannel(cdev->dev.parent);

        ccw_device_set_timeout(cdev, 0);
        cio_disable_subchannel(sch);
        /*
         * Now that we tried recognition, we have performed device selection
         * through ssch() and the path information is up to date.
         */
        old_lpm = sch->lpm;
        stsch(sch->irq, &sch->schib);
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                /* Force reprobe on all chpids. */
                old_lpm = 0;
        if (sch->lpm != old_lpm)
                __recover_lost_chpids(sch, old_lpm);
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                if (state == DEV_STATE_NOT_OPER) {
                        cdev->private->flags.recog_done = 1;
                        cdev->private->state = DEV_STATE_DISCONNECTED;
                        return;
                }
                /* Boxed devices don't need extra treatment. */
        }
        notify = 0;
        switch (state) {
        case DEV_STATE_NOT_OPER:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : unknown device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->irq);
                break;
        case DEV_STATE_OFFLINE:
                if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                        notify = 1;
                else  /* fill out sense information */
                        cdev->id = (struct ccw_device_id) {
                                .cu_type   = cdev->private->senseid.cu_type,
                                .cu_model  = cdev->private->senseid.cu_model,
                                .dev_type  = cdev->private->senseid.dev_type,
                                .dev_model = cdev->private->senseid.dev_model,
                        };
                /* Issue device info message. */
                CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
                          "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
                          "%04X/%02X\n", cdev->private->devno,
                          cdev->id.cu_type, cdev->id.cu_model,
                          cdev->id.dev_type, cdev->id.dev_model);
                break;
        case DEV_STATE_BOXED:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : boxed device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->irq);
                break;
        }
        cdev->private->state = state;
        if (notify && state == DEV_STATE_OFFLINE)
                ccw_device_handle_oper(cdev);
        else
                io_subchannel_recog_done(cdev);
        if (state != DEV_STATE_NOT_OPER)
                wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:            /* Sense id stopped by timeout. */
                ccw_device_recog_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

static void
ccw_device_oper_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_OPER) : 0;
        if (!ret)
                /* Driver doesn't want device back. */
                ccw_device_do_unreg_rereg((void *)&cdev->dev);
        else
                wake_up(&cdev->private->wait_q);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);

        if (state != DEV_STATE_ONLINE)
                cio_disable_subchannel(sch);

        /* Reset device status. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        cdev->private->state = state;

        if (state == DEV_STATE_BOXED)
                CIO_DEBUG(KERN_WARNING, 2,
                          "Boxed device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->irq);

        if (cdev->private->flags.donotify) {
                cdev->private->flags.donotify = 0;
                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
                             (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);

        if (css_init_done && state != DEV_STATE_ONLINE)
                put_device(&cdev->dev);
}

/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        switch (err) {
        case 0:
                /* Start Path Group verification. */
                sch->vpm = 0;   /* Start with no path groups set. */
                cdev->private->state = DEV_STATE_VERIFY;
                ccw_device_verify_start(cdev);
                break;
        case -ETIME:            /* Sense path group id stopped by timeout. */
        case -EUSERS:           /* device is reserved for someone else. */
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                break;
        default:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return ret;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        /*
         * We used to start here with a sense pgid to find out whether a device
         * is locked by someone else. Unfortunately, the sense pgid command
         * code has other meanings on devices predating the path grouping
         * algorithm, so we start with sense id and box the device after a
         * timeout (or if sense pgid during path verification detects the device
         * is locked, as may happen on newer devices).
         */
        cdev->private->flags.recog_done = 0;
        cdev->private->state = DEV_STATE_SENSE_ID;
        ccw_device_sense_id_start(cdev);
        return 0;
}
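
/*
 * Recognition completes asynchronously: device_id.c calls
 * ccw_device_sense_id_done() above, which funnels the result into
 * ccw_device_recog_done() with the new device state.
 */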

/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                ccw_device_recog_done(cdev, DEV_STATE_BOXED);
                break;
        case -ENODEV:
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                break;
        default:
                ccw_device_set_timeout(cdev, 3*HZ);
        }
}

static void
ccw_device_nopath_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        /* Extra sanity. */
        if (sch->lpm)
                return;
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
        if (!ret) {
                if (get_device(&sch->dev)) {
                        /* Driver doesn't want to keep device. */
                        cio_disable_subchannel(sch);
                        if (get_device(&cdev->dev)) {
                                PREPARE_WORK(&cdev->private->kick_work,
                                             ccw_device_call_sch_unregister,
                                             (void *)cdev);
                                queue_work(ccw_device_work,
                                           &cdev->private->kick_work);
                        }
                }
        } else {
                cio_disable_subchannel(sch);
                ccw_device_set_timeout(cdev, 0);
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
        }
}

void
device_call_nopath_notify(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        PREPARE_WORK(&cdev->private->kick_work,
                     ccw_device_nopath_notify, (void *)cdev);
        queue_work(ccw_device_notify_work, &cdev->private->kick_work);
}

void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
        cdev->private->flags.doverify = 0;
        switch (err) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_OFFLINE) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (css_init_done && !get_device(&cdev->dev))
                return -ENODEV;
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0) {
                /* Couldn't enable the subchannel for i/o. Sick device. */
                if (ret == -ENODEV)
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return ret;
        }
        /* Do we want to do path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state online immediately. */
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                return 0;
        }
        /* Do a SensePGID first. */
        cdev->private->state = DEV_STATE_SENSE_PGID;
        ccw_device_sense_pgid_start(cdev);
        return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cdev->private->state != DEV_STATE_ONLINE) {
                if (sch->schib.scsw.actl != 0)
                        return -EBUSY;
                return -EINVAL;
        }
        if (sch->schib.scsw.actl != 0)
                return -EBUSY;
        /* Are we doing path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state offline immediately. */
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                return 0;
        }
        /* Start Set Path Group commands. */
        cdev->private->state = DEV_STATE_DISBAND_PGID;
        ccw_device_disband_start(cdev);
        return 0;
}

/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        case -ENODEV:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        default:
                ccw_device_set_timeout(cdev, 3*HZ);
        }
}

/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}

/*
 * Handle not operational event while offline.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        cdev->private->state = DEV_STATE_NOT_OPER;
        sch = to_subchannel(cdev->dev.parent);
        device_unregister(&sch->dev);
        sch->schib.pmcw.intparm = 0;
        cio_modify(sch);
        wake_up(&cdev->private->wait_q);
}

/*
 * Handle not operational event while online.
 */
static void
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (sch->driver->notify &&
            sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
                ccw_device_set_timeout(cdev, 0);
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
                return;
        }
        cdev->private->state = DEV_STATE_NOT_OPER;
        cio_disable_subchannel(sch);
        if (sch->schib.scsw.actl != 0) {
                // FIXME: not-oper indication to device driver ?
                ccw_device_call_handler(cdev);
        }
        device_unregister(&sch->dev);
        sch->schib.pmcw.intparm = 0;
        cio_modify(sch);
        wake_up(&cdev->private->wait_q);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        if (!cdev->private->options.pgroup)
                return;
        if (cdev->private->state == DEV_STATE_W4SENSE) {
                cdev->private->flags.doverify = 1;
                return;
        }
        sch = to_subchannel(cdev->dev.parent);
        if (sch->schib.scsw.actl != 0 ||
            (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
                /*
                 * No final status yet or final status not yet delivered
                 * to the device driver. Can't do path verification now,
                 * delay until the final status has been delivered.
                 */
                cdev->private->flags.doverify = 1;
                return;
        }
        /* Device is idle, we can do the path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        ccw_device_verify_start(cdev);
}
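
/*
 * Note: path verification is deferred by setting the doverify flag while
 * the device is busy; the interrupt handlers below check the flag once
 * the final status has been delivered and then re-enter
 * ccw_device_online_verify().
 */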

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                return;
        }
        /* Accumulate status and find out if a basic sense is needed. */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
        } else if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                if (irb->scsw.cc == 1)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                return;
        }
        /* Add basic sense info to irb. */
        ccw_device_accumulate_basic_sense(cdev, irb);
        if (cdev->private->flags.dosense) {
                /* Another basic sense is needed. */
                ccw_device_do_sense(cdev, irb);
                return;
        }
        cdev->private->state = DEV_STATE_ONLINE;
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                return;
        }
        /* Accumulate status. We don't do basic sense. */
        ccw_device_accumulate_irb(cdev, irb);
        /* Try to start delayed device verification. */
        ccw_device_online_verify(cdev, 0);
        /* Note: Don't call handler for cio initiated clear! */
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        /* OK, i/o is dead now. Call interrupt handler. */
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        //FIXME: Can we get here?
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}

static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;
        struct subchannel *sch;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                if (irb->scsw.cc == 1)
                        goto call_handler;
                return;
        }
        /*
         * Accumulate status and find out if a basic sense is needed.
         * This is fine since we have already adapted the lpm.
         */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
call_handler:
        /* Iff device is idle, reset timeout. */
        sch = to_subchannel(cdev->dev.parent);
        if (!stsch(sch->irq, &sch->schib))
                if (sch->schib.scsw.actl == 0)
                        ccw_device_set_timeout(cdev, 0);
        /* Call the handler. */
        ccw_device_call_handler(cdev);
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* When the I/O has terminated, we have to start verification. */
        if (cdev->private->options.pgroup)
                cdev->private->flags.doverify = 1;
}

static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        switch (dev_event) {
        case DEV_EVENT_INTERRUPT:
                irb = (struct irb *) __LC_IRB;
                /* Check for unsolicited interrupt. */
                if (irb->scsw.stctl ==
                    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
                        /* FIXME: we should restart stlck here, but this
                         * is extremely unlikely ... */
                        goto out_wakeup;

                ccw_device_accumulate_irb(cdev, irb);
                /* We don't care about basic sense etc. */
                break;
        default: /* timeout */
                break;
        }
out_wakeup:
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
        ccw_device_sense_id_start(cdev);
}

void
device_trigger_reprobe(struct subchannel *sch)
{
        struct ccw_device *cdev;
        unsigned long flags;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        spin_lock_irqsave(&sch->lock, flags);
        if (cdev->private->state != DEV_STATE_DISCONNECTED) {
                spin_unlock_irqrestore(&sch->lock, flags);
                return;
        }
        /* Update some values. */
        if (stsch(sch->irq, &sch->schib)) {
                spin_unlock_irqrestore(&sch->lock, flags);
                return;
        }
        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
        sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        ccw_device_start_id(cdev, 0);
        spin_unlock_irqrestore(&sch->lock, flags);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /*
         * An interrupt in state offline means a previous disable was not
         * successful. Try again.
         */
        cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_set_timeout(cdev, 0);
        if (dev_event == DEV_EVENT_NOTOPER)
                cdev->private->state = DEV_STATE_NOT_OPER;
        else
                cdev->private->state = DEV_STATE_OFFLINE;
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                cdev->private->state = DEV_STATE_OFFLINE;
                wake_up(&cdev->private->wait_q);
                break;
        case -ENODEV:
                cdev->private->state = DEV_STATE_NOT_OPER;
                wake_up(&cdev->private->wait_q);
                break;
        default:
                ccw_device_set_timeout(cdev, HZ/10);
        }
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
        printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
               cdev->private->state, dev_event);
        BUG();
}

/*
 * device statemachine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] {
                [DEV_EVENT_NOTOPER]     ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   ccw_device_bug,
                [DEV_EVENT_TIMEOUT]     ccw_device_nop,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_sense_pgid_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] {
                [DEV_EVENT_NOTOPER]     ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] {
                [DEV_EVENT_NOTOPER]     ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_offline_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_nop,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_VERIFY] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_verify_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_ONLINE] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_online_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT]     ccw_device_nop,
                [DEV_EVENT_VERIFY]      ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_disband_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_BOXED] {
                [DEV_EVENT_NOTOPER]     ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_stlck_done,
                [DEV_EVENT_TIMEOUT]     ccw_device_stlck_done,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_CLEAR_VERIFY] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_clear_verify,
                [DEV_EVENT_TIMEOUT]     ccw_device_nop,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_TIMEOUT_KILL] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_nop, //FIXME
        },
        [DEV_STATE_WAIT4IO] {
                [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_wait4io_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_wait4io_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_wait4io_verify,
        },
        [DEV_STATE_QUIESCE] {
                [DEV_EVENT_NOTOPER]     ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT]   ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT]     ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] {
                [DEV_EVENT_NOTOPER]     ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   ccw_device_start_id,
                [DEV_EVENT_TIMEOUT]     ccw_device_bug,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] {
                [DEV_EVENT_NOTOPER]     ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] {
                [DEV_EVENT_NOTOPER]     ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT]   ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT]     ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY]      ccw_device_change_cmfstate,
        },
};
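
/*
 * Dispatch note: dev_fsm_event() (not defined in this file) indexes this
 * table with the device's current state and the incoming event and calls
 * the resulting function. ccw_device_nop entries ignore an event,
 * ccw_device_bug marks state/event combinations that must not occur.
 */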

/*
 * io_subchannel_irq is called for "real" interrupts or for status
 * pending conditions on msch.
 */
void
io_subchannel_irq(struct device *pdev)
{
        struct ccw_device *cdev;

        cdev = to_subchannel(pdev)->dev.driver_data;

        CIO_TRACE_EVENT(3, "IRQ");
        CIO_TRACE_EVENT(3, pdev->bus_id);

        dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);