/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Cornelia Huck (cohuck@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/qdio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "qdio.h"

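/*
 * Query or change the state of the ccw_device hanging off a subchannel
 * (sch->dev.driver_data).
 */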
int
device_is_disconnected(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return 0;
        cdev = sch->dev.driver_data;
        return (cdev->private->state == DEV_STATE_DISCONNECTED ||
                cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

void
device_set_disconnected(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        ccw_device_set_timeout(cdev, 0);
        cdev->private->state = DEV_STATE_DISCONNECTED;
}

void
device_set_waiting(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        ccw_device_set_timeout(cdev, 10*HZ);
        cdev->private->state = DEV_STATE_WAIT4IO;
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
        struct ccw_device *cdev;

        cdev = (struct ccw_device *) data;
        spin_lock_irq(cdev->ccwlock);
        dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
        spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set a timeout of 'expires' jiffies on the device; expires == 0 cancels a
 * pending timeout.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
        if (expires == 0) {
                del_timer(&cdev->private->timer);
                return;
        }
        if (timer_pending(&cdev->private->timer)) {
                if (mod_timer(&cdev->private->timer, jiffies + expires))
                        return;
        }
        cdev->private->timer.function = ccw_device_timeout;
        cdev->private->timer.data = (unsigned long) cdev;
        cdev->private->timer.expires = jiffies + expires;
        add_timer(&cdev->private->timer);
}

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if the device is now idle, -ENODEV if it is not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(cdev->dev.parent);
        ret = stsch(sch->irq, &sch->schib);
        if (ret || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
                /* Not operational or no activity -> done. */
                return 0;
        /* Stage 1: cancel io. */
        if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
            !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                ret = cio_cancel(sch);
                if (ret != -EINVAL)
                        return ret;
                /* cancel io unsuccessful. From now on it is asynchronous. */
                cdev->private->iretry = 3;      /* 3 halt retries. */
        }
        if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                /* Stage 2: halt io. */
                if (cdev->private->iretry) {
                        cdev->private->iretry--;
                        ret = cio_halt(sch);
                        return (ret == 0) ? -EBUSY : ret;
                }
                /* halt io unsuccessful. */
                cdev->private->iretry = 255;    /* 255 clear retries. */
        }
        /* Stage 3: clear io. */
        if (cdev->private->iretry) {
                cdev->private->iretry--;
                ret = cio_clear(sch);
                return (ret == 0) ? -EBUSY : ret;
        }
        panic("Can't stop i/o on subchannel.\n");
}

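/*
 * A formerly disconnected device has become operational again. Check whether
 * it is still the same device; if not, schedule an unregister/reregister,
 * otherwise bring it back online and notify the driver afterwards.
 */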
static void
ccw_device_handle_oper(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        cdev->private->flags.recog_done = 1;
        /*
         * Check if cu type and device type still match. If
         * not, it is certainly another device and we have to
         * de- and re-register. Also check here for non-matching devno.
         */
        if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
            cdev->id.cu_model != cdev->private->senseid.cu_model ||
            cdev->id.dev_type != cdev->private->senseid.dev_type ||
            cdev->id.dev_model != cdev->private->senseid.dev_model ||
            cdev->private->devno != sch->schib.pmcw.dev) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_do_unreg_rereg, (void *)cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
                return;
        }
        cdev->private->flags.donotify = 1;
        /* Get device online again. */
        ccw_device_online(cdev);
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE, so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static inline void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
        int mask, i;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (!(sch->lpm & mask))
                        continue;
                if (old_lpm & mask)
                        continue;
                chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
        }
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;
        int notify, old_lpm;

        sch = to_subchannel(cdev->dev.parent);

        ccw_device_set_timeout(cdev, 0);
        cio_disable_subchannel(sch);
        /*
         * Now that we tried recognition, we have performed device selection
         * through ssch() and the path information is up to date.
         */
        old_lpm = sch->lpm;
        stsch(sch->irq, &sch->schib);
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                /* Force reprobe on all chpids. */
                old_lpm = 0;
        if (sch->lpm != old_lpm)
                __recover_lost_chpids(sch, old_lpm);
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                if (state == DEV_STATE_NOT_OPER) {
                        cdev->private->flags.recog_done = 1;
                        cdev->private->state = DEV_STATE_DISCONNECTED;
                        return;
                }
                /* Boxed devices don't need extra treatment. */
        }
        notify = 0;
        switch (state) {
        case DEV_STATE_NOT_OPER:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : unknown device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->irq);
                break;
        case DEV_STATE_OFFLINE:
                if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                        notify = 1;
                else  /* fill out sense information */
                        cdev->id = (struct ccw_device_id) {
                                .cu_type   = cdev->private->senseid.cu_type,
                                .cu_model  = cdev->private->senseid.cu_model,
                                .dev_type  = cdev->private->senseid.dev_type,
                                .dev_model = cdev->private->senseid.dev_model,
                        };
                /* Issue device info message. */
                CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
                          "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
                          "%04X/%02X\n", cdev->private->devno,
                          cdev->id.cu_type, cdev->id.cu_model,
                          cdev->id.dev_type, cdev->id.dev_model);
                break;
        case DEV_STATE_BOXED:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : boxed device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->irq);
                break;
        }
        cdev->private->state = state;
        if (notify && state == DEV_STATE_OFFLINE)
                ccw_device_handle_oper(cdev);
        else
                io_subchannel_recog_done(cdev);
        if (state != DEV_STATE_NOT_OPER)
                wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:            /* Sense id stopped by timeout. */
                ccw_device_recog_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

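/*
 * Work function: a previously disconnected device has become operational
 * again. Ask the subchannel driver whether it wants the device back; if not,
 * unregister and reregister it.
 */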
static void
ccw_device_oper_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_OPER) : 0;
        if (!ret)
                /* Driver doesn't want device back. */
                ccw_device_do_unreg_rereg((void *)cdev);
        else
                wake_up(&cdev->private->wait_q);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);

        if (state != DEV_STATE_ONLINE)
                cio_disable_subchannel(sch);

        /* Reset device status. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        cdev->private->state = state;

        if (state == DEV_STATE_BOXED)
                CIO_DEBUG(KERN_WARNING, 2,
                          "Boxed device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->irq);

        if (cdev->private->flags.donotify) {
                cdev->private->flags.donotify = 0;
                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
                             (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);

        if (css_init_done && state != DEV_STATE_ONLINE)
                put_device(&cdev->dev);
}

/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        switch (err) {
        case 0:
                /* Start Path Group verification. */
                sch->vpm = 0;   /* Start with no path groups set. */
                cdev->private->state = DEV_STATE_VERIFY;
                ccw_device_verify_start(cdev);
                break;
        case -ETIME:            /* Sense path group id stopped by timeout. */
        case -EUSERS:           /* Device is reserved for someone else. */
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                break;
        default:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return ret;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        /*
         * We used to start here with a sense pgid to find out whether a device
         * is locked by someone else. Unfortunately, the sense pgid command
         * code has other meanings on devices predating the path grouping
         * algorithm, so we start with sense id and box the device after a
         * timeout (or if sense pgid during path verification detects the device
         * is locked, as may happen on newer devices).
         */
        cdev->private->flags.recog_done = 0;
        cdev->private->state = DEV_STATE_SENSE_ID;
        ccw_device_sense_id_start(cdev);
        return 0;
}

/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                ccw_device_recog_done(cdev, DEV_STATE_BOXED);
                break;
        case -ENODEV:
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                break;
        default:
                ccw_device_set_timeout(cdev, 3*HZ);
        }
}

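/*
 * Work function: all channel paths to the device are gone. Ask the subchannel
 * driver whether it wants to keep the device; unregister it if not, otherwise
 * disable the subchannel and put the device into the disconnected state.
 */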
static void
ccw_device_nopath_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        /* Extra sanity. */
        if (sch->lpm)
                return;
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
        if (!ret) {
                if (get_device(&sch->dev)) {
                        /* Driver doesn't want to keep device. */
                        cio_disable_subchannel(sch);
                        if (get_device(&cdev->dev)) {
                                PREPARE_WORK(&cdev->private->kick_work,
                                             ccw_device_call_sch_unregister,
                                             (void *)cdev);
                                queue_work(ccw_device_work,
                                           &cdev->private->kick_work);
                        }
                }
        } else {
                cio_disable_subchannel(sch);
                ccw_device_set_timeout(cdev, 0);
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
        }
}

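/*
 * Queue a no-path notification for the device attached to the subchannel;
 * the driver callback then runs from the notify workqueue.
 */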
void
device_call_nopath_notify(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        PREPARE_WORK(&cdev->private->kick_work,
                     ccw_device_nopath_notify, (void *)cdev);
        queue_work(ccw_device_notify_work, &cdev->private->kick_work);
}

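/*
 * Function called from device_pgid.c after path verification has completed.
 * Note that -EOPNOTSUPP deliberately falls through to the success case once
 * path grouping has been switched off.
 */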
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
        cdev->private->flags.doverify = 0;
        switch (err) {
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
        case 0:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_OFFLINE) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (css_init_done && !get_device(&cdev->dev))
                return -ENODEV;
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0) {
                /* Couldn't enable the subchannel for i/o. Sick device. */
                if (ret == -ENODEV)
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return ret;
        }
        /* Do we want to do path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state online immediately. */
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                return 0;
        }
        /* Do a SensePGID first. */
        cdev->private->state = DEV_STATE_SENSE_PGID;
        ccw_device_sense_pgid_start(cdev);
        return 0;
}

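/*
 * Function called from device_pgid.c after the path group disband has
 * completed.
 */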
void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cdev->private->state != DEV_STATE_ONLINE) {
                if (sch->schib.scsw.actl != 0)
                        return -EBUSY;
                return -EINVAL;
        }
        if (sch->schib.scsw.actl != 0)
                return -EBUSY;
        /* Are we doing path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state offline immediately. */
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                return 0;
        }
        /* Start Set Path Group commands. */
        cdev->private->state = DEV_STATE_DISBAND_PGID;
        ccw_device_disband_start(cdev);
        return 0;
}

/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        case -ENODEV:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        default:
                ccw_device_set_timeout(cdev, 3*HZ);
        }
}

/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}

/*
 * Handle not operational event while offline.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        cdev->private->state = DEV_STATE_NOT_OPER;
        sch = to_subchannel(cdev->dev.parent);
        device_unregister(&sch->dev);
        sch->schib.pmcw.intparm = 0;
        cio_modify(sch);
        wake_up(&cdev->private->wait_q);
}

/*
 * Handle not operational event while online.
 */
static void
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (sch->driver->notify &&
            sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
                ccw_device_set_timeout(cdev, 0);
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
                return;
        }
        cdev->private->state = DEV_STATE_NOT_OPER;
        cio_disable_subchannel(sch);
        if (sch->schib.scsw.actl != 0) {
                // FIXME: not-oper indication to device driver ?
                ccw_device_call_handler(cdev);
        }
        device_unregister(&sch->dev);
        sch->schib.pmcw.intparm = 0;
        cio_modify(sch);
        wake_up(&cdev->private->wait_q);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        if (!cdev->private->options.pgroup)
                return;
        if (cdev->private->state == DEV_STATE_W4SENSE) {
                cdev->private->flags.doverify = 1;
                return;
        }
        sch = to_subchannel(cdev->dev.parent);
        if (sch->schib.scsw.actl != 0 ||
            (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
                /*
                 * No final status yet or final status not yet delivered
                 * to the device driver. Can't do path verification now,
                 * delay until final status was delivered.
                 */
                cdev->private->flags.doverify = 1;
                return;
        }
        /* Device is idle, we can do the path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        ccw_device_verify_start(cdev);
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                return;
        }
        /* Accumulate status and find out if a basic sense is needed. */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
        } else if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                if (irb->scsw.cc == 1)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                return;
        }
        /* Add basic sense info to irb. */
        ccw_device_accumulate_basic_sense(cdev, irb);
        if (cdev->private->flags.dosense) {
                /* Another basic sense is needed. */
                ccw_device_do_sense(cdev, irb);
                return;
        }
        cdev->private->state = DEV_STATE_ONLINE;
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

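/*
 * Got the interrupt for a cio initiated clear while path verification is
 * pending: accumulate the status and try to start the delayed verification.
 */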
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                return;
        }
        /* Accumulate status. We don't do basic sense. */
        ccw_device_accumulate_irb(cdev, irb);
        /* Try to start delayed device verification. */
        ccw_device_online_verify(cdev, 0);
        /* Note: Don't call handler for cio initiated clear! */
}

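/*
 * Got an interrupt in the timeout-kill state: the cancel/halt/clear has
 * terminated the i/o. Report -ETIMEDOUT to the driver and either notify
 * about lost paths or start the delayed path verification.
 */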
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        /* OK, i/o is dead now. Call interrupt handler. */
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

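/*
 * Timeout in the timeout-kill state: retry cancel/halt/clear until the i/o
 * is terminated or the subchannel turns out to be not operational.
 */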
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        //FIXME: Can we get here?
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}

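/*
 * Got an interrupt while waiting for outstanding i/o to complete (state
 * wait4io, entered via device_set_waiting()).
 */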
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;
        struct subchannel *sch;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
                        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (cdev->handler)
                        cdev->handler(cdev, 0, irb);
                if (irb->scsw.cc == 1)
                        goto call_handler;
                return;
        }
        /*
         * Accumulate status and find out if a basic sense is needed.
         * This is fine since we have already adapted the lpm.
         */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
call_handler:
        /* Iff device is idle, reset timeout. */
        sch = to_subchannel(cdev->dev.parent);
        if (!stsch(sch->irq, &sch->schib))
                if (sch->schib.scsw.actl == 0)
                        ccw_device_set_timeout(cdev, 0);
        /* Call the handler. */
        ccw_device_call_handler(cdev);
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                ccw_device_online_verify(cdev, 0);
}

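/*
 * Timeout while waiting for outstanding i/o: try to kill the i/o with
 * cancel/halt/clear, then inform the driver with -ETIMEDOUT.
 */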
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* When the I/O has terminated, we have to start verification. */
        if (cdev->private->options.pgroup)
                cdev->private->flags.doverify = 1;
}

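/*
 * Interrupt or timeout while a steal lock (stlck) operation is in progress
 * on a boxed device: accumulate the status if there is any and wake up the
 * process waiting for the operation to finish.
 */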
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        switch (dev_event) {
        case DEV_EVENT_INTERRUPT:
                irb = (struct irb *) __LC_IRB;
                /* Check for unsolicited interrupt. */
                if (irb->scsw.stctl ==
                    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
                        /* FIXME: we should restart stlck here, but this
                         * is extremely unlikely ... */
                        goto out_wakeup;

                ccw_device_accumulate_irb(cdev, irb);
                /* We don't care about basic sense etc. */
                break;
        default: /* timeout */
                break;
        }
out_wakeup:
        wake_up(&cdev->private->wait_q);
}

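/*
 * Re-enable the subchannel and restart sense id on a disconnected device
 * (DEV_EVENT_INTERRUPT in the disconnected state, or via
 * device_trigger_reprobe()).
 */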
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
        ccw_device_sense_id_start(cdev);
}

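/*
 * Called from the subchannel code when a disconnected device may have become
 * accessible again: refresh the path masks and pmcw settings and restart
 * device recognition.
 */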
void
device_trigger_reprobe(struct subchannel *sch)
{
        struct ccw_device *cdev;
        unsigned long flags;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        spin_lock_irqsave(&sch->lock, flags);
        if (cdev->private->state != DEV_STATE_DISCONNECTED) {
                spin_unlock_irqrestore(&sch->lock, flags);
                return;
        }
        /* Update some values. */
        if (stsch(sch->irq, &sch->schib)) {
                spin_unlock_irqrestore(&sch->lock, flags);
                return;
        }
        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
        sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        ccw_device_start_id(cdev, 0);
        spin_unlock_irqrestore(&sch->lock, flags);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /*
         * An interrupt in state offline means a previous disable was not
         * successful. Try again.
         */
        cio_disable_subchannel(sch);
}

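/*
 * A channel measurement related set_schib is pending: retry it, return to
 * the online state and process the event there.
 */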
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}

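/*
 * The quiesce operation has finished: record whether the device is still
 * operational and wake up the waiter.
 */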
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_set_timeout(cdev, 0);
        if (dev_event == DEV_EVENT_NOTOPER)
                cdev->private->state = DEV_STATE_NOT_OPER;
        else
                cdev->private->state = DEV_STATE_OFFLINE;
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                cdev->private->state = DEV_STATE_OFFLINE;
                wake_up(&cdev->private->wait_q);
                break;
        case -ENODEV:
                cdev->private->state = DEV_STATE_NOT_OPER;
                wake_up(&cdev->private->wait_q);
                break;
        default:
                ccw_device_set_timeout(cdev, HZ/10);
        }
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action. This is called for state/event combinations that
 * must never occur.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
        printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
               cdev->private->state, dev_event);
        BUG();
}

/*
 * The device state machine.
 */
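/*
 * dev_fsm_event() dispatches an incoming event by indexing this table with
 * the device's current state and the event and calling the resulting
 * function.
 */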
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_ONLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_BOXED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_CLEAR_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_TIMEOUT_KILL] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
        },
        [DEV_STATE_WAIT4IO] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_wait4io_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_wait4io_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_wait4io_verify,
        },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
                [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
        },
};

/*
 * io_subchannel_irq is called for "real" interrupts or for status
 * pending conditions on msch.
 */
void
io_subchannel_irq(struct device *pdev)
{
        struct ccw_device *cdev;

        cdev = to_subchannel(pdev)->dev.driver_data;

        CIO_TRACE_EVENT(3, "IRQ");
        CIO_TRACE_EVENT(3, pdev->bus_id);

        dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);